From 443c852ecbb59ced428262675201c47ceeef2e65 Mon Sep 17 00:00:00 2001 From: Narendranath Gogineni Date: Thu, 5 Feb 2026 02:05:59 +0530 Subject: [PATCH 1/4] feat: update vendored OpenAI SDK from v2.7.1 to v2.16.0 --- .env.test.example | 38 + claude.md | 432 ++++ portkey_ai/_vendor/bin/openai | 2 +- .../INSTALLER | 0 .../METADATA | 43 +- .../_vendor/openai-2.16.0.dist-info/RECORD | 2010 +++++++++++++++++ .../REQUESTED | 0 .../WHEEL | 0 .../entry_points.txt | 0 .../licenses/LICENSE | 2 +- .../_vendor/openai-2.7.1.dist-info/RECORD | 1964 ---------------- portkey_ai/_vendor/openai/_base_client.py | 186 +- portkey_ai/_vendor/openai/_models.py | 70 +- portkey_ai/_vendor/openai/_streaming.py | 176 +- portkey_ai/_vendor/openai/_types.py | 14 +- portkey_ai/_vendor/openai/_utils/_sync.py | 34 +- portkey_ai/_vendor/openai/_version.py | 2 +- .../_vendor/openai/lib/_parsing/_responses.py | 5 + portkey_ai/_vendor/openai/lib/_realtime.py | 4 +- .../_vendor/openai/resources/audio/speech.py | 18 +- .../openai/resources/audio/transcriptions.py | 65 +- .../_vendor/openai/resources/batches.py | 22 +- .../openai/resources/beta/assistants.py | 199 +- .../resources/beta/threads/runs/runs.py | 102 +- .../resources/chat/completions/completions.py | 150 +- .../openai/resources/containers/containers.py | 8 + portkey_ai/_vendor/openai/resources/files.py | 34 +- portkey_ai/_vendor/openai/resources/images.py | 450 ++-- .../openai/resources/realtime/calls.py | 50 +- .../openai/resources/realtime/realtime.py | 8 +- .../resources/responses/input_tokens.py | 8 +- .../openai/resources/responses/responses.py | 384 +++- portkey_ai/_vendor/openai/resources/videos.py | 27 +- portkey_ai/_vendor/openai/types/__init__.py | 1 + .../types/audio/speech_create_params.py | 8 +- .../openai/types/audio/speech_model.py | 2 +- .../openai/types/audio/transcription.py | 10 + .../audio/transcription_create_params.py | 12 +- .../types/audio/transcription_diarized.py | 10 + .../audio/transcription_diarized_segment.py | 
2 + .../audio/transcription_text_delta_event.py | 5 + .../audio/transcription_text_done_event.py | 9 + .../audio/transcription_text_segment_event.py | 4 + .../types/audio/transcription_verbose.py | 6 + .../_vendor/openai/types/audio_model.py | 8 +- .../auto_file_chunking_strategy_param.py | 5 + .../openai/types/batch_create_params.py | 16 +- .../openai/types/batch_request_counts.py | 2 + .../_vendor/openai/types/batch_usage.py | 10 + .../_vendor/openai/types/beta/assistant.py | 7 + .../types/beta/assistant_create_params.py | 25 +- .../types/beta/assistant_stream_event.py | 96 + .../types/beta/assistant_tool_choice.py | 5 + .../types/beta/assistant_tool_choice_param.py | 5 + .../types/beta/assistant_update_params.py | 22 +- .../openai/types/beta/chatkit/chat_session.py | 2 + .../chat_session_automatic_thread_titling.py | 2 + .../chat_session_chatkit_configuration.py | 2 + ...hat_session_chatkit_configuration_param.py | 17 + .../chat_session_expires_after_param.py | 2 + .../beta/chatkit/chat_session_file_upload.py | 2 + .../beta/chatkit/chat_session_history.py | 2 + .../beta/chatkit/chat_session_rate_limits.py | 2 + .../chatkit/chat_session_rate_limits_param.py | 2 + .../chatkit/chat_session_workflow_param.py | 7 + .../types/beta/chatkit/chatkit_attachment.py | 2 + .../chatkit/chatkit_response_output_text.py | 10 + .../types/beta/chatkit/chatkit_thread.py | 8 + .../chatkit_thread_assistant_message_item.py | 2 + .../beta/chatkit/chatkit_thread_item_list.py | 10 + .../chatkit_thread_user_message_item.py | 10 + .../types/beta/chatkit/chatkit_widget_item.py | 2 + .../beta/chatkit/thread_delete_response.py | 2 + .../openai/types/beta/chatkit_workflow.py | 4 + .../openai/types/beta/file_search_tool.py | 9 + .../types/beta/file_search_tool_param.py | 9 + .../_vendor/openai/types/beta/thread.py | 8 + .../beta/thread_create_and_run_params.py | 25 + .../openai/types/beta/thread_create_params.py | 9 + .../openai/types/beta/thread_update_params.py | 4 + 
.../beta/threads/file_citation_annotation.py | 4 + .../threads/file_citation_delta_annotation.py | 4 + .../beta/threads/file_path_annotation.py | 4 + .../threads/file_path_delta_annotation.py | 4 + .../beta/threads/image_file_content_block.py | 4 + .../threads/image_file_content_block_param.py | 4 + .../beta/threads/image_file_delta_block.py | 4 + .../beta/threads/image_url_content_block.py | 2 + .../threads/image_url_content_block_param.py | 2 + .../beta/threads/image_url_delta_block.py | 2 + .../openai/types/beta/threads/message.py | 6 + .../types/beta/threads/message_delta.py | 2 + .../types/beta/threads/message_delta_event.py | 5 + .../beta/threads/refusal_content_block.py | 2 + .../types/beta/threads/refusal_delta_block.py | 2 + .../required_action_function_tool_call.py | 4 + .../_vendor/openai/types/beta/threads/run.py | 28 + .../types/beta/threads/run_create_params.py | 20 +- .../threads/runs/code_interpreter_logs.py | 2 + .../runs/code_interpreter_tool_call.py | 6 + .../runs/code_interpreter_tool_call_delta.py | 4 + .../threads/runs/file_search_tool_call.py | 6 + .../beta/threads/runs/function_tool_call.py | 2 + .../threads/runs/function_tool_call_delta.py | 2 + .../runs/message_creation_step_details.py | 2 + .../types/beta/threads/runs/run_step.py | 12 + .../types/beta/threads/runs/run_step_delta.py | 2 + .../beta/threads/runs/run_step_delta_event.py | 5 + .../runs/run_step_delta_message_delta.py | 2 + .../threads/runs/tool_call_delta_object.py | 2 + .../threads/runs/tool_calls_step_details.py | 2 + .../types/beta/threads/text_content_block.py | 2 + .../beta/threads/text_content_block_param.py | 2 + .../types/beta/threads/text_delta_block.py | 2 + .../openai/types/chat/chat_completion.py | 6 + ...at_completion_allowed_tool_choice_param.py | 2 + .../chat_completion_allowed_tools_param.py | 2 + ...chat_completion_assistant_message_param.py | 12 + .../types/chat/chat_completion_audio.py | 5 + .../types/chat/chat_completion_audio_param.py | 10 +- 
.../types/chat/chat_completion_chunk.py | 15 + .../chat_completion_content_part_image.py | 2 + ...hat_completion_content_part_image_param.py | 2 + ...mpletion_content_part_input_audio_param.py | 2 + .../chat_completion_content_part_param.py | 4 + .../chat/chat_completion_content_part_text.py | 4 + ...chat_completion_content_part_text_param.py | 4 + .../chat/chat_completion_custom_tool_param.py | 10 + ...chat_completion_developer_message_param.py | 6 + ...t_completion_function_call_option_param.py | 4 + .../chat/chat_completion_function_tool.py | 2 + .../chat_completion_function_tool_param.py | 2 + .../types/chat/chat_completion_message.py | 11 + ...hat_completion_message_custom_tool_call.py | 4 + ...mpletion_message_custom_tool_call_param.py | 4 + ...t_completion_message_function_tool_call.py | 4 + ...letion_message_function_tool_call_param.py | 4 + ...mpletion_named_tool_choice_custom_param.py | 5 + ...chat_completion_named_tool_choice_param.py | 5 + ...hat_completion_prediction_content_param.py | 5 + .../chat/chat_completion_store_message.py | 2 + .../chat_completion_stream_options_param.py | 2 + .../chat_completion_system_message_param.py | 6 + .../chat_completion_user_message_param.py | 5 + .../types/chat/completion_create_params.py | 32 +- portkey_ai/_vendor/openai/types/completion.py | 5 + .../_vendor/openai/types/completion_usage.py | 6 + .../openai/types/container_create_params.py | 5 + .../openai/types/container_create_response.py | 12 + .../openai/types/container_list_response.py | 12 + .../types/container_retrieve_response.py | 12 + .../computer_screenshot_content.py | 2 + .../types/conversations/conversation_item.py | 26 + .../conversations/conversation_item_list.py | 2 + .../openai/types/conversations/message.py | 4 + .../conversations/summary_text_content.py | 2 + .../types/conversations/text_content.py | 2 + .../openai/types/create_embedding_response.py | 2 + portkey_ai/_vendor/openai/types/embedding.py | 2 + .../openai/types/eval_create_params.py | 
46 +- .../openai/types/eval_create_response.py | 19 + .../types/eval_custom_data_source_config.py | 7 + .../openai/types/eval_list_response.py | 19 + .../openai/types/eval_retrieve_response.py | 19 + ...l_stored_completions_data_source_config.py | 2 + .../openai/types/eval_update_response.py | 19 + ...create_eval_completions_run_data_source.py | 42 +- ..._eval_completions_run_data_source_param.py | 42 +- .../create_eval_jsonl_run_data_source.py | 4 + ...create_eval_jsonl_run_data_source_param.py | 4 + .../openai/types/evals/eval_api_error.py | 2 + .../openai/types/evals/run_cancel_response.py | 71 +- .../openai/types/evals/run_create_params.py | 67 +- .../openai/types/evals/run_create_response.py | 71 +- .../openai/types/evals/run_list_response.py | 71 +- .../types/evals/run_retrieve_response.py | 71 +- .../evals/runs/output_item_list_response.py | 10 + .../runs/output_item_retrieve_response.py | 10 + .../openai/types/file_create_params.py | 19 +- .../_vendor/openai/types/file_object.py | 2 + .../checkpoints/permission_create_response.py | 4 + .../permission_retrieve_response.py | 4 + .../types/fine_tuning/dpo_hyperparameters.py | 2 + .../fine_tuning/dpo_hyperparameters_param.py | 2 + .../openai/types/fine_tuning/dpo_method.py | 2 + .../types/fine_tuning/dpo_method_param.py | 2 + .../types/fine_tuning/fine_tuning_job.py | 15 + .../fine_tuning/fine_tuning_job_event.py | 2 + .../fine_tuning_job_wandb_integration.py | 7 + .../types/fine_tuning/job_create_params.py | 14 + .../jobs/fine_tuning_job_checkpoint.py | 6 + .../reinforcement_hyperparameters.py | 2 + .../reinforcement_hyperparameters_param.py | 2 + .../types/fine_tuning/reinforcement_method.py | 2 + .../fine_tuning/reinforcement_method_param.py | 2 + .../fine_tuning/supervised_hyperparameters.py | 2 + .../supervised_hyperparameters_param.py | 2 + .../types/fine_tuning/supervised_method.py | 2 + .../fine_tuning/supervised_method_param.py | 2 + .../_vendor/openai/types/graders/__init__.py | 2 + 
.../openai/types/graders/grader_inputs.py | 43 + .../types/graders/grader_inputs_param.py | 53 + .../types/graders/label_model_grader.py | 26 +- .../types/graders/label_model_grader_param.py | 26 +- .../openai/types/graders/multi_grader.py | 4 + .../types/graders/multi_grader_param.py | 4 + .../openai/types/graders/python_grader.py | 2 + .../types/graders/python_grader_param.py | 2 + .../types/graders/score_model_grader.py | 48 +- .../types/graders/score_model_grader_param.py | 48 +- .../types/graders/string_check_grader.py | 4 + .../graders/string_check_grader_param.py | 4 + .../types/graders/text_similarity_grader.py | 2 + .../graders/text_similarity_grader_param.py | 2 + portkey_ai/_vendor/openai/types/image.py | 10 +- .../types/image_edit_completed_event.py | 13 +- .../_vendor/openai/types/image_edit_params.py | 31 +- .../types/image_edit_partial_image_event.py | 2 + .../openai/types/image_gen_completed_event.py | 13 +- .../types/image_gen_partial_image_event.py | 2 + .../openai/types/image_generate_params.py | 40 +- .../_vendor/openai/types/image_model.py | 2 +- .../_vendor/openai/types/images_response.py | 21 +- portkey_ai/_vendor/openai/types/model.py | 2 + portkey_ai/_vendor/openai/types/moderation.py | 8 + .../types/moderation_create_response.py | 2 + .../types/moderation_image_url_input_param.py | 4 + .../types/moderation_text_input_param.py | 2 + .../other_file_chunking_strategy_object.py | 5 + .../_vendor/openai/types/realtime/__init__.py | 3 + .../types/realtime/audio_transcription.py | 19 +- .../realtime/audio_transcription_param.py | 17 +- .../types/realtime/call_accept_params.py | 25 +- .../realtime/client_secret_create_params.py | 8 + .../realtime/client_secret_create_response.py | 2 + .../realtime/conversation_created_event.py | 4 + .../types/realtime/conversation_item_added.py | 10 + .../conversation_item_create_event.py | 22 +- .../conversation_item_create_event_param.py | 22 +- .../conversation_item_created_event.py | 13 + 
.../conversation_item_delete_event.py | 8 + .../conversation_item_delete_event_param.py | 8 + .../conversation_item_deleted_event.py | 6 + .../types/realtime/conversation_item_done.py | 5 + ...put_audio_transcription_completed_event.py | 19 + ...m_input_audio_transcription_delta_event.py | 4 + ..._input_audio_transcription_failed_event.py | 8 + ..._item_input_audio_transcription_segment.py | 2 + .../conversation_item_retrieve_event.py | 7 + .../conversation_item_retrieve_event_param.py | 7 + .../conversation_item_truncate_event.py | 15 + .../conversation_item_truncate_event_param.py | 15 + .../conversation_item_truncated_event.py | 9 + .../input_audio_buffer_append_event.py | 17 + .../input_audio_buffer_append_event_param.py | 17 + .../input_audio_buffer_clear_event.py | 6 + .../input_audio_buffer_clear_event_param.py | 6 + .../input_audio_buffer_cleared_event.py | 5 + .../input_audio_buffer_commit_event.py | 6 + .../input_audio_buffer_commit_event_param.py | 6 + .../input_audio_buffer_committed_event.py | 7 + ..._audio_buffer_dtmf_event_received_event.py | 26 + ...input_audio_buffer_speech_started_event.py | 13 + ...input_audio_buffer_speech_stopped_event.py | 6 + .../input_audio_buffer_timeout_triggered.py | 17 + .../types/realtime/log_prob_properties.py | 2 + .../realtime/mcp_list_tools_completed.py | 2 + .../types/realtime/mcp_list_tools_failed.py | 2 + .../realtime/mcp_list_tools_in_progress.py | 2 + .../output_audio_buffer_clear_event.py | 9 + .../output_audio_buffer_clear_event_param.py | 9 + .../realtime/rate_limits_updated_event.py | 8 + .../types/realtime/realtime_audio_config.py | 2 + .../realtime/realtime_audio_config_input.py | 7 + .../realtime_audio_config_input_param.py | 7 + .../realtime/realtime_audio_config_output.py | 6 +- .../realtime_audio_config_output_param.py | 6 +- .../realtime/realtime_audio_config_param.py | 2 + .../types/realtime/realtime_audio_formats.py | 6 + .../realtime/realtime_audio_formats_param.py | 6 + 
.../realtime_audio_input_turn_detection.py | 27 +- ...altime_audio_input_turn_detection_param.py | 27 +- ...ime_conversation_item_assistant_message.py | 2 + ...nversation_item_assistant_message_param.py | 2 + ...ealtime_conversation_item_function_call.py | 2 + ..._conversation_item_function_call_output.py | 2 + ...rsation_item_function_call_output_param.py | 2 + ...e_conversation_item_function_call_param.py | 2 + ...altime_conversation_item_system_message.py | 4 + ..._conversation_item_system_message_param.py | 4 + ...realtime_conversation_item_user_message.py | 2 + ...me_conversation_item_user_message_param.py | 2 + .../openai/types/realtime/realtime_error.py | 2 + .../types/realtime/realtime_error_event.py | 6 + .../realtime/realtime_mcp_approval_request.py | 2 + .../realtime_mcp_approval_request_param.py | 2 + .../realtime_mcp_approval_response.py | 2 + .../realtime_mcp_approval_response_param.py | 2 + .../types/realtime/realtime_mcp_list_tools.py | 4 + .../realtime/realtime_mcp_list_tools_param.py | 4 + .../types/realtime/realtime_mcp_tool_call.py | 2 + .../realtime/realtime_mcp_tool_call_param.py | 2 + .../types/realtime/realtime_response.py | 4 + .../realtime_response_create_audio_output.py | 9 +- ...time_response_create_audio_output_param.py | 9 +- .../realtime_response_create_mcp_tool.py | 18 + ...realtime_response_create_mcp_tool_param.py | 18 + .../realtime_response_create_params.py | 2 + .../realtime_response_create_params_param.py | 2 + .../realtime/realtime_response_status.py | 7 + .../types/realtime/realtime_response_usage.py | 8 + ...time_response_usage_input_token_details.py | 7 + ...ime_response_usage_output_token_details.py | 2 + .../types/realtime/realtime_server_event.py | 30 + .../realtime_session_client_secret.py | 2 + .../realtime_session_create_request.py | 27 +- .../realtime_session_create_request_param.py | 27 +- .../realtime_session_create_response.py | 87 +- .../realtime/realtime_tools_config_param.py | 18 + 
.../realtime/realtime_tools_config_union.py | 18 + .../realtime_tools_config_union_param.py | 18 + .../types/realtime/realtime_tracing_config.py | 2 + .../realtime/realtime_tracing_config_param.py | 2 + .../realtime_transcription_session_audio.py | 2 + ...ltime_transcription_session_audio_input.py | 7 + ...transcription_session_audio_input_param.py | 7 + ...tion_session_audio_input_turn_detection.py | 27 +- ...ession_audio_input_turn_detection_param.py | 27 +- ...ltime_transcription_session_audio_param.py | 2 + ...me_transcription_session_create_request.py | 2 + ...nscription_session_create_request_param.py | 2 + ...e_transcription_session_create_response.py | 6 + ...me_transcription_session_turn_detection.py | 7 + .../realtime_truncation_retention_ratio.py | 9 + ...altime_truncation_retention_ratio_param.py | 9 + .../realtime/response_audio_delta_event.py | 2 + .../realtime/response_audio_done_event.py | 6 + .../response_audio_transcript_delta_event.py | 2 + .../response_audio_transcript_done_event.py | 6 + .../types/realtime/response_cancel_event.py | 9 + .../realtime/response_cancel_event_param.py | 9 + .../response_content_part_added_event.py | 7 + .../response_content_part_done_event.py | 7 + .../types/realtime/response_create_event.py | 28 + .../realtime/response_create_event_param.py | 28 + .../types/realtime/response_created_event.py | 6 + .../types/realtime/response_done_event.py | 13 + ...nse_function_call_arguments_delta_event.py | 2 + ...onse_function_call_arguments_done_event.py | 5 + .../response_mcp_call_arguments_delta.py | 2 + .../response_mcp_call_arguments_done.py | 2 + .../realtime/response_mcp_call_completed.py | 2 + .../realtime/response_mcp_call_failed.py | 2 + .../realtime/response_mcp_call_in_progress.py | 2 + .../response_output_item_added_event.py | 2 + .../response_output_item_done_event.py | 6 + .../realtime/response_text_delta_event.py | 2 + .../realtime/response_text_done_event.py | 6 + .../types/realtime/session_created_event.py | 7 
+ .../types/realtime/session_update_event.py | 12 + .../realtime/session_update_event_param.py | 12 + .../types/realtime/session_updated_event.py | 5 + .../openai/types/responses/__init__.py | 25 + .../types/responses/apply_patch_tool.py | 14 + .../types/responses/apply_patch_tool_param.py | 14 + .../types/responses/compacted_response.py | 33 + .../openai/types/responses/computer_tool.py | 5 + .../types/responses/computer_tool_param.py | 5 + .../openai/types/responses/custom_tool.py | 5 + .../types/responses/custom_tool_param.py | 5 + .../types/responses/easy_input_message.py | 8 + .../responses/easy_input_message_param.py | 8 + .../types/responses/file_search_tool.py | 11 + .../types/responses/file_search_tool_param.py | 11 + .../types/responses/function_shell_tool.py | 14 + .../responses/function_shell_tool_param.py | 14 + .../openai/types/responses/function_tool.py | 5 + .../types/responses/function_tool_param.py | 5 + .../responses/input_token_count_params.py | 18 +- .../openai/types/responses/parsed_response.py | 11 +- .../openai/types/responses/response.py | 38 +- .../response_apply_patch_tool_call.py | 84 + .../response_apply_patch_tool_call_output.py | 33 + .../responses/response_audio_delta_event.py | 2 + .../responses/response_audio_done_event.py | 2 + .../response_audio_transcript_delta_event.py | 2 + .../response_audio_transcript_done_event.py | 2 + ..._code_interpreter_call_code_delta_event.py | 2 + ...e_code_interpreter_call_code_done_event.py | 2 + ...e_code_interpreter_call_completed_event.py | 2 + ...code_interpreter_call_in_progress_event.py | 2 + ...ode_interpreter_call_interpreting_event.py | 2 + .../response_code_interpreter_tool_call.py | 6 + ...sponse_code_interpreter_tool_call_param.py | 6 + .../responses/response_compact_params.py | 133 ++ .../responses/response_compaction_item.py | 26 + .../response_compaction_item_param.py | 23 + .../response_compaction_item_param_param.py | 23 + .../responses/response_completed_event.py | 2 + 
.../responses/response_computer_tool_call.py | 28 + ...response_computer_tool_call_output_item.py | 2 + ...se_computer_tool_call_output_screenshot.py | 2 + ...puter_tool_call_output_screenshot_param.py | 2 + .../response_computer_tool_call_param.py | 28 + .../response_content_part_added_event.py | 4 + .../response_content_part_done_event.py | 4 + .../responses/response_conversation_param.py | 2 + .../types/responses/response_create_params.py | 14 + .../types/responses/response_created_event.py | 2 + .../responses/response_custom_tool_call.py | 2 + ...onse_custom_tool_call_input_delta_event.py | 2 + ...ponse_custom_tool_call_input_done_event.py | 2 + .../response_custom_tool_call_output.py | 2 + .../response_custom_tool_call_output_param.py | 2 + .../response_custom_tool_call_param.py | 2 + .../openai/types/responses/response_error.py | 2 + .../types/responses/response_error_event.py | 2 + .../types/responses/response_failed_event.py | 2 + ...sponse_file_search_call_completed_event.py | 2 + ...onse_file_search_call_in_progress_event.py | 2 + ...sponse_file_search_call_searching_event.py | 2 + .../response_file_search_tool_call.py | 6 + .../response_file_search_tool_call_param.py | 6 + ...response_format_text_json_schema_config.py | 6 + ...se_format_text_json_schema_config_param.py | 6 + ...nse_function_call_arguments_delta_event.py | 2 + ...onse_function_call_arguments_done_event.py | 2 + ...onse_function_shell_call_output_content.py | 42 + ...unction_shell_call_output_content_param.py | 41 + .../response_function_shell_tool_call.py | 48 + ...esponse_function_shell_tool_call_output.py | 88 + .../responses/response_function_tool_call.py | 6 + .../response_function_tool_call_item.py | 6 + .../response_function_tool_call_param.py | 6 + .../responses/response_function_web_search.py | 19 +- .../response_function_web_search_param.py | 21 +- ...response_image_gen_call_completed_event.py | 4 + ...esponse_image_gen_call_generating_event.py | 4 + 
...sponse_image_gen_call_in_progress_event.py | 2 + ...onse_image_gen_call_partial_image_event.py | 2 + .../responses/response_in_progress_event.py | 2 + .../responses/response_incomplete_event.py | 2 + .../types/responses/response_input_audio.py | 2 + .../responses/response_input_audio_param.py | 2 + .../types/responses/response_input_file.py | 2 + .../responses/response_input_file_content.py | 2 + .../response_input_file_content_param.py | 2 + .../responses/response_input_file_param.py | 2 + .../types/responses/response_input_image.py | 5 + .../responses/response_input_image_content.py | 5 + .../response_input_image_content_param.py | 5 + .../responses/response_input_image_param.py | 5 + .../types/responses/response_input_item.py | 211 ++ .../responses/response_input_item_param.py | 210 ++ .../types/responses/response_input_param.py | 210 ++ .../types/responses/response_input_text.py | 2 + .../responses/response_input_text_content.py | 2 + .../response_input_text_content_param.py | 2 + .../responses/response_input_text_param.py | 2 + .../openai/types/responses/response_item.py | 26 + .../types/responses/response_item_list.py | 2 + ...response_mcp_call_arguments_delta_event.py | 4 + .../response_mcp_call_arguments_done_event.py | 2 + .../response_mcp_call_completed_event.py | 2 + .../response_mcp_call_failed_event.py | 2 + .../response_mcp_call_in_progress_event.py | 2 + ...response_mcp_list_tools_completed_event.py | 2 + .../response_mcp_list_tools_failed_event.py | 2 + ...sponse_mcp_list_tools_in_progress_event.py | 4 + .../types/responses/response_output_item.py | 24 + .../response_output_item_added_event.py | 2 + .../response_output_item_done_event.py | 2 + .../responses/response_output_message.py | 2 + .../response_output_message_param.py | 2 + .../responses/response_output_refusal.py | 2 + .../response_output_refusal_param.py | 2 + .../types/responses/response_output_text.py | 14 + ...onse_output_text_annotation_added_event.py | 2 + 
.../responses/response_output_text_param.py | 14 + .../openai/types/responses/response_prompt.py | 5 + .../types/responses/response_prompt_param.py | 5 + .../types/responses/response_queued_event.py | 2 + .../responses/response_reasoning_item.py | 11 + .../response_reasoning_item_param.py | 11 + ...onse_reasoning_summary_part_added_event.py | 4 + ...ponse_reasoning_summary_part_done_event.py | 4 + ...onse_reasoning_summary_text_delta_event.py | 2 + ...ponse_reasoning_summary_text_done_event.py | 2 + .../response_reasoning_text_delta_event.py | 2 + .../response_reasoning_text_done_event.py | 2 + .../responses/response_refusal_delta_event.py | 2 + .../responses/response_refusal_done_event.py | 2 + .../types/responses/response_text_config.py | 8 + .../responses/response_text_config_param.py | 8 + .../responses/response_text_delta_event.py | 8 + .../responses/response_text_done_event.py | 8 + .../openai/types/responses/response_usage.py | 9 + ...esponse_web_search_call_completed_event.py | 2 + ...ponse_web_search_call_in_progress_event.py | 2 + ...esponse_web_search_call_searching_event.py | 2 + .../_vendor/openai/types/responses/tool.py | 44 +- .../types/responses/tool_choice_allowed.py | 2 + .../responses/tool_choice_allowed_param.py | 2 + .../responses/tool_choice_apply_patch.py | 14 + .../tool_choice_apply_patch_param.py | 14 + .../types/responses/tool_choice_custom.py | 2 + .../responses/tool_choice_custom_param.py | 2 + .../types/responses/tool_choice_function.py | 2 + .../responses/tool_choice_function_param.py | 2 + .../openai/types/responses/tool_choice_mcp.py | 4 + .../types/responses/tool_choice_mcp_param.py | 4 + .../types/responses/tool_choice_shell.py | 14 + .../responses/tool_choice_shell_param.py | 14 + .../types/responses/tool_choice_types.py | 5 + .../responses/tool_choice_types_param.py | 5 + .../openai/types/responses/tool_param.py | 44 +- .../responses/web_search_preview_tool.py | 7 + .../web_search_preview_tool_param.py | 7 + 
.../openai/types/responses/web_search_tool.py | 10 + .../types/responses/web_search_tool_param.py | 10 + .../_vendor/openai/types/shared/all_models.py | 1 + .../_vendor/openai/types/shared/chat_model.py | 10 + .../openai/types/shared/comparison_filter.py | 4 + .../openai/types/shared/compound_filter.py | 2 + .../types/shared/custom_tool_input_format.py | 4 + .../_vendor/openai/types/shared/reasoning.py | 26 +- .../openai/types/shared/reasoning_effort.py | 2 +- .../shared/response_format_json_object.py | 8 + .../shared/response_format_json_schema.py | 8 + .../types/shared/response_format_text.py | 2 + .../shared/response_format_text_grammar.py | 5 + .../shared/response_format_text_python.py | 6 + .../openai/types/shared/responses_model.py | 1 + .../openai/types/shared_params/chat_model.py | 10 + .../types/shared_params/comparison_filter.py | 4 + .../types/shared_params/compound_filter.py | 2 + .../shared_params/custom_tool_input_format.py | 4 + .../openai/types/shared_params/reasoning.py | 26 +- .../types/shared_params/reasoning_effort.py | 2 +- .../response_format_json_object.py | 8 + .../response_format_json_schema.py | 8 + .../shared_params/response_format_text.py | 2 + .../types/shared_params/responses_model.py | 1 + ...tic_file_chunking_strategy_object_param.py | 2 + portkey_ai/_vendor/openai/types/upload.py | 2 + .../openai/types/upload_create_params.py | 5 + .../openai/types/uploads/upload_part.py | 2 + .../_vendor/openai/types/vector_store.py | 6 + .../types/vector_store_create_params.py | 2 + .../types/vector_store_search_params.py | 2 + .../types/vector_store_update_params.py | 2 + .../types/vector_stores/vector_store_file.py | 7 + .../vector_stores/vector_store_file_batch.py | 2 + portkey_ai/_vendor/openai/types/video.py | 2 + .../openai/types/video_create_error.py | 4 + .../openai/types/video_create_params.py | 16 +- .../openai/types/video_delete_response.py | 2 + .../_vendor/openai/types/video_model.py | 5 +- 
.../_vendor/openai/types/video_model_param.py | 12 + .../webhooks/batch_cancelled_webhook_event.py | 4 + .../webhooks/batch_completed_webhook_event.py | 4 + .../webhooks/batch_expired_webhook_event.py | 4 + .../webhooks/batch_failed_webhook_event.py | 4 + .../eval_run_canceled_webhook_event.py | 4 + .../webhooks/eval_run_failed_webhook_event.py | 4 + .../eval_run_succeeded_webhook_event.py | 4 + ...fine_tuning_job_cancelled_webhook_event.py | 4 + .../fine_tuning_job_failed_webhook_event.py | 4 + ...fine_tuning_job_succeeded_webhook_event.py | 4 + .../realtime_call_incoming_webhook_event.py | 6 + .../response_cancelled_webhook_event.py | 4 + .../response_completed_webhook_event.py | 4 + .../webhooks/response_failed_webhook_event.py | 4 + .../response_incomplete_webhook_event.py | 4 + .../api_resources/apis/chat_complete.py | 16 + tests/models.json | 87 - vendorize.toml | 2 +- 577 files changed, 9151 insertions(+), 3039 deletions(-) create mode 100644 .env.test.example create mode 100644 claude.md rename portkey_ai/_vendor/{openai-2.7.1.dist-info => openai-2.16.0.dist-info}/INSTALLER (100%) rename portkey_ai/_vendor/{openai-2.7.1.dist-info => openai-2.16.0.dist-info}/METADATA (97%) create mode 100644 portkey_ai/_vendor/openai-2.16.0.dist-info/RECORD rename portkey_ai/_vendor/{openai-2.7.1.dist-info => openai-2.16.0.dist-info}/REQUESTED (100%) rename portkey_ai/_vendor/{openai-2.7.1.dist-info => openai-2.16.0.dist-info}/WHEEL (100%) rename portkey_ai/_vendor/{openai-2.7.1.dist-info => openai-2.16.0.dist-info}/entry_points.txt (100%) rename portkey_ai/_vendor/{openai-2.7.1.dist-info => openai-2.16.0.dist-info}/licenses/LICENSE (99%) delete mode 100644 portkey_ai/_vendor/openai-2.7.1.dist-info/RECORD create mode 100644 portkey_ai/_vendor/openai/types/graders/grader_inputs.py create mode 100644 portkey_ai/_vendor/openai/types/graders/grader_inputs_param.py create mode 100644 portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py create 
mode 100644 portkey_ai/_vendor/openai/types/responses/apply_patch_tool.py create mode 100644 portkey_ai/_vendor/openai/types/responses/apply_patch_tool_param.py create mode 100644 portkey_ai/_vendor/openai/types/responses/compacted_response.py create mode 100644 portkey_ai/_vendor/openai/types/responses/function_shell_tool.py create mode 100644 portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call_output.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_compact_params.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_compaction_item.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_compaction_item_param.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_compaction_item_param_param.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content_param.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py create mode 100644 portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call_output.py create mode 100644 portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch.py create mode 100644 portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch_param.py create mode 100644 portkey_ai/_vendor/openai/types/responses/tool_choice_shell.py create mode 100644 portkey_ai/_vendor/openai/types/responses/tool_choice_shell_param.py create mode 100644 portkey_ai/_vendor/openai/types/video_model_param.py diff --git a/.env.test.example b/.env.test.example new file mode 100644 index 00000000..00c0268e --- /dev/null +++ b/.env.test.example @@ -0,0 +1,38 @@ +# Portkey 
Configuration +# Base URL for the Portkey API +PORTKEY_BASE_URL= + +# Portkey API Key - Required for all tests +PORTKEY_API_KEY= + +# Virtual Keys - These are configured in your Portkey dashboard +# OpenAI virtual key - Required for most tests +OPENAI_VIRTUAL_KEY= + +# Optional: Additional virtual keys for testing different providers +# These are used in loadbalance and fallback test configurations +# ANYSCALE_VIRTUAL_KEY= +# AZURE_VIRTUAL_KEY= +# COHERE_VIRTUAL_KEY= +# ANTHROPIC_VIRTUAL_KEY= +# STABILITY_VIRTUAL_KEY= + +# Direct Provider API Keys +# These are used when tests directly access provider APIs (not through Portkey virtual keys) +# Required for tests that use models.json configuration + +# OpenAI API Key - Required for OpenAI model tests +OPENAI_API_KEY= + +# Anyscale API Key - Required for Anyscale model tests (Llama, Mistral models) +ANYSCALE_API_KEY= + +# Anthropic API Key - Required for Anthropic/Claude model tests +ANTHROPIC_API_KEY= + +# Cohere API Key - Required for Cohere model tests +COHERE_API_KEY= + +# LlamaIndex API Key - Required for LlamaIndex integration tests +# Note: This may be the same as OPENAI_API_KEY for OpenAI embeddings +LLAMA_INDEX_API_KEY= diff --git a/claude.md b/claude.md new file mode 100644 index 00000000..b06b367e --- /dev/null +++ b/claude.md @@ -0,0 +1,432 @@ +# Portkey Python SDK - Architecture Guide + +## Overview + +Portkey is an AI Gateway that unifies LLM signatures. This SDK wraps the OpenAI Python SDK using a **vendoring approach** to avoid conflicts with user-installed OpenAI packages. 
+ +## Directory Structure + +``` +portkey-python-sdk/ +├── portkey_ai/ +│ ├── __init__.py # Public API - exports all classes +│ ├── version.py # SDK version +│ ├── _vendor/ # Vendored dependencies +│ │ └── openai/ # Vendored OpenAI SDK (e.g., v2.16.0) +│ ├── api_resources/ # Core SDK implementation +│ │ ├── __init__.py # Internal exports +│ │ ├── client.py # Portkey & AsyncPortkey clients +│ │ ├── base_client.py # APIClient & AsyncAPIClient base classes +│ │ ├── apis/ # API endpoint implementations +│ │ │ ├── api_resource.py # Base class for all resources +│ │ │ ├── chat_complete.py # OpenAI wrapper example +│ │ │ ├── containers.py # OpenAI wrapper example +│ │ │ ├── feedback.py # Portkey-specific endpoint +│ │ │ └── ... +│ │ └── types/ # Response type definitions +│ │ ├── chat_complete_type.py +│ │ ├── containers_type.py +│ │ └── ... +│ ├── integrations/ # Third-party integrations (ADK, Strands) +│ ├── langchain/ # LangChain integration +│ └── llamaindex/ # LlamaIndex integration +├── vendorize.toml # Vendoring configuration +└── tests/ # Test suite +``` + +## Vendoring Approach + +The SDK vendors the OpenAI Python SDK to avoid version conflicts: + +```toml +# vendorize.toml +target = "portkey_ai/_vendor" +packages = ["openai==2.16.0"] +``` + +### Key Vendoring Quirks + +#### 1. Import Paths +Two styles are used (both are valid): +```python +# Absolute import (preferred for types used in type hints) +from portkey_ai._vendor.openai import OpenAI, AsyncOpenAI +from portkey_ai._vendor.openai._types import Omit, omit + +# Relative import (used within api_resources/) +from ..._vendor.openai._types import Omit, omit, FileTypes +``` + +#### 2. Placeholder API Key +The vendored OpenAI client requires an API key, but Portkey uses headers for auth: +```python +# global_constants.py +OPEN_AI_API_KEY = "OPENAI_API_KEY" # Just a placeholder string!
+ +# client.py +self.openai_client = OpenAI( + api_key=OPEN_AI_API_KEY, # Satisfies OpenAI client requirement + base_url=self.base_url, # Points to Portkey gateway + default_headers=self.allHeaders, # Actual auth via x-portkey-api-key header +) +``` + +#### 3. Linting Exclusions +The vendored code is excluded from all linters/formatters: +```toml +# pyproject.toml +[tool.mypy] +exclude = ['portkey_ai/_vendor', 'tests'] + +[[tool.mypy.overrides]] +module = 'portkey_ai._vendor.*' +ignore_errors = true + +[tool.black] +force-exclude = '''(portkey_ai/_vendor)/''' + +[tool.ruff] +exclude = ["portkey_ai/_vendor", "tests"] +``` + +#### 4. Type Reuse vs Redefinition +**Response types** - OpenAI types are imported directly for nested types: +```python +# types/response_type.py +from portkey_ai._vendor.openai.types.responses.response import ToolChoice +from portkey_ai._vendor.openai.types.responses.response_output_item import ResponseOutputItem + +class Response(BaseModel, extra="allow"): + tool_choice: ToolChoice # Direct reuse of OpenAI type + output: List[ResponseOutputItem] # Direct reuse + # ... but top-level response adds _headers support + _headers: Optional[httpx.Headers] = PrivateAttr() +``` + +**Param types** - OpenAI param types are imported directly in API wrappers: +```python +# apis/containers.py +from portkey_ai._vendor.openai.types import container_create_params + +def create( + self, + expires_after: Union[container_create_params.ExpiresAfter, Omit] = omit, +): + ... +``` + +While **top-level response types** are redefined in Portkey to add header support: +```python +# Portkey redefines simple types to add _headers +class ContainerCreateResponse(BaseModel, extra="allow"): + id: Optional[str] = None + # ... mirrors OpenAI type but adds: + _headers: Optional[httpx.Headers] = PrivateAttr() +``` + +#### 5. _vendor/__init__.py is Empty +The `_vendor/__init__.py` file is empty - imports go directly to `_vendor/openai/` + +#### 6. 
Custom Streaming Implementation +Portkey has its **own SSE streaming implementation** in `streaming.py` for Portkey-specific endpoints, separate from OpenAI's streaming (used for OpenAI wrapper endpoints) + +## Client Architecture + +### Main Clients (`client.py`) + +```python +class Portkey(APIClient): + def __init__(self, *, api_key, base_url, virtual_key, config, ...): + # Initialize base client with headers + super().__init__(...) + + # Create vendored OpenAI client pointing to Portkey gateway + self.openai_client = OpenAI( + api_key=OPEN_AI_API_KEY, # Placeholder key + base_url=self.base_url, # Portkey gateway URL + default_headers=self.allHeaders, + ) + + # Initialize all API resources + self.completions = apis.Completion(self) + self.chat = apis.ChatCompletion(self) + self.containers = apis.Containers(self) + # ... more resources +``` + +### Two Types of API Endpoints + +#### 1. OpenAI Wrapper Endpoints + +These wrap the vendored OpenAI client and proxy requests through Portkey: + +```python +# Example: containers.py +class Containers(APIResource): + def __init__(self, client: Portkey): + super().__init__(client) + self.openai_client = client.openai_client + + def create(self, *, name: str, expires_after: Union[ExpiresAfter, Omit] = omit, ...): + # Use with_raw_response to access headers + response = self.openai_client.with_raw_response.containers.create( + name=name, + expires_after=expires_after, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + # Parse response and attach headers + data = ContainerCreateResponse(**json.loads(response.text)) + data._headers = response.headers + return data +``` + +#### 2. 
Portkey-Specific Endpoints + +These use the internal HTTP client directly: + +```python +# Example: feedback.py +class Feedback(APIResource): + def create(self, *, trace_id, value, weight, metadata): + body = dict(trace_id=trace_id, value=value, weight=weight, metadata=metadata) + return self._post( + PortkeyApiPaths.FEEDBACK_API, + body=body, + params=None, + cast_to=FeedbackResponse, + stream_cls=Stream[FeedbackResponse], + stream=False, + headers={}, + ) +``` + +## Key Patterns + +### 1. Union Types with `Omit` vs `NotGiven` + +**Important distinction** - These are NOT interchangeable: + +| Sentinel | Use Case | Behavior | +|----------|----------|----------| +| `Omit` / `omit` | Parameter should not be sent in request | Omits key from JSON body | +| `NotGiven` / `NOT_GIVEN` | Parameter has meaningful `None` value | Distinguishes "not provided" from "explicitly None" | + +```python +from portkey_ai._vendor.openai._types import Omit, omit, NOT_GIVEN, NotGiven + +def create( + self, + *, + # Use Omit for params that should be omitted when not provided + expires_after: Union[ExpiresAfter, Omit] = omit, + file_ids: Union[List[str], Omit] = omit, + + # Use NotGiven for params where None is a valid, distinct value + # e.g., timeout=None means "no timeout", timeout=NOT_GIVEN means "use default" + timeout: Union[float, httpx.Timeout, None, NotGiven] = NOT_GIVEN, +): + ... +``` + +**Pattern used in codebase:** +- OpenAI wrapper endpoints: Prefer `Omit`/`omit` for most optional params +- `timeout` parameter: Always uses `NotGiven`/`NOT_GIVEN` +- The codebase uses both `NOT_GIVEN` (constant) and `not_given` (instance) - they're aliases + +### 2. Response Headers Handling + +All response types include a private `_headers` attribute: + +```python +# types/containers_type.py +class ContainerCreateResponse(BaseModel, extra="allow"): + id: Optional[str] = None + name: Optional[str] = None + # ... 
other fields + _headers: Optional[httpx.Headers] = PrivateAttr() + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) +``` + +Set headers after parsing response: +```python +response = self.openai_client.with_raw_response.containers.create(...) +data = ContainerCreateResponse(**json.loads(response.text)) +data._headers = response.headers +return data +``` + +### 3. Extra Parameters via `**kwargs` + +Extract OpenAI's extra parameters from kwargs: + +```python +def create(self, *, name: str, **kwargs): + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + # Merge remaining kwargs into extra_body + user_extra_body = extra_body or {} + merged_extra_body = {**user_extra_body, **kwargs} +``` + +### 4. Streaming vs Non-Streaming + +For endpoints that support streaming: + +```python +def create(self, *, stream: Union[bool, Omit] = omit, **kwargs): + if stream is True: + # Return iterator directly (no raw response needed) + return self.openai_client.chat.completions.create(stream=True, ...) + else: + # Use with_raw_response for headers + response = self.openai_client.with_raw_response.chat.completions.create(...) + data = ChatCompletions(**json.loads(response.text)) + data._headers = response.headers + return data +``` + +### 5. Sync/Async Pairs + +Every resource has both sync and async versions: + +```python +class Containers(APIResource): + def create(self, ...) -> ContainerCreateResponse: + ... + +class AsyncContainers(AsyncAPIResource): + async def create(self, ...) -> ContainerCreateResponse: + ... 
+``` + +## Type Definitions + +Types are defined in `portkey_ai/api_resources/types/`: + +```python +# types/containers_type.py +class ContainerCreateResponse(BaseModel, extra="allow"): + id: Optional[str] = None + created_at: Optional[int] = None + name: Optional[str] = None + object: Optional[str] = None + status: Optional[str] = None + expires_after: Optional[ExpiresAfter] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) +``` + +Key conventions: +- Use `BaseModel` with `extra="allow"` for forward compatibility +- All fields are `Optional` with `None` default +- Include `_headers` as `PrivateAttr()` +- Implement `__str__`, `__getitem__`, `get`, and `get_headers` methods + +## Adding New Endpoints + +### For OpenAI Wrapper Endpoints + +1. Create wrapper class in `api_resources/apis/`: + ```python + class NewResource(APIResource): + def __init__(self, client: Portkey): + super().__init__(client) + self.openai_client = client.openai_client + ``` + +2. Create response types in `api_resources/types/` + +3. Register in `client.py`: + ```python + self.new_resource = apis.NewResource(self) + ``` + +4. Export in `api_resources/__init__.py` and `portkey_ai/__init__.py` + +### For Portkey-Specific Endpoints + +1. Create class using internal HTTP client: + ```python + class NewResource(APIResource): + def create(self, **kwargs): + return self._post( + "/portkey/new-endpoint", + body=body, + cast_to=ResponseType, + ... + ) + ``` + +2. 
Follow same registration and export steps + +## Portkey Headers + +The SDK automatically adds Portkey-specific headers via `createHeaders()`: + +- `x-portkey-api-key` - Portkey API key +- `x-portkey-virtual-key` - Virtual key for provider auth +- `x-portkey-config` - Gateway config +- `x-portkey-provider` - Target provider +- `x-portkey-trace-id` - Tracing ID +- `x-portkey-metadata` - Custom metadata +- Provider-specific headers (AWS, Azure, Vertex, etc.) + +## Export Chain + +Classes must be exported through the full chain: + +``` +api_resources/apis/new_resource.py # Define class + ↓ +api_resources/apis/__init__.py # Export from apis + ↓ +api_resources/__init__.py # Re-export from api_resources + ↓ +portkey_ai/__init__.py # Final public export + add to __all__ +``` + +## Updating Vendored OpenAI SDK + +1. Update version in `vendorize.toml`: + ```toml + packages = ["openai==X.Y.Z"] + ``` + +2. Run vendorize tool (typically `vendorize` or `python -m vendorize`) + +3. The tool rewrites imports from `openai.*` to `portkey_ai._vendor.openai.*` + +4. After vendoring: + - Check for new OpenAI resources that need Portkey wrappers + - Update any Portkey types that mirror OpenAI types + - Test thoroughly - OpenAI SDK changes can break wrappers + +## Common Gotchas + +1. **Don't use `Optional[X]` for omittable params** - Use `Union[X, Omit]` instead +2. **Always use `with_raw_response`** for non-streaming calls to access headers +3. **Streaming calls cannot use `with_raw_response`** - Return the stream directly +4. **Remember both sync and async versions** - Every class needs `Async` counterpart +5. **Extra kwargs go to `extra_body`** - Any unknown kwargs are merged into extra_body +6. **FileTypes comes from vendor** - Use `from ..._vendor.openai._types import FileTypes` +7. 
**Response parsing uses `json.loads(response.text)`** - Not `response.json()` diff --git a/portkey_ai/_vendor/bin/openai b/portkey_ai/_vendor/bin/openai index c10f3e4a..5b7b43cf 100755 --- a/portkey_ai/_vendor/bin/openai +++ b/portkey_ai/_vendor/bin/openai @@ -1,4 +1,4 @@ -#!/Users/chandeep/Documents/Workspace/Portkey/SDK/portkey-python-sdk/venv/bin/python3 +#!/Users/naren/Code/portkey-repositories/portkey-python-sdk/venv/bin/python # -*- coding: utf-8 -*- import re import sys diff --git a/portkey_ai/_vendor/openai-2.7.1.dist-info/INSTALLER b/portkey_ai/_vendor/openai-2.16.0.dist-info/INSTALLER similarity index 100% rename from portkey_ai/_vendor/openai-2.7.1.dist-info/INSTALLER rename to portkey_ai/_vendor/openai-2.16.0.dist-info/INSTALLER diff --git a/portkey_ai/_vendor/openai-2.7.1.dist-info/METADATA b/portkey_ai/_vendor/openai-2.16.0.dist-info/METADATA similarity index 97% rename from portkey_ai/_vendor/openai-2.7.1.dist-info/METADATA rename to portkey_ai/_vendor/openai-2.16.0.dist-info/METADATA index 110ee06c..69197f70 100644 --- a/portkey_ai/_vendor/openai-2.7.1.dist-info/METADATA +++ b/portkey_ai/_vendor/openai-2.16.0.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.3 Name: openai -Version: 2.7.1 +Version: 2.16.0 Summary: The official Python library for the openai API Project-URL: Homepage, https://github.com/openai/openai-python Project-URL: Repository, https://github.com/openai/openai-python @@ -13,15 +13,15 @@ Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: OS Independent Classifier: Operating System :: POSIX Classifier: Operating System :: POSIX :: Linux -Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 
Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Typing :: Typed -Requires-Python: >=3.8 +Requires-Python: >=3.9 Requires-Dist: anyio<5,>=3.5.0 Requires-Dist: distro<2,>=1.7.0 Requires-Dist: httpx<1,>=0.23.0 @@ -49,7 +49,7 @@ Description-Content-Type: text/markdown [![PyPI version](https://img.shields.io/pypi/v/openai.svg?label=pypi%20(stable))](https://pypi.org/project/openai/) -The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+ +The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.9+ application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). @@ -82,7 +82,7 @@ client = OpenAI( ) response = client.responses.create( - model="gpt-4o", + model="gpt-5.2", instructions="You are a coding assistant that talks like a pirate.", input="How do I check if a Python object is an instance of a class?", ) @@ -98,7 +98,7 @@ from openai import OpenAI client = OpenAI() completion = client.chat.completions.create( - model="gpt-4o", + model="gpt-5.2", messages=[ {"role": "developer", "content": "Talk like a pirate."}, { @@ -126,7 +126,7 @@ prompt = "What is in this image?" img_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/2023_06_08_Raccoon1.jpg/1599px-2023_06_08_Raccoon1.jpg" response = client.responses.create( - model="gpt-4o-mini", + model="gpt-5.2", input=[ { "role": "user", @@ -152,7 +152,7 @@ with open("path/to/image.png", "rb") as image_file: b64_image = base64.b64encode(image_file.read()).decode("utf-8") response = client.responses.create( - model="gpt-4o-mini", + model="gpt-5.2", input=[ { "role": "user", @@ -182,7 +182,7 @@ client = AsyncOpenAI( async def main() -> None: response = await client.responses.create( - model="gpt-4o", input="Explain disestablishmentarianism to a smart five year old." 
+ model="gpt-5.2", input="Explain disestablishmentarianism to a smart five year old." ) print(response.output_text) @@ -206,6 +206,7 @@ pip install openai[aiohttp] Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: ```python +import os import asyncio from openai import DefaultAioHttpClient from openai import AsyncOpenAI @@ -213,7 +214,7 @@ from openai import AsyncOpenAI async def main() -> None: async with AsyncOpenAI( - api_key="My API Key", + api_key=os.environ.get("OPENAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: chat_completion = await client.chat.completions.create( @@ -223,7 +224,7 @@ async def main() -> None: "content": "Say this is a test", } ], - model="gpt-4o", + model="gpt-5.2", ) @@ -240,7 +241,7 @@ from openai import OpenAI client = OpenAI() stream = client.responses.create( - model="gpt-4o", + model="gpt-5.2", input="Write a one-sentence bedtime story about a unicorn.", stream=True, ) @@ -260,7 +261,7 @@ client = AsyncOpenAI() async def main(): stream = await client.responses.create( - model="gpt-4o", + model="gpt-5.2", input="Write a one-sentence bedtime story about a unicorn.", stream=True, ) @@ -431,7 +432,7 @@ response = client.chat.responses.create( "content": "How much ?", } ], - model="gpt-4o", + model="gpt-5.2", response_format={"type": "json_object"}, ) ``` @@ -586,7 +587,7 @@ All object responses in the SDK provide a `_request_id` property which is added ```python response = await client.responses.create( - model="gpt-4o-mini", + model="gpt-5.2", input="Say 'this is a test'.", ) print(response._request_id) # req_123 @@ -604,7 +605,7 @@ import openai try: completion = await client.chat.completions.create( - messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4" + messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-5.2" ) except openai.APIStatusError as exc: print(exc.request_id) # req_123 @@ -636,7 
+637,7 @@ client.with_options(max_retries=5).chat.completions.create( "content": "How can I get the name of the current day in JavaScript?", } ], - model="gpt-4o", + model="gpt-5.2", ) ``` @@ -667,7 +668,7 @@ client.with_options(timeout=5.0).chat.completions.create( "content": "How can I list all files in a directory using Python?", } ], - model="gpt-4o", + model="gpt-5.2", ) ``` @@ -714,7 +715,7 @@ response = client.chat.completions.with_raw_response.create( "role": "user", "content": "Say this is a test", }], - model="gpt-4o", + model="gpt-5.2", ) print(response.headers.get('X-My-Header')) @@ -747,7 +748,7 @@ with client.chat.completions.with_streaming_response.create( "content": "Say this is a test", } ], - model="gpt-4o", + model="gpt-5.2", ) as response: print(response.headers.get("X-My-Header")) @@ -900,7 +901,7 @@ print(openai.__version__) ## Requirements -Python 3.8 or higher. +Python 3.9 or higher. ## Contributing diff --git a/portkey_ai/_vendor/openai-2.16.0.dist-info/RECORD b/portkey_ai/_vendor/openai-2.16.0.dist-info/RECORD new file mode 100644 index 00000000..22342e7e --- /dev/null +++ b/portkey_ai/_vendor/openai-2.16.0.dist-info/RECORD @@ -0,0 +1,2010 @@ +../../bin/openai,sha256=5Jct5kiiHC4vRDm7YaFyG9z68HSoNekUZLWDLWR-2pk,266 +openai-2.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +openai-2.16.0.dist-info/METADATA,sha256=ysvsnWaf9QhIY9i9Bpvw_Te-aRUR3ZmSwCyFrvUl0bs,29167 +openai-2.16.0.dist-info/RECORD,, +openai-2.16.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +openai-2.16.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87 +openai-2.16.0.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43 +openai-2.16.0.dist-info/licenses/LICENSE,sha256=Y263152pu21RWks_1BeqJmees88WOW3atLxV-nTmFuQ,11336 +openai/__init__.py,sha256=Fvc0dwOoaIZDN_s3iV62jlxeU5d7qn-Q8eQIaPIdD8g,11196 +openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30 
+openai/__pycache__/__init__.cpython-310.pyc,, +openai/__pycache__/__main__.cpython-310.pyc,, +openai/__pycache__/_base_client.cpython-310.pyc,, +openai/__pycache__/_client.cpython-310.pyc,, +openai/__pycache__/_compat.cpython-310.pyc,, +openai/__pycache__/_constants.cpython-310.pyc,, +openai/__pycache__/_exceptions.cpython-310.pyc,, +openai/__pycache__/_files.cpython-310.pyc,, +openai/__pycache__/_legacy_response.cpython-310.pyc,, +openai/__pycache__/_models.cpython-310.pyc,, +openai/__pycache__/_module_client.cpython-310.pyc,, +openai/__pycache__/_qs.cpython-310.pyc,, +openai/__pycache__/_resource.cpython-310.pyc,, +openai/__pycache__/_response.cpython-310.pyc,, +openai/__pycache__/_streaming.cpython-310.pyc,, +openai/__pycache__/_types.cpython-310.pyc,, +openai/__pycache__/_version.cpython-310.pyc,, +openai/__pycache__/pagination.cpython-310.pyc,, +openai/__pycache__/version.cpython-310.pyc,, +openai/_base_client.py,sha256=oaTAUxbeNjdxieHoF3bw5pcXYM_OTZZrkbqNxZPCWxI,74599 +openai/_client.py,sha256=uZlue35miO_zv84wqaCRvqchb6WH3UaKPbZGIvIs0Gw,44645 +openai/_compat.py,sha256=k2XpUhYfgp5ZXkZkQAftJHt_UWFjUct1Sm2ye2kPBXo,6964 +openai/_constants.py,sha256=WmCwgT4tGmFsSrltb26f3bM8ftUyFYkzh32Ny5yl-So,467 +openai/_exceptions.py,sha256=TYcCxnfT7fln5duvVnCVJ0znuUHXSAbCT5sAMnaeKjU,5008 +openai/_extras/__init__.py,sha256=sainrYWujCxIyL24wNpKfMVr-ZyBPlnSZfqXcg2S6Xg,165 +openai/_extras/__pycache__/__init__.cpython-310.pyc,, +openai/_extras/__pycache__/_common.cpython-310.pyc,, +openai/_extras/__pycache__/numpy_proxy.cpython-310.pyc,, +openai/_extras/__pycache__/pandas_proxy.cpython-310.pyc,, +openai/_extras/__pycache__/sounddevice_proxy.cpython-310.pyc,, +openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364 +openai/_extras/numpy_proxy.py,sha256=LyTZkKDdnjz0h1SKLsphrhmXyUsJ_xEUhTFMrCf7k7g,805 +openai/_extras/pandas_proxy.py,sha256=NCEt1Dqwc_0H85YdsWPDE3lPDJtYnBT8G-gJE_BCeEc,637 
+openai/_extras/sounddevice_proxy.py,sha256=xDoE21YGu13dSAJJkiOM9Qdb7uOIv5zskaJRX6xciEg,725 +openai/_files.py,sha256=cQOoF0UFpnyH5JMIdu_EvGpj_dGzH1ojtJvyX7Xwqn0,3612 +openai/_legacy_response.py,sha256=fx9I0IInZY1zr2bUmpqW2ZUcL9JW2xS6S4NqFuwhdPM,16237 +openai/_models.py,sha256=mJ9fhqRLDypXY73XoPy5CpCZa5zwce6N3-OV9nX-GzI,33856 +openai/_module_client.py,sha256=33fORSMWHuxqpvlROvYVMEIvaWUishUpSeaqpsOjWuI,5033 +openai/_qs.py,sha256=craIKyvPktJ94cvf9zn8j8ekG9dWJzhWv0ob34lIOv4,4828 +openai/_resource.py,sha256=IQihFzFLhGOiGSlT2dO1ESWSTg2XypgbtAldtGdTOqU,1100 +openai/_response.py,sha256=zLVaMPYE1o2Tz1eS5_bnJNGMikRN1byMpMcVpW1tgIU,29510 +openai/_streaming.py,sha256=dPO6F5Klse8Nax72QfB_R3VGLoPzJwsw1Yrj0sgEo2Y,13769 +openai/_types.py,sha256=EBBMTWsJ2GtJsDhUFLrZhXHF22zmwLOJ70ncG--ODO8,7722 +openai/_utils/__init__.py,sha256=qiOG_n0G-sP5r5jNvD4OUaeaVLFEw5s-h7h7b0nD7Nk,2465 +openai/_utils/__pycache__/__init__.cpython-310.pyc,, +openai/_utils/__pycache__/_compat.cpython-310.pyc,, +openai/_utils/__pycache__/_datetime_parse.cpython-310.pyc,, +openai/_utils/__pycache__/_logs.cpython-310.pyc,, +openai/_utils/__pycache__/_proxy.cpython-310.pyc,, +openai/_utils/__pycache__/_reflection.cpython-310.pyc,, +openai/_utils/__pycache__/_resources_proxy.cpython-310.pyc,, +openai/_utils/__pycache__/_streams.cpython-310.pyc,, +openai/_utils/__pycache__/_sync.cpython-310.pyc,, +openai/_utils/__pycache__/_transform.cpython-310.pyc,, +openai/_utils/__pycache__/_typing.cpython-310.pyc,, +openai/_utils/__pycache__/_utils.cpython-310.pyc,, +openai/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195 +openai/_utils/_datetime_parse.py,sha256=bABTs0Bc6rabdFvnIwXjEhWL15TcRgWZ_6XGTqN8xUk,4204 +openai/_utils/_logs.py,sha256=IC5iwPflwelNpJEpWsvK3up-pol5hR8k_VL9fSukk_Y,1351 +openai/_utils/_proxy.py,sha256=aglnj2yBTDyGX9Akk2crZHrl10oqRmceUy2Zp008XEs,1975 +openai/_utils/_reflection.py,sha256=aTXm-W0Kww4PJo5LPkUnQ92N-2UvrK1-D67cJVBlIgw,1426 
+openai/_utils/_resources_proxy.py,sha256=AHHZCOgv-2CRqB4B52dB7ySlE5q6QCWj0bsTqNmzikw,589 +openai/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289 +openai/_utils/_sync.py,sha256=HBnZkkBnzxtwOZe0212C4EyoRvxhTVtTrLFDz2_xVCg,1589 +openai/_utils/_transform.py,sha256=hzILp2ijV9J7D-uoEDmadtyCmzMK6DprJP8IlwEg0ZY,15999 +openai/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4786 +openai/_utils/_utils.py,sha256=Z2y9rNbK-worRedH9Ub9tO_FSIjl0SH2AV9Tdgz9LUA,12667 +openai/_version.py,sha256=bJPiIwvwNSWU56_IZn2UmZYbi97vB6FNxxSZpgEx2I8,159 +openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31 +openai/cli/__pycache__/__init__.cpython-310.pyc,, +openai/cli/__pycache__/_cli.cpython-310.pyc,, +openai/cli/__pycache__/_errors.cpython-310.pyc,, +openai/cli/__pycache__/_models.cpython-310.pyc,, +openai/cli/__pycache__/_progress.cpython-310.pyc,, +openai/cli/__pycache__/_utils.cpython-310.pyc,, +openai/cli/_api/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 +openai/cli/_api/__pycache__/__init__.cpython-310.pyc,, +openai/cli/_api/__pycache__/_main.cpython-310.pyc,, +openai/cli/_api/__pycache__/audio.cpython-310.pyc,, +openai/cli/_api/__pycache__/completions.cpython-310.pyc,, +openai/cli/_api/__pycache__/files.cpython-310.pyc,, +openai/cli/_api/__pycache__/image.cpython-310.pyc,, +openai/cli/_api/__pycache__/models.cpython-310.pyc,, +openai/cli/_api/_main.py,sha256=3xVyycq-4HEYMBdMDJFk893PTXpr8yvkGL3eCiuSx8E,501 +openai/cli/_api/audio.py,sha256=0GU49a-XurLlyVEy2V9IZ_pDmjL1XEBI7Jp7fQfJ5Sk,3757 +openai/cli/_api/chat/__init__.py,sha256=MhFUQH9F6QCtbPMlbsU_DWTd7wc5DSCZ7Wy3FBGVij0,300 +openai/cli/_api/chat/__pycache__/__init__.cpython-310.pyc,, +openai/cli/_api/chat/__pycache__/completions.cpython-310.pyc,, +openai/cli/_api/chat/completions.py,sha256=GyfAo3B2w2ySV0dK9D2IIVA4fOb0zqJZadQ-Yc8a_yU,5536 +openai/cli/_api/completions.py,sha256=Jy1rlQqw__12ZfbTrnZJgoGBbDKJ58kOUAT-vkLr5kE,6334 
+openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345 +openai/cli/_api/fine_tuning/__init__.py,sha256=hZeWhTZtIRAl1xgSbznjpCYy9lnUUXngh8uEIbVn__Y,286 +openai/cli/_api/fine_tuning/__pycache__/__init__.cpython-310.pyc,, +openai/cli/_api/fine_tuning/__pycache__/jobs.cpython-310.pyc,, +openai/cli/_api/fine_tuning/jobs.py,sha256=4wj9DPfw3343fJQW9j52Q-ga4jYa1haOTn4yYsH_zqk,5311 +openai/cli/_api/image.py,sha256=3UDZ1R8SjYh4IOhhdJqf20FPqPgPdhpRxqu3eo5BKhU,5014 +openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295 +openai/cli/_cli.py,sha256=42j_eI8PPdFbVjufluregmNYTdwrw3yQtsHtTzyNvcQ,6779 +openai/cli/_errors.py,sha256=nejlu1HnOyAIr2n7uqpFtWn8XclWj_9N8FwgfT3BPK8,471 +openai/cli/_models.py,sha256=_budygMbXh3Fv-w-TDfWecZNiKfox6f0lliCUytxE1Q,491 +openai/cli/_progress.py,sha256=aMLssU9jh-LoqRYH3608jNos7r6vZKnHTRlHxFznzv4,1406 +openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 +openai/cli/_tools/__pycache__/__init__.cpython-310.pyc,, +openai/cli/_tools/__pycache__/_main.cpython-310.pyc,, +openai/cli/_tools/__pycache__/fine_tunes.cpython-310.pyc,, +openai/cli/_tools/__pycache__/migrate.cpython-310.pyc,, +openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467 +openai/cli/_tools/fine_tunes.py,sha256=RQgYMzifk6S7Y1I1K6huqco2QxmXa7gVUlHl6SrKTSU,1543 +openai/cli/_tools/migrate.py,sha256=o-iomzhtC6N6X5H5GDlgQ_QOaIovE2YA9oHc_tIAUj8,4497 +openai/cli/_utils.py,sha256=oiTc9MnxQh_zxAZ1OIHPkoDpCll0NF9ZgkdFHz4T-Bs,848 +openai/helpers/__init__.py,sha256=F0x_Pguq1XC2KXZYbfxUG-G_FxJ3mlsi7HaFZ1x-g9A,130 +openai/helpers/__pycache__/__init__.cpython-310.pyc,, +openai/helpers/__pycache__/local_audio_player.cpython-310.pyc,, +openai/helpers/__pycache__/microphone.cpython-310.pyc,, +openai/helpers/local_audio_player.py,sha256=7MWwt1BYEh579z1brnQ2mUEB0Ble4UoGMHDKusOfZJQ,5852 +openai/helpers/microphone.py,sha256=6tIHWZGpRA5XvUoer-nPBvHbrmxK7CWx3_Ta-qp1H54,3341 
+openai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224 +openai/lib/__init__.py,sha256=BMTfMnlbugMgDA1STDIAlx4bI4t4l_8bQmJxd0th0n8,126 +openai/lib/__pycache__/__init__.cpython-310.pyc,, +openai/lib/__pycache__/_old_api.cpython-310.pyc,, +openai/lib/__pycache__/_pydantic.cpython-310.pyc,, +openai/lib/__pycache__/_realtime.cpython-310.pyc,, +openai/lib/__pycache__/_tools.cpython-310.pyc,, +openai/lib/__pycache__/_validators.cpython-310.pyc,, +openai/lib/__pycache__/azure.cpython-310.pyc,, +openai/lib/_old_api.py,sha256=XZnXBrEKuTd70iJirj5mGW35fZoqruJobbBTq6bvg10,1947 +openai/lib/_parsing/__init__.py,sha256=wS3BYvMGj9TqiPqOe3rO1sleaAJqHVuCaQuCE5rZIUw,539 +openai/lib/_parsing/__pycache__/__init__.cpython-310.pyc,, +openai/lib/_parsing/__pycache__/_completions.cpython-310.pyc,, +openai/lib/_parsing/__pycache__/_responses.cpython-310.pyc,, +openai/lib/_parsing/_completions.py,sha256=3vihFrFWJIrToaWYjJMqn42gTyNmrQhXvi2vr5Wduo8,10629 +openai/lib/_parsing/_responses.py,sha256=g47-6Vbw4cAjkUrHRAG_PAeJzJlwSxngiezog5UUYwI,6246 +openai/lib/_pydantic.py,sha256=Cf0vGwuWdNEuIUg8WNREjWRGApMObgl8DjdLU4f5jAc,5623 +openai/lib/_realtime.py,sha256=4ani2j6lt21SXrC6Ep_GQnLA7eEOo4UoZ8I1JAuqtn0,3980 +openai/lib/_tools.py,sha256=Dc4U2TXKvfAvVUvDS30SDeftrwgGM2vZ85t5ojLHiEg,1969 +openai/lib/_validators.py,sha256=cXJXFuaAl7jeJcYHXXnFa4NHGtHs-_zt3Zs1VVCmQo4,35288 +openai/lib/azure.py,sha256=dLzUXTXUOnfarLdDyO6dVzp8wY2vTMFFHUJZLuFznWY,26537 +openai/lib/streaming/__init__.py,sha256=kD3LpjsqU7caDQDhB-YjTUl9qqbb5sPnGGSI2yQYC70,379 +openai/lib/streaming/__pycache__/__init__.cpython-310.pyc,, +openai/lib/streaming/__pycache__/_assistants.cpython-310.pyc,, +openai/lib/streaming/__pycache__/_deltas.cpython-310.pyc,, +openai/lib/streaming/_assistants.py,sha256=LUWSinmYopQIkQ5xSg73b6BWbkRkQS5JvX62w_V9xSw,40692 +openai/lib/streaming/_deltas.py,sha256=I7B_AznXZwlBmE8Puau7ayTQUx6hMIEVE8FYTQm2fjs,2502 
+openai/lib/streaming/chat/__init__.py,sha256=7krL_atOvvpQkY_byWSglSfDsMs5hdoxHmz4Ulq7lcc,1305 +openai/lib/streaming/chat/__pycache__/__init__.cpython-310.pyc,, +openai/lib/streaming/chat/__pycache__/_completions.cpython-310.pyc,, +openai/lib/streaming/chat/__pycache__/_events.cpython-310.pyc,, +openai/lib/streaming/chat/__pycache__/_types.cpython-310.pyc,, +openai/lib/streaming/chat/_completions.py,sha256=4PDLu_1-wQOrAwHY-Gz8NIQ8UnJ9gshwrmxuMDesFp8,30775 +openai/lib/streaming/chat/_events.py,sha256=lstVmM6YR2Cs9drikzrY9JCZn9Nbfym0aKIPtNpxL6w,2618 +openai/lib/streaming/chat/_types.py,sha256=-SYVBNhGkOUoJ-8dotxpCRqPJpfyOQ8hwR2_HrsQCRI,739 +openai/lib/streaming/responses/__init__.py,sha256=MwE1Oc3OIiXjtuRFsuP_k5Ra8pNiqKpc1GZum-8ZRJM,543 +openai/lib/streaming/responses/__pycache__/__init__.cpython-310.pyc,, +openai/lib/streaming/responses/__pycache__/_events.cpython-310.pyc,, +openai/lib/streaming/responses/__pycache__/_responses.cpython-310.pyc,, +openai/lib/streaming/responses/__pycache__/_types.cpython-310.pyc,, +openai/lib/streaming/responses/_events.py,sha256=3UWmeYgg23E3XTkYVlrpXJPnhBM2kmQFoXh3WiT9CrE,5576 +openai/lib/streaming/responses/_responses.py,sha256=Myeo4so-aMFrzEyNCjX0ypYWTWvY5uDelhe2ygC93lY,13614 +openai/lib/streaming/responses/_types.py,sha256=msq1KWj3e3BLn7NKu5j2kzHgj9kShuoitgXEyTmQxus,276 +openai/pagination.py,sha256=dtPji3wApb_0rkvYDwh50rl8cjxT3i6EUS6PfTXwhQI,4770 +openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +openai/resources/__init__.py,sha256=YDrG7nC0vTf4xk-JCSs0132OA5XWmqAMtjWu4wypnB4,6067 +openai/resources/__pycache__/__init__.cpython-310.pyc,, +openai/resources/__pycache__/batches.cpython-310.pyc,, +openai/resources/__pycache__/completions.cpython-310.pyc,, +openai/resources/__pycache__/embeddings.cpython-310.pyc,, +openai/resources/__pycache__/files.cpython-310.pyc,, +openai/resources/__pycache__/images.cpython-310.pyc,, +openai/resources/__pycache__/models.cpython-310.pyc,, 
+openai/resources/__pycache__/moderations.cpython-310.pyc,, +openai/resources/__pycache__/videos.cpython-310.pyc,, +openai/resources/__pycache__/webhooks.cpython-310.pyc,, +openai/resources/audio/__init__.py,sha256=YM7FHvPKVlj_v6EIgfpUQsb6q4hS2hVQ3gfkgic0sP0,1687 +openai/resources/audio/__pycache__/__init__.cpython-310.pyc,, +openai/resources/audio/__pycache__/audio.cpython-310.pyc,, +openai/resources/audio/__pycache__/speech.cpython-310.pyc,, +openai/resources/audio/__pycache__/transcriptions.cpython-310.pyc,, +openai/resources/audio/__pycache__/translations.cpython-310.pyc,, +openai/resources/audio/audio.py,sha256=nEIB4q7a1MSYdQkcYH2O6jB-_rNCMDCBJyUuqOL67CI,5491 +openai/resources/audio/speech.py,sha256=OT6cTwSeQWx_9zBu4eeCGnJ2KoBcK82WoNXM2d7dsXQ,10316 +openai/resources/audio/transcriptions.py,sha256=SHB5wio25FMIxL12_ErjuGCtuQ3fRpkT2Zo0aDFthAY,52213 +openai/resources/audio/translations.py,sha256=IsPiYZtr9BLS7pgAWAneU7yNq1E9igDCa-QXN12PhZM,15505 +openai/resources/batches.py,sha256=kdQbfxnHMZ7sB8Z5ZQriwxYE2u9_6_Z4LM4wxulYwjA,21002 +openai/resources/beta/__init__.py,sha256=chKjkpkqNxO1Dbl9OsCJNXVC1AbDcvTrvfvvAIh5B5I,1570 +openai/resources/beta/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/__pycache__/assistants.cpython-310.pyc,, +openai/resources/beta/__pycache__/beta.cpython-310.pyc,, +openai/resources/beta/assistants.py,sha256=UUUeaqFksHnx8lRgxlvkv6gNtTv4VSgxrs8LomgSc-0,50594 +openai/resources/beta/beta.py,sha256=Lrsu8f9haXb4bZphmw9wgHzn8uZUBUUC11mZa3XRbr0,5725 +openai/resources/beta/chatkit/__init__.py,sha256=lJAQpi-JogtnSAlOegSae6WfCfgRLMd8rpPBuT9_2FE,1216 +openai/resources/beta/chatkit/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/chatkit/__pycache__/chatkit.cpython-310.pyc,, +openai/resources/beta/chatkit/__pycache__/sessions.cpython-310.pyc,, +openai/resources/beta/chatkit/__pycache__/threads.cpython-310.pyc,, +openai/resources/beta/chatkit/chatkit.py,sha256=CleguF_80H_gAsyX_tLoGxFTD7YItZPshRAZ2QAqfzY,4333 
+openai/resources/beta/chatkit/sessions.py,sha256=H8oIyd8V553LruOilYuTnXwU8Mh_z5xOjuu7GunaGIc,11837 +openai/resources/beta/chatkit/threads.py,sha256=kqngETyzx9uRBKtgfq9r9WrtOXpNfnHej4PkrVfnklo,20077 +openai/resources/beta/realtime/__init__.py,sha256=dOXRjPiDqRJXIFoGKSVjzKh3IwSXnLbwHx4ND5OdnVs,1412 +openai/resources/beta/realtime/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/realtime/__pycache__/realtime.cpython-310.pyc,, +openai/resources/beta/realtime/__pycache__/sessions.cpython-310.pyc,, +openai/resources/beta/realtime/__pycache__/transcription_sessions.cpython-310.pyc,, +openai/resources/beta/realtime/realtime.py,sha256=tuiq_0PdFmC2p-LNOfQNrVuDEMlLAHKEgeAsPsHLUHU,43694 +openai/resources/beta/realtime/sessions.py,sha256=EQva_qI71CgS35qkK9TGxuibviHwUQ6VzErIzunP4gU,22098 +openai/resources/beta/realtime/transcription_sessions.py,sha256=uTDGEat50lojdD0N8slnZu2RVzMP96rlicpDp4tpl34,14124 +openai/resources/beta/threads/__init__.py,sha256=fQ_qdUVSfouVS5h47DlTb5mamChT4K-v-siPuuAB6do,1177 +openai/resources/beta/threads/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/threads/__pycache__/messages.cpython-310.pyc,, +openai/resources/beta/threads/__pycache__/threads.cpython-310.pyc,, +openai/resources/beta/threads/messages.py,sha256=a8HEG-QKIgG8r4XtE0M7ixRBikAmdQEUDWUDf1gkaSg,30794 +openai/resources/beta/threads/runs/__init__.py,sha256=2FfDaqwmJJCd-IVpY_CrzWcFvw0KFyQ3cm5jnTfI-DQ,771 +openai/resources/beta/threads/runs/__pycache__/__init__.cpython-310.pyc,, +openai/resources/beta/threads/runs/__pycache__/runs.cpython-310.pyc,, +openai/resources/beta/threads/runs/__pycache__/steps.cpython-310.pyc,, +openai/resources/beta/threads/runs/runs.py,sha256=JQ5LaI33KcXLumleh-TxyTTJWYkbeQhTh8FxhHNmYzg,155523 +openai/resources/beta/threads/runs/steps.py,sha256=YkoPMeMXEzoL09AWF7Eh1lNaJocykV1igmcsZpXKw5Y,16981 +openai/resources/beta/threads/threads.py,sha256=3C3OzlgL0S1mDdnRBowU14Di8W7T81C2BEGFm5Mx42Y,97651 
+openai/resources/chat/__init__.py,sha256=8Q9ODRo1wIpFa34VaNwuaWFmxqFxagDtUhIAkQNvxEU,849 +openai/resources/chat/__pycache__/__init__.cpython-310.pyc,, +openai/resources/chat/__pycache__/chat.cpython-310.pyc,, +openai/resources/chat/chat.py,sha256=HjcasSCmt-g3-J-RkZQ9HRj_-hPfImakFxdUvvk5mCg,3364 +openai/resources/chat/completions/__init__.py,sha256=KOi8blzNyHWD7nKgcoW3CxZ4428IcNVP0gCU74HySf8,901 +openai/resources/chat/completions/__pycache__/__init__.cpython-310.pyc,, +openai/resources/chat/completions/__pycache__/completions.cpython-310.pyc,, +openai/resources/chat/completions/__pycache__/messages.cpython-310.pyc,, +openai/resources/chat/completions/completions.py,sha256=r5M4ICb-GHJUsbrokxXvQizh_iBsygiRX-kho_X78JQ,164608 +openai/resources/chat/completions/messages.py,sha256=AYVwQ24jPQGs2Y-vE6Yjl5nbCECtuw-HpcBEEpCgC-0,8010 +openai/resources/completions.py,sha256=wO39_sLxmSzTI6Mp13KzjqaxMgFZw4l-t0_9xxDbX_4,59201 +openai/resources/containers/__init__.py,sha256=7VzY-TFwG3x5D_kUCs_iAQaaCKAswt1Jk70KpmnU8Do,849 +openai/resources/containers/__pycache__/__init__.cpython-310.pyc,, +openai/resources/containers/__pycache__/containers.cpython-310.pyc,, +openai/resources/containers/containers.py,sha256=IjkEbXITLjCMt0rpZpvocfBro_M0ZEB4rF5fYEfLWs8,19650 +openai/resources/containers/files/__init__.py,sha256=nDhg0wY7eHRMO-xOErno0mV0Ya_ynlmKAp-4a3nj-us,810 +openai/resources/containers/files/__pycache__/__init__.cpython-310.pyc,, +openai/resources/containers/files/__pycache__/content.cpython-310.pyc,, +openai/resources/containers/files/__pycache__/files.cpython-310.pyc,, +openai/resources/containers/files/content.py,sha256=-jupriq97X2kq_yCdYihZ1h2qCx-IMbaaR10M4lz6TA,6491 +openai/resources/containers/files/files.py,sha256=jjiRGS489CzoOXb3nvsD-i3qTSINE9CrAo2jZPWxyLI,21042 +openai/resources/conversations/__init__.py,sha256=Uslb4pakT8pQJGQ29CvoiN-SvN2AgMum-TeIDyYTzQE,888 +openai/resources/conversations/__pycache__/__init__.cpython-310.pyc,, 
+openai/resources/conversations/__pycache__/conversations.cpython-310.pyc,, +openai/resources/conversations/__pycache__/items.cpython-310.pyc,, +openai/resources/conversations/conversations.py,sha256=IjnSvilsJG_yK4IoRP86R6_5MFlHSpZt6lWxgpbGP-Y,19151 +openai/resources/conversations/items.py,sha256=q3XbPsh09Gb9qYisb6BEa9BExX4HF5oMu-Z0khdAFlY,23969 +openai/resources/embeddings.py,sha256=GYA_sI2h5auPwyHKm44-brPxRxqvcQaH0JQMZW13bMA,12374 +openai/resources/evals/__init__.py,sha256=DXhYb6mCKKY2bDdS3s4raH1SvwPUyaBFvdHgPEbwRWY,771 +openai/resources/evals/__pycache__/__init__.cpython-310.pyc,, +openai/resources/evals/__pycache__/evals.cpython-310.pyc,, +openai/resources/evals/evals.py,sha256=goQ9ek2_xI34SG7GkwpqKhXO2hZouq5bxS26EejY-cI,25904 +openai/resources/evals/runs/__init__.py,sha256=7EtKZ43tGlmAOYyDdyFXy80tk2X8AmXb5taTWRRXBXE,850 +openai/resources/evals/runs/__pycache__/__init__.cpython-310.pyc,, +openai/resources/evals/runs/__pycache__/output_items.cpython-310.pyc,, +openai/resources/evals/runs/__pycache__/runs.cpython-310.pyc,, +openai/resources/evals/runs/output_items.py,sha256=7pcGpGc61Df4jQIgxRYLX-27wz_8qc0Ux-ni_EfVvwA,12530 +openai/resources/evals/runs/runs.py,sha256=228Vf9S8_dz0tZAWCh2ehECQYg_Z4JXNV5MRuvUtDh4,24359 +openai/resources/files.py,sha256=kCQlINr3EBeknrv9tc_SluYWhlRHx5W1oDh4bH8XMqc,30610 +openai/resources/fine_tuning/__init__.py,sha256=RQPC5QfqE-ByhRQbJK-j7ooUrkBO9s9bKt5xkzOL8ls,1597 +openai/resources/fine_tuning/__pycache__/__init__.cpython-310.pyc,, +openai/resources/fine_tuning/__pycache__/fine_tuning.cpython-310.pyc,, +openai/resources/fine_tuning/alpha/__init__.py,sha256=QKAYZscx1Fw3GLD8cVdZAYG9L_i6MnPGeifn8GgcztU,810 +openai/resources/fine_tuning/alpha/__pycache__/__init__.cpython-310.pyc,, +openai/resources/fine_tuning/alpha/__pycache__/alpha.cpython-310.pyc,, +openai/resources/fine_tuning/alpha/__pycache__/graders.cpython-310.pyc,, +openai/resources/fine_tuning/alpha/alpha.py,sha256=P-zLOHpI-Aa0jUUWspkanL7WpUtfjwIGDH8KTGDNeHY,3274 
+openai/resources/fine_tuning/alpha/graders.py,sha256=TA39PsdXWjxsts6p_UjPhyTwE4a1O7nQOkUC0V2ZHbU,10758 +openai/resources/fine_tuning/checkpoints/__init__.py,sha256=rvsbut5FCQNAr-VjvL-14GFT3Tld49FlFuBJDpfxBug,940 +openai/resources/fine_tuning/checkpoints/__pycache__/__init__.cpython-310.pyc,, +openai/resources/fine_tuning/checkpoints/__pycache__/checkpoints.cpython-310.pyc,, +openai/resources/fine_tuning/checkpoints/__pycache__/permissions.cpython-310.pyc,, +openai/resources/fine_tuning/checkpoints/checkpoints.py,sha256=njpz496JifeZ8RXjoYUb1Tj9tBItuXRxGJHW2jrrfwo,3606 +openai/resources/fine_tuning/checkpoints/permissions.py,sha256=A9SfSQk7o0gbqhu2NMZTW53Tq5c3zbBDSgL_0K0t1WQ,17103 +openai/resources/fine_tuning/fine_tuning.py,sha256=UL4MXoUqEnbSZ5e4dnbUPTtd4tE-1p2L7Hh_0CQ_0s0,5410 +openai/resources/fine_tuning/jobs/__init__.py,sha256=_smlrwijZOCcsDWqKnofLxQM2QLucZzXgboL9zJBPHw,849 +openai/resources/fine_tuning/jobs/__pycache__/__init__.cpython-310.pyc,, +openai/resources/fine_tuning/jobs/__pycache__/checkpoints.cpython-310.pyc,, +openai/resources/fine_tuning/jobs/__pycache__/jobs.cpython-310.pyc,, +openai/resources/fine_tuning/jobs/checkpoints.py,sha256=-QQNOZJplnCJyHCFTFO-DMN-AWc1Dp8p9Hifffgz5a0,7442 +openai/resources/fine_tuning/jobs/jobs.py,sha256=jIXuCijf7v9ufH3SqgWBrQAFg5uqPKAuyXgNDmLEXK4,37033 +openai/resources/images.py,sha256=CbfPekwgHdib4TZH7Wj3nKd_JaUggcX4ot9wjVjrLKI,97665 +openai/resources/models.py,sha256=1PDMpmdtaGiNHZNWPL-sI_I-SDOjuK-yfm2oq7mKiGI,11232 +openai/resources/moderations.py,sha256=8BWoTw8QHsSUbgByBlLxHHYEeeozFsY8n3j-ah13YdI,7808 +openai/resources/realtime/__init__.py,sha256=5v7pt2NQKz1j-X1z4bTqupmE3G8O5_G4PYCyw3F3-eo,1269 +openai/resources/realtime/__pycache__/__init__.cpython-310.pyc,, +openai/resources/realtime/__pycache__/calls.cpython-310.pyc,, +openai/resources/realtime/__pycache__/client_secrets.cpython-310.pyc,, +openai/resources/realtime/__pycache__/realtime.cpython-310.pyc,, 
+openai/resources/realtime/calls.py,sha256=DIwWlEkd_6IYYnXptYeiuBpEJpP1cDrGoSwXc-G-s9A,33417 +openai/resources/realtime/client_secrets.py,sha256=Z8NmSg2GGN3we3w89Un26jWp5OO9lxOi8oS4lSYMrUg,7700 +openai/resources/realtime/realtime.py,sha256=ISKvDwqzbHeBM8OHlOW3L7oO1NwyrUM5joj_g9EH2aY,44876 +openai/resources/responses/__init__.py,sha256=9LkjQomOIh6B5Qg1HbdCgjMRoCzIBzyRaYNyt3moA38,1322 +openai/resources/responses/__pycache__/__init__.cpython-310.pyc,, +openai/resources/responses/__pycache__/input_items.cpython-310.pyc,, +openai/resources/responses/__pycache__/input_tokens.cpython-310.pyc,, +openai/resources/responses/__pycache__/responses.cpython-310.pyc,, +openai/resources/responses/input_items.py,sha256=tzg31yUowcCMqU32TBHI18YzRjqNs_EGwKdpSU8bSTs,8774 +openai/resources/responses/input_tokens.py,sha256=cQvZuYjdhAf6fcmXsDavpuhA-LBjmpQkm2KgaOD5iSk,14208 +openai/resources/responses/responses.py,sha256=2O-bCqpqw952DXPcngyh11C8f-B9BmpVAQpxe45YXGQ,173163 +openai/resources/uploads/__init__.py,sha256=HmY3WQgvUI2bN3CjfWHWQOk7UUC6Ozna97_lHhrrRSA,810 +openai/resources/uploads/__pycache__/__init__.cpython-310.pyc,, +openai/resources/uploads/__pycache__/parts.cpython-310.pyc,, +openai/resources/uploads/__pycache__/uploads.cpython-310.pyc,, +openai/resources/uploads/parts.py,sha256=2Vov0reg5wdOSGSJ7hhs9pqsIofkhqjoUoE_AgXHLZM,8121 +openai/resources/uploads/uploads.py,sha256=OeCCAEK1W1ICTfraOBbYRrBclnzroEOaAOpuT05Fyvg,25443 +openai/resources/vector_stores/__init__.py,sha256=11Xn1vhgndWiI0defJHv31vmbtbDgh2GwZT3gX8GgHk,1296 +openai/resources/vector_stores/__pycache__/__init__.cpython-310.pyc,, +openai/resources/vector_stores/__pycache__/file_batches.cpython-310.pyc,, +openai/resources/vector_stores/__pycache__/files.cpython-310.pyc,, +openai/resources/vector_stores/__pycache__/vector_stores.cpython-310.pyc,, +openai/resources/vector_stores/file_batches.py,sha256=eSrMBeNf-bKsHu40HHyKkejo8BNoFAE32LnG119FxIs,34196 
+openai/resources/vector_stores/files.py,sha256=xJStwcbKIzVzqIXK7G-Mfll61wbt154SObua945XXEI,39703 +openai/resources/vector_stores/vector_stores.py,sha256=L1vifq5tiw7EnBuxYREA_VPMzyRcePiQG4QLQL5vd18,35451 +openai/resources/videos.py,sha256=FGyEsn-h57V6guzhUEZuls5JWx0RzFXXRn74-Nvlx88,32017 +openai/resources/webhooks.py,sha256=wz3filqxxUEhhW5RSa-1LiN10MzafKXJPl5-Wb1mCew,7820 +openai/types/__init__.py,sha256=S82oA2tM4b7YnQ7R8xFqxViIUOy5bFzoRUAFA2efN2I,7630 +openai/types/__pycache__/__init__.cpython-310.pyc,, +openai/types/__pycache__/audio_model.cpython-310.pyc,, +openai/types/__pycache__/audio_response_format.cpython-310.pyc,, +openai/types/__pycache__/auto_file_chunking_strategy_param.cpython-310.pyc,, +openai/types/__pycache__/batch.cpython-310.pyc,, +openai/types/__pycache__/batch_create_params.cpython-310.pyc,, +openai/types/__pycache__/batch_error.cpython-310.pyc,, +openai/types/__pycache__/batch_list_params.cpython-310.pyc,, +openai/types/__pycache__/batch_request_counts.cpython-310.pyc,, +openai/types/__pycache__/batch_usage.cpython-310.pyc,, +openai/types/__pycache__/chat_model.cpython-310.pyc,, +openai/types/__pycache__/completion.cpython-310.pyc,, +openai/types/__pycache__/completion_choice.cpython-310.pyc,, +openai/types/__pycache__/completion_create_params.cpython-310.pyc,, +openai/types/__pycache__/completion_usage.cpython-310.pyc,, +openai/types/__pycache__/container_create_params.cpython-310.pyc,, +openai/types/__pycache__/container_create_response.cpython-310.pyc,, +openai/types/__pycache__/container_list_params.cpython-310.pyc,, +openai/types/__pycache__/container_list_response.cpython-310.pyc,, +openai/types/__pycache__/container_retrieve_response.cpython-310.pyc,, +openai/types/__pycache__/create_embedding_response.cpython-310.pyc,, +openai/types/__pycache__/embedding.cpython-310.pyc,, +openai/types/__pycache__/embedding_create_params.cpython-310.pyc,, +openai/types/__pycache__/embedding_model.cpython-310.pyc,, 
+openai/types/__pycache__/eval_create_params.cpython-310.pyc,, +openai/types/__pycache__/eval_create_response.cpython-310.pyc,, +openai/types/__pycache__/eval_custom_data_source_config.cpython-310.pyc,, +openai/types/__pycache__/eval_delete_response.cpython-310.pyc,, +openai/types/__pycache__/eval_list_params.cpython-310.pyc,, +openai/types/__pycache__/eval_list_response.cpython-310.pyc,, +openai/types/__pycache__/eval_retrieve_response.cpython-310.pyc,, +openai/types/__pycache__/eval_stored_completions_data_source_config.cpython-310.pyc,, +openai/types/__pycache__/eval_update_params.cpython-310.pyc,, +openai/types/__pycache__/eval_update_response.cpython-310.pyc,, +openai/types/__pycache__/file_chunking_strategy.cpython-310.pyc,, +openai/types/__pycache__/file_chunking_strategy_param.cpython-310.pyc,, +openai/types/__pycache__/file_content.cpython-310.pyc,, +openai/types/__pycache__/file_create_params.cpython-310.pyc,, +openai/types/__pycache__/file_deleted.cpython-310.pyc,, +openai/types/__pycache__/file_list_params.cpython-310.pyc,, +openai/types/__pycache__/file_object.cpython-310.pyc,, +openai/types/__pycache__/file_purpose.cpython-310.pyc,, +openai/types/__pycache__/image.cpython-310.pyc,, +openai/types/__pycache__/image_create_variation_params.cpython-310.pyc,, +openai/types/__pycache__/image_edit_completed_event.cpython-310.pyc,, +openai/types/__pycache__/image_edit_params.cpython-310.pyc,, +openai/types/__pycache__/image_edit_partial_image_event.cpython-310.pyc,, +openai/types/__pycache__/image_edit_stream_event.cpython-310.pyc,, +openai/types/__pycache__/image_gen_completed_event.cpython-310.pyc,, +openai/types/__pycache__/image_gen_partial_image_event.cpython-310.pyc,, +openai/types/__pycache__/image_gen_stream_event.cpython-310.pyc,, +openai/types/__pycache__/image_generate_params.cpython-310.pyc,, +openai/types/__pycache__/image_model.cpython-310.pyc,, +openai/types/__pycache__/images_response.cpython-310.pyc,, 
+openai/types/__pycache__/model.cpython-310.pyc,, +openai/types/__pycache__/model_deleted.cpython-310.pyc,, +openai/types/__pycache__/moderation.cpython-310.pyc,, +openai/types/__pycache__/moderation_create_params.cpython-310.pyc,, +openai/types/__pycache__/moderation_create_response.cpython-310.pyc,, +openai/types/__pycache__/moderation_image_url_input_param.cpython-310.pyc,, +openai/types/__pycache__/moderation_model.cpython-310.pyc,, +openai/types/__pycache__/moderation_multi_modal_input_param.cpython-310.pyc,, +openai/types/__pycache__/moderation_text_input_param.cpython-310.pyc,, +openai/types/__pycache__/other_file_chunking_strategy_object.cpython-310.pyc,, +openai/types/__pycache__/static_file_chunking_strategy.cpython-310.pyc,, +openai/types/__pycache__/static_file_chunking_strategy_object.cpython-310.pyc,, +openai/types/__pycache__/static_file_chunking_strategy_object_param.cpython-310.pyc,, +openai/types/__pycache__/static_file_chunking_strategy_param.cpython-310.pyc,, +openai/types/__pycache__/upload.cpython-310.pyc,, +openai/types/__pycache__/upload_complete_params.cpython-310.pyc,, +openai/types/__pycache__/upload_create_params.cpython-310.pyc,, +openai/types/__pycache__/vector_store.cpython-310.pyc,, +openai/types/__pycache__/vector_store_create_params.cpython-310.pyc,, +openai/types/__pycache__/vector_store_deleted.cpython-310.pyc,, +openai/types/__pycache__/vector_store_list_params.cpython-310.pyc,, +openai/types/__pycache__/vector_store_search_params.cpython-310.pyc,, +openai/types/__pycache__/vector_store_search_response.cpython-310.pyc,, +openai/types/__pycache__/vector_store_update_params.cpython-310.pyc,, +openai/types/__pycache__/video.cpython-310.pyc,, +openai/types/__pycache__/video_create_error.cpython-310.pyc,, +openai/types/__pycache__/video_create_params.cpython-310.pyc,, +openai/types/__pycache__/video_delete_response.cpython-310.pyc,, +openai/types/__pycache__/video_download_content_params.cpython-310.pyc,, 
+openai/types/__pycache__/video_list_params.cpython-310.pyc,, +openai/types/__pycache__/video_model.cpython-310.pyc,, +openai/types/__pycache__/video_model_param.cpython-310.pyc,, +openai/types/__pycache__/video_remix_params.cpython-310.pyc,, +openai/types/__pycache__/video_seconds.cpython-310.pyc,, +openai/types/__pycache__/video_size.cpython-310.pyc,, +openai/types/__pycache__/websocket_connection_options.cpython-310.pyc,, +openai/types/audio/__init__.py,sha256=8DwArrrSRwIemWLhWLItaV3F_EgXgtVPSu4yUIf8iyM,1723 +openai/types/audio/__pycache__/__init__.cpython-310.pyc,, +openai/types/audio/__pycache__/speech_create_params.cpython-310.pyc,, +openai/types/audio/__pycache__/speech_model.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_create_params.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_create_response.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_diarized.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_diarized_segment.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_include.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_segment.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_stream_event.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_text_delta_event.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_text_done_event.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_text_segment_event.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_verbose.cpython-310.pyc,, +openai/types/audio/__pycache__/transcription_word.cpython-310.pyc,, +openai/types/audio/__pycache__/translation.cpython-310.pyc,, +openai/types/audio/__pycache__/translation_create_params.cpython-310.pyc,, +openai/types/audio/__pycache__/translation_create_response.cpython-310.pyc,, +openai/types/audio/__pycache__/translation_verbose.cpython-310.pyc,, 
+openai/types/audio/speech_create_params.py,sha256=HReviZr1BsD038PCmbw_NlhJqb-5V7IN4ezr1iPfqnw,1838 +openai/types/audio/speech_model.py,sha256=swuN1lLQxGSuYj_X_OeQuUx5abIUpEfZZqNjDES7kU0,267 +openai/types/audio/transcription.py,sha256=7q7nfUTe-GDMpicrWOugFU4bSBIyqhQkndnKCTD1X2M,2407 +openai/types/audio/transcription_create_params.py,sha256=Mxl-LoHCiEkO41fGUcsSmN35fOtNWXYDgMa-nP90DNg,6977 +openai/types/audio/transcription_create_response.py,sha256=-x20GMKDHrHzl_37fsGjpPlJC91ZNGwrD_5-AkZgnOw,459 +openai/types/audio/transcription_diarized.py,sha256=fYKkV0B5Rjcx9GC-6TfJP4TjSWUbbIdvle1Sb3DQp2I,2374 +openai/types/audio/transcription_diarized_segment.py,sha256=s63ItoQlqLqxi17vUniZagifOw8Y-nVR17-SwQUWW8Y,931 +openai/types/audio/transcription_include.py,sha256=mclUP_50njW7TG4d9m_E6zSjAFW8djPJ6ZTYub71kq0,227 +openai/types/audio/transcription_segment.py,sha256=-pPAGolwIIXUBMic-H5U7aR0u_Aq-pipSA4xTtn_viA,1153 +openai/types/audio/transcription_stream_event.py,sha256=MJNeIqgrVJekLGA3KSzE7lHK3dyhrvvhGLIwgQsZDPE,648 +openai/types/audio/transcription_text_delta_event.py,sha256=JH09I9eYFZLZKAJcSMc3-4owS9CtTpTmNajXgAlcQ7c,1551 +openai/types/audio/transcription_text_done_event.py,sha256=AtXZflObQVBwETQlVcUqN4h-SFiiod4oAH9f7UHZ39g,2341 +openai/types/audio/transcription_text_segment_event.py,sha256=amH-qLV2l1Y4d2bnZrWplsz41_LzYvmyOdHv1rlXEtE,990 +openai/types/audio/transcription_verbose.py,sha256=vI30MetsMh3yT2hTCUGdKxjHNZ0wkM2mJ-yhu2opGuM,1303 +openai/types/audio/transcription_word.py,sha256=s2aWssAgHjMOZHhiihs1m4gYWQpjBP2rkI1DE5eZBXc,367 +openai/types/audio/translation.py,sha256=Dlu9YMo0cc44NSCAtLfZnEugkM7VBA6zw2v9bfrLMh0,193 +openai/types/audio/translation_create_params.py,sha256=ejrom_64QOe47gZtrYmDAQkb65wLaZL4-iU-mKVTVq0,1572 +openai/types/audio/translation_create_response.py,sha256=x6H0yjTbZR3vd3d7LdABcn9nrMDNdeMjepcjW1oUfVc,362 +openai/types/audio/translation_verbose.py,sha256=lGB5FqkV-ne__aaGbMTFbEciJ-Sl3wBhlKmETmtROT8,615 
+openai/types/audio_model.py,sha256=U5nv4NKBd3A5k3mDw0BmK0PiSc0VQADOkirXnKsC-eo,344 +openai/types/audio_response_format.py,sha256=67QSPDpT9_yYhxFYYd15N3nukwKrHJ7f8pvVQiVOQuk,276 +openai/types/auto_file_chunking_strategy_param.py,sha256=wvFMNI7RvIPLBoGZpdRMgVa-VlQkovurGi1aypefqwg,495 +openai/types/batch.py,sha256=o8ADxSZQe7F_1VTGSC5_RDUajU03SbWvN1wPiH98dVQ,3517 +openai/types/batch_create_params.py,sha256=XDHXPpI1PFDpTr3HXYecAgYPA8XAckyBY0xbMKnb3jo,2627 +openai/types/batch_error.py,sha256=Xxl-gYm0jerpYyI-mKSSVxRMQRubkoLUiOP9U3v72EM,622 +openai/types/batch_list_params.py,sha256=X1_sfRspuIMSDyXWVh0YnJ9vJLeOOH66TrvgEHueC84,705 +openai/types/batch_request_counts.py,sha256=iIPVKk4s5FcHlfLvLYetuXb_RxGPUvCGhRGYTryszV8,479 +openai/types/batch_usage.py,sha256=myAsCyOT7xWiZ_GDznGptUHeJVYx_Mg7c1cEtBMKmEc,1260 +openai/types/beta/__init__.py,sha256=kl4wEKnYF_urBLZV6wZ6ZCTwaLhlNYSOfFR64jO-Adc,2393 +openai/types/beta/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_create_params.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_deleted.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_list_params.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_response_format_option.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_response_format_option_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_stream_event.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_function.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_function_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_option.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_choice_option_param.cpython-310.pyc,, 
+openai/types/beta/__pycache__/assistant_tool_choice_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/assistant_update_params.cpython-310.pyc,, +openai/types/beta/__pycache__/chatkit_workflow.cpython-310.pyc,, +openai/types/beta/__pycache__/code_interpreter_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/code_interpreter_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/file_search_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/file_search_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/function_tool.cpython-310.pyc,, +openai/types/beta/__pycache__/function_tool_param.cpython-310.pyc,, +openai/types/beta/__pycache__/thread.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_create_and_run_params.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_create_params.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_deleted.cpython-310.pyc,, +openai/types/beta/__pycache__/thread_update_params.cpython-310.pyc,, +openai/types/beta/assistant.py,sha256=K97cr0lg4fiZuLO6zNqIZVuUBjMFxtRtoszjyNI70DA,5394 +openai/types/beta/assistant_create_params.py,sha256=3q29vKotDs9f_oGs5HDmfBVBUYAW8dZqevw9GKaVH2g,8863 +openai/types/beta/assistant_deleted.py,sha256=bTTUl5FPHTBI5nRm7d0sGuR9VCSBDZ-IbOn9G_IpmJQ,301 +openai/types/beta/assistant_list_params.py,sha256=yW-lj6AUkG0IRZQKre0veEr9p4VMN-9YdELFMYs74Cw,1222 +openai/types/beta/assistant_response_format_option.py,sha256=yNeoAWxM-_8Sjmwqu8exqyKRFhVZIKeTypetPY55VFA,561 +openai/types/beta/assistant_response_format_option_param.py,sha256=dyPMhwRSLBZ0ltpxiD7KM-9X6BzWnbGeG-nT_3SenuQ,628 +openai/types/beta/assistant_stream_event.py,sha256=FDkbgQcmL64ExC2Q3mrPD73pYdGFcIMPt-wboshBWHg,10055 +openai/types/beta/assistant_tool.py,sha256=_0FC7Db4Ctq_0yLaKJ93zNTB5HthuJWEAHx3fadDRlw,506 +openai/types/beta/assistant_tool_choice.py,sha256=QXy2nQ2-_JLh3aU_BRZ3K17uZJUxrHC1cygqUMnr0U4,652 
+openai/types/beta/assistant_tool_choice_function.py,sha256=p5YEbTnED_kZpPfj5fMQqWSgLXAUEajsDd0LXGdlENU,269 +openai/types/beta/assistant_tool_choice_function_param.py,sha256=-O38277LhSaqOVhTp0haHP0ZnVTLpEBvcLJa5MRo7wE,355 +openai/types/beta/assistant_tool_choice_option.py,sha256=jrXMd_IYIQ1pt8Lkc-KrPd4CR3lR8sFV4m7_lpG8A4Y,362 +openai/types/beta/assistant_tool_choice_option_param.py,sha256=VcatO5Nej9e5eqfrwetG4uM1vFoewnBEcFz47IxAK2E,424 +openai/types/beta/assistant_tool_choice_param.py,sha256=vB7bLafOjDZ4Ww3GMT_CXzcpFtVUtH6nYvHXpVjg_xA,680 +openai/types/beta/assistant_tool_param.py,sha256=6DcaU3nMjurur2VkVIYcCaRAY1QLQscXXjCd0ZHHGho,501 +openai/types/beta/assistant_update_params.py,sha256=sveL8Z489CV1SemVlQzGlNPjDy6JINfDLYoGgCFdrsg,7417 +openai/types/beta/chat/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122 +openai/types/beta/chat/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/chatkit/__init__.py,sha256=K4Q3JL1OSz8tRSJyUoReRkBKsksw5QZdBy4HBvuBjZ4,2116 +openai/types/beta/chatkit/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_automatic_thread_titling.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_chatkit_configuration.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_chatkit_configuration_param.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_expires_after_param.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_file_upload.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_history.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_rate_limits.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_rate_limits_param.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_status.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chat_session_workflow_param.cpython-310.pyc,, 
+openai/types/beta/chatkit/__pycache__/chatkit_attachment.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chatkit_response_output_text.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chatkit_thread.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chatkit_thread_assistant_message_item.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chatkit_thread_item_list.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chatkit_thread_user_message_item.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/chatkit_widget_item.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/session_create_params.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/thread_delete_response.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/thread_list_items_params.cpython-310.pyc,, +openai/types/beta/chatkit/__pycache__/thread_list_params.cpython-310.pyc,, +openai/types/beta/chatkit/chat_session.py,sha256=NjVT5SQUWNroXynit10bwKsj8mntUSLouST46C9sBhM,1401 +openai/types/beta/chatkit/chat_session_automatic_thread_titling.py,sha256=R0PpWfBT5VMZp--Xxn082UjWbha7U5K9lJeSa4Cbhuk,360 +openai/types/beta/chatkit/chat_session_chatkit_configuration.py,sha256=-ailkIhSj6W2rHS3xPNc7XJ0duVG0fTmJaaVqqoE9RE,739 +openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py,sha256=4_3sCd7BIeNsy-kArfNgDO1jHjaUn703ib4_CHbsuZQ,2178 +openai/types/beta/chatkit/chat_session_expires_after_param.py,sha256=E9IcLvoz0FIIxE73sOF-WuqySbMpk5E9jvMj28_mDdo,597 +openai/types/beta/chatkit/chat_session_file_upload.py,sha256=ruKpNTM8zhXZfXgBZDYOGgRDT4Y3J4IjeMSnEwdlFTI,558 +openai/types/beta/chatkit/chat_session_history.py,sha256=XuxpwdFgpfCuw8Uc_TFB7H33683fSAfqhfIRdJoLQ6U,534 +openai/types/beta/chatkit/chat_session_rate_limits.py,sha256=irV4fZrQs6e6GbBT7rPsjsJBlheq3rz9tBsGq_fnb9k,353 +openai/types/beta/chatkit/chat_session_rate_limits_param.py,sha256=n8aeeakdT7L6liR6I5umF9BHABInNx7rL2WhmFWx0jQ,446 
+openai/types/beta/chatkit/chat_session_status.py,sha256=l-SGwIDV4EFdX13Wjl2t-EipKFyYw7mckp9gxJhydqI,243 +openai/types/beta/chatkit/chat_session_workflow_param.py,sha256=bP2hQ-PXLLC1c8gjDjOh_fVLRPOFVSj7WV2-RrT9rfE,1202 +openai/types/beta/chatkit/chatkit_attachment.py,sha256=ZQPepId3dMRd-Jrzc0gCatbsXqYoj51-3S2XbPfAOmE,665 +openai/types/beta/chatkit/chatkit_response_output_text.py,sha256=e3rNmV2eTU0msEh67TaZRvE58wduZdaeN30wXpf39I0,1882 +openai/types/beta/chatkit/chatkit_thread.py,sha256=JAqb28gFwoGHgF6QPy0brlUc1jCzV1BFoiRikKrJ-IM,1918 +openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py,sha256=va96N80NNiHFRYEArjBNYlwFilbqZXvOtlHR0Dxec5o,941 +openai/types/beta/chatkit/chatkit_thread_item_list.py,sha256=2Qv0iPKkvIANfWQpIZh7iqZYBz51tgBAYtcSh3M8eaw,4302 +openai/types/beta/chatkit/chatkit_thread_user_message_item.py,sha256=pcpYEqmODoV-1MWgQsElSS8rcEbsp9OYRLKkr9SceqA,2505 +openai/types/beta/chatkit/chatkit_widget_item.py,sha256=d6l-XTELPMSLCizd2d6gnzBnYocDkaaFPei9WWhKggk,774 +openai/types/beta/chatkit/session_create_params.py,sha256=OUWYjVEXdYa8Y7mMKjR8CwZCAKI9QMZ1aqM8N1JDcQw,1274 +openai/types/beta/chatkit/thread_delete_response.py,sha256=laKdWnyeplfi0Tos0ps-izneu2ZGEnst7FJn87BS8pU,549 +openai/types/beta/chatkit/thread_list_items_params.py,sha256=9WerQ8VLMzWFSGJDnmg82bRNZy-YT7S6OjESDN9a9vU,697 +openai/types/beta/chatkit/thread_list_params.py,sha256=IHHdXtlMoV7a19YJg8dFLYzVgSzEUnSgMW2kQc_RE9Y,812 +openai/types/beta/chatkit_workflow.py,sha256=TCG5iL_5j9Lq_EsRx7HKrUYWhz9SFIJbHrYlYkXnSAg,961 +openai/types/beta/code_interpreter_tool.py,sha256=7mgQc9OtD_ZUnZeNhoobMFcmmvtZPFCNYGB-PEnNnfs,333 +openai/types/beta/code_interpreter_tool_param.py,sha256=X6mwzFyZx1RCKEYbBCPs4kh_tZkxFxydPMK4yFNJkLs,389 +openai/types/beta/file_search_tool.py,sha256=1r_kHrJYPD8HJ2lCg_npyXvG-BSydlOyo4dpnEI3w0o,2136 +openai/types/beta/file_search_tool_param.py,sha256=LZYlCZoHFOzqQnE7y55Oq5k20R9S0ZHJrtBpGnhYr_I,2132 
+openai/types/beta/function_tool.py,sha256=oYGJfcfPpUohKw2ikgshDjOI1HXCK-5pAWyegYNezeU,397 +openai/types/beta/function_tool_param.py,sha256=hCclpGO4Re-TxiGy_QxX75g1kcN6_ElubicO6SdJ_YI,471 +openai/types/beta/realtime/__init__.py,sha256=trJb-lqh3vHHMYdohrgiU2cHwReFZyw4cXM-Xj8Dwq8,7364 +openai/types/beta/realtime/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_created_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_content.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_content_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_create_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_create_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_created_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_delete_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_delete_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_deleted_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_completed_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_input_audio_transcription_failed_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_retrieve_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_retrieve_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_truncate_event.cpython-310.pyc,, 
+openai/types/beta/realtime/__pycache__/conversation_item_truncate_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_truncated_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_with_reference.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/conversation_item_with_reference_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/error_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_append_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_clear_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_cleared_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_commit_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_committed_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_started_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/input_audio_buffer_speech_stopped_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/rate_limits_updated_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_client_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_client_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_connect_params.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_response.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_response_status.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/realtime_response_usage.cpython-310.pyc,, 
+openai/types/beta/realtime/__pycache__/realtime_server_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_transcript_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_audio_transcript_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_cancel_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_cancel_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_content_part_added_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_content_part_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_create_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_create_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_created_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_function_call_arguments_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_function_call_arguments_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_output_item_added_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_output_item_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_text_delta_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/response_text_done_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_create_params.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_create_response.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_created_event.cpython-310.pyc,, 
+openai/types/beta/realtime/__pycache__/session_update_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_update_event_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/session_updated_event.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/transcription_session.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/transcription_session_create_params.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/transcription_session_update.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/transcription_session_update_param.cpython-310.pyc,, +openai/types/beta/realtime/__pycache__/transcription_session_updated_event.cpython-310.pyc,, +openai/types/beta/realtime/conversation_created_event.py,sha256=U4-nesN8rAep2_25E2DrkXUMafQejj3NE_0llXKj5Y8,752 +openai/types/beta/realtime/conversation_item.py,sha256=eIFg9zl3qzEijcQZvCnkvVLpSZgvEdubasgxGsQuFM4,2327 +openai/types/beta/realtime/conversation_item_content.py,sha256=KWZY8EUkjAi6K_IkWVjjrNZLG3KK2vGCy2_O30CEhzY,1050 +openai/types/beta/realtime/conversation_item_content_param.py,sha256=CrGi3XKwnfJdQGs-kJaGCsn53omdJF6_je0GWnVXhjQ,972 +openai/types/beta/realtime/conversation_item_create_event.py,sha256=jYXYdmqJh_znzcAgDuCxJXo5shf-t_DwmsyFkaDVnAE,1081 +openai/types/beta/realtime/conversation_item_create_event_param.py,sha256=vxTag6TrOLu1bf46F3mUmRkl5dd1Kb6bUp65gBDVmhM,1101 +openai/types/beta/realtime/conversation_item_created_event.py,sha256=cZBm_uKk5dkQXLlbF0Aetg4NJge3Ftz9kwRu2kCI3m4,817 +openai/types/beta/realtime/conversation_item_delete_event.py,sha256=p-O6R1Ku5pxZvaxhSi4YTPqLXS1SHhdLGgJuPQyPcHY,549 +openai/types/beta/realtime/conversation_item_delete_event_param.py,sha256=a17h8Hd8MxUbXT6NQg8YpTr1ICt1ztRecpfukHw4g34,569 +openai/types/beta/realtime/conversation_item_deleted_event.py,sha256=uWHSqX5ig550romSdhtROwrdQmdeN31Oz1Vpr9IuQFI,492 
+openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=FwZHHO4ZGMKoeQ80snCh_S-7anNUQtRLOhGjb8ScGOQ,2538 +openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py,sha256=5kjLmnRJug7L5fHxSSWWbhB70jGwNaMwbdENEwz9Xek,1143 +openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=xYNSBIyERQJ4P-5YoFF1VptfPa8JnJ0sWaH6LGsPow0,1077 +openai/types/beta/realtime/conversation_item_param.py,sha256=HMB7MFR6WkztV1vMCFdIYNv8qOY4jzI2MIDtr9y8nEo,2207 +openai/types/beta/realtime/conversation_item_retrieve_event.py,sha256=5Cc7f0fM8ujwER0eIcQRwz0rmc6hdCUrAqiAvRNn9Zc,559 +openai/types/beta/realtime/conversation_item_retrieve_event_param.py,sha256=TRYaZ3btNaywRPaMOVRzK5VT7wh4taIGjbUdhkZ7gFc,579 +openai/types/beta/realtime/conversation_item_truncate_event.py,sha256=1c2_BamaTkgD26eyGZJU5xwbz7lRHupqU2HqcK0VniI,943 +openai/types/beta/realtime/conversation_item_truncate_event_param.py,sha256=hSnVOSMMtLf16nn4ISHkevYCfEsiN9kNcgxXRtHa8Kc,983 +openai/types/beta/realtime/conversation_item_truncated_event.py,sha256=K4S35U85J-UNRba9nkm-7G1ReZu8gA8Sa1z0-Vlozc0,704 +openai/types/beta/realtime/conversation_item_with_reference.py,sha256=NDMfbnG0YKLqWJskFSHRIMkN2ISs8yNRxP6d6sZshws,3288 +openai/types/beta/realtime/conversation_item_with_reference_param.py,sha256=X0iEdjijFkoGtZtp0viB8bAFqMn4fNNSvJiCZbgJ-3Q,3079 +openai/types/beta/realtime/error_event.py,sha256=goNkorKXUHKiYVsVunEsnaRa6_3dsDKVtrxXQtzZCmk,877 +openai/types/beta/realtime/input_audio_buffer_append_event.py,sha256=lTKWd_WFbtDAy6AdaCjeQYBV0dgHuVNNt_PbrtPB8tg,662 +openai/types/beta/realtime/input_audio_buffer_append_event_param.py,sha256=XmN2bE6jBRrkKGVPJdnPjJql5dqMPqwbmFnxo-z22JE,682 +openai/types/beta/realtime/input_audio_buffer_clear_event.py,sha256=7AfCQfMxZQ-UoQXF9edYKw5GcTELPcfvvJWWpuLS41c,489 +openai/types/beta/realtime/input_audio_buffer_clear_event_param.py,sha256=y-zfWqJsh1n6r2i0MgLDpnNC4g1dq3GCS66Twfkng38,499 
+openai/types/beta/realtime/input_audio_buffer_cleared_event.py,sha256=j9gpm7aGVmrUt48wqtvBMN8NOgtvqHciegjXjOnWm7A,429 +openai/types/beta/realtime/input_audio_buffer_commit_event.py,sha256=SLZR2xxRd6uO3IQL6-LuozkjROXiGyblKoHYQjwXk4I,493 +openai/types/beta/realtime/input_audio_buffer_commit_event_param.py,sha256=B8agXC-rUl-D-RijJ5MeTLgw43qVYzmf2_2oAVokhLY,503 +openai/types/beta/realtime/input_audio_buffer_committed_event.py,sha256=76XHl3ETfG5YiYce2OCUsv0wNfSiaabLzYVjGtBwux0,733 +openai/types/beta/realtime/input_audio_buffer_speech_started_event.py,sha256=NVp60RUsLFtte9Ilknmu_5lRk2dZp_1fXCgGHd4EvSM,861 +openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py,sha256=gszRuYQtAW8upIhd7CJZ7pxboDk-K7sqidjqxgf47q4,779 +openai/types/beta/realtime/rate_limits_updated_event.py,sha256=kBnf_p-49Q_LNdJsj0R1Szi8R4TGYAAJ_KifLuuyFZw,949 +openai/types/beta/realtime/realtime_client_event.py,sha256=0c48JcJH5yruF52zl0Sanm_dd2W5ZHV5GocRG0Xm6m4,1839 +openai/types/beta/realtime/realtime_client_event_param.py,sha256=xBeZ60Q-OWuZxstPQaoqE0DUTDOPOwrL8LWMmDJI2rM,1887 +openai/types/beta/realtime/realtime_connect_params.py,sha256=AvTypkFCYmDn9qMeektVqij6cqzgovr3PpgpMalJoJ4,290 +openai/types/beta/realtime/realtime_response.py,sha256=iUOItlPQv6-okCuiTsloe0LDVyJ0MUQ64ug8ZaLePnw,3567 +openai/types/beta/realtime/realtime_response_status.py,sha256=gU-59Pr_58TRfMZqFzdCloc53e1qOnU4aaHY3yURUK8,1326 +openai/types/beta/realtime/realtime_response_usage.py,sha256=6XOFjCjPWioHoICZ0Q8KXuUzktQugx6WuTz0O5UvzZg,1541 +openai/types/beta/realtime/realtime_server_event.py,sha256=-PpqZpg-DL_C_wseLMRQHWdBvxnVGRAfOF7x13Qr34E,5408 +openai/types/beta/realtime/response_audio_delta_event.py,sha256=UjbnK4u_WSNTOColZj8SmJgHnAc2H8iRXD76ZnPbz7E,742 +openai/types/beta/realtime/response_audio_done_event.py,sha256=1XEWBPh1JiOgyr6V03mRt_3sLm0YFUq5ft1AhfFlNEg,679 +openai/types/beta/realtime/response_audio_transcript_delta_event.py,sha256=HEVNQ_R2_Nyo6BvNvsliMnN__b17eVd2Jx5udRHg0Hg,773 
+openai/types/beta/realtime/response_audio_transcript_done_event.py,sha256=Cn5l4mJnKK3LeSN9qFL4LLqs1WOWg4kt1SaYThB-5c0,787 +openai/types/beta/realtime/response_cancel_event.py,sha256=EKx8IZUISJHdl-_3tCdHtz2BINQ85Tq_ocadnsEGPSk,637 +openai/types/beta/realtime/response_cancel_event_param.py,sha256=nidzBL83liHwyImiNGiz9Ad0V34EtFAQDw1utqcF6ns,630 +openai/types/beta/realtime/response_content_part_added_event.py,sha256=a8-rm1NAwX685fk7GdT6Xi0Yr-JfeAkyUr94-RoFe34,1232 +openai/types/beta/realtime/response_content_part_done_event.py,sha256=jO2TZygxPabbnEG9E1AfNP-JYJv1QtCMnCzgcZ_3n18,1190 +openai/types/beta/realtime/response_create_event.py,sha256=46i-O9wwvhr1CzHNMDzhs2SGVwHiFJDOkQfOZZRfAWo,4763 +openai/types/beta/realtime/response_create_event_param.py,sha256=IPJlTWH0HzsknpSRrFgrQ3bfxsFZVRdQ6IYEsiGSZOk,4619 +openai/types/beta/realtime/response_created_event.py,sha256=zZtHx-1YjehXxX6aNE88SFINDaKOBzpzejo6sTNjq9g,506 +openai/types/beta/realtime/response_done_event.py,sha256=_yUPoECCli89iHLtV3NQkXQOW6Lc1JlxVPFw04ziBGY,494 +openai/types/beta/realtime/response_function_call_arguments_delta_event.py,sha256=Yh2mQZDucfnTLiO8LRyG9r7zeS1sjwLcMF1JPMdTFJc,793 +openai/types/beta/realtime/response_function_call_arguments_done_event.py,sha256=kxSPK6nbNWL6pxveY7zaNGgCkCXqyBFJPVYJrw9cbOw,793 +openai/types/beta/realtime/response_output_item_added_event.py,sha256=-_BZjvAqcgv3NIz-EMhvYMxIwvcXTt68FVNp0pw09dI,713 +openai/types/beta/realtime/response_output_item_done_event.py,sha256=0ClNVMZmeIxKghlEid9VGoWiZ97wp00hIdNnev4qBD8,709 +openai/types/beta/realtime/response_text_delta_event.py,sha256=B1yyuc6iMOMoG5Wh6W5KoQNYtVD1vEm2cKqHnl2CuFQ,721 +openai/types/beta/realtime/response_text_done_event.py,sha256=mPgVG6nWxwkZ3aZOX-JkVF7CpaWP5-bvtbxFrr4fK7g,724 +openai/types/beta/realtime/session.py,sha256=RZMR4d09k0erHFefzbYwQNyw0V6M5ESEPJ0qoO90lCU,10183 +openai/types/beta/realtime/session_create_params.py,sha256=PTFBt7w7fTrz2KWZIz5GjJqLoQkyv7qEIspFscs6zy8,10251 
+openai/types/beta/realtime/session_create_response.py,sha256=HfCFE46q3IEfvLFEdU06DAg5GKIPlJjaU9DtKzKcr2U,6574 +openai/types/beta/realtime/session_created_event.py,sha256=rTElnBlE7z1htmkdmpdPN4q_dUYS6Su4BkmsqO65hUc,489 +openai/types/beta/realtime/session_update_event.py,sha256=w4SVhFjcoasgL1yyyNfykarnD-BzmxDt_0kED8pN8pw,11237 +openai/types/beta/realtime/session_update_event_param.py,sha256=Wu_4oa0R1YUTyI6_7AtOwy07qJf1YSCUZpdqO8CKkd0,10749 +openai/types/beta/realtime/session_updated_event.py,sha256=HyR-Pz3U9finVO-bUCvnmeqsANw-fceNvVqEIF6ey10,489 +openai/types/beta/realtime/transcription_session.py,sha256=Soo2LuEMJtkUD2oPJ1E23GUcoUrYBiSu_UtbLUKemfw,3184 +openai/types/beta/realtime/transcription_session_create_params.py,sha256=BVwSY41UX2njXAJpWynMJtC5XuKv6sNs7kp2Y8KSjnk,5976 +openai/types/beta/realtime/transcription_session_update.py,sha256=YMP9OB9P5FaSwaicXtYELjm4hD1gDSvKFq9YtF2sq64,6694 +openai/types/beta/realtime/transcription_session_update_param.py,sha256=b99v4yKnB2lC_cnYGiaxKnQuHB4eUW-v3eKT2UDsamk,6453 +openai/types/beta/realtime/transcription_session_updated_event.py,sha256=CKAS98QL7CuOVEWF6qGcC9qhTktdG2CPPJXbrW75GIM,833 +openai/types/beta/thread.py,sha256=bVlpfXpyA_tApIDBNAWJCgusLEiE-7xR-ptcIq1obg4,2545 +openai/types/beta/thread_create_and_run_params.py,sha256=PYD9furmj02jdHoET7O9D4QZn9DArGzR0L_YZGFGmiA,15853 +openai/types/beta/thread_create_params.py,sha256=BKzfwUsYNcGuIZluOAdqpUqFvoQ2tAkAEpGC-zD9EsA,6994 +openai/types/beta/thread_deleted.py,sha256=MaYG_jZIjSiB9h_ZBiTtpMsRSwFKkCY83ziM5GO_oUk,292 +openai/types/beta/thread_update_params.py,sha256=oPdEz_th3PNmWB9jMA3ZdkOPalaCfMEVminirac7Cas,2217 +openai/types/beta/threads/__init__.py,sha256=0WsJo0tXp08CgayozR7Tqc3b8sqzotWzvBun19CEIWc,3066 +openai/types/beta/threads/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/annotation_delta.cpython-310.pyc,, 
+openai/types/beta/threads/__pycache__/file_citation_annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/file_citation_delta_annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/file_path_annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/file_path_delta_annotation.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_content_block_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_delta_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_file_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_content_block_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_delta_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/image_url_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_content.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_content_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_content_part_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_create_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_deleted.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_delta_event.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/message_list_params.cpython-310.pyc,, 
+openai/types/beta/threads/__pycache__/message_update_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/refusal_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/refusal_delta_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/required_action_function_tool_call.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_create_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_list_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_status.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_submit_tool_outputs_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/run_update_params.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_content_block.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_content_block_param.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_delta.cpython-310.pyc,, +openai/types/beta/threads/__pycache__/text_delta_block.cpython-310.pyc,, +openai/types/beta/threads/annotation.py,sha256=Ce3Y0mSodmYRkoqyhtyIdep6WfWew6KJJgtrENOnfek,462 +openai/types/beta/threads/annotation_delta.py,sha256=iNsE-1Gn1yU0TlTHoxqKbOvPRUxWuXsF72qY_mMnWGY,510 +openai/types/beta/threads/file_citation_annotation.py,sha256=BNpkIVngBZKLi8Db-yrJP52Kz4Q3z4Zv4JPjGsyxHqo,819 +openai/types/beta/threads/file_citation_delta_annotation.py,sha256=1of3IGGzEDTclj2uYID5rS4yZeymFRuzYKmIKfr5aO0,1097 +openai/types/beta/threads/file_path_annotation.py,sha256=1bI1P-utzhQMbNWPKFFCM5ELMH82qrJ1WsqErmrjhB0,681 +openai/types/beta/threads/file_path_delta_annotation.py,sha256=jhEbT7AinewxqTDiBdriy9KIc085VV2HjUpsVUwFHIM,884 +openai/types/beta/threads/image_file.py,sha256=QVXLiplb-CigZqdMZtXlmebXKt6tF74kI-3vHxe_qUE,707 +openai/types/beta/threads/image_file_content_block.py,sha256=f2QUOVbfBCakboLwIf416C2LOR-Tz4KT5AZf6vbE32A,494 
+openai/types/beta/threads/image_file_content_block_param.py,sha256=ErMMN4cM5D1fac1kBsrKtDRc3mUFD64qnaFhLPaAIBc,576 +openai/types/beta/threads/image_file_delta.py,sha256=nUJoSuP-3YyqqwBsmPJ0AqiQydz2FymVDCXQVkNYwOk,734 +openai/types/beta/threads/image_file_delta_block.py,sha256=G6UI6p4Cl0HLph3tXHU2uxA5PsEdfPyVN93nrOzFDw8,623 +openai/types/beta/threads/image_file_param.py,sha256=BaKD31JPxQ5CjRfZ_0RcOG3lDTZeW_k85XCvwyctD54,717 +openai/types/beta/threads/image_url.py,sha256=EzEK-CYoO0YyqFmejIPu7pMfTEgMmp5NFscsRd2pCos,592 +openai/types/beta/threads/image_url_content_block.py,sha256=AhYYI0pe55gEKM8FQT7jr7Z02NueszB7pvzLV_FuXdA,429 +openai/types/beta/threads/image_url_content_block_param.py,sha256=XBPXiRqqLAkAnK5H6zKiZ9v5eQ4IOcEctsdQn0P6ikI,511 +openai/types/beta/threads/image_url_delta.py,sha256=MXCp-OmuNT4njbWA9DWAbocP7pD3VpdcUy2wgeOjwm4,582 +openai/types/beta/threads/image_url_delta_block.py,sha256=7ufBlfrXXY8ACIi8M_u-ytH-4ZXew_ytKbwf__z-cLg,548 +openai/types/beta/threads/image_url_param.py,sha256=VRLaxZf-wxnvAOcKGwyF_o6KEvwktBfE3B6KmYE5LZo,602 +openai/types/beta/threads/message.py,sha256=0Xt_yrqhmLhVoDXRbW0YeXJx9BtlLjX8MNPG-B-ySyg,3613 +openai/types/beta/threads/message_content.py,sha256=b8IC_EG28hcXk28z09EABfJwPkYZ7U-lTp_9ykdoxvU,630 +openai/types/beta/threads/message_content_delta.py,sha256=o4Edlx9BtdH2Z4OMwGWWXex8wiijknNRihJ-wu8PDUQ,615 +openai/types/beta/threads/message_content_part_param.py,sha256=RXrnoDP2-UMQHoR2jJvaT3JHrCeffLi6WzXzH05cDGI,550 +openai/types/beta/threads/message_create_params.py,sha256=7fXlNyqy7tzuLgMsCYfJegL2sZcjKwYNLihwteODyg0,2083 +openai/types/beta/threads/message_deleted.py,sha256=DNnrSfGZ3kWEazmo4mVTdLhiKlIHxs-D8Ef5sNdHY1o,303 +openai/types/beta/threads/message_delta.py,sha256=1lh0FTbyqpcLVL8XqwTiAkZy03teJYIAYm_Zj8dbnY8,647 +openai/types/beta/threads/message_delta_event.py,sha256=Rg7g-1taxc2vTowD1J8ykXXRBeQJeTmTxxkZ8e95r1U,682 +openai/types/beta/threads/message_list_params.py,sha256=iuwzDccnViooUxHlq-WoE1FEJArNy5-zrYCoaNgVS8k,1296 
+openai/types/beta/threads/message_update_params.py,sha256=XNCSLfRkk531F8mNbUB9bRYcCzJfW8NiFQ9c0Aq75Dk,757 +openai/types/beta/threads/refusal_content_block.py,sha256=C5ORJo34aPdqi-7rviVDUnzi6nneUersczpNM7KPDds,369 +openai/types/beta/threads/refusal_delta_block.py,sha256=F2EWI_H7aUpySDO2xhZTMkICMj1aaM-XwXMzsx7Klhg,481 +openai/types/beta/threads/required_action_function_tool_call.py,sha256=1RWejiSE3wAM7bXWB5q3H_bVkRX2SKXwQ09SEJHOM98,953 +openai/types/beta/threads/run.py,sha256=MLn7qaAPJLijjQ3D-M0ftrbRD4WEQdzDMHC0XPSJcpU,9141 +openai/types/beta/threads/run_create_params.py,sha256=WdjVbHlKhCh2g7uIh-_9YyY8t29UKHk0hd_LHSTKKo8,10956 +openai/types/beta/threads/run_list_params.py,sha256=TgepSLrupUUtuQV2kbVcoGH1YA0FVUX9ESkszKuwyHY,1210 +openai/types/beta/threads/run_status.py,sha256=OU1hzoyYXaRJ3lupX4YcZ-HZkTpctNE4tzAcp6X8Q9U,351 +openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=cKiyD374BsZN_Oih5o5n5gOf_DYsxErVrbgxveNhmPI,1643 +openai/types/beta/threads/run_update_params.py,sha256=sVjkl6ayjU75Tk8t69r6xgIg80OlTikyRdS0sa2Gavg,749 +openai/types/beta/threads/runs/__init__.py,sha256=mg_roY9yL1bClJ8isizkQgHOAkN17iSdVr2m65iyBrs,1653 +openai/types/beta/threads/runs/__pycache__/__init__.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_logs.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_output_image.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/code_interpreter_tool_call_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/file_search_tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/file_search_tool_call_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/function_tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/function_tool_call_delta.cpython-310.pyc,, 
+openai/types/beta/threads/runs/__pycache__/message_creation_step_details.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step_delta_event.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step_delta_message_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/run_step_include.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/step_list_params.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/step_retrieve_params.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_call.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_call_delta.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_call_delta_object.cpython-310.pyc,, +openai/types/beta/threads/runs/__pycache__/tool_calls_step_details.cpython-310.pyc,, +openai/types/beta/threads/runs/code_interpreter_logs.py,sha256=UhfSrLKN3NSIOTYGCZhJCv1s_d_8_gTdppFIbBPdgSk,564 +openai/types/beta/threads/runs/code_interpreter_output_image.py,sha256=8o99k0ZHMHpqH0taXkOkYR9WaDUpCN-G0Ifd5XsJpb8,613 +openai/types/beta/threads/runs/code_interpreter_tool_call.py,sha256=u8htH0HfF8Yxc4cBOr6QgBSS1Q465rKz5z0L0bYBz-s,2029 +openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py,sha256=qGBFdfYB0GUWNJpfvh_x387zCUxLrkQtSSOL9EV4K0g,1616 +openai/types/beta/threads/runs/file_search_tool_call.py,sha256=6R27ipmR0J4qc8qjBak00Leov8L_zWLNP6tplXLcfgY,2127 +openai/types/beta/threads/runs/file_search_tool_call_delta.py,sha256=Gx8c7GSgGYuOvGadcAr3ZIspEFMZS3e2OY7vBo_MYnM,655 +openai/types/beta/threads/runs/function_tool_call.py,sha256=V0-x41j1lAY_JBZ0jzd7olIY8HN-IPzzLz_oYTIgb30,979 +openai/types/beta/threads/runs/function_tool_call_delta.py,sha256=DxCHC0yHmryCfkwon1z5LMV-hGaBwacMoP74Pzwed0s,1135 
+openai/types/beta/threads/runs/message_creation_step_details.py,sha256=Bfeq7HnLcQjHer0zen013YuU087_ZEitbEurEQS6xds,566 +openai/types/beta/threads/runs/run_step.py,sha256=oU0Rj1XfidZWiWZnY_koSBI56vnoRciMEDQC-sMLeLw,3882 +openai/types/beta/threads/runs/run_step_delta.py,sha256=YpcPvXnwBvlp6tlMhcWGGjCgNsKfiOJVzGhs-tyKe2U,741 +openai/types/beta/threads/runs/run_step_delta_event.py,sha256=f21Eb0e9Syf938ehZM2lh_tzy_T7CVpJTIOQoW35jfs,690 +openai/types/beta/threads/runs/run_step_delta_message_delta.py,sha256=JCbaG1UIrVT1mDtjgdVw4vD68dQkzst5eSjuTqdX_vE,624 +openai/types/beta/threads/runs/run_step_include.py,sha256=u-9Cw1hruRiWr70f_hw4XG0w1cwOAYfRJYKva2dEacs,264 +openai/types/beta/threads/runs/step_list_params.py,sha256=zorF5juogCzLMsZLjzMZTs_iIBcPj9WUur5HcrXuH8M,1752 +openai/types/beta/threads/runs/step_retrieve_params.py,sha256=aJ7l8RDJLPyEmqjfO4XsTV54VZOOqyb_gKSUvqp33ZI,815 +openai/types/beta/threads/runs/tool_call.py,sha256=1rwq4IbLgjQAQ-ORXYkNpmJyi9SREDnqA57nJbj_NiU,537 +openai/types/beta/threads/runs/tool_call_delta.py,sha256=t5wF8ndW3z99lHF981FL-IN5xXBS9p7eonH9bxvKu_c,600 +openai/types/beta/threads/runs/tool_call_delta_object.py,sha256=GsemUuIbB-0arodq5YBBqJjH4LtrGWXi8Fql4-v3f9c,652 +openai/types/beta/threads/runs/tool_calls_step_details.py,sha256=gpNk7AUCm3ND1z2O2ETSZiXqXqkhHpZ2GzxzXAuQXTs,611 +openai/types/beta/threads/text.py,sha256=9gjmDCqoptnxQ8Jhym87pECyd6m1lB3daCxKNzSFp4Y,319 +openai/types/beta/threads/text_content_block.py,sha256=sE7JQpaQDPFfnDRegY8rOqem1vhaL4GOologyT8FenU,374 +openai/types/beta/threads/text_content_block_param.py,sha256=PQ8yKhUp2REWA5opa-83JCYmCCjppkt72sMX57Ys_Ms,462 +openai/types/beta/threads/text_delta.py,sha256=2EFeQCkg_cc8nYEJ6BtYAA3_TqgMTbmEXoMvLjzaB34,389 +openai/types/beta/threads/text_delta_block.py,sha256=cyGoOBssDwxWn0YDKx8p665-v4AB9M3TWZUfgXgT_MM,503 +openai/types/chat/__init__.py,sha256=wyA0EWb0utj19dX0tCeGh4Jg5GrO3TGjmfQkR9HVxxE,6102 +openai/types/chat/__pycache__/__init__.cpython-310.pyc,, 
+openai/types/chat/__pycache__/chat_completion.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_allowed_tool_choice_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_allowed_tools_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_assistant_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_audio.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_audio_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_chunk.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_image.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_image_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_text.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_content_part_text_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_custom_tool_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_deleted.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_developer_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_function_call_option_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_function_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_function_tool.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_function_tool_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_custom_tool_call.cpython-310.pyc,, 
+openai/types/chat/__pycache__/chat_completion_message_custom_tool_call_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_function_tool_call.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_function_tool_call_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_tool_call.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_tool_call_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_message_tool_call_union_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_modality.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_named_tool_choice_custom_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_named_tool_choice_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_prediction_content_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_role.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_store_message.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_stream_options_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_token_logprob.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_choice_option_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_tool_union_param.cpython-310.pyc,, +openai/types/chat/__pycache__/chat_completion_user_message_param.cpython-310.pyc,, +openai/types/chat/__pycache__/completion_create_params.cpython-310.pyc,, 
+openai/types/chat/__pycache__/completion_list_params.cpython-310.pyc,, +openai/types/chat/__pycache__/completion_update_params.cpython-310.pyc,, +openai/types/chat/__pycache__/parsed_chat_completion.cpython-310.pyc,, +openai/types/chat/__pycache__/parsed_function_tool_call.cpython-310.pyc,, +openai/types/chat/chat_completion.py,sha256=rPTJBRPwYAvISXFRhupNZjBZtr6UEHRgIWBJkgJQj3o,3650 +openai/types/chat/chat_completion_allowed_tool_choice_param.py,sha256=iBicIuMGQ8vj-LNdat4huVSoalJuWfG3d4qzFyomOK8,713 +openai/types/chat/chat_completion_allowed_tools_param.py,sha256=Ya6LVup7cF69D0_vs-Xfk74L40i6_mz_0JSxVNxPpF0,1087 +openai/types/chat/chat_completion_assistant_message_param.py,sha256=TxOgyqhTYK5Q5pusSKqgEE1qqjkprbUZb9zuzHBASI0,2797 +openai/types/chat/chat_completion_audio.py,sha256=wOZxqzRU4G8TEm5e1syagWtILAEvgIY_Zyo-7MEktGY,851 +openai/types/chat/chat_completion_audio_param.py,sha256=0fxKAeYxliX8N7wTsJw895qYhlwU9TPlbHQ3NYFMs6s,1023 +openai/types/chat/chat_completion_chunk.py,sha256=rq8dljAql3jIRC10Qc9h0kBuKhb_Ju7DXBw6CWM3onA,6498 +openai/types/chat/chat_completion_content_part_image.py,sha256=9OD-MwO3od5471z5Il_w6EEunuuOAkKQQo25438KKyU,840 +openai/types/chat/chat_completion_content_part_image_param.py,sha256=U_HLJUD9WlpBuoX6Jg3YmS6MXdQG4ZWAbSonTNYLidM,884 +openai/types/chat/chat_completion_content_part_input_audio_param.py,sha256=0m6GBd4EPLddC29tkmB6kNfYNCqIyqzqi7FoNflc-gI,790 +openai/types/chat/chat_completion_content_part_param.py,sha256=J5ZhUW-SZvuEsKPGUytYLuEUdKgcdfQ68j5IJi6vvVk,1373 +openai/types/chat/chat_completion_content_part_refusal_param.py,sha256=TV1vu-IgrvKa5IBlPSIdBxUaW8g1zDhMOOBOEmhU2w0,467 +openai/types/chat/chat_completion_content_part_text.py,sha256=qgeE0bvOzD_BE4BdvEuFjBj1kWF74RhYQD7eSk7VCo8,468 +openai/types/chat/chat_completion_content_part_text_param.py,sha256=OR0mct4BbfJnDw6PAShySI8CsDxKBtr3wMwowJph40w,534 +openai/types/chat/chat_completion_custom_tool_param.py,sha256=5IU-XnMLDcCHleEjPebg6tNiY9gXvrKaH_hyQhkW-bk,1867 
+openai/types/chat/chat_completion_deleted.py,sha256=O7oRuPI6YDa_h7uKnEubsjtw8raTcyVmVk95hoDfo74,470 +openai/types/chat/chat_completion_developer_message_param.py,sha256=XkL5kaXoSeAlvBMr7etkm1Ony6Z26gNakwfS3ch2Hq8,1049 +openai/types/chat/chat_completion_function_call_option_param.py,sha256=_Lii1uFcSbXO0blmJn3VHkYXuXKO2cRT-Z-JikD2mrA,489 +openai/types/chat/chat_completion_function_message_param.py,sha256=jIaZbBHHbt4v4xHCIyvYtYLst_X4jOznRjYNcTf0MF0,591 +openai/types/chat/chat_completion_function_tool.py,sha256=IGZSLdIbxJFgF-P0DTZeEanGFcf5iUdj5sAYd-6EbRI,513 +openai/types/chat/chat_completion_function_tool_param.py,sha256=085qkwOGKX3M-Laf66N3KBfalXbUQb3lp76wWRi68B8,587 +openai/types/chat/chat_completion_message.py,sha256=xtsA62BEMKI-06xRbiZo-ljpyq0FtHEsgr8BXJ1AqsM,2829 +openai/types/chat/chat_completion_message_custom_tool_call.py,sha256=dGX_nKX-qj4Yup44mov1rFPGPxhfarHf17wGHE4jB4g,750 +openai/types/chat/chat_completion_message_custom_tool_call_param.py,sha256=A2d2BTzl1-XrXhWmLyAPdqzwzQwTLfaA_M9ObIpIJeE,859 +openai/types/chat/chat_completion_message_function_tool_call.py,sha256=2kd0tKE_NUfvsR4nep1yap7D0TRzzD9yuSvn5wcyouo,1022 +openai/types/chat/chat_completion_message_function_tool_call_param.py,sha256=6FjTlBTsoFFBGcF2fFs6Uq3CoAnvY-9Jvs4dxHlepqY,1131 +openai/types/chat/chat_completion_message_param.py,sha256=aLrz_cX_CYymFdW9cMIPZpv0Z4zM50RECV3SH6QNZsc,1019 +openai/types/chat/chat_completion_message_tool_call.py,sha256=aWpKcV6NZZfx_-aGEwPz99IDWNCdRuwoYpUChs0Uvvc,738 +openai/types/chat/chat_completion_message_tool_call_param.py,sha256=rE_TbdN3N6JGzHecykgdFHZgI66p2lsl0loPpz5TxW0,458 +openai/types/chat/chat_completion_message_tool_call_union_param.py,sha256=L8IoSHXgIFxYyHSfXQJNN7FJlp31ez8X4l5eSPKGmYM,602 +openai/types/chat/chat_completion_modality.py,sha256=8Ga0kruwJc43WD2OIqNudn7KrVRTPDQaalVkh_8bp9I,236 +openai/types/chat/chat_completion_named_tool_choice_custom_param.py,sha256=yxPaDdBK71bQ4vHu-a4nSbvNLdxuOoT8utOJcCK9Ja4,680 
+openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=z8G5YhuzuTQwUGmRWu2jK5BDeJW5cuY14rpOa9DTJ6c,671 +openai/types/chat/chat_completion_prediction_content_param.py,sha256=7jK3nMFFPbxLx6-F6Y7u2kBygDm4IQbtEej5BeQ5KBg,988 +openai/types/chat/chat_completion_reasoning_effort.py,sha256=9sAGlM21dgRNOQRSsL_znZf9ruXcmvVriWeex0fRgMk,235 +openai/types/chat/chat_completion_role.py,sha256=LW6-tqXaqpD7H53PiSXrjvIo6g4RfHhWityDm6Nfvig,275 +openai/types/chat/chat_completion_store_message.py,sha256=JUcit8AUnUzrEJIokS15nssCZ_7g-dcEeXE9Fejb6Sg,977 +openai/types/chat/chat_completion_stream_options_param.py,sha256=pwt3pqOZNIzmeESTT2hzouRCNJ5cQHRZ1ZQ1as2vzSg,1397 +openai/types/chat/chat_completion_system_message_param.py,sha256=14_Nq02KOYn6bd9BYJ1pzUPcYG8lOZCuDFNcyrfiEZw,1024 +openai/types/chat/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769 +openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=wPIjU-eeybPjRFr28mx8Njp2OCrKw3Xpu0231z4Kz1A,758 +openai/types/chat/chat_completion_tool_message_param.py,sha256=5K7jfKpwTuKNi1PTFabq_LHH-7wun8CUsLDh90U8zQE,730 +openai/types/chat/chat_completion_tool_param.py,sha256=5hFt0Izat_o50JMJzspCYeB0gubilRDB3a6yIfGHoN8,431 +openai/types/chat/chat_completion_tool_union_param.py,sha256=smpIoekwuuXKQx9jRRB2cqc3L7_fmN5lB4IIJHlKhys,504 +openai/types/chat/chat_completion_user_message_param.py,sha256=znqcR_CPZX8YxQpFq1xNQISVR3dM8hj5rjN33b00KHo,901 +openai/types/chat/completion_create_params.py,sha256=67j_HV9PRg6d5MQetiHAyivSozbdnUX92hLVRuvAKfY,18433 +openai/types/chat/completion_list_params.py,sha256=jOAiZ6vYSrxyD-3qzIXvXofJkejl6bet9_yNsC9p5ws,1154 +openai/types/chat/completion_update_params.py,sha256=VRDF28qoonjrveHhw8BT4Yo_NlLsV2Qzd_KUUQ6AEG8,742 +openai/types/chat/completions/__init__.py,sha256=nmKlohYbZmr7Pzv1qCDMSDbthcH6ySPFIgvXpHZtxK8,195 +openai/types/chat/completions/__pycache__/__init__.cpython-310.pyc,, +openai/types/chat/completions/__pycache__/message_list_params.cpython-310.pyc,, 
+openai/types/chat/completions/message_list_params.py,sha256=IArlye40xGlMVIDHxsK9RX_5usPL71wXPMgdwI7_wYU,583 +openai/types/chat/parsed_chat_completion.py,sha256=KwcwCtj0yexl6gB7yuOnyETRW-uUvNRYbVzPMkwCe5Q,1437 +openai/types/chat/parsed_function_tool_call.py,sha256=JDWYo1XhTDQ8CxssbgjpzBhUw8jeXAmEd5Tr_CqFrVA,945 +openai/types/chat_model.py,sha256=yFvzwm6VJXCn6jN21FS-utN6bcBBzRIpKYk1VTP8sdo,177 +openai/types/completion.py,sha256=mwIFVtTYVKOmvIQJz6M6jQS1r48_rvbVvOztDp0C9Wo,1347 +openai/types/completion_choice.py,sha256=PUk77T3Cp34UJSXoMfSzTKGWDK0rQQwq84X_PSlOUJo,965 +openai/types/completion_create_params.py,sha256=UqgYjUpYbQYPdYETVxhkwgbGRKTQCBoyeSFtrB8iuAo,7652 +openai/types/completion_usage.py,sha256=siq8f0jlCP3GYFHQr8Zzflf-BYxOggE_OjtsGs9ur4A,1895 +openai/types/container_create_params.py,sha256=AhtgxFOFr8vIayvK8pTJq0G9j_Mgnze1UlhIGb4P6ik,1015 +openai/types/container_create_response.py,sha256=4yCPrrUA9tIvgU64kNwVEtLrSBGwLYee1Uo1HYCbxis,1589 +openai/types/container_list_params.py,sha256=7RiUMBOEJj9QH9LYtPiwUrIufx8czF6kk2JcfO_LP_s,893 +openai/types/container_list_response.py,sha256=jnoI1Fyem9m8D7eVhDAhir1R6P3kWOO8SfhJmxYrHAA,1585 +openai/types/container_retrieve_response.py,sha256=wFYvtDPJzAxCZNAZFo82WOX7M6NEirorHJUH38i-ugE,1593 +openai/types/containers/__init__.py,sha256=SCdMa4GNxw-I23CwW03iVOoHRfDybyKEMmpDkdVuUcI,480 +openai/types/containers/__pycache__/__init__.cpython-310.pyc,, +openai/types/containers/__pycache__/file_create_params.cpython-310.pyc,, +openai/types/containers/__pycache__/file_create_response.cpython-310.pyc,, +openai/types/containers/__pycache__/file_list_params.cpython-310.pyc,, +openai/types/containers/__pycache__/file_list_response.cpython-310.pyc,, +openai/types/containers/__pycache__/file_retrieve_response.cpython-310.pyc,, +openai/types/containers/file_create_params.py,sha256=KXoZNG4DpiD7NDeQixdKJsuOv-iCZAlSN4sz7AQm49k,412 +openai/types/containers/file_create_response.py,sha256=Dh1OWf86XNMfmvVwfRGezfihdDuuAcdiQxT_3iefBzw,722 
+openai/types/containers/file_list_params.py,sha256=9bU7uKeXPk7adFzwvKHFitFOV4phnIbbfFx5u6n1OFY,883 +openai/types/containers/file_list_response.py,sha256=xwvdMIUafkHSXJGQT1_mxt6T_8nJo-isp9M_5YTq-J8,718 +openai/types/containers/file_retrieve_response.py,sha256=wGPU9o5SKkg8s4aUJXhwC38u8KfTFKmIUk1ItUdYxJg,726 +openai/types/containers/files/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122 +openai/types/containers/files/__pycache__/__init__.cpython-310.pyc,, +openai/types/conversations/__init__.py,sha256=N7GRumNq1KeGR4X9STSKWLM1axUntyaMI_OwPihZmjI,1854 +openai/types/conversations/__pycache__/__init__.cpython-310.pyc,, +openai/types/conversations/__pycache__/computer_screenshot_content.cpython-310.pyc,, +openai/types/conversations/__pycache__/conversation.cpython-310.pyc,, +openai/types/conversations/__pycache__/conversation_create_params.cpython-310.pyc,, +openai/types/conversations/__pycache__/conversation_deleted_resource.cpython-310.pyc,, +openai/types/conversations/__pycache__/conversation_item.cpython-310.pyc,, +openai/types/conversations/__pycache__/conversation_item_list.cpython-310.pyc,, +openai/types/conversations/__pycache__/conversation_update_params.cpython-310.pyc,, +openai/types/conversations/__pycache__/input_file_content.cpython-310.pyc,, +openai/types/conversations/__pycache__/input_file_content_param.cpython-310.pyc,, +openai/types/conversations/__pycache__/input_image_content.cpython-310.pyc,, +openai/types/conversations/__pycache__/input_image_content_param.cpython-310.pyc,, +openai/types/conversations/__pycache__/input_text_content.cpython-310.pyc,, +openai/types/conversations/__pycache__/input_text_content_param.cpython-310.pyc,, +openai/types/conversations/__pycache__/item_create_params.cpython-310.pyc,, +openai/types/conversations/__pycache__/item_list_params.cpython-310.pyc,, +openai/types/conversations/__pycache__/item_retrieve_params.cpython-310.pyc,, +openai/types/conversations/__pycache__/message.cpython-310.pyc,, 
+openai/types/conversations/__pycache__/output_text_content.cpython-310.pyc,, +openai/types/conversations/__pycache__/output_text_content_param.cpython-310.pyc,, +openai/types/conversations/__pycache__/refusal_content.cpython-310.pyc,, +openai/types/conversations/__pycache__/refusal_content_param.cpython-310.pyc,, +openai/types/conversations/__pycache__/summary_text_content.cpython-310.pyc,, +openai/types/conversations/__pycache__/text_content.cpython-310.pyc,, +openai/types/conversations/computer_screenshot_content.py,sha256=Q-YXldRA9F_NiDRpDEu7w8IDI86HvUCLc9EDH9ElS-c,671 +openai/types/conversations/conversation.py,sha256=BVpec4hLHle_8iRf6v5y4CPYHtMhEntP0m8PDG_5GSY,886 +openai/types/conversations/conversation_create_params.py,sha256=dtgIXlZj1yKP3oJUYdFCb2MKIk6BH8e4QbKIPPGjHf8,976 +openai/types/conversations/conversation_deleted_resource.py,sha256=HagMTsOrDL7QYQSeZqMbBMfRzhWAgnrxtinGT5uhog4,326 +openai/types/conversations/conversation_item.py,sha256=BfsUtqxwdYKTyi2eL-gpSgHiE1iduphQbxAv7gNoMQc,7672 +openai/types/conversations/conversation_item_list.py,sha256=bFXSZFmB1H9-IwjDRTCGtszxt57B3RAbHGZaL08gcYA,708 +openai/types/conversations/conversation_update_params.py,sha256=YMyThjw2ObnqY-dhI4iy2pqf0cZW7rNV0TcxpBMs1bs,746 +openai/types/conversations/input_file_content.py,sha256=xxG8_PMhnjH1F6jXs6vZyj_T1HdO--48fTYFrvWCPzk,219 +openai/types/conversations/input_file_content_param.py,sha256=ATFOU1VRdw8SDRvwdC1KEamfAMna-mIfpER5bLpGIeg,244 +openai/types/conversations/input_image_content.py,sha256=LKKWx1y5Gi0nu34a8CFbDUaXUWQACeQ80lwJtukOx3U,224 +openai/types/conversations/input_image_content_param.py,sha256=AceRCBW-WuXG5rI4uDF2w0n_eaa8DzpCmbdWm3ofVMg,248 +openai/types/conversations/input_text_content.py,sha256=G5L4ln3tkWqSzaZlAkFuzkFOpjYqPVnE3QyXafiA6YU,219 +openai/types/conversations/input_text_content_param.py,sha256=HPl92LQHoA3_2azNJcVF1FD6OTJY200uwbCodF7_xPg,244 +openai/types/conversations/item_create_params.py,sha256=TRAsvDuMBjLeL5DzqC-WyqmorZTnu4qZRt9eE13SJ8E,874 
+openai/types/conversations/item_list_params.py,sha256=nMzeK_XkVTWsa5pMQDGDuRPfGwiXFBDcdZ4NYwYV7H4,1896 +openai/types/conversations/item_retrieve_params.py,sha256=lHK-Sqbd7DXWQKuXGRBUvu_a7LxYNAT_tBQqLP-OC5A,690 +openai/types/conversations/message.py,sha256=Zbg75g2Nduq2_tbFvRbWe-SBmtwo6RSuqXaXaWRgCz4,2118 +openai/types/conversations/output_text_content.py,sha256=bFDVfODBGMwRLcKeo0OZzZumZdZwHzHkG1B_Bw43vWA,224 +openai/types/conversations/output_text_content_param.py,sha256=8NlazI-VuJ9DgQ-ZGt9xJ8su2-CZ1mb_ebI9O19YC7Q,248 +openai/types/conversations/refusal_content.py,sha256=ThoHeemlqaKlUf7oVYOTUwnHuqph-4RXS4Ud_kGbGg0,227 +openai/types/conversations/refusal_content_param.py,sha256=hWb2AoU0oTKCNLRZs5kzxY2Uk7HkeHhEy5leL29Uy64,254 +openai/types/conversations/summary_text_content.py,sha256=TuGepAPiMlauu9CdEwkQdkLwErBjx6kNXlIG-CSb-4g,447 +openai/types/conversations/text_content.py,sha256=eya3kB6nXl0KEGlvpH_LlE8CIPzD5GVg5r851-oWR0g,286 +openai/types/create_embedding_response.py,sha256=S_HDPpkr_2us12j1M8NsXTrTg6RJT2rgI3zAsZpMSZg,848 +openai/types/embedding.py,sha256=PDZAZRp7mzlvz5R2FMMf50vRIphHNCgyST2gbo2NdA4,711 +openai/types/embedding_create_params.py,sha256=asahWWNcMvXGDfbTMz4uDy7DU9g6OJ9wowqZByghzw8,2039 +openai/types/embedding_model.py,sha256=0dDL87len4vZ4DR6eCp7JZJCJpgwWphRmJhMK3Se8f4,281 +openai/types/eval_create_params.py,sha256=QW52PWt7EhyNdYJ976ZpZiAolzoFdfa7zoXPUZWj4lc,8239 +openai/types/eval_create_response.py,sha256=MRZlHRJrtyGS5JN3GLR6UzjTa3Fjt0v-cGFAaGD4wHE,4468 +openai/types/eval_custom_data_source_config.py,sha256=XmGu1MzqE9poFFH65qMpw0W_C89DJdP8cwgpTvNYuPU,876 +openai/types/eval_delete_response.py,sha256=iCMGN0JG5kFIYNPSCOMSWlTu0FDkd2lbAw1VLO73-bQ,245 +openai/types/eval_list_params.py,sha256=WmIJa3O9wyuDKXXwE3tSnQv1XOTe1hngttSvvhbtf28,754 +openai/types/eval_list_response.py,sha256=H35sv5yCN_ROUNb0pBO1RkXHC8kEQMPxEtpMVhJ5MNg,4464 +openai/types/eval_retrieve_response.py,sha256=tOpjCP4ww5XYZrniUh9CphrWaltfTKhTdAXPiEhz3Ts,4472 
+openai/types/eval_stored_completions_data_source_config.py,sha256=OGMNipFZes_EaeTi5ediozfUHAeLoTZTISIQRGaQDCI,1137 +openai/types/eval_update_params.py,sha256=Wooz-3SDznbC3ihrhOs-10y9cxpTKGQgobDLfZ-23c0,757 +openai/types/eval_update_response.py,sha256=ZH82tSoBv64c_4VUXKOcGBLJQ6xSersZz8gnGzbvjw8,4468 +openai/types/evals/__init__.py,sha256=wiXRqdkT-SkjE0Sgv6MixeECZjF0xaoCPdSGFEh0rEs,1193 +openai/types/evals/__pycache__/__init__.cpython-310.pyc,, +openai/types/evals/__pycache__/create_eval_completions_run_data_source.cpython-310.pyc,, +openai/types/evals/__pycache__/create_eval_completions_run_data_source_param.cpython-310.pyc,, +openai/types/evals/__pycache__/create_eval_jsonl_run_data_source.cpython-310.pyc,, +openai/types/evals/__pycache__/create_eval_jsonl_run_data_source_param.cpython-310.pyc,, +openai/types/evals/__pycache__/eval_api_error.cpython-310.pyc,, +openai/types/evals/__pycache__/run_cancel_response.cpython-310.pyc,, +openai/types/evals/__pycache__/run_create_params.cpython-310.pyc,, +openai/types/evals/__pycache__/run_create_response.cpython-310.pyc,, +openai/types/evals/__pycache__/run_delete_response.cpython-310.pyc,, +openai/types/evals/__pycache__/run_list_params.cpython-310.pyc,, +openai/types/evals/__pycache__/run_list_response.cpython-310.pyc,, +openai/types/evals/__pycache__/run_retrieve_response.cpython-310.pyc,, +openai/types/evals/create_eval_completions_run_data_source.py,sha256=91mh923L-jJ2AyqzA3dh87hJR0KPDvaKaKr3OuH-4yY,9127 +openai/types/evals/create_eval_completions_run_data_source_param.py,sha256=Qa7TlKjpsJlUkVsxuI8q4pyBgyI-XWQKvQVqOnUIo6c,9210 +openai/types/evals/create_eval_jsonl_run_data_source.py,sha256=4BsR_n4iiFNoQ2-_Y8X0O2BONcSqCNY4Vav4RQPHX78,1323 +openai/types/evals/create_eval_jsonl_run_data_source_param.py,sha256=n0l8GC_RE89kcT4-WUE8tCpoOYbaW18b8XEgXQULqcc,1389 +openai/types/evals/eval_api_error.py,sha256=mBaebr3g8M0-mHtZQAg0QuCj3TDeXHeGLCBsHI5A7mA,339 
+openai/types/evals/run_cancel_response.py,sha256=jF_JPZITqR3277nk4bh7ZpJvhZT5-LiL5mWir7YFsT8,15905 +openai/types/evals/run_create_params.py,sha256=caeqpTzX8grnYQ-JNa6iNk3EdosMA63PZT8MPXQbpkY,14849 +openai/types/evals/run_create_response.py,sha256=zh8DTmxlgUFEnUCfmITZuJFevYWSV4bq3GK8VA0bAj4,15905 +openai/types/evals/run_delete_response.py,sha256=WSQpOlZu53eWBCXSRGkthFn_Yz5rDCcSomqoa4HpUrk,323 +openai/types/evals/run_list_params.py,sha256=vgbJMYybzCep7e9rxUVHlWy_o4GNy4tJyGTwNu4n4ys,758 +openai/types/evals/run_list_response.py,sha256=VOCF7XSR3bloPGQR1froYWqmcJpEPb6vjmfzkQsyJ2g,15901 +openai/types/evals/run_retrieve_response.py,sha256=ImzKvC_ZMA_YaesVYfuHm1TZnWQ6-3OkpWZIJFX7qug,15909 +openai/types/evals/runs/__init__.py,sha256=sltNV1VwseIVr09gQ5E4IKbRKJuWJSLY1xUvAuC97Ec,393 +openai/types/evals/runs/__pycache__/__init__.cpython-310.pyc,, +openai/types/evals/runs/__pycache__/output_item_list_params.cpython-310.pyc,, +openai/types/evals/runs/__pycache__/output_item_list_response.cpython-310.pyc,, +openai/types/evals/runs/__pycache__/output_item_retrieve_response.cpython-310.pyc,, +openai/types/evals/runs/output_item_list_params.py,sha256=Lp1OQV1qXeEUwMS90_-BpOnO1jICwJOo9QgNC9OGJ2U,821 +openai/types/evals/runs/output_item_list_response.py,sha256=YWkB3RtLae4hl0xs6gQvllYOcTwViR806LA_W3n9Kyg,4120 +openai/types/evals/runs/output_item_retrieve_response.py,sha256=CG3oTPKn9OOyHMLa3A-EAmo09mb_j2mJz6zw9hJ2C00,4128 +openai/types/file_chunking_strategy.py,sha256=oT5tAbwt3wJsFqSj2sjDPBcisegNwJOecxS_V7M4EdA,559 +openai/types/file_chunking_strategy_param.py,sha256=mOFh18BKAGkzVTrWv_3Iphzbs-EbT6hq-jChCA4HgAE,517 +openai/types/file_content.py,sha256=qLlM4J8kgu1BfrtlmYftPsQVCJu4VqYeiS1T28u8EQ8,184 +openai/types/file_create_params.py,sha256=1QmHtJJdSAwqARQjde2BsDaL-WpFNIjxEnsQXmi-HQE,1584 +openai/types/file_deleted.py,sha256=H_r9U7XthT5xHAo_4ay1EGGkc21eURt8MkkIBRYiQcw,277 +openai/types/file_list_params.py,sha256=TmmqvM7droAJ49YlgpeFzrhPv5uVkSZDxqlG6hhumPo,960 
+openai/types/file_object.py,sha256=s0hqehIWHSMHbiIRTrURTi4iiCu2PXmEGrd4tUB8lW8,1589 +openai/types/file_purpose.py,sha256=aNd8G-GC1UVCL9bvTgtL4kfkiF0uEjfiimRS-eh8VrY,265 +openai/types/fine_tuning/__init__.py,sha256=f8GH2rKGcIU1Kjrfjw5J0QoqlsC4jRmH96bU6axGD64,1832 +openai/types/fine_tuning/__pycache__/__init__.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/dpo_hyperparameters.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/dpo_hyperparameters_param.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/dpo_method.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/dpo_method_param.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_event.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_integration.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/fine_tuning_job_wandb_integration_object.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/job_create_params.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/job_list_events_params.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/job_list_params.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/reinforcement_hyperparameters.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/reinforcement_hyperparameters_param.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/reinforcement_method.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/reinforcement_method_param.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/supervised_hyperparameters.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/supervised_hyperparameters_param.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/supervised_method.cpython-310.pyc,, +openai/types/fine_tuning/__pycache__/supervised_method_param.cpython-310.pyc,, 
+openai/types/fine_tuning/alpha/__init__.py,sha256=e_Evj3xLs7o_SONlqoXDM75oZMbxuGWhxBW-azsXD_w,429 +openai/types/fine_tuning/alpha/__pycache__/__init__.cpython-310.pyc,, +openai/types/fine_tuning/alpha/__pycache__/grader_run_params.cpython-310.pyc,, +openai/types/fine_tuning/alpha/__pycache__/grader_run_response.cpython-310.pyc,, +openai/types/fine_tuning/alpha/__pycache__/grader_validate_params.cpython-310.pyc,, +openai/types/fine_tuning/alpha/__pycache__/grader_validate_response.cpython-310.pyc,, +openai/types/fine_tuning/alpha/grader_run_params.py,sha256=ECVczgghTZ8J9xfqAbNc_VvAHfhOpkaVzQw_wUmE4r8,1414 +openai/types/fine_tuning/alpha/grader_run_response.py,sha256=So-fvQMRvpccsSYb0jfKGQ_MNWdqqS71OcE9GbeLASs,1556 +openai/types/fine_tuning/alpha/grader_validate_params.py,sha256=Jd6m3DjIZAUNY-PlLUWDbH3ojm8ztnfjHmPjKw2DrLM,875 +openai/types/fine_tuning/alpha/grader_validate_response.py,sha256=nLldMLyNG-ICS3HwykDWdKuAPKu4gR2A2I0C79C4khs,773 +openai/types/fine_tuning/checkpoints/__init__.py,sha256=xA69SYwf79pe8QIq9u9vXPjjCw7lf3ZW2arzg9c_bus,588 +openai/types/fine_tuning/checkpoints/__pycache__/__init__.cpython-310.pyc,, +openai/types/fine_tuning/checkpoints/__pycache__/permission_create_params.cpython-310.pyc,, +openai/types/fine_tuning/checkpoints/__pycache__/permission_create_response.cpython-310.pyc,, +openai/types/fine_tuning/checkpoints/__pycache__/permission_delete_response.cpython-310.pyc,, +openai/types/fine_tuning/checkpoints/__pycache__/permission_retrieve_params.cpython-310.pyc,, +openai/types/fine_tuning/checkpoints/__pycache__/permission_retrieve_response.cpython-310.pyc,, +openai/types/fine_tuning/checkpoints/permission_create_params.py,sha256=TI90xY-4dv8vDKKZ0FBdbly9JTCrw4FgXkcXz_gTUlk,407 +openai/types/fine_tuning/checkpoints/permission_create_response.py,sha256=ATIeO4fFBTtaylBYdC6Id-wvirln4lKh2tRLMaJW01Y,751 +openai/types/fine_tuning/checkpoints/permission_delete_response.py,sha256=X_RuOvxa6i3wGLP5joHixv4tNLUpuK-2umiUf6P7Ha8,558 
+openai/types/fine_tuning/checkpoints/permission_retrieve_params.py,sha256=3zVCOq1676MizKhKSba2OLmBSPlBx6Az2ZdxyVl580o,610 +openai/types/fine_tuning/checkpoints/permission_retrieve_response.py,sha256=o8wXyRWH80oA8o80crPkaGDyPIwuQZysRK1ic6mPqj8,963 +openai/types/fine_tuning/dpo_hyperparameters.py,sha256=Kkylxhw94kImWo6-SS_7Jq66nftP0Hy0bbRxtcrChXM,1129 +openai/types/fine_tuning/dpo_hyperparameters_param.py,sha256=mXJJXLhC_6E-7IYqDOlYGTWLLxx5Fs2l4_fmVvRs9RM,1113 +openai/types/fine_tuning/dpo_method.py,sha256=pQVpzKu0WGMQkL1itgY2Wrec85u5QHHROa0FInGdVhw,434 +openai/types/fine_tuning/dpo_method_param.py,sha256=cYBljFPSCtdDoNmyJ4s8OcgU08vCdQK8sxTJ7JX7nEA,471 +openai/types/fine_tuning/fine_tuning_job.py,sha256=DFd9yrwcrh4Jmpjf4HBgUTw7FLu-EkBWmhSkXTyuHS4,5662 +openai/types/fine_tuning/fine_tuning_job_event.py,sha256=T_ESNL8qLk25Z9MHbLqTZtj-nR_LyAS4FHuHTsU-KAI,894 +openai/types/fine_tuning/fine_tuning_job_integration.py,sha256=uNFfuBV87nUHQORNGVLP_HbotooR_e37Bgd0dyZ4nUM,241 +openai/types/fine_tuning/fine_tuning_job_wandb_integration.py,sha256=WUCywzpaj6ZVg_ZZKZxRqt_XS1A22mMasdSGxlOQsbU,1343 +openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py,sha256=7vEc2uEV2c_DENBjhq0Qy5X8B-rzxsKvGECjnvF1Wdw,804 +openai/types/fine_tuning/job_create_params.py,sha256=-79Le_1QQI0c8W5M1NWLKlmRs9Wt8soE7Y_2wGLfxds,6719 +openai/types/fine_tuning/job_list_events_params.py,sha256=4xOED4H2ky2mI9sIDytjmfJz5bNAdNWb70WIb_0bBWs,400 +openai/types/fine_tuning/job_list_params.py,sha256=wUGXsQ4UDCKvAjHDZAZ-JDU6XAouiTGThb0Jo_9XX08,623 +openai/types/fine_tuning/jobs/__init__.py,sha256=nuWhOUsmsoVKTKMU35kknmr8sfpTF-kkIzyuOlRbJj0,295 +openai/types/fine_tuning/jobs/__pycache__/__init__.cpython-310.pyc,, +openai/types/fine_tuning/jobs/__pycache__/checkpoint_list_params.cpython-310.pyc,, +openai/types/fine_tuning/jobs/__pycache__/fine_tuning_job_checkpoint.cpython-310.pyc,, +openai/types/fine_tuning/jobs/checkpoint_list_params.py,sha256=XoDLkkKCWmf5an5rnoVEpNK8mtQHq1fHw9EqmezfrXM,415 
+openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py,sha256=UG9keznPmpRFAM4ivkMJ8KlBbojdF3m2bSmiftBCnT8,1548 +openai/types/fine_tuning/reinforcement_hyperparameters.py,sha256=DwLBbYDa4QDJQup7aeg_KUu5wwC4tSLDpp60axNsmC0,1501 +openai/types/fine_tuning/reinforcement_hyperparameters_param.py,sha256=t91X4q0WocQ9PcjGJ8yu1w8vOdPvCFthTD-zrZuCNTs,1432 +openai/types/fine_tuning/reinforcement_method.py,sha256=vKEmXkRWUdd5GeLNxzZsqfQ8kG5GR3EXLCqaXy-DOoM,1025 +openai/types/fine_tuning/reinforcement_method_param.py,sha256=rjE_b2IY7Zkaf6UpMuGNrvizor2mhrUrMC-cZFFDmlQ,1157 +openai/types/fine_tuning/supervised_hyperparameters.py,sha256=Y9h3_iGD5drhkAdMjR4Szb00BZqQ43ka1FFUSgKrzJI,926 +openai/types/fine_tuning/supervised_hyperparameters_param.py,sha256=9nnnvgvHbT-x1adrP2yTBCtU19CGrjcQ5AQvXb2wkZ0,923 +openai/types/fine_tuning/supervised_method.py,sha256=sGXoTMI_r3BN53I7FA45dz_JU9d_7eQlPXHvRSswgfY,472 +openai/types/fine_tuning/supervised_method_param.py,sha256=GKw3VgMuZ6V0B0NKwHh_lrHRSM4gYp0dPBPoamYPze8,509 +openai/types/graders/__init__.py,sha256=y-002SPDfVfefLY1hTugtFHv74beH51tCTXi6dZrCDk,1147 +openai/types/graders/__pycache__/__init__.cpython-310.pyc,, +openai/types/graders/__pycache__/grader_inputs.cpython-310.pyc,, +openai/types/graders/__pycache__/grader_inputs_param.cpython-310.pyc,, +openai/types/graders/__pycache__/label_model_grader.cpython-310.pyc,, +openai/types/graders/__pycache__/label_model_grader_param.cpython-310.pyc,, +openai/types/graders/__pycache__/multi_grader.cpython-310.pyc,, +openai/types/graders/__pycache__/multi_grader_param.cpython-310.pyc,, +openai/types/graders/__pycache__/python_grader.cpython-310.pyc,, +openai/types/graders/__pycache__/python_grader_param.cpython-310.pyc,, +openai/types/graders/__pycache__/score_model_grader.cpython-310.pyc,, +openai/types/graders/__pycache__/score_model_grader_param.cpython-310.pyc,, +openai/types/graders/__pycache__/string_check_grader.cpython-310.pyc,, 
+openai/types/graders/__pycache__/string_check_grader_param.cpython-310.pyc,, +openai/types/graders/__pycache__/text_similarity_grader.cpython-310.pyc,, +openai/types/graders/__pycache__/text_similarity_grader_param.cpython-310.pyc,, +openai/types/graders/grader_inputs.py,sha256=rboj5sT1i5dwbXH3gGw6NgACSchjECaFeNg9a4Iq4dc,1299 +openai/types/graders/grader_inputs_param.py,sha256=-kXE-BoLSannXeqP0SSdcU9x6ssRu0xxMdgnmffMxCA,1494 +openai/types/graders/label_model_grader.py,sha256=UwzUzamECdsrNpyAnKYfFqTmE_R-ORzIKDIvtJmFCSU,2765 +openai/types/graders/label_model_grader_param.py,sha256=w7tWVs69B7IiM0BzfBEvZ-DKNQqbO-IIZ0csIt-94QI,3061 +openai/types/graders/multi_grader.py,sha256=JULSGQyLMw8aUX35xiKVr_hoqaVn2vkHG2VAjSV0GbI,1127 +openai/types/graders/multi_grader_param.py,sha256=BMnqlkcSYAKjLjjWvqj9FHVUQTFseHvs9MXaX_iDcrc,1300 +openai/types/graders/python_grader.py,sha256=xM2ClJoMlai_VqVDs_7HCwBj7bGwQurq8Y2zsMV2gQU,607 +openai/types/graders/python_grader_param.py,sha256=uNRp2JEkNSdTFDVpo0BcrQ8oyEgLeYodyFzxsT7miWs,638 +openai/types/graders/score_model_grader.py,sha256=iBAl6DjyKoHLD8x-CRGzIXtbFHO4ahSyKoNfDcTREww,4412 +openai/types/graders/score_model_grader_param.py,sha256=0wQcPE-nbrNmYHKpJ_nfVvWc0YcrBC48yK_mohefncE,4589 +openai/types/graders/string_check_grader.py,sha256=xQwwSyqZT0JRwLOiBW5bo_AdAAa9nAntJwkwfYGwBcQ,814 +openai/types/graders/string_check_grader_param.py,sha256=HLFE2vMYtN42p3whZw7snhwlt8hb55UphGhmzOYRKKM,910 +openai/types/graders/text_similarity_grader.py,sha256=ye43c3vEbWeSoABRURyd1j3EVRACVENRqn9Qx7YzU_Y,975 +openai/types/graders/text_similarity_grader_param.py,sha256=KW-fjo4H3S7mdZbFMXj7N0R0iWT7C5FsUrjCe2zIStw,1133 +openai/types/image.py,sha256=21v1I9bcjq7p-rw20iSNxW0CQG7OLUB1VP3d3zTjID4,872 +openai/types/image_create_variation_params.py,sha256=Xeka4vp5V0o8R_6vnLsqiQhWH5O6tUSCyO3FKGVmAeU,1426 +openai/types/image_edit_completed_event.py,sha256=u8Rj9eW4C7htO0jl59OP0u4CKEhO0TcTL1dqLGN2JQU,2024 
+openai/types/image_edit_params.py,sha256=jWwiZhG3m1ZzU2PTPfAF_gtaBGduDw3ziuEX5BH5qN8,5500 +openai/types/image_edit_partial_image_event.py,sha256=tJR59-lg3QQfoDNsItBoScAMhS-IdOHDon655vRz0CA,1180 +openai/types/image_edit_stream_event.py,sha256=GtHKc8VdumW5RnQtIiyMqhwIIaqYogKXZF1QNuq9Bd4,516 +openai/types/image_gen_completed_event.py,sha256=rpjnocJQ5imYRrHHxEz5yDzWppi9W6sxxHYX1dKamQg,2036 +openai/types/image_gen_partial_image_event.py,sha256=5VJhxTf6ZgjVFbQn72iOkolIHCMAa4LExMhXhq35NRw,1165 +openai/types/image_gen_stream_event.py,sha256=gVzdE6qzBPpK3kEFM7EdoUzBa4DgCaS3AdF9gjd0pUs,508 +openai/types/image_generate_params.py,sha256=-G3-zsmpaaKtFnCbD42Tc_g_i45WiDVT72E7pVOYlDo,5469 +openai/types/image_model.py,sha256=LXjOC6iPeoDDrrZcUcKS6R4CpdL16DL3OkwhGTJUNys,271 +openai/types/images_response.py,sha256=GyUSpxaUXJfr0qHdabXdeYn15MXv1vD2SziISO01Qdg,2574 +openai/types/model.py,sha256=cmrjNhjHXnJUfgp3al0B2s4O-PvFD-nHni7h8h2p6FM,609 +openai/types/model_deleted.py,sha256=ntKUfq9nnKB6esFmLBla1hYU29KjmFElr_i14IcWIUA,228 +openai/types/moderation.py,sha256=epjd2oQqw5uSaBGXVSu0hCUaDITvfMgwJe9cNjCjvTc,7099 +openai/types/moderation_create_params.py,sha256=bv5qr2y_MQ1MYBhWWUiCET2L18ypWtQpaIKzYTrl9xs,1032 +openai/types/moderation_create_response.py,sha256=mNPnXzdTgyVarKDENlkgud35yoK9CqWpKtcpC1bV_ds,552 +openai/types/moderation_image_url_input_param.py,sha256=AZhp5qCmWjuaZbDO3Y0MMXMRYv_cEK45PP5I7VYn-F4,758 +openai/types/moderation_model.py,sha256=BFeqSyel2My2WKC6MCa_mAIHJx4uXU3-p8UNudJANeM,319 +openai/types/moderation_multi_modal_input_param.py,sha256=RFdiEPsakWIscutX896ir5_rnEA2TLX5xQkjO5QR2vs,483 +openai/types/moderation_text_input_param.py,sha256=4egKHVxB5niYopwD6j3DRU1fIq3vcG2Q7utGQj24kGM,456 +openai/types/other_file_chunking_strategy_object.py,sha256=h4JKlz4p__U1vbZeXKO9v4DrPsDLIE5IilYdNrrnhFs,501 +openai/types/realtime/__init__.py,sha256=hnjSirz0039Qais1VXg42nRrZNClsrmLGWYW-6SQP8Y,17085 +openai/types/realtime/__pycache__/__init__.cpython-310.pyc,, 
+openai/types/realtime/__pycache__/audio_transcription.cpython-310.pyc,, +openai/types/realtime/__pycache__/audio_transcription_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/call_accept_params.cpython-310.pyc,, +openai/types/realtime/__pycache__/call_create_params.cpython-310.pyc,, +openai/types/realtime/__pycache__/call_refer_params.cpython-310.pyc,, +openai/types/realtime/__pycache__/call_reject_params.cpython-310.pyc,, +openai/types/realtime/__pycache__/client_secret_create_params.cpython-310.pyc,, +openai/types/realtime/__pycache__/client_secret_create_response.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_created_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_added.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_create_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_create_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_created_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_delete_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_delete_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_deleted_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_done.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_completed_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_delta_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_failed_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_input_audio_transcription_segment.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_param.cpython-310.pyc,, 
+openai/types/realtime/__pycache__/conversation_item_retrieve_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_retrieve_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_truncate_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_truncate_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/conversation_item_truncated_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_append_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_append_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_clear_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_clear_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_cleared_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_commit_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_commit_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_committed_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_dtmf_event_received_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_speech_started_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_speech_stopped_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/input_audio_buffer_timeout_triggered.cpython-310.pyc,, +openai/types/realtime/__pycache__/log_prob_properties.cpython-310.pyc,, +openai/types/realtime/__pycache__/mcp_list_tools_completed.cpython-310.pyc,, +openai/types/realtime/__pycache__/mcp_list_tools_failed.cpython-310.pyc,, +openai/types/realtime/__pycache__/mcp_list_tools_in_progress.cpython-310.pyc,, +openai/types/realtime/__pycache__/noise_reduction_type.cpython-310.pyc,, +openai/types/realtime/__pycache__/output_audio_buffer_clear_event.cpython-310.pyc,, 
+openai/types/realtime/__pycache__/output_audio_buffer_clear_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/rate_limits_updated_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_config.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_config_input.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_config_input_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_config_output.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_config_output_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_config_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_formats.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_formats_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_input_turn_detection.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_audio_input_turn_detection_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_client_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_client_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_connect_params.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_assistant_message.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_assistant_message_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_function_call.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_function_call_output.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_function_call_output_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_function_call_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_system_message.cpython-310.pyc,, 
+openai/types/realtime/__pycache__/realtime_conversation_item_system_message_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_user_message.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_conversation_item_user_message_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_error.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_error_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_function_tool.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_function_tool_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_approval_request.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_approval_request_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_approval_response.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_approval_response_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_list_tools.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_list_tools_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_protocol_error.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_protocol_error_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_tool_call.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_tool_call_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_tool_execution_error.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcp_tool_execution_error_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcphttp_error.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_mcphttp_error_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_create_audio_output.cpython-310.pyc,, 
+openai/types/realtime/__pycache__/realtime_response_create_audio_output_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_create_mcp_tool.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_create_mcp_tool_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_create_params.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_create_params_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_status.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_usage.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_usage_input_token_details.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_response_usage_output_token_details.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_server_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_session_client_secret.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_session_create_request.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_session_create_request_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_session_create_response.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tool_choice_config.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tool_choice_config_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tools_config.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tools_config_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tools_config_union.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tools_config_union_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tracing_config.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_tracing_config_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_audio.cpython-310.pyc,, 
+openai/types/realtime/__pycache__/realtime_transcription_session_audio_input.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_turn_detection.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_audio_input_turn_detection_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_audio_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_create_request.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_create_request_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_create_response.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_transcription_session_turn_detection.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_truncation.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_truncation_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_truncation_retention_ratio.cpython-310.pyc,, +openai/types/realtime/__pycache__/realtime_truncation_retention_ratio_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_audio_delta_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_audio_done_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_audio_transcript_delta_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_audio_transcript_done_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_cancel_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_cancel_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_content_part_added_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_content_part_done_event.cpython-310.pyc,, 
+openai/types/realtime/__pycache__/response_create_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_create_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_created_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_done_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_function_call_arguments_delta_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_function_call_arguments_done_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_mcp_call_arguments_delta.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_mcp_call_arguments_done.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_mcp_call_completed.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_mcp_call_failed.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_mcp_call_in_progress.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_output_item_added_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_output_item_done_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_text_delta_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/response_text_done_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/session_created_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/session_update_event.cpython-310.pyc,, +openai/types/realtime/__pycache__/session_update_event_param.cpython-310.pyc,, +openai/types/realtime/__pycache__/session_updated_event.cpython-310.pyc,, +openai/types/realtime/audio_transcription.py,sha256=E0x4I29kjvVbJOvsjNMSUQP8iIofodsWjwuGdbPynN4,1513 +openai/types/realtime/audio_transcription_param.py,sha256=ecAesrI27A1iH01YyPXPx0G0bB7GwYxd8eS7WRouJkE,1484 +openai/types/realtime/call_accept_params.py,sha256=KSB7hkvjWTU-ooaKXqJRa2rpSIuTHU52P97212W_jlQ,5267 +openai/types/realtime/call_create_params.py,sha256=r0vyhcjvDAKZF8DSbLP7bEQAVh92hgzNBnXBQWJ56no,544 
+openai/types/realtime/call_refer_params.py,sha256=Zhy_H0Jv0leRL6HS_WH7Oca7HUlZ0feINePxN-hms0s,422 +openai/types/realtime/call_reject_params.py,sha256=cyz55zIN5DzSL74uhzeKQOSecl4V0UkpySt7zykoGIA,374 +openai/types/realtime/client_secret_create_params.py,sha256=12Guwuw4txjO_cW2GnrPT67UO-AVBaOg7LTEPAFuuLQ,1989 +openai/types/realtime/client_secret_create_response.py,sha256=ZUDhhR8vzMZRdHkfc9hpl80T4bUbDk4QNeLV8QlcQlc,1016 +openai/types/realtime/conversation_created_event.py,sha256=kVZE5Y1ZRDQq-x0vqbuYAl-VIRj5is_zC4pjHKwKZjw,879 +openai/types/realtime/conversation_item.py,sha256=BGqZp9UpybVbEyr6enYqdleryy4NMbXpzkUPX03cvoI,1437 +openai/types/realtime/conversation_item_added.py,sha256=SaE8tAlIbS1hgv_F6p3BWMmfwkdrPQU18UnTxfoJ29U,1516 +openai/types/realtime/conversation_item_create_event.py,sha256=Oo3Bk0qhCGS6INYscyU8l7c6omFu6n8cxmxPCZKYH5U,1560 +openai/types/realtime/conversation_item_create_event_param.py,sha256=tMEIyyj5ues0XQ9wTTvFbE8Lubx30DrXzvKJfa1GeUE,1581 +openai/types/realtime/conversation_item_created_event.py,sha256=hSqqRT281uXGKYPCaIImzYcWtzJLGp_fTkhIX3fQx3c,1474 +openai/types/realtime/conversation_item_delete_event.py,sha256=gDbD3qPFSxSYj3zjGUuVn5L1KLQDEyiDqPaGwMZeUAY,840 +openai/types/realtime/conversation_item_delete_event_param.py,sha256=sMZ5WeOkLHCV8bh9-06i2EUFD2V_-bA9DMkGX67W82I,861 +openai/types/realtime/conversation_item_deleted_event.py,sha256=VwTgV7HhBNOIBwSJfAtO9C0LP0pFers83ORnggEpbOQ,741 +openai/types/realtime/conversation_item_done.py,sha256=TjLuup27QloMnHgXemjpknACqV9N3A2-alX2_msK-1I,969 +openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=ZemU0mTZAdxe8O1u5ZdLpelVd-UksKrIKFPnf0_Eg1I,3281 +openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py,sha256=KGr9MvmKDXnbAsQU-PbHethJ09I_MQ4KfQgUJlhF8Tk,1495 +openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=Qugv24nmNkYqJFmsTMlDH-P38r50dPVSyt8-43Nmmr0,1364 
+openai/types/realtime/conversation_item_input_audio_transcription_segment.py,sha256=JC_QW5z_TFmCPjRUm6k2qnWUEnoIK3IHXGlE88J2YP0,1079 +openai/types/realtime/conversation_item_param.py,sha256=yrtdTZDIfAsJGwSXDY7v-_e9GtOPqRNXQAM2LWjxOOI,1483 +openai/types/realtime/conversation_item_retrieve_event.py,sha256=YuRUzjV3VWdfMchkeyWBNkZdMjtboRaHiBz3PpXGVZc,975 +openai/types/realtime/conversation_item_retrieve_event_param.py,sha256=NG4pq3JXYIG44-vu5apNfa6iEO8y9Pn1Ypa8AqKQWjE,996 +openai/types/realtime/conversation_item_truncate_event.py,sha256=3OUHG_uqYOQgH6LX0vtZJ4WFjVDAv9FrSb8ZLAtZcSw,1560 +openai/types/realtime/conversation_item_truncate_event_param.py,sha256=OiSiFZ0KyUYN5AyhCKf7HKIyOxj-uW3psdtMN6PI5Yk,1601 +openai/types/realtime/conversation_item_truncated_event.py,sha256=KXuLWI1Ozcq0bR44T92cleAd6bvZRLQhD1XvaJBHLNM,1124 +openai/types/realtime/input_audio_buffer_append_event.py,sha256=VS09zVCzDrXJ2OXxkzBQORqfWuwwxRrUibsLkPyVxCs,1591 +openai/types/realtime/input_audio_buffer_append_event_param.py,sha256=U7EcpuHlmMwI6MJm5aWSg-OQs19T5IDycNSrosMFTgY,1612 +openai/types/realtime/input_audio_buffer_clear_event.py,sha256=2MJ3ypmN8SyOYkNjYTvdPp0Sz5UtvGumqOa8tCH1O3Y,637 +openai/types/realtime/input_audio_buffer_clear_event_param.py,sha256=MTQMjN6RACt7J9tSnzglNNOfGL-_MgzxWCYWltbko0k,648 +openai/types/realtime/input_audio_buffer_cleared_event.py,sha256=iClRNb-MTR86VkuZSbmwJii8iP9j0Ox0QlbCyt2G85w,556 +openai/types/realtime/input_audio_buffer_commit_event.py,sha256=Nluuq9h5G3hxy5PmmWGStxiS7-t-Dy4lsOKBIudMy14,1068 +openai/types/realtime/input_audio_buffer_commit_event_param.py,sha256=oflH-u7iSqqF-eriFHDWrqO-8G7iW0n-VfUUWcF2thQ,1079 +openai/types/realtime/input_audio_buffer_committed_event.py,sha256=M3MAPO5XwuFmOnfC_0_GcuYQ12aKILiEgEGdPAWzOlU,1027 +openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py,sha256=JUq_t7Q5E4241M1JTqTcRUwsmgZNmJmiUdn_LKcYP84,889 
+openai/types/realtime/input_audio_buffer_speech_started_event.py,sha256=GjjkLM3MOoW8wJ6KKdUz-3yLpkK4VtrQu8NKC1BwQBg,1567 +openai/types/realtime/input_audio_buffer_speech_stopped_event.py,sha256=l0CmVlUxcGpthG37Hxtdf9ydypFKQxR0a_z7fz131kY,1029 +openai/types/realtime/input_audio_buffer_timeout_triggered.py,sha256=lLraokk0TW03UNyjpeVpHlyCoupNLQaRCsH7NofvYdI,1898 +openai/types/realtime/log_prob_properties.py,sha256=78jAqS9O2wwM5JhpV8Cdwpy8xgET0ceXclW4BcyPoCY,489 +openai/types/realtime/mcp_list_tools_completed.py,sha256=zKMP0fwchYZMbdJERmFBhIALlrNuz6npH4Gn0cMjbto,543 +openai/types/realtime/mcp_list_tools_failed.py,sha256=1En1gz6mxUhaYy0RoTBKxLk6grL1_kfaYWGRmX2Lkrs,528 +openai/types/realtime/mcp_list_tools_in_progress.py,sha256=xC1hb9qODkB-dAJPeNf40gQOi4q1cYNU1LivPPgMaIg,550 +openai/types/realtime/noise_reduction_type.py,sha256=lrAb7YhMM-anRYzzOha8hcVloNJR_zWuFrO2SccrcIo,238 +openai/types/realtime/output_audio_buffer_clear_event.py,sha256=w4xRRkTcCtGdYDspgkLnD6LlzavmjxUNiF0lOzVF2ow,934 +openai/types/realtime/output_audio_buffer_clear_event_param.py,sha256=-zmYI1KaCgiGFnYeLl9_tutNFuHHrbeKeXt94-ZmLhk,945 +openai/types/realtime/rate_limits_updated_event.py,sha256=rsYK1Yc0WrtMFG8shtaYQv5uEd59foHBTJs4XoA1l-s,1255 +openai/types/realtime/realtime_audio_config.py,sha256=CFWqdebuucyHyL4gqIZTuh78PME5cQPLmeq1VsD9Z3Y,520 +openai/types/realtime/realtime_audio_config_input.py,sha256=eQohrH7bIyaHA7W4qIerP61cUO_FAd9Cb-X36ZmNjd0,3248 +openai/types/realtime/realtime_audio_config_input_param.py,sha256=mdQg38uNnx9dOck83iWhsmdafe_OXLTtH9K8oLtq3LY,3300 +openai/types/realtime/realtime_audio_config_output.py,sha256=JgABZNG00eseAgYVZiJF_mf32iK3n7BsxwyI3Y5nAfw,1393 +openai/types/realtime/realtime_audio_config_output_param.py,sha256=m1Z2NNR2jWprIF94sLq5j1uwilleezSm51L1rb6u3As,1375 +openai/types/realtime/realtime_audio_config_param.py,sha256=d5gwlXPsJIc53Nh1A9QYefyHd5YJ-E1VRRfuK2IOafk,556 +openai/types/realtime/realtime_audio_formats.py,sha256=QZTTihJeSWCwEFUK2kjf4nvaZNXWnFfCKSXZee-Xa-Q,1069 
+openai/types/realtime/realtime_audio_formats_param.py,sha256=N0NFD1pbh6ZlcbENKv10Np-K2QVPWIpFsLzl0koPirs,960 +openai/types/realtime/realtime_audio_input_turn_detection.py,sha256=mZB4mr8AaT5TtogXLwxzjM_Q5W-x5o4T1tPFFnyiMmU,4260 +openai/types/realtime/realtime_audio_input_turn_detection_param.py,sha256=ItL4MBzCTPYUScMQwgxdrbyIsbgbcNVoTuAF7Zk9oG8,4091 +openai/types/realtime/realtime_client_event.py,sha256=4_lYEyK-wj25VTh8GTaV0mZ0t17KhkfJrQ0yUF0mCYU,1473 +openai/types/realtime/realtime_client_event_param.py,sha256=YPveu8tNyKmZkK24qEJv8js5l5NNygDyAcsza2iOmKw,1543 +openai/types/realtime/realtime_connect_params.py,sha256=Zd5FnP-6nEzAPiWTckSdVGQsA_8GqhwELCpQXt22J8A,288 +openai/types/realtime/realtime_conversation_item_assistant_message.py,sha256=up0WdL75VVd6rK11nBjRCcIRn3Qj-6dFv3podahi78E,1780 +openai/types/realtime/realtime_conversation_item_assistant_message_param.py,sha256=agNl8DdxAJPV1NeGgTkti3V8QKg_WQTlZFbXYR931bI,1748 +openai/types/realtime/realtime_conversation_item_function_call.py,sha256=uT0NfedSzOpneh3Bqbx_oKSb-Cj0aFUd4b-sJ7QSIvQ,1262 +openai/types/realtime/realtime_conversation_item_function_call_output.py,sha256=aXHbpueTQ3lUri3oXA7B8m6S9Tcq8xQqFurzS8kNhdk,1170 +openai/types/realtime/realtime_conversation_item_function_call_output_param.py,sha256=yRdBH7iqCLqtKxgylFYK5c9moNuefrW6OQAWR0g0De0,1167 +openai/types/realtime/realtime_conversation_item_function_call_param.py,sha256=Et9Qisj41TwOgu6YweMUXm4hGDD6q4FGyoTEN5n8fWE,1242 +openai/types/realtime/realtime_conversation_item_system_message.py,sha256=vxOmlQODp5RtxCObbTj5_E-C82HjWJA2SiBIZYTuycE,1695 +openai/types/realtime/realtime_conversation_item_system_message_param.py,sha256=nbvAmVDzYD6HflURqkbrugVshOXpRvFxnnxFuQjc744,1697 +openai/types/realtime/realtime_conversation_item_user_message.py,sha256=Ud0ijIPDq8BALQsAjHyJqHjywzq7yl37dv28Ru7Ilvg,2170 +openai/types/realtime/realtime_conversation_item_user_message_param.py,sha256=bIEMwkqvS9iDpt7X1KofesZaTi0sZYVo6IJj-CWtXCc,2104 
+openai/types/realtime/realtime_error.py,sha256=V33P7DvMTFyEI2rhVkCnoD5zbt6wndDYyr_eZQ29BJo,658 +openai/types/realtime/realtime_error_event.py,sha256=FGyefJO5-GSjwyrsXBq-Ezp6qYZEcQsN1J_5sWBlyiA,714 +openai/types/realtime/realtime_function_tool.py,sha256=3CDiCZCM0A1VLRxOFdG4teFXr8dx0JFU94KbSn-JgGc,734 +openai/types/realtime/realtime_function_tool_param.py,sha256=-vDBSmMWNdbABC8dxVckkNeRdEujAKeff6icJvYrM0I,674 +openai/types/realtime/realtime_mcp_approval_request.py,sha256=n88sEpgRYFAIPe_jqmlYa0KsBDv03kkBstpp90GSvhY,696 +openai/types/realtime/realtime_mcp_approval_request_param.py,sha256=GIdxqx9wz5cRBU0iT1_DtlZlyNgehKhrsaqMEfRl624,792 +openai/types/realtime/realtime_mcp_approval_response.py,sha256=HOiVoqZwfA8WAZIyN3fPwUsceepQq6OWkkLaZGqJDXM,742 +openai/types/realtime/realtime_mcp_approval_response_param.py,sha256=ysEMT5pSKOo8-scsMSoW_vutKX14MDTsKc_jEHQsPoc,821 +openai/types/realtime/realtime_mcp_list_tools.py,sha256=LrgWdG30hgwmoFHC92aONFReZzWaI24SDQaH78yyGsc,1004 +openai/types/realtime/realtime_mcp_list_tools_param.py,sha256=SEiFMWl6ig6xjzUAx9fnN8ap_88kVUQEy3Ia8zWPiSg,1090 +openai/types/realtime/realtime_mcp_protocol_error.py,sha256=4jqkfl6h7tFT5kQy40VW24LrokpKe6X4VROYlNmOHDQ,313 +openai/types/realtime/realtime_mcp_protocol_error_param.py,sha256=jlufPTMU_9JuYtqzQGTmb0o978gDiOFxkNx0yJAvwx8,389 +openai/types/realtime/realtime_mcp_tool_call.py,sha256=C3FP8y_xqmFjBcO64I6phXMiuFC1sRt-8JZypczbQ4k,1407 +openai/types/realtime/realtime_mcp_tool_call_param.py,sha256=lsbewyAWR1wH4rj3znPeJ3twMSV1EaP6p0Faq2s5JXM,1421 +openai/types/realtime/realtime_mcp_tool_execution_error.py,sha256=swcOrTKO5cx1kkfGS_5PhBPEQx_Vf_ZW04HbA5eRa0g,314 +openai/types/realtime/realtime_mcp_tool_execution_error_param.py,sha256=3IuPmvy52n_VByGYqfCr87kciEQdJMTcwGWj4__PiX8,380 +openai/types/realtime/realtime_mcphttp_error.py,sha256=-Zqz0xr2gPs6peG_wC3S8qVgtEUJNrZm4Mm5BIvmZw0,301 +openai/types/realtime/realtime_mcphttp_error_param.py,sha256=GcmAMBvZVNrN9p_tneHPu_pyN7D8wCytaAKruFtMfwI,377 
+openai/types/realtime/realtime_response.py,sha256=zaUF7moDzT5foAYhiqg1zALpA-4pxUCvy1ImIFprD6g,3903 +openai/types/realtime/realtime_response_create_audio_output.py,sha256=HSSmi2e3Xg5Cp5ONpBVtwVvCchSy0xyK6voM4LN-Xc4,1006 +openai/types/realtime/realtime_response_create_audio_output_param.py,sha256=ioIoPVwiTweGns-5sdJLOS-X7sPjhYSY5KsLRVwNSv4,1001 +openai/types/realtime/realtime_response_create_mcp_tool.py,sha256=lUpF8l6G9q2sMcxHY7LyQ9pzoZMmGygFtXhP_NT9vBM,5076 +openai/types/realtime/realtime_response_create_mcp_tool_param.py,sha256=HJ3UYYO-saxPWzfz4v9TvZCFNMu_gCGQ0OxDl6OC-rM,4979 +openai/types/realtime/realtime_response_create_params.py,sha256=kQ1B1iLiK7IS4ACTMtUvbj-toYGJ0d4yi-EoV6piXNQ,4348 +openai/types/realtime/realtime_response_create_params_param.py,sha256=J3ch3svShqtnVCeDcPF0hV7o1WbwjSIZP7WtV7PHEgw,4380 +openai/types/realtime/realtime_response_status.py,sha256=QjUdgjUKfX8tHVk-qRofl33vnjdoYzwTFhlk6knUGCo,1500 +openai/types/realtime/realtime_response_usage.py,sha256=atuMmLvzmTtH46RcOE7k3Br730ObjnkVBFL6x1BXWYg,1727 +openai/types/realtime/realtime_response_usage_input_token_details.py,sha256=Pt38MhRPUUhNGVN6nRShH750heUbbAlrYbKLPXcHHe8,1635 +openai/types/realtime/realtime_response_usage_output_token_details.py,sha256=3K2DIIvd92aJe9X38tSALn_t_-OyU4ivG3ckBXspkng,524 +openai/types/realtime/realtime_server_event.py,sha256=WkL6yNqJXjKbkhegGZr00_Uw84NVITGhV-zPlQdLGeA,8248 +openai/types/realtime/realtime_session_client_secret.py,sha256=oIOEWToHr1J_6e5VsDYA6xl7klWCaVoqWvoWVr4YDRs,629 +openai/types/realtime/realtime_session_create_request.py,sha256=ZrXWm3Vn7huXn52G_ctxSYNX99sSlqA0GejzQ863d7U,5365 +openai/types/realtime/realtime_session_create_request_param.py,sha256=toIWRT0Q2IV2cPYjNzKEeeiSbM49lvJH9qtmYv_Up0A,5351 +openai/types/realtime/realtime_session_create_response.py,sha256=P4MEjrOSicIGv2MmUbK4j8__noih8qxwZyIkT9eW43A,19935 +openai/types/realtime/realtime_tool_choice_config.py,sha256=DV0uuyfK59paj5NC9adQskUF2km5TRSiHAlMDu1Fmdo,472 
+openai/types/realtime/realtime_tool_choice_config_param.py,sha256=0vqYNM4MkU5d8GXfitT6AoE9AubKeLZOSHGOH8q73QU,550 +openai/types/realtime/realtime_tools_config.py,sha256=JSxehiQnA_tJUeXvi2h9H6wlYsnhhtRWB_o5S20V-oQ,318 +openai/types/realtime/realtime_tools_config_param.py,sha256=ux7AlLllQQozenBmkr5dzGOhdUp3b_LD9TN_StFxtfM,5272 +openai/types/realtime/realtime_tools_config_union.py,sha256=u_ss-JAdDLzZ4qsF0BumBXi3s_0LOlTjLCEszdsdpVk,5333 +openai/types/realtime/realtime_tools_config_union_param.py,sha256=B6LRLV6jkDeeQAp5UtAi0QwFDTTKxZHZR2LrwM31Ubc,5159 +openai/types/realtime/realtime_tracing_config.py,sha256=gkG557bdUH4-kAoyH9TEFp12VJFwQkGfQkPDc4ESh-E,918 +openai/types/realtime/realtime_tracing_config_param.py,sha256=fo2An_bdLxQrCLhZ6d34vcxiDAHTj2BoxsTqoyT6yjw,887 +openai/types/realtime/realtime_transcription_session_audio.py,sha256=cUETz-wzN-6qaupdCF06nmLmgH846dp7zu_wGOyl-Ho,467 +openai/types/realtime/realtime_transcription_session_audio_input.py,sha256=Wd8Kzuu5dU7V4RSMj4rvnuIk0nBt0tCrlyQfYs7f6Y4,3377 +openai/types/realtime/realtime_transcription_session_audio_input_param.py,sha256=2Lbur4VMABLAZ8LDqiAWMTJUf4rlWEUXLiki18gG_iI,3429 +openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py,sha256=1j9-uzpvv-dt4D1jAqH-nbIIx7RwoZ3VnF6Kw4cXoIw,4300 +openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py,sha256=kmDzDtQAU8U6t9mmIMNz2pw5Mle06RMcpxACtCpnfw4,4131 +openai/types/realtime/realtime_transcription_session_audio_param.py,sha256=Jia_0_l0wqhrZDvY2WkR3koSHY2lL25z9ZlHcytXdFU,504 +openai/types/realtime/realtime_transcription_session_create_request.py,sha256=a2sXjIAJYePFHZ0SqC5Obi0EWO-YAgEW85vQqCK5Mkc,963 +openai/types/realtime/realtime_transcription_session_create_request_param.py,sha256=fRfSPuKXdxYJQbUT8woqTUReDUvQNVwXLsbMRH8NwiA,992 +openai/types/realtime/realtime_transcription_session_create_response.py,sha256=TAuRl80bsI_snH8ZkqCAGVTQC-WbgBLPpQ493onEjp0,2616 
+openai/types/realtime/realtime_transcription_session_turn_detection.py,sha256=0eoBc3mWERrAO3coDqlMwDbfplCZ6pXJN_lRR_wSkmA,1270 +openai/types/realtime/realtime_truncation.py,sha256=lnr1Uq9kSs6OfJb_TcvQrs7jx92UuSKaIhGNvwUK-qU,380 +openai/types/realtime/realtime_truncation_param.py,sha256=wBXHiAPS_HA6MWBqhRGEtqZxu6RdIrgnTVRYgUljwq4,442 +openai/types/realtime/realtime_truncation_retention_ratio.py,sha256=EfcvUfo8Ul7EgqWpDsTpUTlZrmZXIO1LroTLhgOfibw,1749 +openai/types/realtime/realtime_truncation_retention_ratio_param.py,sha256=t6qlpaEmtc0g4PvyXH2wz8PewNmnI_WyeYSCkYVkiXQ,1766 +openai/types/realtime/response_audio_delta_event.py,sha256=1tXOxDJqpshq4EP7R62Fnhy1u2Wwqflzru8N5MlPaNQ,818 +openai/types/realtime/response_audio_done_event.py,sha256=9fdpuN-G0dQGsSfWJRXftoA9vY2JCQ4kGEbw0VNFo3w,837 +openai/types/realtime/response_audio_transcript_delta_event.py,sha256=fk3wI07iou7q7dTIDuJsP1kjPfHvj_5E_pUct70_TWM,873 +openai/types/realtime/response_audio_transcript_done_event.py,sha256=fLcAV1ckIpDJ-SxukAeLZvuhaZrzid1AU4EUItVmLWM,983 +openai/types/realtime/response_cancel_event.py,sha256=VEQcxB2cTEmItz_o5ubbrWbvxWjrH8DB9UkKeTSrYV0,1031 +openai/types/realtime/response_cancel_event_param.py,sha256=Uh6zrwrw0xxKQULpYj6kGAOysR48JYp-yUlJDoFyRok,1025 +openai/types/realtime/response_content_part_added_event.py,sha256=SBtIynUx0WKuuHgrhDfwfejRZe0Rb3oe_eC_mBMj0p0,1399 +openai/types/realtime/response_content_part_done_event.py,sha256=Y27ee1ZRzf9WYwsny6J0SS-Su9DH-YfG7dEGK_Ooc9U,1404 +openai/types/realtime/response_create_event.py,sha256=mn__Lnv3Fuf4_aF6ZPEzHIHIZoXX_jzTRHbxEqnrnto,2085 +openai/types/realtime/response_create_event_param.py,sha256=3hLwSZv8ROLu11eU17YgdR2A8XE5P6XVFbBMO5PjY40,2096 +openai/types/realtime/response_created_event.py,sha256=02IS6XqoTnQr9gvPhUimQ0bchMW3LmjtYbL7V6H6Xlw,669 +openai/types/realtime/response_done_event.py,sha256=Vt88mKQS72w7ysbkcU3yMxkIoYV4AoDyWqxeth1rJj0,1053 
+openai/types/realtime/response_function_call_arguments_delta_event.py,sha256=hceOwsJTdQ_vW8wMPebwj6tkt0nDPYbwCpWPs76nmrQ,874 +openai/types/realtime/response_function_call_arguments_done_event.py,sha256=orgPc3_ACCQhqXihyZwi04t27D4l8Llz_fuyZABu7TY,966 +openai/types/realtime/response_mcp_call_arguments_delta.py,sha256=STwpfYmz3P6m-IwlUkHbI9rfauQn2HrmwNprY5NMaP4,920 +openai/types/realtime/response_mcp_call_arguments_done.py,sha256=3nZoOA7m2Bj1JyMPTwhiqIvsLwb8UBnFUVGZ5KviNmg,799 +openai/types/realtime/response_mcp_call_completed.py,sha256=8SfUc0zoVadfToShvtxw_wQ-wdh1q0aw68TSdS07fAE,633 +openai/types/realtime/response_mcp_call_failed.py,sha256=--MvtTO4ZANb5uYa-L9aC-PCdttglgUWyfzVvUlAqjU,605 +openai/types/realtime/response_mcp_call_in_progress.py,sha256=MK2m5lYcsKStDptN0fhVxQAn2hHZwSBD7KgzdIv0RJM,643 +openai/types/realtime/response_output_item_added_event.py,sha256=Wnbnn423eTc8iY1vFMGPghz88VCt7WfmTBIM3ErrD1U,796 +openai/types/realtime/response_output_item_done_event.py,sha256=foWLMOSv67skqeGLfOmuYGgABmMQnoLNGiT5ZmmmGIY,854 +openai/types/realtime/response_text_delta_event.py,sha256=1I5yu-_1ifZaNWvW9qzyMtOowUYjUyFt_JDIRSJGl1U,819 +openai/types/realtime/response_text_done_event.py,sha256=55fRkCAPSocN9iadkJeowE2-3laFjAAZwLa9WCNsXJA,914 +openai/types/realtime/session_created_event.py,sha256=vcxq7m7QFXsoLHiNRB6_Oh6zzxTUyfpB7ogqAUo4Lzo,980 +openai/types/realtime/session_update_event.py,sha256=R5YvfGKxhk0E4b02kQbYbg9MenVbUf-q-ZEHL8Gf5m4,1735 +openai/types/realtime/session_update_event_param.py,sha256=uSxqGmfwGW0raKiTkMFwBuPkTxEsDu-L8I1IR53530M,1807 +openai/types/realtime/session_updated_event.py,sha256=Zp9HaAopCZ1mdpt8BGp8t7JA5J4o3Cm_FbIobqvJhpM,887 +openai/types/responses/__init__.py,sha256=DOdOxzHyOleFM6lEY18XMz1hbskIXwEjuQnmApXJJ3M,18173 +openai/types/responses/__pycache__/__init__.cpython-310.pyc,, +openai/types/responses/__pycache__/apply_patch_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/apply_patch_tool_param.cpython-310.pyc,, 
+openai/types/responses/__pycache__/compacted_response.cpython-310.pyc,, +openai/types/responses/__pycache__/computer_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/computer_tool_param.cpython-310.pyc,, +openai/types/responses/__pycache__/custom_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/custom_tool_param.cpython-310.pyc,, +openai/types/responses/__pycache__/easy_input_message.cpython-310.pyc,, +openai/types/responses/__pycache__/easy_input_message_param.cpython-310.pyc,, +openai/types/responses/__pycache__/file_search_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/file_search_tool_param.cpython-310.pyc,, +openai/types/responses/__pycache__/function_shell_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/function_shell_tool_param.cpython-310.pyc,, +openai/types/responses/__pycache__/function_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/function_tool_param.cpython-310.pyc,, +openai/types/responses/__pycache__/input_item_list_params.cpython-310.pyc,, +openai/types/responses/__pycache__/input_token_count_params.cpython-310.pyc,, +openai/types/responses/__pycache__/input_token_count_response.cpython-310.pyc,, +openai/types/responses/__pycache__/parsed_response.cpython-310.pyc,, +openai/types/responses/__pycache__/response.cpython-310.pyc,, +openai/types/responses/__pycache__/response_apply_patch_tool_call.cpython-310.pyc,, +openai/types/responses/__pycache__/response_apply_patch_tool_call_output.cpython-310.pyc,, +openai/types/responses/__pycache__/response_audio_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_audio_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_audio_transcript_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_audio_transcript_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_code_interpreter_call_code_delta_event.cpython-310.pyc,, 
+openai/types/responses/__pycache__/response_code_interpreter_call_code_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_code_interpreter_call_completed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_code_interpreter_call_in_progress_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_code_interpreter_call_interpreting_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_code_interpreter_tool_call.cpython-310.pyc,, +openai/types/responses/__pycache__/response_code_interpreter_tool_call_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_compact_params.cpython-310.pyc,, +openai/types/responses/__pycache__/response_compaction_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_compaction_item_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_compaction_item_param_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_completed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_computer_tool_call.cpython-310.pyc,, +openai/types/responses/__pycache__/response_computer_tool_call_output_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_computer_tool_call_output_screenshot.cpython-310.pyc,, +openai/types/responses/__pycache__/response_computer_tool_call_output_screenshot_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_computer_tool_call_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_content_part_added_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_content_part_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_conversation_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_create_params.cpython-310.pyc,, +openai/types/responses/__pycache__/response_created_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_custom_tool_call.cpython-310.pyc,, 
+openai/types/responses/__pycache__/response_custom_tool_call_input_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_custom_tool_call_input_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_custom_tool_call_output.cpython-310.pyc,, +openai/types/responses/__pycache__/response_custom_tool_call_output_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_custom_tool_call_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_error.cpython-310.pyc,, +openai/types/responses/__pycache__/response_error_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_failed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_file_search_call_completed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_file_search_call_in_progress_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_file_search_call_searching_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_file_search_tool_call.cpython-310.pyc,, +openai/types/responses/__pycache__/response_file_search_tool_call_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_format_text_config.cpython-310.pyc,, +openai/types/responses/__pycache__/response_format_text_config_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_format_text_json_schema_config.cpython-310.pyc,, +openai/types/responses/__pycache__/response_format_text_json_schema_config_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_call_arguments_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_call_arguments_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_call_output_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_call_output_item_list.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_call_output_item_list_param.cpython-310.pyc,, 
+openai/types/responses/__pycache__/response_function_call_output_item_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_shell_call_output_content.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_shell_call_output_content_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_shell_tool_call.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_shell_tool_call_output.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_tool_call.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_tool_call_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_tool_call_output_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_tool_call_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_web_search.cpython-310.pyc,, +openai/types/responses/__pycache__/response_function_web_search_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_image_gen_call_completed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_image_gen_call_generating_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_image_gen_call_in_progress_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_image_gen_call_partial_image_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_in_progress_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_includable.cpython-310.pyc,, +openai/types/responses/__pycache__/response_incomplete_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_audio.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_audio_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_content.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_content_param.cpython-310.pyc,, 
+openai/types/responses/__pycache__/response_input_file.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_file_content.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_file_content_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_file_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_image.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_image_content.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_image_content_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_image_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_item_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_message_content_list.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_message_content_list_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_message_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_text.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_text_content.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_text_content_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_input_text_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_item_list.cpython-310.pyc,, +openai/types/responses/__pycache__/response_mcp_call_arguments_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_mcp_call_arguments_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_mcp_call_completed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_mcp_call_failed_event.cpython-310.pyc,, 
+openai/types/responses/__pycache__/response_mcp_call_in_progress_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_mcp_list_tools_completed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_mcp_list_tools_failed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_mcp_list_tools_in_progress_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_item_added_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_item_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_message.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_message_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_refusal.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_refusal_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_text.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_text_annotation_added_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_output_text_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_prompt.cpython-310.pyc,, +openai/types/responses/__pycache__/response_prompt_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_queued_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_reasoning_item.cpython-310.pyc,, +openai/types/responses/__pycache__/response_reasoning_item_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_reasoning_summary_part_added_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_reasoning_summary_part_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_reasoning_summary_text_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_reasoning_summary_text_done_event.cpython-310.pyc,, 
+openai/types/responses/__pycache__/response_reasoning_text_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_reasoning_text_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_refusal_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_refusal_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_retrieve_params.cpython-310.pyc,, +openai/types/responses/__pycache__/response_status.cpython-310.pyc,, +openai/types/responses/__pycache__/response_stream_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_text_config.cpython-310.pyc,, +openai/types/responses/__pycache__/response_text_config_param.cpython-310.pyc,, +openai/types/responses/__pycache__/response_text_delta_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_text_done_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_usage.cpython-310.pyc,, +openai/types/responses/__pycache__/response_web_search_call_completed_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_web_search_call_in_progress_event.cpython-310.pyc,, +openai/types/responses/__pycache__/response_web_search_call_searching_event.cpython-310.pyc,, +openai/types/responses/__pycache__/tool.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_allowed.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_allowed_param.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_apply_patch.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_apply_patch_param.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_custom.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_custom_param.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_function.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_function_param.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_mcp.cpython-310.pyc,, 
+openai/types/responses/__pycache__/tool_choice_mcp_param.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_options.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_shell.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_shell_param.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_types.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_choice_types_param.cpython-310.pyc,, +openai/types/responses/__pycache__/tool_param.cpython-310.pyc,, +openai/types/responses/__pycache__/web_search_preview_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/web_search_preview_tool_param.cpython-310.pyc,, +openai/types/responses/__pycache__/web_search_tool.cpython-310.pyc,, +openai/types/responses/__pycache__/web_search_tool_param.cpython-310.pyc,, +openai/types/responses/apply_patch_tool.py,sha256=gO_CtGczaXdBPe8zSW-aEqV_dVJhn22GAh9AE4-xEq4,399 +openai/types/responses/apply_patch_tool_param.py,sha256=k3kqGLeZ_f9vyXh7ZMmYVeQ-xcVUbGmo4ZAqM9HhgzI,455 +openai/types/responses/compacted_response.py,sha256=qlkdM6GmpFWCcqU11xODEMqD16DinPF0W-o70Hg5VOk,934 +openai/types/responses/computer_tool.py,sha256=ExJaomK4IFKSprXIaQju1nGVi2CWNXV6QvDyrvNV7Pk,767 +openai/types/responses/computer_tool_param.py,sha256=SvgG7R0e63WUd20qbImia5VnEbJKjYPW7YF4F4G26QM,853 +openai/types/responses/custom_tool.py,sha256=3T6L-3w8A7TyPFcsw5_zYXNraP4nPbShg6lnoVt3K40,923 +openai/types/responses/custom_tool_param.py,sha256=hdnZol_ta8KGMlolyBsTY9RuwYBf4NyxDr1QK5MJwiA,935 +openai/types/responses/easy_input_message.py,sha256=VDVZoLHoRrgfSgBNlQM6rmdqFjzSD8I7n6325sQdmFo,1168 +openai/types/responses/easy_input_message_param.py,sha256=uK5_Oo_8iTJbUG9w8-hujQmfsH5lfepCpTNUgcpcU0c,1224 +openai/types/responses/file_search_tool.py,sha256=HpPWubu_JTVOxtRefDRDZkhTtWfoekHCqiL6B702j9w,2259 +openai/types/responses/file_search_tool_param.py,sha256=CvxYOB2DFpRC18WWa05En3heqTNd9Lv2SCZ7uP-ZOG8,2329 
+openai/types/responses/function_shell_tool.py,sha256=nkYhOasTJYmE2CCJ8la94cadqThV4EdgVrB5-Fr3ecs,378 +openai/types/responses/function_shell_tool_param.py,sha256=wBsstq7T75MJRucaLjvHpTBdy7H5Ex-ExmoSyAugKCc,434 +openai/types/responses/function_tool.py,sha256=XatrZMskh15WXVnDmYF8WcxrMnt9Di6u-W5z_tS8Las,978 +openai/types/responses/function_tool_param.py,sha256=YnvnWFZijbVnEeoaruUBGidPEPhLRnNXnpZ8Pp3yrnU,1043 +openai/types/responses/input_item_list_params.py,sha256=wazm2tELpbpBWdAQrXGBq88Bm5RsxWXmlVJAV3f_k-I,964 +openai/types/responses/input_token_count_params.py,sha256=s_KKPE-q6j4KU_0DoIi7hVHQNQoykxdP7MaJYW0EB-U,5668 +openai/types/responses/input_token_count_response.py,sha256=w8LogkiqiItGuty0-bXts8xy1Ug-bbSYKEm7DDKmoP0,310 +openai/types/responses/parsed_response.py,sha256=65aPRFaHpbX81f23JesWawUZcK3uGsY2N0DRbn3_rSk,3846 +openai/types/responses/response.py,sha256=yehCZVZjB_DKbaSdSpChPbNNfOVwYhb-HU4D4ZgCThA,12591 +openai/types/responses/response_apply_patch_tool_call.py,sha256=dmqjz9eTdk5WNkxxWhuJayZ-ALygsPXTosRW6B9r4oQ,2217 +openai/types/responses/response_apply_patch_tool_call_output.py,sha256=Y3ZxBSHXFAv1yaUfpf_XOFZwvAgSKiBB6Bw8fOYEsvw,1024 +openai/types/responses/response_audio_delta_event.py,sha256=JWICz6tC6Zs-8Ax3y3hkUahWE2vxwJLLVguhDQJWzhk,574 +openai/types/responses/response_audio_done_event.py,sha256=8wQI1KQcdEBSC7_IGWGVgHfjEWSaEEwTdOglsyj8dWo,470 +openai/types/responses/response_audio_transcript_delta_event.py,sha256=ekRu3IEU7rZRzoSFu2K98btqFDm_vz1aunOl5Q_WvtA,597 +openai/types/responses/response_audio_transcript_done_event.py,sha256=mP_kZAh8gDCQ01ekkygstR6F5oD-s-8Ycfb1ZRGo_v4,521 +openai/types/responses/response_code_interpreter_call_code_delta_event.py,sha256=nxweCYi0ARKFednUF9Hya8lyuLqc1VqKquYbB4fRqvY,924 +openai/types/responses/response_code_interpreter_call_code_done_event.py,sha256=dZl7A5ZEdN-fyJG-d7XbBdkQ-cLrlcEaZ5f5nzwPGu0,885 
+openai/types/responses/response_code_interpreter_call_completed_event.py,sha256=rUQ3-3KER-MRvNgUPuUyv5q6o7KsWqsQWZrjFzDscmE,823 +openai/types/responses/response_code_interpreter_call_in_progress_event.py,sha256=gC-u3RAQ08IcD9p2imVX27goYwH-HvukhScwLVORMfU,831 +openai/types/responses/response_code_interpreter_call_interpreting_event.py,sha256=gOWRUvIgcMVrp-KcmtAttp8PbeVknhmCmfgYg04-2K4,862 +openai/types/responses/response_code_interpreter_tool_call.py,sha256=r1NjHpBWawfNwy5-2gVoJjc1rjrHk2jJjlGsoAIU55I,1795 +openai/types/responses/response_code_interpreter_tool_call_param.py,sha256=uaywHPXN7U5uh9b7bi5xFL6xvXRM7lTJihYdZbi84Zw,1868 +openai/types/responses/response_compact_params.py,sha256=vO8L88q5sVvGe9tfbVxholqvnfflsHV0Im-QUFTYyIY,4783 +openai/types/responses/response_compaction_item.py,sha256=FevR74gnDtJkGYJKhwaPHUMqDXkHxpPojBBgip93NK4,758 +openai/types/responses/response_compaction_item_param.py,sha256=cCRvAVU1LSLWhryYp54lU2j00wwHiaogpDcXBULI3PQ,673 +openai/types/responses/response_compaction_item_param_param.py,sha256=UYpgNuwAxAd11LxXJJ-EPHGLWR3TDP00lylHxytE6K4,732 +openai/types/responses/response_completed_event.py,sha256=fe5mGXdeFLkozUpasDitkCHe7Qv22xKUesRDNFcXazY,573 +openai/types/responses/response_computer_tool_call.py,sha256=PwgncJ5PbXv8PfNDYT5F1xP7sWDPaPQrEKKjDllE1Ew,5276 +openai/types/responses/response_computer_tool_call_output_item.py,sha256=4ezZAGHMVIe88MQRCVprLbhzOFvVv7mu7eQ2NLukEew,1584 +openai/types/responses/response_computer_tool_call_output_screenshot.py,sha256=rhc-E2ECWZOqpvIFcVo7fi-j9rS94AwXU_1xuVyUYKc,734 +openai/types/responses/response_computer_tool_call_output_screenshot_param.py,sha256=iRdRiNE-wpwaNhBfnilJ4GaouMGh-oWn121LkSzDBA4,728 +openai/types/responses/response_computer_tool_call_param.py,sha256=O2IdaoOcDAsETrNu5D6n-ue_WKVLBJnJoCr9i4u6sjs,5687 +openai/types/responses/response_content_part_added_event.py,sha256=lpdjp8MXrniMjY5aEchgOXAYx9nCRzqsGcm10A7k9oA,1432 
+openai/types/responses/response_content_part_done_event.py,sha256=QHR1gB9E8AeAxL30xp2pTVr8YQxEojwozkZqJ-s_45E,1419 +openai/types/responses/response_conversation_param.py,sha256=diMeoJm5-D3AideV9XtPNfd1gdRTyAEnYz0HeRXvzPE,399 +openai/types/responses/response_create_params.py,sha256=yPNDnw9d4oDPKSMI7RQp6fVO1hCQPm7lQoTmkvzkOlM,14008 +openai/types/responses/response_created_event.py,sha256=3q0JhFTkp3vgkkJdjFzgLG690GfDFFfnpXrsjl81ES8,566 +openai/types/responses/response_custom_tool_call.py,sha256=DhCtiuQYuyNm1c2DxSxLnqrDqiexmL4hsjCf7zXhRQY,787 +openai/types/responses/response_custom_tool_call_input_delta_event.py,sha256=70JVIwG8KfTnW2zzovAdDv-qIyvJU8cY2aQcxHiDPKw,786 +openai/types/responses/response_custom_tool_call_input_done_event.py,sha256=QQ8V5TNyMOdLRSQ1hxCCw2_LjxERccGqvXjjnUrvc_8,756 +openai/types/responses/response_custom_tool_call_output.py,sha256=w2yJcnkB48PfpBvLikpUx_Iz8ryKuS7WtqyqsECr0ps,1307 +openai/types/responses/response_custom_tool_call_output_param.py,sha256=Nr6hV5PHkOeDRR3INuiyPtpk3ybd4zwLbMKYEfkv1Hw,1314 +openai/types/responses/response_custom_tool_call_param.py,sha256=8baPSm1t7KXm8ABHUgdjw68vDf6yoNLY7scZyR1BX_I,828 +openai/types/responses/response_error.py,sha256=AszYOuu1LOWaglgKy9tX9zO9q7Yid9alJc2cKINk9ro,996 +openai/types/responses/response_error_event.py,sha256=fjB964eSWEh0ciSk5s64TQGJyTsYjtNT8duSqztjhW0,617 +openai/types/responses/response_failed_event.py,sha256=FG812T2EKLJwrSyO8KPbs-QSpsg1e4n8YHpXiANlb04,551 +openai/types/responses/response_file_search_call_completed_event.py,sha256=gE4Zju0_edN_9zfOc07K7BepToGCjdOIBvqQx-jwfqM,744 +openai/types/responses/response_file_search_call_in_progress_event.py,sha256=0SKVai4oBjI9ayOjSZw4HlX5Alsm1QOUEe_UEM8EedE,734 +openai/types/responses/response_file_search_call_searching_event.py,sha256=h39eY-lBOJiN-9M1xKdbsij8nVe7CoJHRq1ncE4Djog,733 +openai/types/responses/response_file_search_tool_call.py,sha256=kz4aytPWtfajEkJjtxW395Gy_IZC90uSQr0gcFOqqPw,1838 
+openai/types/responses/response_file_search_tool_call_param.py,sha256=rnHg_KmMbyduAwzE1iC6QcenZSMtAmrNq1r9hojH75M,1911 +openai/types/responses/response_format_text_config.py,sha256=Z1uv9YytZAXaMtD_faYD6SL9Q8kOjSvRQXFkSZc0_hY,647 +openai/types/responses/response_format_text_config_param.py,sha256=T6cMHds5NYojK9fZMMldWYBypWwVmywIIbkRm5e4pMc,625 +openai/types/responses/response_format_text_json_schema_config.py,sha256=qjE8Yx2p7fT2KUoEtlwEbgqqV11FwdWdmXdrEsEUAP0,1611 +openai/types/responses/response_format_text_json_schema_config_param.py,sha256=kmkEvymu058oqJngIz5ir0XKTDGbfRbc6G2GKJWa5SA,1593 +openai/types/responses/response_function_call_arguments_delta_event.py,sha256=EKF5hkCH9qMKFzVhIxatJJiXrcyG8HGByep0hi5h-w4,866 +openai/types/responses/response_function_call_arguments_done_event.py,sha256=483jGS7xNW-VSivfrgFCox5WI9AXakYRN3KaTVZ5Zzk,702 +openai/types/responses/response_function_call_output_item.py,sha256=mkRJ2mCmrrmjRbvijxt-zHw9eLU6-aLjM6___SmTiTw,633 +openai/types/responses/response_function_call_output_item_list.py,sha256=5NYWEt_FNPCyQVRMiIJDJt4fYV6GWUwbW9S8hCucIpw,367 +openai/types/responses/response_function_call_output_item_list_param.py,sha256=y6vpVbdTGurYpDVbg0UFp4GhSMtwYRium9Z5bbiyyuE,774 +openai/types/responses/response_function_call_output_item_param.py,sha256=VEe_wQ8z9PN0qJbLuCwfg9n4Lwe5-WNljzmNJ-fqnwM,629 +openai/types/responses/response_function_shell_call_output_content.py,sha256=QZlIb_3DHE9BX5W6HmdDEY14Vwy5c295707BzgH1Z_Q,1260 +openai/types/responses/response_function_shell_call_output_content_param.py,sha256=e8nFAciWLudLRQxr-CNH8U_9Nge65cZ6M1d-ZqE_h8g,1299 +openai/types/responses/response_function_shell_tool_call.py,sha256=0kGBz7g4SZOnSAoU3-yjMeOXM681abrTQ4EFRs02quY,1395 +openai/types/responses/response_function_shell_tool_call_output.py,sha256=mH94PBtKr9zXzb2RirDijvWXoNidElxPidqdYskMZ2c,2435 +openai/types/responses/response_function_tool_call.py,sha256=Rf1IGPksyVcGFSOMPJuj5h0h7oJs5UQEKiwRYZ-R0Sc,1086 
+openai/types/responses/response_function_tool_call_item.py,sha256=aiw34m4uKx8J_Lk33SZ_oqENRKm3m72q-8qiVrgNykk,509 +openai/types/responses/response_function_tool_call_output_item.py,sha256=F8lyiugcU6q6tG9kJUpTnF-g_xGBWgIje6mTYMErXL4,1394 +openai/types/responses/response_function_tool_call_param.py,sha256=L6T3MtKCuaiUDzK8YaJZRysMEADbuWBrqJrUpSgoSAk,1110 +openai/types/responses/response_function_web_search.py,sha256=LtyQ1svmci8bRs36fexPL9aFq008zuH-LBwAap0s0Jc,2307 +openai/types/responses/response_function_web_search_param.py,sha256=QjDva_BhIuapFyCnuOYdefVAGvrGm0g0_78ByQ3NWEs,2481 +openai/types/responses/response_image_gen_call_completed_event.py,sha256=4EVne_sRTbCxuPTTdt7YMRBblpH8nR3in4PkzzrHxBE,783 +openai/types/responses/response_image_gen_call_generating_event.py,sha256=Xu-lJZAHRZTDbiOGRVOBLCmXnAbHFQBYNRmPab2pFug,824 +openai/types/responses/response_image_gen_call_in_progress_event.py,sha256=LvCFXfC7VNqmZegn_b4xy021H-qO08SzGOspvK-6Wew,778 +openai/types/responses/response_image_gen_call_partial_image_event.py,sha256=Iss5tU9uBlv9qdnLMsnRh2g0rRJL5BDW04TTUHD8Shc,1059 +openai/types/responses/response_in_progress_event.py,sha256=oi8YtsItiH0TloP7rbkm0-XOY1-FKTV4o6Ia_y4pS6Q,571 +openai/types/responses/response_includable.py,sha256=tkia-hgFh4ttgy53H5lJtoBWsSQh6G2DzCXj-14o8Ko,505 +openai/types/responses/response_incomplete_event.py,sha256=WGBjWI-kwdaQpGBqzrIKfTehJ4Phzxsn2fvfDYoTV6w,592 +openai/types/responses/response_input_audio.py,sha256=CLnaiuQjjF11emjSxbVBLL0yF_kONznqXgIB6m8lric,614 +openai/types/responses/response_input_audio_param.py,sha256=MXXWtLXdToypWHQYLSpfUXtC2U_PllaffzJLMy5LNYY,713 +openai/types/responses/response_input_content.py,sha256=MaZ-MNnZvhM2stSUKdhofXrdM9BzFjSJQal7UDVAQaI,542 +openai/types/responses/response_input_content_param.py,sha256=1q_4oG8Q0DAGnQlS-OBNZxMD7k69jfra7AnXkkqfyr4,537 +openai/types/responses/response_input_file.py,sha256=lgoRiDEn_dPiga4dgHZsHcjgbzEAftmhwSnloBVTQuo,755 
+openai/types/responses/response_input_file_content.py,sha256=SdUicUGXlg_OICLMdySuJLTmmU0WDTywjRIWFVEYOwg,781 +openai/types/responses/response_input_file_content_param.py,sha256=F2cwpQbEZtgt4p4dxs7zl_2mM7rB9OsNjbbLMRq8WV4,809 +openai/types/responses/response_input_file_param.py,sha256=TFYJQXKEOWB3cdiAIVkRrMgeO_-ZjqNUAidCl7KKwK0,753 +openai/types/responses/response_input_image.py,sha256=djDER2-m43vk1YaX3GXF5dNg9W-vYF26Itto5kQtNYs,904 +openai/types/responses/response_input_image_content.py,sha256=0fVlJtV-LnTYP51zwqcBkKyyDdCq7OZQZkFcbeQihjY,934 +openai/types/responses/response_input_image_content_param.py,sha256=Lf0hJTLtG0CRvtKt6Yb_ldtbcUge5IP-wK-_UjoCdn4,969 +openai/types/responses/response_input_image_param.py,sha256=lxWoE5udEUHohLRw9KtfNTcwMuabydqQy1gxZ7oAtqk,956 +openai/types/responses/response_input_item.py,sha256=LmxxV68vNIfspCf0l13pnRnZI8lXMtsfpjddxiT4t1c,15925 +openai/types/responses/response_input_item_param.py,sha256=BaA20QXJmRpcXSxVd5Z9bMVD_GI9R4KZ6pUoodzbPRI,16693 +openai/types/responses/response_input_message_content_list.py,sha256=LEaQ_x6dRt3w5Sl7R-Ewu89KlLyGFhMf31OHAHPD3U8,329 +openai/types/responses/response_input_message_content_list_param.py,sha256=cbbqvs4PcK8CRsNCQqoA4w6stJCRNOQSiJozwC18urs,666 +openai/types/responses/response_input_message_item.py,sha256=_zXthGtO0zstLvIHg9XesNAme6yNa8JOejkBYLwXm70,1029 +openai/types/responses/response_input_param.py,sha256=EgiARo-Co4lsATiwFkntXs1OrGV8IQ_RNhYRbCr2Ndg,16787 +openai/types/responses/response_input_text.py,sha256=uCT-nKv5EEjOHmTyBlKJ01gLSCGue8lcjrBwzwWzPrE,413 +openai/types/responses/response_input_text_content.py,sha256=UPb4d4KHkbgN0rS6wkvoaTPZVGN_2aYo-VbL-zwMkpU,427 +openai/types/responses/response_input_text_content_param.py,sha256=nAOZRT6FsZqPr0va99wAFVB1lz5W8cinZ-9iEuAHgN0,493 +openai/types/responses/response_input_text_param.py,sha256=9DbrdxWlak_wHmcPhw9BIVlyWkBnuAfmYC4TDtM_Lqo,479 +openai/types/responses/response_item.py,sha256=hGxb4N8Ue0bXbnDQ9Oqf2TStEhboo8_Cbiga2Ve93QE,7357 
+openai/types/responses/response_item_list.py,sha256=JclJxBBJda7fjXVbeLfYeVUH8A_swQN24LWpUjIsuus,702 +openai/types/responses/response_mcp_call_arguments_delta_event.py,sha256=EUCaFYd-EgZS9zikqlXi8xqUzeC94MzVx2qrhUgm4a0,884 +openai/types/responses/response_mcp_call_arguments_done_event.py,sha256=LWMaw5HKV8AgiYNJxTB-14kmCcFc_T22tSCdqJjlp3Y,826 +openai/types/responses/response_mcp_call_completed_event.py,sha256=MFjkI21vxzlTzvEAvmRjntJ3vGCeDTViMFek6AIyQXM,670 +openai/types/responses/response_mcp_call_failed_event.py,sha256=bhC0gZ4b2qZPrux4Le6_ZDthm22Ck1wGZ6G5Mf0wsPQ,636 +openai/types/responses/response_mcp_call_in_progress_event.py,sha256=oTLlZyhdLsnnkZj4Fr3WYF2QRnupgdT6_4FIVl8gjUg,696 +openai/types/responses/response_mcp_list_tools_completed_event.py,sha256=3OdrqmMVVUgVTwl2nOf5vQ-4VVcbfS6VoF3G7z5DOjU,726 +openai/types/responses/response_mcp_list_tools_failed_event.py,sha256=9nvFQ-02kRXV6nOf_4o5aQhYw7-tSzppLbN-C7a2zHE,680 +openai/types/responses/response_mcp_list_tools_in_progress_event.py,sha256=V9IqBnh-oLKqtthuGA3LDNveWzx10kd2N6x3MpzATzM,756 +openai/types/responses/response_output_item.py,sha256=mYMzKnpJLEQqIQCEZp2FU29QhdWecQHVGsOM3h1Gvig,6101 +openai/types/responses/response_output_item_added_event.py,sha256=6rIwK1Dtgu2K0t1R2ForK1C70CUpcnihqMpWqlM0jtg,696 +openai/types/responses/response_output_item_done_event.py,sha256=n-T9NinnYm0xhOmPfDhqKso0JQYVhfs0IXa1MhkA2cc,707 +openai/types/responses/response_output_message.py,sha256=OkOnbst0qUMmnFDOwKgPbK4Ii7JLxoKYV-9aLOrYNHI,1149 +openai/types/responses/response_output_message_param.py,sha256=YwLkQ0QRwWyon_RMzA09uQbqLO949cIpQZwlUQUIWpY,1193 +openai/types/responses/response_output_refusal.py,sha256=VkWPvAhqWz3mZ_7X8CZA4CPjgCfkWpAsrUL_EEAp3mY,425 +openai/types/responses/response_output_refusal_param.py,sha256=RO2CI71fbzQPwsr3URyR-EP20oRNMaWAmLj9dMDeu1o,491 +openai/types/responses/response_output_text.py,sha256=LPESiPQE6UcQFa1bHRFr-Dxsvbyqc6UoCDsauGDAev0,3157 
+openai/types/responses/response_output_text_annotation_added_event.py,sha256=Eg-nDtkqOX51T-kJOjUFVd_2vnnk6Ah1rE3lCdYe3f0,1034 +openai/types/responses/response_output_text_param.py,sha256=3G84IlzqWmex9echpQUFApOWLra_oWvwbBiweX6NFtU,3460 +openai/types/responses/response_prompt.py,sha256=YYeJIp7WbLKe8Y1EieKpNgRHSIqArcx9OdSBeAInPRk,1107 +openai/types/responses/response_prompt_param.py,sha256=n0Kh9RsuVkgfFlll_fwaSPmQ91iJoJXDEX2yQw97eEg,1198 +openai/types/responses/response_queued_event.py,sha256=yr1KhJ27RjkFrKFmTx1uncSpR5t5-RHEXOBTyFaazUc,582 +openai/types/responses/response_reasoning_item.py,sha256=Rxg9gZDFhwR_eswynvNae2NCa0BYGCqB0OkijLwfl8A,1843 +openai/types/responses/response_reasoning_item_param.py,sha256=rKLmJVQdld2ZNlo6VylJmeyScXGE3aRPF9gUiYj5UbE,1956 +openai/types/responses/response_reasoning_summary_part_added_event.py,sha256=z6fdUEOQYdkbM2Kbv-qR39uqLScYWHozlZc43AaUy_U,1113 +openai/types/responses/response_reasoning_summary_part_done_event.py,sha256=IVAfLcKqV0tN5DlcHoWj-7kgHPCJIZuRO2tjautGVag,1099 +openai/types/responses/response_reasoning_summary_text_delta_event.py,sha256=dtVBDqeHnL6rqfKkqS9S48iEG40XeL7HPHedYvAtPfo,916 +openai/types/responses/response_reasoning_summary_text_done_event.py,sha256=mKK-nrU1DChaUyRYLw0hKChMvYovs6e19ZChJDuW69I,896 +openai/types/responses/response_reasoning_text_delta_event.py,sha256=3_UV-GjQdCNjqpmABpDrxovTKqQIyJa_K5fKemPcezA,903 +openai/types/responses/response_reasoning_text_done_event.py,sha256=M9AZZ7cHvXj4NthBIsYEbY2h_RzFcn2YJ37_mXd_Q9M,843 +openai/types/responses/response_refusal_delta_event.py,sha256=nqAEXShLQDV84Qd2pr7po-UnzW3aJxgXCNNivmJHQOQ,827 +openai/types/responses/response_refusal_done_event.py,sha256=y-Dtjpa0CCA0hqqC8O_m5tRqKoSJayhYpTTBFDXOvK0,826 +openai/types/responses/response_retrieve_params.py,sha256=Y_4UacCQ7xUYXc7_QTCJt-zLzIuv-PWocNQ1k0RnPsw,2372 +openai/types/responses/response_status.py,sha256=289NTnFcyk0195A2E15KDILXNLpHbfo6q4tcvezYWgs,278 
+openai/types/responses/response_stream_event.py,sha256=uPEbNTxXOaiEFRVt_PbdeecyfS9rgjaYU7m15NIvSbo,6916 +openai/types/responses/response_text_config.py,sha256=8mk0TYDQB9VNJxKAazvCHy3WIzOpPgmn95qLkwToK6o,1655 +openai/types/responses/response_text_config_param.py,sha256=Wq9RvKd2ZlOBRsTaUxWRmSekcQxH8mMoYJZGp2QHY30,1684 +openai/types/responses/response_text_delta_event.py,sha256=fdEVcZTuoTzxIhFwcp48YPUTdEHXwI4ocSwDM2kfcEM,1689 +openai/types/responses/response_text_done_event.py,sha256=KbaXsGeQvNJTGN8vOpEaA-_mGBSr6Viv-9rEAFrY6zw,1687 +openai/types/responses/response_usage.py,sha256=g1YqV35FbWEsFkDyXyQ95gK1CPT2vXaEj7dx35Nyllo,1204 +openai/types/responses/response_web_search_call_completed_event.py,sha256=huESBkfYVrHZUHUHeRknBgw7bMrIYEsVryEZrXHMiGc,754 +openai/types/responses/response_web_search_call_in_progress_event.py,sha256=MQIZTtd3LqdX4C46cG-vOSeNaLxQFvM2FaCfiDwUits,760 +openai/types/responses/response_web_search_call_searching_event.py,sha256=UucDvOvM6VjzBgCkwVO-UvRbttfidcBVCQdvgw-NUJk,754 +openai/types/responses/tool.py,sha256=UtSKlLaCHSaHCEj8KIgqzgnxZZSgaHG55zRmF7hkJqk,9895 +openai/types/responses/tool_choice_allowed.py,sha256=Mq18Us-Yd-59WZppbXaWYjBPtUqSbHHE48IWCHdLFCI,1100 +openai/types/responses/tool_choice_allowed_param.py,sha256=6Yk0zOe8QGzTyrReDIEy56o3opFvPbS0FeuBN6DW8CM,1184 +openai/types/responses/tool_choice_apply_patch.py,sha256=zUFufN0Okg40AkOM3QmzUxtRX4DQKwR52fYQYdQDECk,404 +openai/types/responses/tool_choice_apply_patch_param.py,sha256=DVgCdqmlWw1hMyDigUfUU8O7R7nQahuPqEYL6rq2y24,460 +openai/types/responses/tool_choice_custom.py,sha256=W-rAidO4Rf7Hf-EE-K_hIJwjPiiASxo5Vt7t8wQCFSA,460 +openai/types/responses/tool_choice_custom_param.py,sha256=SygHwvYJsXDSPAVjsxWqioSZd0Gc1oayMlbZUcR5BlA,526 +openai/types/responses/tool_choice_function.py,sha256=ZmF4VEQWTEcLrj7IEBqVX8kQv5Mf_sf0FJDffs0uIog,459 +openai/types/responses/tool_choice_function_param.py,sha256=RfRfJNf27JsZBNYjabLnFgksPFZJIrkdy3ouVGdNrF8,525 
+openai/types/responses/tool_choice_mcp.py,sha256=z3k0i7zbJyp4DBvKs-OsvWdMOFrm0CNSgCyB0nEqnBE,585 +openai/types/responses/tool_choice_mcp_param.py,sha256=-cnA3zk3wMmx93CkCwwOBiw3CROD2UanAmdQm__Gow0,644 +openai/types/responses/tool_choice_options.py,sha256=gJHrNT72mRECrN7hQKRHAOA-OS0JJo51YnXvUcMfqMQ,237 +openai/types/responses/tool_choice_shell.py,sha256=5k4381juLRA_qZc_h4x0DGtnW6wkSKAogWfnYLMEPxk,378 +openai/types/responses/tool_choice_shell_param.py,sha256=Qy0ySbnh_0eDYPJwzn5iHoHHuo79HAHwkk8zRhNYtdY,434 +openai/types/responses/tool_choice_types.py,sha256=djkzycVSxYunU9WIGeRbS6nZa-tjEsSEtI2r1MwzMG0,923 +openai/types/responses/tool_choice_types_param.py,sha256=tuaS5Azelo0hDgpjBjZprfwjZKfmAcFT9zLq2QIIBik,1021 +openai/types/responses/tool_param.py,sha256=U6cO_JJY9YxrHdMou1yxDqOWX16zMif8rFUuf_vyW7w,9870 +openai/types/responses/web_search_preview_tool.py,sha256=jJkDvIC9p9aS5TAoLFhP5oW0sVVZ0m2FOqs-Bv_r_zA,1690 +openai/types/responses/web_search_preview_tool_param.py,sha256=Nmk4AtPCKWMXFNwQCfSh6yfT1JT46xTWAsCodri2BVE,1717 +openai/types/responses/web_search_tool.py,sha256=rsN2LrO25vAvIVEl3sKCiLS5pAqvwxJyam0Cgys419g,2083 +openai/types/responses/web_search_tool_param.py,sha256=pXX6qVRcsVNBDhy91nHM0svtpzvKVoAPIg7ciOXmctg,2124 +openai/types/shared/__init__.py,sha256=EVk-X1P3R7YWmlYmrbpMrjAeZEfVfudF-Tw7fbOC90o,1267 +openai/types/shared/__pycache__/__init__.cpython-310.pyc,, +openai/types/shared/__pycache__/all_models.cpython-310.pyc,, +openai/types/shared/__pycache__/chat_model.cpython-310.pyc,, +openai/types/shared/__pycache__/comparison_filter.cpython-310.pyc,, +openai/types/shared/__pycache__/compound_filter.cpython-310.pyc,, +openai/types/shared/__pycache__/custom_tool_input_format.cpython-310.pyc,, +openai/types/shared/__pycache__/error_object.cpython-310.pyc,, +openai/types/shared/__pycache__/function_definition.cpython-310.pyc,, +openai/types/shared/__pycache__/function_parameters.cpython-310.pyc,, +openai/types/shared/__pycache__/metadata.cpython-310.pyc,, 
+openai/types/shared/__pycache__/reasoning.cpython-310.pyc,, +openai/types/shared/__pycache__/reasoning_effort.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_json_object.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_json_schema.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_text.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_text_grammar.cpython-310.pyc,, +openai/types/shared/__pycache__/response_format_text_python.cpython-310.pyc,, +openai/types/shared/__pycache__/responses_model.cpython-310.pyc,, +openai/types/shared/all_models.py,sha256=OggdrF27d8_oCWAsE-LyQQmtjflAesmOyogIvi-atAs,716 +openai/types/shared/chat_model.py,sha256=sMmuHjOzYGegtDSqONSt2HXKYLuG66ncZRMFmeXuCTI,1957 +openai/types/shared/comparison_filter.py,sha256=9mpikD4dkjYTWni13i7VBKnfGUWJMIcug4PFWGaMGfc,981 +openai/types/shared/compound_filter.py,sha256=Dk2EVAI9kgojEKyeaXnIsu93rz8kKPERW0y5Y9LpdzY,638 +openai/types/shared/custom_tool_input_format.py,sha256=qgYtTA-5KQssG4TCdI2V2s83GcNQHhHczjSQVj4oIhg,856 +openai/types/shared/error_object.py,sha256=G7SGPZ9Qw3gewTKbi3fK69eM6L2Ur0C2D57N8iEapJA,305 +openai/types/shared/function_definition.py,sha256=2F07J5Q7r2Iwg74dC5rarhwWTnt579Y5LUrNc8OdqSc,1475 +openai/types/shared/function_parameters.py,sha256=Dkc_pm98zCKyouQmYrl934cK8ZWX7heY_IIyunW8x7c,236 +openai/types/shared/metadata.py,sha256=DC0SFof2EeVvFK0EsmQH8W5b_HnpI_bdp47s51E5LKw,213 +openai/types/shared/reasoning.py,sha256=SnHlaG2jXpvgvUQ_d7Gp0WPcC8tG3AtGGfP7B357rt0,2011 +openai/types/shared/reasoning_effort.py,sha256=1t5QFutihQBbHg0rbwsCCmfLs1wTT7TPDyH5iUIvVSo,296 +openai/types/shared/response_format_json_object.py,sha256=XXcdZ9Sx6o6gTrF73MayEITLmCtnnwo-HjjI7dbnzOg,624 +openai/types/shared/response_format_json_schema.py,sha256=Dujs9kHbLPk1dVIcMEerJ54OLyDC3svF_aXaX5SaRec,1843 +openai/types/shared/response_format_text.py,sha256=klc3lCY1T1M-lX6Dd6MnMUuNZH2sxoZqZcRymDrahlM,395 
+openai/types/shared/response_format_text_grammar.py,sha256=zjIlUfFTCfAyLCc4fmiV3IA-q1Rjcjzui97bCSVV2NY,606 +openai/types/shared/response_format_text_python.py,sha256=hWrzEgdP1hUAX80Slc-UddodqFE3HDEWM8w0T0tADr0,525 +openai/types/shared/responses_model.py,sha256=Ot5_u8itwSMhVUhZ8rHnt5Bdj2eI_Ux76WHQ8xKHt7E,726 +openai/types/shared_params/__init__.py,sha256=Jtx94DUXqIaXTb7Sgsx3MPoB9nViBlYEy0DlQ3VcOJU,976 +openai/types/shared_params/__pycache__/__init__.cpython-310.pyc,, +openai/types/shared_params/__pycache__/chat_model.cpython-310.pyc,, +openai/types/shared_params/__pycache__/comparison_filter.cpython-310.pyc,, +openai/types/shared_params/__pycache__/compound_filter.cpython-310.pyc,, +openai/types/shared_params/__pycache__/custom_tool_input_format.cpython-310.pyc,, +openai/types/shared_params/__pycache__/function_definition.cpython-310.pyc,, +openai/types/shared_params/__pycache__/function_parameters.cpython-310.pyc,, +openai/types/shared_params/__pycache__/metadata.cpython-310.pyc,, +openai/types/shared_params/__pycache__/reasoning.cpython-310.pyc,, +openai/types/shared_params/__pycache__/reasoning_effort.cpython-310.pyc,, +openai/types/shared_params/__pycache__/response_format_json_object.cpython-310.pyc,, +openai/types/shared_params/__pycache__/response_format_json_schema.cpython-310.pyc,, +openai/types/shared_params/__pycache__/response_format_text.cpython-310.pyc,, +openai/types/shared_params/__pycache__/responses_model.cpython-310.pyc,, +openai/types/shared_params/chat_model.py,sha256=EcH6f6e8-ylkT206MlIK-nrk1NbfbIkirDYuMKR6gWE,1993 +openai/types/shared_params/comparison_filter.py,sha256=xtHLwK5uBnkRyecsqrbjYXnlHPoB66uf3wJyGUjR3DY,1089 +openai/types/shared_params/compound_filter.py,sha256=kpjER_a7NZT4rvAHxEj3hd6CgF_JHgBFl5WI9-HBzkY,703 +openai/types/shared_params/custom_tool_input_format.py,sha256=uv6tIPrdbzJ3_erNTbz7bDjVbMzAi8o22QG2wOhpRGQ,852 +openai/types/shared_params/function_definition.py,sha256=6JjuRmXIofTv76GCC4XFssqgZw-iKbBazjWqKerfq6Q,1510 
+openai/types/shared_params/function_parameters.py,sha256=UvxKz_3b9b5ECwWr8RFrIH511htbU2JZsp9Z9BMkF-o,272 +openai/types/shared_params/metadata.py,sha256=YCb9eFyy17EuLwtVHjUBUjW2FU8SbWp4NV-aEr_it54,249 +openai/types/shared_params/reasoning.py,sha256=wXoF46cFeHImN8acwAJ8DRzsPzuI_2zqteLW47OdZIY,2025 +openai/types/shared_params/reasoning_effort.py,sha256=CjSzyuVxoslnEBq_vQt2WWsVVgPe4jGjoHYu6chpGx0,332 +openai/types/shared_params/response_format_json_object.py,sha256=eEG54vILrwf5es7h1vja1zlyKOQiRy9zstuQicW2NLg,670 +openai/types/shared_params/response_format_json_schema.py,sha256=Rx2m7tbaMVWO0FQABF0B7jc8Cxo8_EmTq_tQwCX9XqU,1804 +openai/types/shared_params/response_format_text.py,sha256=zCKpz3Fl_w-EICrTTrarsBhxea_LvzaKG6J864zjF1c,441 +openai/types/shared_params/responses_model.py,sha256=r1tGQ9j25cW84o01POd2p74wb18DdSBe2OeBTJhVOc8,770 +openai/types/static_file_chunking_strategy.py,sha256=JmAzT2-9eaG9ZTH8X0jS1IVCOE3Jgi1PzE11oMST3Fc,595 +openai/types/static_file_chunking_strategy_object.py,sha256=MTwQ1olGZHoC26xxCKw0U0RvWORIJLgWzNWRQ1V0KmA,424 +openai/types/static_file_chunking_strategy_object_param.py,sha256=tUsAYwR07qefkjFgt_qNwdUDbo2Rd-k9Xgu9OvtK9EE,597 +openai/types/static_file_chunking_strategy_param.py,sha256=kCMmgyOxO0XIF2wjCWjUXtyn9S6q_7mNmyUCauqrjsg,692 +openai/types/upload.py,sha256=_ePK_A-Hxr0bctSI3PfiAiJh22YRZwWXsBt0xdEQIk4,1281 +openai/types/upload_complete_params.py,sha256=PW5mCxJt7eg7F5sttX5LCE43m9FX8oZs3P5i9HvjRoU,527 +openai/types/upload_create_params.py,sha256=uOXPb_sdZhCqoR3gSSvpb4RpZ5K_Ppl1oAmJGbIAT3Y,1689 +openai/types/uploads/__init__.py,sha256=fDsmd3L0nIWbFldbViOLvcQavsFA4SL3jsXDfAueAck,242 +openai/types/uploads/__pycache__/__init__.cpython-310.pyc,, +openai/types/uploads/__pycache__/part_create_params.cpython-310.pyc,, +openai/types/uploads/__pycache__/upload_part.cpython-310.pyc,, +openai/types/uploads/part_create_params.py,sha256=pBByUzngaj70ov1knoSo_gpeBjaWP9D5EdiHwiG4G7U,362 
+openai/types/uploads/upload_part.py,sha256=A_6PT8ptLJtR-jbU1b11jlpnVNLE10Kwoh1U985j9Y4,677 +openai/types/vector_store.py,sha256=rbjldzgYE-1TsFvAeBQYSOJqSiSWISrhYpmnkJTVsL0,2633 +openai/types/vector_store_create_params.py,sha256=nMpX4sfgUug_D67MDA1bNSNKgc4tP26_SNp2aca8iD4,1947 +openai/types/vector_store_deleted.py,sha256=BbtnlZ0Z5f4ncDyHLKrEfmY6Uuc0xOg3WBxvMoR8Wxk,307 +openai/types/vector_store_list_params.py,sha256=KeSeQaEdqO2EiPEVtq1Nun-uRRdkfwW0P8aHeCmL5zA,1226 +openai/types/vector_store_search_params.py,sha256=Uglni3jSE8d8-4c7eKRlxxEsxFNnhzuGBlUBnYPiJvA,1301 +openai/types/vector_store_search_response.py,sha256=qlhdAjqLPZg_JQmsqQCzAgT2Pxc2C-vGZmh64kR8y-M,1156 +openai/types/vector_store_update_params.py,sha256=-RQr2LnJzmpI8iFx-cGSZK8hg-24mYx9c497xtN_36k,1293 +openai/types/vector_stores/__init__.py,sha256=F_DyW6EqxOJTBPKE5LUSzgTibcZM6axMo-irysr52ro,818 +openai/types/vector_stores/__pycache__/__init__.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/file_batch_create_params.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/file_batch_list_files_params.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/file_content_response.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/file_create_params.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/file_list_params.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/file_update_params.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/vector_store_file.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/vector_store_file_batch.cpython-310.pyc,, +openai/types/vector_stores/__pycache__/vector_store_file_deleted.cpython-310.pyc,, +openai/types/vector_stores/file_batch_create_params.py,sha256=rHysxuqX1vfxUqsIfaLYJMi4CkmMSJEmDWBjTb_ntdg,2707 +openai/types/vector_stores/file_batch_list_files_params.py,sha256=FPpQvCQI2skyLB8YCuwdCj7RbO9ba1UjaHAtvrWxAbs,1451 +openai/types/vector_stores/file_content_response.py,sha256=uAFvFDE_NVRzg0xm1fLJ2zEd62qzq8rPYko7xpDjbaU,367 
+openai/types/vector_stores/file_create_params.py,sha256=nTHWG0OMqqLRjWFH2qbif89fpCJQCzGGdXDjCqPbq1Y,1229 +openai/types/vector_stores/file_list_params.py,sha256=AIzmNH1oFuy-qlpRhj9eXu9yyTA-2z_IppLYFclMtZw,1385 +openai/types/vector_stores/file_update_params.py,sha256=NGah01luDW_W3psfsYa3ShlswH8pAhC_EebLMvd925I,781 +openai/types/vector_stores/vector_store_file.py,sha256=uHAXG0fdkbeJHS21gWmXourPYlc4GyyXkdam-EENwtU,2431 +openai/types/vector_stores/vector_store_file_batch.py,sha256=W1VoZE_PaiiOxRKG3empVJfr22oc7bE14dL9jheMG14,1512 +openai/types/vector_stores/vector_store_file_deleted.py,sha256=sOds3FSmDBFhe25zoSAz2vHsmG2bo4s2PASgB_M6UU0,321 +openai/types/video.py,sha256=3jT9tcZYJDAA93XgdKxpMT2WgpzwYcxKmVj9pJweWec,1698 +openai/types/video_create_error.py,sha256=DZpLbIAIOXOaZDNZk1dyVYMZHuxu81xB34krRLF6ddU,415 +openai/types/video_create_params.py,sha256=xe09Ac0l_M_PsKFIAdw90jJfZIs2QePxu_x5Qw1oUvU,1015 +openai/types/video_delete_response.py,sha256=eiD7lHgtxXIl0sY-JzhrKzWfRFdiGne24LfPZ9tQIho,529 +openai/types/video_download_content_params.py,sha256=MXcSQOL67hzODH__CRf7g6i74hjXJG9I0zPIqqBjnlU,405 +openai/types/video_list_params.py,sha256=pa8Nd6-hrc2fF8ZQRf4udebbMXpMDEKDrAAH9niSlgk,550 +openai/types/video_model.py,sha256=fkUBLAJ37g6TOBcXZGUgAMUhNNCDtSYCoEeMWiw0iJc,329 +openai/types/video_model_param.py,sha256=TeQQgBVyDxOuVo5qibiJIS9qJObnCtjbHGZEuam-SZc,375 +openai/types/video_remix_params.py,sha256=cFh9Tuaa1HH-cWyScfHPlw7N8nU-fg_AW0BL7S1yjR4,346 +openai/types/video_seconds.py,sha256=HyRb-NR4sVEGe2DoYZIQGig4kOrbbFfRYiqVejAgFbg,215 +openai/types/video_size.py,sha256=H1o0EhMbmicXdvaTC3wL-DnghhXzB7EkBChHL-gqdbI,243 +openai/types/webhooks/__init__.py,sha256=T8XC8KrJNXiNUPevxpO4PJi__C-HZgd0TMg7D2bRPh4,1828 +openai/types/webhooks/__pycache__/__init__.cpython-310.pyc,, +openai/types/webhooks/__pycache__/batch_cancelled_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/batch_completed_webhook_event.cpython-310.pyc,, 
+openai/types/webhooks/__pycache__/batch_expired_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/batch_failed_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/eval_run_canceled_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/eval_run_failed_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/eval_run_succeeded_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/fine_tuning_job_cancelled_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/fine_tuning_job_failed_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/fine_tuning_job_succeeded_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/realtime_call_incoming_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/response_cancelled_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/response_completed_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/response_failed_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/response_incomplete_webhook_event.cpython-310.pyc,, +openai/types/webhooks/__pycache__/unwrap_webhook_event.cpython-310.pyc,, +openai/types/webhooks/batch_cancelled_webhook_event.py,sha256=1iE0xOSTWzU8FJD5ruqgZazkOdCjmBZ_PB9_4Gmif7Y,862 +openai/types/webhooks/batch_completed_webhook_event.py,sha256=XCFcMnvn5xosPWdUwp3sO8wi4zYNefHWc_z6btzdGAE,862 +openai/types/webhooks/batch_expired_webhook_event.py,sha256=wNL76DW5xg9Jm8hqpPP-X5GHz3_wajmoNwc0jufgXtI,841 +openai/types/webhooks/batch_failed_webhook_event.py,sha256=_GpPkiVIL7N4bySS74Px97-niWeDM3hdzcDCViL6j1k,835 +openai/types/webhooks/eval_run_canceled_webhook_event.py,sha256=ZJkU4lAyr05OB-9t12njluzBmel81MbvhbSnOO2-L2M,840 +openai/types/webhooks/eval_run_failed_webhook_event.py,sha256=SbvytOgYKaIM33ud9pFLPjafUmRNAWF1juqAa-PLisI,819 +openai/types/webhooks/eval_run_succeeded_webhook_event.py,sha256=-ZZ3cfwXa4r2uyTrwPLvUxIQUZ4sjqTwdF6R2WE6fok,837 
+openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py,sha256=IDUIOsMJqJTqE9AHOx2VNextcyWFkQcP2xTZ09bAAlk,892 +openai/types/webhooks/fine_tuning_job_failed_webhook_event.py,sha256=B2341NT-mry_Rv9kuQL-HQrRT6cyIidPR-vdlm3CCrE,865 +openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py,sha256=qxt7SsRh0bT0CSpEmRQhdoBpzCedzXZ06qaV2AiA3JI,883 +openai/types/webhooks/realtime_call_incoming_webhook_event.py,sha256=44kdeD1WbPjaqpf5S8EIfxtrSudjlpaaz74_9-rIzrw,1155 +openai/types/webhooks/response_cancelled_webhook_event.py,sha256=gmAVs2WIUoF1_HLbZ4whmTl6WPzEAsWzKFq6WiZsku8,870 +openai/types/webhooks/response_completed_webhook_event.py,sha256=mwNAM5x1-uqL39j_gCXPGSPLjzuPZoYwBZ-uo3ia5Dw,870 +openai/types/webhooks/response_failed_webhook_event.py,sha256=ka3VuDmBV7d9hxoFWktSZFXmtQrd4vrL4OinnMriyvQ,843 +openai/types/webhooks/response_incomplete_webhook_event.py,sha256=egeszUAnZW5j7-JBYXqNNUPNcDJc_Rl_shR2Lyz3iLM,878 +openai/types/webhooks/unwrap_webhook_event.py,sha256=KrfVL0-NsOuWHtRGiJfGMYwI8blUr09vUqUVJdZNpDQ,2039 +openai/types/websocket_connection_options.py,sha256=4cAWpv1KKp_9pvnez7pGYzO3s8zh1WvX2xpBhpe-96k,1840 +openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62 diff --git a/portkey_ai/_vendor/openai-2.7.1.dist-info/REQUESTED b/portkey_ai/_vendor/openai-2.16.0.dist-info/REQUESTED similarity index 100% rename from portkey_ai/_vendor/openai-2.7.1.dist-info/REQUESTED rename to portkey_ai/_vendor/openai-2.16.0.dist-info/REQUESTED diff --git a/portkey_ai/_vendor/openai-2.7.1.dist-info/WHEEL b/portkey_ai/_vendor/openai-2.16.0.dist-info/WHEEL similarity index 100% rename from portkey_ai/_vendor/openai-2.7.1.dist-info/WHEEL rename to portkey_ai/_vendor/openai-2.16.0.dist-info/WHEEL diff --git a/portkey_ai/_vendor/openai-2.7.1.dist-info/entry_points.txt b/portkey_ai/_vendor/openai-2.16.0.dist-info/entry_points.txt similarity index 100% rename from portkey_ai/_vendor/openai-2.7.1.dist-info/entry_points.txt rename to 
portkey_ai/_vendor/openai-2.16.0.dist-info/entry_points.txt diff --git a/portkey_ai/_vendor/openai-2.7.1.dist-info/licenses/LICENSE b/portkey_ai/_vendor/openai-2.16.0.dist-info/licenses/LICENSE similarity index 99% rename from portkey_ai/_vendor/openai-2.7.1.dist-info/licenses/LICENSE rename to portkey_ai/_vendor/openai-2.16.0.dist-info/licenses/LICENSE index f011417a..cbb5bb26 100644 --- a/portkey_ai/_vendor/openai-2.7.1.dist-info/licenses/LICENSE +++ b/portkey_ai/_vendor/openai-2.16.0.dist-info/licenses/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2025 OpenAI + Copyright 2026 OpenAI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/portkey_ai/_vendor/openai-2.7.1.dist-info/RECORD b/portkey_ai/_vendor/openai-2.7.1.dist-info/RECORD deleted file mode 100644 index ac058039..00000000 --- a/portkey_ai/_vendor/openai-2.7.1.dist-info/RECORD +++ /dev/null @@ -1,1964 +0,0 @@ -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/__main__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_base_client.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_client.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_compat.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_constants.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_exceptions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_extras/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_extras/_common.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_extras/numpy_proxy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_extras/pandas_proxy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_extras/sounddevice_proxy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_files.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_legacy_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_models.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_module_client.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_qs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_resource.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_streaming.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_types.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_compat.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_datetime_parse.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_logs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_proxy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_reflection.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_resources_proxy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_streams.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_sync.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_transform.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_typing.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_utils/_utils.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/_version.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/_main.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/audio.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/chat/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/chat/completions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/completions.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/files.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/fine_tuning/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/fine_tuning/jobs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/image.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_api/models.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_cli.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_errors.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_models.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_progress.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_tools/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_tools/_main.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_tools/fine_tunes.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_tools/migrate.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/cli/_utils.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/helpers/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/helpers/local_audio_player.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/helpers/microphone.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_old_api.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_parsing/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_parsing/_completions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_parsing/_responses.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_pydantic.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_realtime.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_tools.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/_validators.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/azure.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/_assistants.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/_deltas.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/chat/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/chat/_completions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/chat/_events.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/chat/_types.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/responses/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/responses/_events.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/responses/_responses.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/lib/streaming/responses/_types.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/pagination.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/audio/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/audio/audio.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/audio/speech.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/audio/transcriptions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/audio/translations.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/batches.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/assistants.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/beta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/chatkit/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/chatkit/chatkit.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/chatkit/sessions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/chatkit/threads.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/realtime/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/realtime/realtime.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/realtime/sessions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/realtime/transcription_sessions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/threads/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/threads/messages.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/threads/runs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/threads/runs/runs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/threads/runs/steps.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/beta/threads/threads.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/chat/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/chat/chat.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/chat/completions/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/chat/completions/completions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/chat/completions/messages.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/completions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/containers/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/containers/containers.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/containers/files/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/containers/files/content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/containers/files/files.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/conversations/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/conversations/conversations.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/conversations/items.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/embeddings.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/evals/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/evals/evals.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/evals/runs/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/evals/runs/output_items.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/evals/runs/runs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/files.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/alpha/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/alpha/alpha.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/alpha/graders.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/checkpoints/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/checkpoints/checkpoints.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/checkpoints/permissions.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/fine_tuning.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/jobs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/jobs/checkpoints.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/fine_tuning/jobs/jobs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/images.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/models.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/moderations.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/realtime/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/realtime/calls.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/realtime/client_secrets.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/realtime/realtime.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/responses/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/responses/input_items.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/responses/input_tokens.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/responses/responses.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/uploads/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/uploads/parts.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/uploads/uploads.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/vector_stores/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/vector_stores/file_batches.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/vector_stores/files.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/vector_stores/vector_stores.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/videos.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/resources/webhooks.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/speech_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/speech_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_diarized.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_diarized_segment.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_include.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_segment.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_stream_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_text_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_text_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_text_segment_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_verbose.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/transcription_word.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/translation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/translation_create_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/translation_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio/translation_verbose.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/audio_response_format.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/auto_file_chunking_strategy_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/batch.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/batch_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/batch_error.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/batch_list_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/batch_request_counts.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/batch_usage.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_response_format_option.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_response_format_option_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_stream_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool_choice.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool_choice_function.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool_choice_function_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool_choice_option.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool_choice_option_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool_choice_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/assistant_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chat/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_automatic_thread_titling.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_chatkit_configuration.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_expires_after_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_file_upload.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_history.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_rate_limits.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_rate_limits_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_status.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chat_session_workflow_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chatkit_attachment.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chatkit_response_output_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chatkit_thread.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chatkit_thread_item_list.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chatkit_thread_user_message_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/chatkit_widget_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/session_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/thread_delete_response.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/thread_list_items_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit/thread_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/chatkit_workflow.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/code_interpreter_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/code_interpreter_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/file_search_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/file_search_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/function_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/function_tool_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_create_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_create_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_created_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_delete_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_delete_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_deleted_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_retrieve_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_retrieve_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_truncate_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_truncate_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_truncated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_with_reference.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/conversation_item_with_reference_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/error_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_append_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_append_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_clear_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_clear_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_cleared_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_commit_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_commit_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_committed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_speech_started_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/rate_limits_updated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/realtime_client_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/realtime_client_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/realtime_connect_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/realtime_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/realtime_response_status.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/realtime_response_usage.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/realtime_server_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_audio_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_audio_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_audio_transcript_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_audio_transcript_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_cancel_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_cancel_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_content_part_added_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_content_part_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_create_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_create_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_function_call_arguments_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_function_call_arguments_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_output_item_added_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_output_item_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_text_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/response_text_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/session.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/session_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/session_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/session_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/session_update_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/session_update_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/session_updated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/transcription_session.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/transcription_session_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/transcription_session_update.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/transcription_session_update_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/realtime/transcription_session_updated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/thread.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/thread_create_and_run_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/thread_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/thread_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/thread_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/annotation_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/file_citation_annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/file_citation_delta_annotation.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/file_path_annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/file_path_delta_annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_file.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_file_content_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_file_content_block_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_file_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_file_delta_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_file_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_url.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_url_content_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_url_content_block_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_url_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_url_delta_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/image_url_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_content.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_content_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_content_part_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/message_update_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/refusal_content_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/refusal_delta_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/required_action_function_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/run.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/run_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/run_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/run_status.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/run_submit_tool_outputs_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/run_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/code_interpreter_logs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/code_interpreter_output_image.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/code_interpreter_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/file_search_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/file_search_tool_call_delta.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/function_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/function_tool_call_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/message_creation_step_details.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/run_step.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/run_step_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/run_step_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/run_step_delta_message_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/run_step_include.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/step_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/step_retrieve_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/tool_call_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/tool_call_delta_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/runs/tool_calls_step_details.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/text_content_block.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/text_content_block_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/text_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/beta/threads/text_delta_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_allowed_tool_choice_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_allowed_tools_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_assistant_message_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_audio.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_audio_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_chunk.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_content_part_image.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_content_part_image_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_content_part_input_audio_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_content_part_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_content_part_refusal_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_content_part_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_content_part_text_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_custom_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_developer_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_function_call_option_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_function_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_function_tool.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_function_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_custom_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_custom_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_function_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_function_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_tool_call.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_message_tool_call_union_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_modality.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_named_tool_choice_custom_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_named_tool_choice_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_prediction_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_reasoning_effort.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_role.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_store_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_stream_options_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_system_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_token_logprob.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_tool_choice_option_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_tool_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_tool_union_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/chat_completion_user_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/completion_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/completion_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/completion_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/completions/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/completions/message_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/parsed_chat_completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat/parsed_function_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/chat_model.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/completion_choice.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/completion_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/completion_usage.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/container_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/container_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/container_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/container_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/container_retrieve_response.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/containers/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/containers/file_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/containers/file_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/containers/file_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/containers/file_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/containers/file_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/containers/files/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/computer_screenshot_content.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/conversation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/conversation_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/conversation_deleted_resource.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/conversation_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/conversation_item_list.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/conversation_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/input_file_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/input_file_content_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/input_image_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/input_image_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/input_text_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/input_text_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/item_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/item_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/item_retrieve_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/message.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/output_text_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/output_text_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/refusal_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/refusal_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/summary_text_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/conversations/text_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/create_embedding_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/embedding.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/embedding_create_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/embedding_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_custom_data_source_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_delete_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_stored_completions_data_source_config.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/eval_update_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/create_eval_completions_run_data_source.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/create_eval_completions_run_data_source_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/create_eval_jsonl_run_data_source.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/create_eval_jsonl_run_data_source_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/eval_api_error.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/run_cancel_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/run_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/run_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/run_delete_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/run_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/run_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/run_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/runs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/runs/output_item_list_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/runs/output_item_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/evals/runs/output_item_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_chunking_strategy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_chunking_strategy_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_object.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/file_purpose.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/alpha/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/alpha/grader_run_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/alpha/grader_run_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/alpha/grader_validate_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/alpha/grader_validate_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/checkpoints/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/checkpoints/permission_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/checkpoints/permission_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/checkpoints/permission_delete_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/checkpoints/permission_retrieve_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/checkpoints/permission_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/dpo_hyperparameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/dpo_hyperparameters_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/dpo_method.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/dpo_method_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/fine_tuning_job.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/fine_tuning_job_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/fine_tuning_job_integration.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/fine_tuning_job_wandb_integration.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/job_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/job_list_events_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/job_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/jobs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/jobs/checkpoint_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/reinforcement_hyperparameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/reinforcement_hyperparameters_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/reinforcement_method.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/reinforcement_method_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/supervised_hyperparameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/supervised_hyperparameters_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/supervised_method.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/fine_tuning/supervised_method_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/label_model_grader.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/label_model_grader_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/multi_grader.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/multi_grader_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/python_grader.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/python_grader_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/score_model_grader.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/score_model_grader_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/string_check_grader.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/string_check_grader_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/text_similarity_grader.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/graders/text_similarity_grader_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_create_variation_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_edit_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_edit_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_edit_partial_image_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_edit_stream_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_gen_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_gen_partial_image_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_gen_stream_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_generate_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/image_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/images_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/model_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/moderation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/moderation_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/moderation_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/moderation_image_url_input_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/moderation_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/moderation_multi_modal_input_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/moderation_text_input_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/other_file_chunking_strategy_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/audio_transcription.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/audio_transcription_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/call_accept_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/call_create_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/call_refer_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/call_reject_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/client_secret_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/client_secret_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_added.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_create_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_create_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_delete_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_delete_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_deleted_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_done.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_input_audio_transcription_segment.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_retrieve_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_retrieve_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_truncate_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_truncate_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/conversation_item_truncated_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_append_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_append_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_clear_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_clear_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_cleared_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_commit_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_commit_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_committed_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_speech_started_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_speech_stopped_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/input_audio_buffer_timeout_triggered.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/log_prob_properties.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/mcp_list_tools_completed.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/mcp_list_tools_failed.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/mcp_list_tools_in_progress.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/noise_reduction_type.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/output_audio_buffer_clear_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/output_audio_buffer_clear_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/rate_limits_updated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_config_input.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_config_input_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_config_output.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_config_output_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_config_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_formats.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_formats_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_input_turn_detection.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_audio_input_turn_detection_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_client_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_client_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_connect_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_assistant_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_assistant_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_function_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_function_call_output.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_function_call_output_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_function_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_system_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_system_message_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_user_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_conversation_item_user_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_error.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_error_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_function_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_function_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_approval_request.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_approval_request_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_approval_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_approval_response_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_list_tools.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_list_tools_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_protocol_error.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_protocol_error_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_tool_call_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_tool_execution_error.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcp_tool_execution_error_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcphttp_error.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_mcphttp_error_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_create_audio_output.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_create_audio_output_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_create_mcp_tool.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_create_mcp_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_create_params_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_status.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_usage.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_usage_input_token_details.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_response_usage_output_token_details.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_server_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_session_client_secret.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_session_create_request.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_session_create_request_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_session_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tool_choice_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tool_choice_config_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tools_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tools_config_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tools_config_union.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tools_config_union_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tracing_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_tracing_config_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_audio.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_audio_input.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_audio_input_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_audio_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_create_request.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_create_request_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_transcription_session_turn_detection.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_truncation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_truncation_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_truncation_retention_ratio.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/realtime_truncation_retention_ratio_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_audio_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_audio_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_audio_transcript_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_audio_transcript_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_cancel_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_cancel_event_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_content_part_added_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_content_part_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_create_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_create_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_function_call_arguments_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_function_call_arguments_done_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_mcp_call_arguments_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_mcp_call_arguments_done.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_mcp_call_completed.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_mcp_call_failed.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_mcp_call_in_progress.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_output_item_added_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_output_item_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_text_delta_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/response_text_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/session_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/session_update_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/session_update_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/realtime/session_updated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/computer_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/computer_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/custom_tool.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/custom_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/easy_input_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/easy_input_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/file_search_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/file_search_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/function_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/function_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/input_item_list_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/input_token_count_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/input_token_count_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/parsed_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_audio_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_audio_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_audio_transcript_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_audio_transcript_done_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_code_interpreter_call_code_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_code_interpreter_call_code_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_code_interpreter_call_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_code_interpreter_call_in_progress_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_code_interpreter_call_interpreting_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_code_interpreter_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_code_interpreter_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_completed_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_computer_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_computer_tool_call_output_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_computer_tool_call_output_screenshot.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_computer_tool_call_output_screenshot_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_computer_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_content_part_added_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_content_part_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_conversation_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_custom_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_custom_tool_call_input_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_custom_tool_call_input_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_custom_tool_call_output.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_custom_tool_call_output_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_custom_tool_call_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_error.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_error_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_failed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_file_search_call_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_file_search_call_in_progress_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_file_search_call_searching_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_file_search_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_file_search_tool_call_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_format_text_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_format_text_config_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_format_text_json_schema_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_format_text_json_schema_config_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_call_arguments_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_call_arguments_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_call_output_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_call_output_item_list.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_call_output_item_list_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_call_output_item_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_tool_call_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_tool_call_output_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_web_search.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_function_web_search_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_image_gen_call_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_image_gen_call_generating_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_image_gen_call_in_progress_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_image_gen_call_partial_image_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_in_progress_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_includable.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_incomplete_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_audio.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_audio_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_file.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_file_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_file_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_file_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_image.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_image_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_image_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_image_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_item_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_message_content_list.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_message_content_list_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_message_item.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_text_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_text_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_input_text_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_item_list.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_call_arguments_delta_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_call_arguments_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_call_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_call_failed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_call_in_progress_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_list_tools_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_list_tools_failed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_mcp_list_tools_in_progress_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_item.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_item_added_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_item_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_refusal.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_refusal_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_text_annotation_added_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_output_text_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_prompt.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_prompt_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_queued_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_item.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_item_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_summary_part_added_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_summary_part_done_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_summary_text_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_summary_text_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_text_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_reasoning_text_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_refusal_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_refusal_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_retrieve_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_status.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_stream_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_text_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_text_config_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_text_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_text_done_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_usage.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_web_search_call_completed_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_web_search_call_in_progress_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/response_web_search_call_searching_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_allowed.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_allowed_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_custom.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_custom_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_function.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_function_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_mcp.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_mcp_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_options.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_types.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_choice_types_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/web_search_preview_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/web_search_preview_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/web_search_tool.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/responses/web_search_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/all_models.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/chat_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/comparison_filter.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/compound_filter.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/custom_tool_input_format.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/error_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/function_definition.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/function_parameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/metadata.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/reasoning.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/reasoning_effort.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/response_format_json_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/response_format_json_schema.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/response_format_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/response_format_text_grammar.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/response_format_text_python.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared/responses_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/chat_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/comparison_filter.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/compound_filter.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/custom_tool_input_format.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/function_definition.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/function_parameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/metadata.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/reasoning.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/reasoning_effort.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/response_format_json_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/response_format_json_schema.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/response_format_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/shared_params/responses_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/static_file_chunking_strategy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/static_file_chunking_strategy_object.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/static_file_chunking_strategy_object_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/static_file_chunking_strategy_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/upload.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/upload_complete_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/upload_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/uploads/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/uploads/part_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/uploads/upload_part.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_store.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_store_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_store_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_store_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_store_search_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_store_search_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_store_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/file_batch_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/file_batch_list_files_params.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/file_content_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/file_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/file_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/file_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/vector_store_file.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/vector_store_file_batch.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/vector_stores/vector_store_file_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_create_error.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_delete_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_download_content_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_remix_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_seconds.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/video_size.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/batch_cancelled_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/batch_completed_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/batch_expired_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/batch_failed_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/eval_run_canceled_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/eval_run_failed_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/eval_run_succeeded_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/fine_tuning_job_failed_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/realtime_call_incoming_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/response_cancelled_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/response_completed_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/response_failed_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/response_incomplete_webhook_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/webhooks/unwrap_webhook_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/types/websocket_connection_options.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-qqkh5s9r/lib/python/openai/version.cpython-39.pyc,, -../../bin/openai,sha256=hl2E5BbKWVfkczcW65l8G1zyeJ3Si5m9TUnp5aG8gtY,276 -openai-2.7.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -openai-2.7.1.dist-info/METADATA,sha256=44GVu1kBO70bkX7Yljzc_Yknc_pB4uwtjS7OXtJuhY0,29092 -openai-2.7.1.dist-info/RECORD,, -openai-2.7.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -openai-2.7.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87 -openai-2.7.1.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43 -openai-2.7.1.dist-info/licenses/LICENSE,sha256=1xHtN7sZrnJJr40JO4_G6nWP01VLkqxhUAwa08wOP7k,11336 -openai/__init__.py,sha256=Fvc0dwOoaIZDN_s3iV62jlxeU5d7qn-Q8eQIaPIdD8g,11196 -openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30 -openai/_base_client.py,sha256=VjNDJ-oPNC34F3NRHbRSVRpsQM-sazL4fljxad_5tLQ,68237 -openai/_client.py,sha256=uZlue35miO_zv84wqaCRvqchb6WH3UaKPbZGIvIs0Gw,44645 -openai/_compat.py,sha256=k2XpUhYfgp5ZXkZkQAftJHt_UWFjUct1Sm2ye2kPBXo,6964 -openai/_constants.py,sha256=WmCwgT4tGmFsSrltb26f3bM8ftUyFYkzh32Ny5yl-So,467 -openai/_exceptions.py,sha256=TYcCxnfT7fln5duvVnCVJ0znuUHXSAbCT5sAMnaeKjU,5008 -openai/_extras/__init__.py,sha256=sainrYWujCxIyL24wNpKfMVr-ZyBPlnSZfqXcg2S6Xg,165 -openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364 -openai/_extras/numpy_proxy.py,sha256=LyTZkKDdnjz0h1SKLsphrhmXyUsJ_xEUhTFMrCf7k7g,805 -openai/_extras/pandas_proxy.py,sha256=NCEt1Dqwc_0H85YdsWPDE3lPDJtYnBT8G-gJE_BCeEc,637 
-openai/_extras/sounddevice_proxy.py,sha256=xDoE21YGu13dSAJJkiOM9Qdb7uOIv5zskaJRX6xciEg,725 -openai/_files.py,sha256=cQOoF0UFpnyH5JMIdu_EvGpj_dGzH1ojtJvyX7Xwqn0,3612 -openai/_legacy_response.py,sha256=fx9I0IInZY1zr2bUmpqW2ZUcL9JW2xS6S4NqFuwhdPM,16237 -openai/_models.py,sha256=3a_WIVJsJSD3NvO0w1r8ockx1aH8BETdvluPWZK9Ci8,32192 -openai/_module_client.py,sha256=33fORSMWHuxqpvlROvYVMEIvaWUishUpSeaqpsOjWuI,5033 -openai/_qs.py,sha256=craIKyvPktJ94cvf9zn8j8ekG9dWJzhWv0ob34lIOv4,4828 -openai/_resource.py,sha256=IQihFzFLhGOiGSlT2dO1ESWSTg2XypgbtAldtGdTOqU,1100 -openai/_response.py,sha256=zLVaMPYE1o2Tz1eS5_bnJNGMikRN1byMpMcVpW1tgIU,29510 -openai/_streaming.py,sha256=khbxY5QCmugoe9RIQUldep02hjVnSXt4kIUw8xQN0MM,13439 -openai/_types.py,sha256=ducEA5cX8RYL6KegX0S9zNZIx2CvDS4kadu24_JozoM,7364 -openai/_utils/__init__.py,sha256=qiOG_n0G-sP5r5jNvD4OUaeaVLFEw5s-h7h7b0nD7Nk,2465 -openai/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195 -openai/_utils/_datetime_parse.py,sha256=bABTs0Bc6rabdFvnIwXjEhWL15TcRgWZ_6XGTqN8xUk,4204 -openai/_utils/_logs.py,sha256=IC5iwPflwelNpJEpWsvK3up-pol5hR8k_VL9fSukk_Y,1351 -openai/_utils/_proxy.py,sha256=aglnj2yBTDyGX9Akk2crZHrl10oqRmceUy2Zp008XEs,1975 -openai/_utils/_reflection.py,sha256=aTXm-W0Kww4PJo5LPkUnQ92N-2UvrK1-D67cJVBlIgw,1426 -openai/_utils/_resources_proxy.py,sha256=AHHZCOgv-2CRqB4B52dB7ySlE5q6QCWj0bsTqNmzikw,589 -openai/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289 -openai/_utils/_sync.py,sha256=TpGLrrhRNWTJtODNE6Fup3_k7zrWm1j2RlirzBwre-0,2862 -openai/_utils/_transform.py,sha256=hzILp2ijV9J7D-uoEDmadtyCmzMK6DprJP8IlwEg0ZY,15999 -openai/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4786 -openai/_utils/_utils.py,sha256=Z2y9rNbK-worRedH9Ub9tO_FSIjl0SH2AV9Tdgz9LUA,12667 -openai/_version.py,sha256=fsIQ3UEUnequgOmMCe_bsuMisf-ugnez3G6Dz8uAwsE,158 -openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31 
-openai/cli/_api/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 -openai/cli/_api/_main.py,sha256=3xVyycq-4HEYMBdMDJFk893PTXpr8yvkGL3eCiuSx8E,501 -openai/cli/_api/audio.py,sha256=0GU49a-XurLlyVEy2V9IZ_pDmjL1XEBI7Jp7fQfJ5Sk,3757 -openai/cli/_api/chat/__init__.py,sha256=MhFUQH9F6QCtbPMlbsU_DWTd7wc5DSCZ7Wy3FBGVij0,300 -openai/cli/_api/chat/completions.py,sha256=GyfAo3B2w2ySV0dK9D2IIVA4fOb0zqJZadQ-Yc8a_yU,5536 -openai/cli/_api/completions.py,sha256=Jy1rlQqw__12ZfbTrnZJgoGBbDKJ58kOUAT-vkLr5kE,6334 -openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345 -openai/cli/_api/fine_tuning/__init__.py,sha256=hZeWhTZtIRAl1xgSbznjpCYy9lnUUXngh8uEIbVn__Y,286 -openai/cli/_api/fine_tuning/jobs.py,sha256=4wj9DPfw3343fJQW9j52Q-ga4jYa1haOTn4yYsH_zqk,5311 -openai/cli/_api/image.py,sha256=3UDZ1R8SjYh4IOhhdJqf20FPqPgPdhpRxqu3eo5BKhU,5014 -openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295 -openai/cli/_cli.py,sha256=42j_eI8PPdFbVjufluregmNYTdwrw3yQtsHtTzyNvcQ,6779 -openai/cli/_errors.py,sha256=nejlu1HnOyAIr2n7uqpFtWn8XclWj_9N8FwgfT3BPK8,471 -openai/cli/_models.py,sha256=_budygMbXh3Fv-w-TDfWecZNiKfox6f0lliCUytxE1Q,491 -openai/cli/_progress.py,sha256=aMLssU9jh-LoqRYH3608jNos7r6vZKnHTRlHxFznzv4,1406 -openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 -openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467 -openai/cli/_tools/fine_tunes.py,sha256=RQgYMzifk6S7Y1I1K6huqco2QxmXa7gVUlHl6SrKTSU,1543 -openai/cli/_tools/migrate.py,sha256=o-iomzhtC6N6X5H5GDlgQ_QOaIovE2YA9oHc_tIAUj8,4497 -openai/cli/_utils.py,sha256=oiTc9MnxQh_zxAZ1OIHPkoDpCll0NF9ZgkdFHz4T-Bs,848 -openai/helpers/__init__.py,sha256=F0x_Pguq1XC2KXZYbfxUG-G_FxJ3mlsi7HaFZ1x-g9A,130 -openai/helpers/local_audio_player.py,sha256=7MWwt1BYEh579z1brnQ2mUEB0Ble4UoGMHDKusOfZJQ,5852 -openai/helpers/microphone.py,sha256=6tIHWZGpRA5XvUoer-nPBvHbrmxK7CWx3_Ta-qp1H54,3341 
-openai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224 -openai/lib/__init__.py,sha256=BMTfMnlbugMgDA1STDIAlx4bI4t4l_8bQmJxd0th0n8,126 -openai/lib/_old_api.py,sha256=XZnXBrEKuTd70iJirj5mGW35fZoqruJobbBTq6bvg10,1947 -openai/lib/_parsing/__init__.py,sha256=wS3BYvMGj9TqiPqOe3rO1sleaAJqHVuCaQuCE5rZIUw,539 -openai/lib/_parsing/_completions.py,sha256=3vihFrFWJIrToaWYjJMqn42gTyNmrQhXvi2vr5Wduo8,10629 -openai/lib/_parsing/_responses.py,sha256=uweKd5rcBfkl_Kv6DCQdvGGW6ZR2M5dUStsVla_HAUI,6005 -openai/lib/_pydantic.py,sha256=Cf0vGwuWdNEuIUg8WNREjWRGApMObgl8DjdLU4f5jAc,5623 -openai/lib/_realtime.py,sha256=DyFqSff1XlgJdD1I5tJXdleQP2WiDT8ISeYbpHHih6c,3974 -openai/lib/_tools.py,sha256=Dc4U2TXKvfAvVUvDS30SDeftrwgGM2vZ85t5ojLHiEg,1969 -openai/lib/_validators.py,sha256=cXJXFuaAl7jeJcYHXXnFa4NHGtHs-_zt3Zs1VVCmQo4,35288 -openai/lib/azure.py,sha256=dLzUXTXUOnfarLdDyO6dVzp8wY2vTMFFHUJZLuFznWY,26537 -openai/lib/streaming/__init__.py,sha256=kD3LpjsqU7caDQDhB-YjTUl9qqbb5sPnGGSI2yQYC70,379 -openai/lib/streaming/_assistants.py,sha256=LUWSinmYopQIkQ5xSg73b6BWbkRkQS5JvX62w_V9xSw,40692 -openai/lib/streaming/_deltas.py,sha256=I7B_AznXZwlBmE8Puau7ayTQUx6hMIEVE8FYTQm2fjs,2502 -openai/lib/streaming/chat/__init__.py,sha256=7krL_atOvvpQkY_byWSglSfDsMs5hdoxHmz4Ulq7lcc,1305 -openai/lib/streaming/chat/_completions.py,sha256=4PDLu_1-wQOrAwHY-Gz8NIQ8UnJ9gshwrmxuMDesFp8,30775 -openai/lib/streaming/chat/_events.py,sha256=lstVmM6YR2Cs9drikzrY9JCZn9Nbfym0aKIPtNpxL6w,2618 -openai/lib/streaming/chat/_types.py,sha256=-SYVBNhGkOUoJ-8dotxpCRqPJpfyOQ8hwR2_HrsQCRI,739 -openai/lib/streaming/responses/__init__.py,sha256=MwE1Oc3OIiXjtuRFsuP_k5Ra8pNiqKpc1GZum-8ZRJM,543 -openai/lib/streaming/responses/_events.py,sha256=3UWmeYgg23E3XTkYVlrpXJPnhBM2kmQFoXh3WiT9CrE,5576 -openai/lib/streaming/responses/_responses.py,sha256=Myeo4so-aMFrzEyNCjX0ypYWTWvY5uDelhe2ygC93lY,13614 -openai/lib/streaming/responses/_types.py,sha256=msq1KWj3e3BLn7NKu5j2kzHgj9kShuoitgXEyTmQxus,276 
-openai/pagination.py,sha256=dtPji3wApb_0rkvYDwh50rl8cjxT3i6EUS6PfTXwhQI,4770 -openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -openai/resources/__init__.py,sha256=YDrG7nC0vTf4xk-JCSs0132OA5XWmqAMtjWu4wypnB4,6067 -openai/resources/audio/__init__.py,sha256=YM7FHvPKVlj_v6EIgfpUQsb6q4hS2hVQ3gfkgic0sP0,1687 -openai/resources/audio/audio.py,sha256=nEIB4q7a1MSYdQkcYH2O6jB-_rNCMDCBJyUuqOL67CI,5491 -openai/resources/audio/speech.py,sha256=ajss0PEI4CnOhYO5tGtZEodIBh4PDh21bhQ7w4uYyjU,10172 -openai/resources/audio/transcriptions.py,sha256=1QK3FSJNcptcrbomBSbQD8IviA-8sT43EDL74Ro8njI,51568 -openai/resources/audio/translations.py,sha256=IsPiYZtr9BLS7pgAWAneU7yNq1E9igDCa-QXN12PhZM,15505 -openai/resources/batches.py,sha256=0WyWzYyxdEj0nuRk5Fnreb_6mLx1Zo7-2OzyxQg_yss,20854 -openai/resources/beta/__init__.py,sha256=chKjkpkqNxO1Dbl9OsCJNXVC1AbDcvTrvfvvAIh5B5I,1570 -openai/resources/beta/assistants.py,sha256=0WEY5uymYP9X48QyI1dzeslCtRDkFdIAEwXsQUmwrdw,46097 -openai/resources/beta/beta.py,sha256=Lrsu8f9haXb4bZphmw9wgHzn8uZUBUUC11mZa3XRbr0,5725 -openai/resources/beta/chatkit/__init__.py,sha256=lJAQpi-JogtnSAlOegSae6WfCfgRLMd8rpPBuT9_2FE,1216 -openai/resources/beta/chatkit/chatkit.py,sha256=CleguF_80H_gAsyX_tLoGxFTD7YItZPshRAZ2QAqfzY,4333 -openai/resources/beta/chatkit/sessions.py,sha256=H8oIyd8V553LruOilYuTnXwU8Mh_z5xOjuu7GunaGIc,11837 -openai/resources/beta/chatkit/threads.py,sha256=kqngETyzx9uRBKtgfq9r9WrtOXpNfnHej4PkrVfnklo,20077 -openai/resources/beta/realtime/__init__.py,sha256=dOXRjPiDqRJXIFoGKSVjzKh3IwSXnLbwHx4ND5OdnVs,1412 -openai/resources/beta/realtime/realtime.py,sha256=tuiq_0PdFmC2p-LNOfQNrVuDEMlLAHKEgeAsPsHLUHU,43694 -openai/resources/beta/realtime/sessions.py,sha256=EQva_qI71CgS35qkK9TGxuibviHwUQ6VzErIzunP4gU,22098 -openai/resources/beta/realtime/transcription_sessions.py,sha256=uTDGEat50lojdD0N8slnZu2RVzMP96rlicpDp4tpl34,14124 -openai/resources/beta/threads/__init__.py,sha256=fQ_qdUVSfouVS5h47DlTb5mamChT4K-v-siPuuAB6do,1177 
-openai/resources/beta/threads/messages.py,sha256=a8HEG-QKIgG8r4XtE0M7ixRBikAmdQEUDWUDf1gkaSg,30794 -openai/resources/beta/threads/runs/__init__.py,sha256=2FfDaqwmJJCd-IVpY_CrzWcFvw0KFyQ3cm5jnTfI-DQ,771 -openai/resources/beta/threads/runs/runs.py,sha256=McfGWrWoVMLLzzFK0w2btqGFUN3QmgL7oOl3pNSkmCE,152745 -openai/resources/beta/threads/runs/steps.py,sha256=YkoPMeMXEzoL09AWF7Eh1lNaJocykV1igmcsZpXKw5Y,16981 -openai/resources/beta/threads/threads.py,sha256=3C3OzlgL0S1mDdnRBowU14Di8W7T81C2BEGFm5Mx42Y,97651 -openai/resources/chat/__init__.py,sha256=8Q9ODRo1wIpFa34VaNwuaWFmxqFxagDtUhIAkQNvxEU,849 -openai/resources/chat/chat.py,sha256=HjcasSCmt-g3-J-RkZQ9HRj_-hPfImakFxdUvvk5mCg,3364 -openai/resources/chat/completions/__init__.py,sha256=KOi8blzNyHWD7nKgcoW3CxZ4428IcNVP0gCU74HySf8,901 -openai/resources/chat/completions/completions.py,sha256=W5fDeIKhrSODZUKlBiB4g5Z6zFn_9MCgol62oeoAKpQ,158390 -openai/resources/chat/completions/messages.py,sha256=AYVwQ24jPQGs2Y-vE6Yjl5nbCECtuw-HpcBEEpCgC-0,8010 -openai/resources/completions.py,sha256=wO39_sLxmSzTI6Mp13KzjqaxMgFZw4l-t0_9xxDbX_4,59201 -openai/resources/containers/__init__.py,sha256=7VzY-TFwG3x5D_kUCs_iAQaaCKAswt1Jk70KpmnU8Do,849 -openai/resources/containers/containers.py,sha256=yz2zZoHT_rjXh-_Ij2-shBgerSkal61DqA1TNWkDqog,19240 -openai/resources/containers/files/__init__.py,sha256=nDhg0wY7eHRMO-xOErno0mV0Ya_ynlmKAp-4a3nj-us,810 -openai/resources/containers/files/content.py,sha256=-jupriq97X2kq_yCdYihZ1h2qCx-IMbaaR10M4lz6TA,6491 -openai/resources/containers/files/files.py,sha256=jjiRGS489CzoOXb3nvsD-i3qTSINE9CrAo2jZPWxyLI,21042 -openai/resources/conversations/__init__.py,sha256=Uslb4pakT8pQJGQ29CvoiN-SvN2AgMum-TeIDyYTzQE,888 -openai/resources/conversations/conversations.py,sha256=IjnSvilsJG_yK4IoRP86R6_5MFlHSpZt6lWxgpbGP-Y,19151 -openai/resources/conversations/items.py,sha256=q3XbPsh09Gb9qYisb6BEa9BExX4HF5oMu-Z0khdAFlY,23969 -openai/resources/embeddings.py,sha256=GYA_sI2h5auPwyHKm44-brPxRxqvcQaH0JQMZW13bMA,12374 
-openai/resources/evals/__init__.py,sha256=DXhYb6mCKKY2bDdS3s4raH1SvwPUyaBFvdHgPEbwRWY,771 -openai/resources/evals/evals.py,sha256=goQ9ek2_xI34SG7GkwpqKhXO2hZouq5bxS26EejY-cI,25904 -openai/resources/evals/runs/__init__.py,sha256=7EtKZ43tGlmAOYyDdyFXy80tk2X8AmXb5taTWRRXBXE,850 -openai/resources/evals/runs/output_items.py,sha256=7pcGpGc61Df4jQIgxRYLX-27wz_8qc0Ux-ni_EfVvwA,12530 -openai/resources/evals/runs/runs.py,sha256=228Vf9S8_dz0tZAWCh2ehECQYg_Z4JXNV5MRuvUtDh4,24359 -openai/resources/files.py,sha256=R8gZj_H63NYWjQ3cxkJyidCyyVzSQHbb33wWzlWLJRM,30436 -openai/resources/fine_tuning/__init__.py,sha256=RQPC5QfqE-ByhRQbJK-j7ooUrkBO9s9bKt5xkzOL8ls,1597 -openai/resources/fine_tuning/alpha/__init__.py,sha256=QKAYZscx1Fw3GLD8cVdZAYG9L_i6MnPGeifn8GgcztU,810 -openai/resources/fine_tuning/alpha/alpha.py,sha256=P-zLOHpI-Aa0jUUWspkanL7WpUtfjwIGDH8KTGDNeHY,3274 -openai/resources/fine_tuning/alpha/graders.py,sha256=TA39PsdXWjxsts6p_UjPhyTwE4a1O7nQOkUC0V2ZHbU,10758 -openai/resources/fine_tuning/checkpoints/__init__.py,sha256=rvsbut5FCQNAr-VjvL-14GFT3Tld49FlFuBJDpfxBug,940 -openai/resources/fine_tuning/checkpoints/checkpoints.py,sha256=njpz496JifeZ8RXjoYUb1Tj9tBItuXRxGJHW2jrrfwo,3606 -openai/resources/fine_tuning/checkpoints/permissions.py,sha256=A9SfSQk7o0gbqhu2NMZTW53Tq5c3zbBDSgL_0K0t1WQ,17103 -openai/resources/fine_tuning/fine_tuning.py,sha256=UL4MXoUqEnbSZ5e4dnbUPTtd4tE-1p2L7Hh_0CQ_0s0,5410 -openai/resources/fine_tuning/jobs/__init__.py,sha256=_smlrwijZOCcsDWqKnofLxQM2QLucZzXgboL9zJBPHw,849 -openai/resources/fine_tuning/jobs/checkpoints.py,sha256=-QQNOZJplnCJyHCFTFO-DMN-AWc1Dp8p9Hifffgz5a0,7442 -openai/resources/fine_tuning/jobs/jobs.py,sha256=jIXuCijf7v9ufH3SqgWBrQAFg5uqPKAuyXgNDmLEXK4,37033 -openai/resources/images.py,sha256=qVirq5eAepwm98ass32H1iqRlTYdFrnHEMfIgtVXBxg,95649 -openai/resources/models.py,sha256=1PDMpmdtaGiNHZNWPL-sI_I-SDOjuK-yfm2oq7mKiGI,11232 -openai/resources/moderations.py,sha256=8BWoTw8QHsSUbgByBlLxHHYEeeozFsY8n3j-ah13YdI,7808 
-openai/resources/realtime/__init__.py,sha256=5v7pt2NQKz1j-X1z4bTqupmE3G8O5_G4PYCyw3F3-eo,1269 -openai/resources/realtime/calls.py,sha256=J7hmEm_l2qOkLTjHF4Fv5xxOTElVw2YF2IeOQOmD3Gc,33169 -openai/resources/realtime/client_secrets.py,sha256=Z8NmSg2GGN3we3w89Un26jWp5OO9lxOi8oS4lSYMrUg,7700 -openai/resources/realtime/realtime.py,sha256=ruZXI9QQTMbyfoe8zMeVSa0UoOTeXGgDEqpVoY8o0og,44868 -openai/resources/responses/__init__.py,sha256=9LkjQomOIh6B5Qg1HbdCgjMRoCzIBzyRaYNyt3moA38,1322 -openai/resources/responses/input_items.py,sha256=tzg31yUowcCMqU32TBHI18YzRjqNs_EGwKdpSU8bSTs,8774 -openai/resources/responses/input_tokens.py,sha256=RwdAOZEtcFCL0kiZHZILV2tsnKN4jQ7ueFUJALkPink,14494 -openai/resources/responses/responses.py,sha256=jIt_Hkcn5-1PX3-3j-S4xI3gUXloa8nJm6vIbbHFjk0,156584 -openai/resources/uploads/__init__.py,sha256=HmY3WQgvUI2bN3CjfWHWQOk7UUC6Ozna97_lHhrrRSA,810 -openai/resources/uploads/parts.py,sha256=2Vov0reg5wdOSGSJ7hhs9pqsIofkhqjoUoE_AgXHLZM,8121 -openai/resources/uploads/uploads.py,sha256=OeCCAEK1W1ICTfraOBbYRrBclnzroEOaAOpuT05Fyvg,25443 -openai/resources/vector_stores/__init__.py,sha256=11Xn1vhgndWiI0defJHv31vmbtbDgh2GwZT3gX8GgHk,1296 -openai/resources/vector_stores/file_batches.py,sha256=eSrMBeNf-bKsHu40HHyKkejo8BNoFAE32LnG119FxIs,34196 -openai/resources/vector_stores/files.py,sha256=xJStwcbKIzVzqIXK7G-Mfll61wbt154SObua945XXEI,39703 -openai/resources/vector_stores/vector_stores.py,sha256=L1vifq5tiw7EnBuxYREA_VPMzyRcePiQG4QLQL5vd18,35451 -openai/resources/videos.py,sha256=E6h9M6xH6JrT5HrYIpO8V-hQWBVO-4Vd2G8vfFAEPbc,31700 -openai/resources/webhooks.py,sha256=wz3filqxxUEhhW5RSa-1LiN10MzafKXJPl5-Wb1mCew,7820 -openai/types/__init__.py,sha256=FO5EisUiCUE6ly-QUuUvtgxVjc8Qt4FrdtdlnIzBWYE,7564 -openai/types/audio/__init__.py,sha256=8DwArrrSRwIemWLhWLItaV3F_EgXgtVPSu4yUIf8iyM,1723 -openai/types/audio/speech_create_params.py,sha256=u7FQabjLOgNhZu4FMyk1sa9qbadrmWzc-VnSesZXz3M,1780 -openai/types/audio/speech_model.py,sha256=i_YqCZ4AWN0jCY70F8FAazQAsbQyG-VUQGxSJnLsviw,237 
-openai/types/audio/transcription.py,sha256=lUl3qdjgrK94zCjgpD4f9xa9w-vNhOTxh9hPeYj3ymc,2102 -openai/types/audio/transcription_create_params.py,sha256=5uee93j79uHV9Tfkd_qhyYmMOyaCIC7KT2H1-1JNQ3Q,6902 -openai/types/audio/transcription_create_response.py,sha256=-x20GMKDHrHzl_37fsGjpPlJC91ZNGwrD_5-AkZgnOw,459 -openai/types/audio/transcription_diarized.py,sha256=cuj3Q8gdypJeLUOB0tHG8TPenErE4j7lP2BU4bTuQ98,2018 -openai/types/audio/transcription_diarized_segment.py,sha256=d1XNYfdaFw-SMYwi8skS6rRESXtL1IVfKsqOmQIAoEQ,859 -openai/types/audio/transcription_include.py,sha256=mclUP_50njW7TG4d9m_E6zSjAFW8djPJ6ZTYub71kq0,227 -openai/types/audio/transcription_segment.py,sha256=-pPAGolwIIXUBMic-H5U7aR0u_Aq-pipSA4xTtn_viA,1153 -openai/types/audio/transcription_stream_event.py,sha256=MJNeIqgrVJekLGA3KSzE7lHK3dyhrvvhGLIwgQsZDPE,648 -openai/types/audio/transcription_text_delta_event.py,sha256=mZCyXgLXzXRLUF-U34-IG2CbWTUk-RU1iEZAzTSdcC8,1249 -openai/types/audio/transcription_text_done_event.py,sha256=Q2-fKHeO_niBWWSCl-ZehKKz9DDM7KEimBx5Ha5e4t8,1940 -openai/types/audio/transcription_text_segment_event.py,sha256=Vs3o2MdK8ZChcinC0NWWFPv_-H-n2SMrVCvCUQIaPGQ,681 -openai/types/audio/transcription_verbose.py,sha256=Dm5rPAMeMD-ZwijA8xff34QdOGLiRD5J2CN9R_dTIRo,1114 -openai/types/audio/transcription_word.py,sha256=s2aWssAgHjMOZHhiihs1m4gYWQpjBP2rkI1DE5eZBXc,367 -openai/types/audio/translation.py,sha256=Dlu9YMo0cc44NSCAtLfZnEugkM7VBA6zw2v9bfrLMh0,193 -openai/types/audio/translation_create_params.py,sha256=ejrom_64QOe47gZtrYmDAQkb65wLaZL4-iU-mKVTVq0,1572 -openai/types/audio/translation_create_response.py,sha256=x6H0yjTbZR3vd3d7LdABcn9nrMDNdeMjepcjW1oUfVc,362 -openai/types/audio/translation_verbose.py,sha256=lGB5FqkV-ne__aaGbMTFbEciJ-Sl3wBhlKmETmtROT8,615 -openai/types/audio_model.py,sha256=Bk3ODctNK5okBWPEPbx2V81e_f-OIsF6Rhu0Gvm3lIE,284 -openai/types/audio_response_format.py,sha256=67QSPDpT9_yYhxFYYd15N3nukwKrHJ7f8pvVQiVOQuk,276 
-openai/types/auto_file_chunking_strategy_param.py,sha256=hbBtARkJXSJE7_4RqC-ZR3NiztUp9S4WuG3s3W0GpqY,351 -openai/types/batch.py,sha256=o8ADxSZQe7F_1VTGSC5_RDUajU03SbWvN1wPiH98dVQ,3517 -openai/types/batch_create_params.py,sha256=p5qhTnzYVsAcXFuCj4Qyk3yPIo-FxSllTecdidq3dSs,2467 -openai/types/batch_error.py,sha256=Xxl-gYm0jerpYyI-mKSSVxRMQRubkoLUiOP9U3v72EM,622 -openai/types/batch_list_params.py,sha256=X1_sfRspuIMSDyXWVh0YnJ9vJLeOOH66TrvgEHueC84,705 -openai/types/batch_request_counts.py,sha256=u_a_hehmqYE6N6lA3MfvF1-CVzR9phiMlHgh_sRff0Y,408 -openai/types/batch_usage.py,sha256=168R7ks7r7rY5RDHyjFz137Qeq7lz17F_TTe-Rrgu-o,938 -openai/types/beta/__init__.py,sha256=kl4wEKnYF_urBLZV6wZ6ZCTwaLhlNYSOfFR64jO-Adc,2393 -openai/types/beta/assistant.py,sha256=_OgFKmjaMXM2yNOTFTcCj5qVo_-F9p7uiEXJnYbB0XE,5054 -openai/types/beta/assistant_create_params.py,sha256=-Fvyft9WpYlqkdil-r86G9vcSyWQuFb-JpAuUE1wqUM,8042 -openai/types/beta/assistant_deleted.py,sha256=bTTUl5FPHTBI5nRm7d0sGuR9VCSBDZ-IbOn9G_IpmJQ,301 -openai/types/beta/assistant_list_params.py,sha256=yW-lj6AUkG0IRZQKre0veEr9p4VMN-9YdELFMYs74Cw,1222 -openai/types/beta/assistant_response_format_option.py,sha256=yNeoAWxM-_8Sjmwqu8exqyKRFhVZIKeTypetPY55VFA,561 -openai/types/beta/assistant_response_format_option_param.py,sha256=dyPMhwRSLBZ0ltpxiD7KM-9X6BzWnbGeG-nT_3SenuQ,628 -openai/types/beta/assistant_stream_event.py,sha256=vP4LDqYWzSKGcZ1JAfyNw7YqC__XsVPe0nqZ2qdn93E,6930 -openai/types/beta/assistant_tool.py,sha256=_0FC7Db4Ctq_0yLaKJ93zNTB5HthuJWEAHx3fadDRlw,506 -openai/types/beta/assistant_tool_choice.py,sha256=Hy4HIfPQCkWD8VruHHicuTkomNwljGHviQHk36prKhg,544 -openai/types/beta/assistant_tool_choice_function.py,sha256=p5YEbTnED_kZpPfj5fMQqWSgLXAUEajsDd0LXGdlENU,269 -openai/types/beta/assistant_tool_choice_function_param.py,sha256=-O38277LhSaqOVhTp0haHP0ZnVTLpEBvcLJa5MRo7wE,355 -openai/types/beta/assistant_tool_choice_option.py,sha256=jrXMd_IYIQ1pt8Lkc-KrPd4CR3lR8sFV4m7_lpG8A4Y,362 
-openai/types/beta/assistant_tool_choice_option_param.py,sha256=VcatO5Nej9e5eqfrwetG4uM1vFoewnBEcFz47IxAK2E,424 -openai/types/beta/assistant_tool_choice_param.py,sha256=NOWx9SzZEwYaHeAyFZTQlG3pmogMNXzjPJDGQUlbv7Q,572 -openai/types/beta/assistant_tool_param.py,sha256=6DcaU3nMjurur2VkVIYcCaRAY1QLQscXXjCd0ZHHGho,501 -openai/types/beta/assistant_update_params.py,sha256=fE-04or_O3w73bgmGDZUTR87YTR8m8OGZry-rN9g5Js,6740 -openai/types/beta/chat/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122 -openai/types/beta/chatkit/__init__.py,sha256=K4Q3JL1OSz8tRSJyUoReRkBKsksw5QZdBy4HBvuBjZ4,2116 -openai/types/beta/chatkit/chat_session.py,sha256=cywf6ZrR1-5fKtPPFu6Q0TvTlJGcdcieUTvpoYZV0mk,1329 -openai/types/beta/chatkit/chat_session_automatic_thread_titling.py,sha256=zELA0ToZsGGb9G6GsxwbwQGnJs95a8AhCGWMnXeuLmI,297 -openai/types/beta/chatkit/chat_session_chatkit_configuration.py,sha256=a_O9jJQjlqsm54O-wO8Jv6fiH-hpi340FD3Bz0W4DmE,689 -openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py,sha256=xDb0vGSYnzysSpqWcltuS3OLAp38s1LNwjGAG638w3M,1674 -openai/types/beta/chatkit/chat_session_expires_after_param.py,sha256=c2pJuuxZQkLZJ21ISar8HUapZfMlhJni67VzzwWuqeU,519 -openai/types/beta/chatkit/chat_session_file_upload.py,sha256=6zrSDEVYXQm1EhM-oal6EJlD0WiKfnuKXfvEcRiN7nI,493 -openai/types/beta/chatkit/chat_session_history.py,sha256=yX4cJiXqrGArn4udmMSjg_YTVg-j6sxGYAfTtjDCZsA,467 -openai/types/beta/chatkit/chat_session_rate_limits.py,sha256=fgQg7_0IQwy9sxzA4N1Yh5gGmsX5Z9goKtlKYDT9054,293 -openai/types/beta/chatkit/chat_session_rate_limits_param.py,sha256=EE5CBJG2h_Tr_3islXXeb7YH6WhtAKQeyjCethF1ivw,389 -openai/types/beta/chatkit/chat_session_status.py,sha256=l-SGwIDV4EFdX13Wjl2t-EipKFyYw7mckp9gxJhydqI,243 -openai/types/beta/chatkit/chat_session_workflow_param.py,sha256=bUm1k9936ktAllWNsYBVo2pgM3ZGG9yXDf90jkZQ4rY,1007 -openai/types/beta/chatkit/chatkit_attachment.py,sha256=BiqxHW-mw6F5tm4OwX_jpWPmuqSutmijl4DdRBViymw,608 
-openai/types/beta/chatkit/chatkit_response_output_text.py,sha256=LFi43V0UuEUrPN92URK3afGkgwujsYh-S3f4j-ezpak,1607 -openai/types/beta/chatkit/chatkit_thread.py,sha256=XG8mEW3brYrQRBf55juhpF1YKivsOjff6PWdddcqTg4,1683 -openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py,sha256=ZAebHG_Jt5gIWxIEE9-DnXuieQRrHz4ym8sxyr72Z38,886 -openai/types/beta/chatkit/chatkit_thread_item_list.py,sha256=OERakSKEofs4cRcsRwy2Y18l9u62rufTOUwD9p7cpgI,3945 -openai/types/beta/chatkit/chatkit_thread_user_message_item.py,sha256=tI326WVOBvmlZX_xiclh4RT_Y267l0jzRtK6E5HVqX4,2153 -openai/types/beta/chatkit/chatkit_widget_item.py,sha256=JR8uTgviFDGIuWhwJM5oyf8-8Qukn8uCOdXdQuic1XU,720 -openai/types/beta/chatkit/session_create_params.py,sha256=OUWYjVEXdYa8Y7mMKjR8CwZCAKI9QMZ1aqM8N1JDcQw,1274 -openai/types/beta/chatkit/thread_delete_response.py,sha256=hpQsI2jGJhIOv_BfIua0i7KRfT1wTaYd9oJrx_F7Z4c,483 -openai/types/beta/chatkit/thread_list_items_params.py,sha256=9WerQ8VLMzWFSGJDnmg82bRNZy-YT7S6OjESDN9a9vU,697 -openai/types/beta/chatkit/thread_list_params.py,sha256=IHHdXtlMoV7a19YJg8dFLYzVgSzEUnSgMW2kQc_RE9Y,812 -openai/types/beta/chatkit_workflow.py,sha256=nzvswFe6liB0Sln-698VcioHzSrU7vljeqnZkgJS_VU,843 -openai/types/beta/code_interpreter_tool.py,sha256=7mgQc9OtD_ZUnZeNhoobMFcmmvtZPFCNYGB-PEnNnfs,333 -openai/types/beta/code_interpreter_tool_param.py,sha256=X6mwzFyZx1RCKEYbBCPs4kh_tZkxFxydPMK4yFNJkLs,389 -openai/types/beta/file_search_tool.py,sha256=5aNU8RZj-UNdmuqqpjCXNaa1pI9GzSP5qCPtvVSJ1oQ,1769 -openai/types/beta/file_search_tool_param.py,sha256=o6sWPrzRYY8wtNaVuF8h3D1sAQV3N0L3dbdiiaMisW0,1765 -openai/types/beta/function_tool.py,sha256=oYGJfcfPpUohKw2ikgshDjOI1HXCK-5pAWyegYNezeU,397 -openai/types/beta/function_tool_param.py,sha256=hCclpGO4Re-TxiGy_QxX75g1kcN6_ElubicO6SdJ_YI,471 -openai/types/beta/realtime/__init__.py,sha256=trJb-lqh3vHHMYdohrgiU2cHwReFZyw4cXM-Xj8Dwq8,7364 -openai/types/beta/realtime/conversation_created_event.py,sha256=U4-nesN8rAep2_25E2DrkXUMafQejj3NE_0llXKj5Y8,752 
-openai/types/beta/realtime/conversation_item.py,sha256=eIFg9zl3qzEijcQZvCnkvVLpSZgvEdubasgxGsQuFM4,2327 -openai/types/beta/realtime/conversation_item_content.py,sha256=KWZY8EUkjAi6K_IkWVjjrNZLG3KK2vGCy2_O30CEhzY,1050 -openai/types/beta/realtime/conversation_item_content_param.py,sha256=CrGi3XKwnfJdQGs-kJaGCsn53omdJF6_je0GWnVXhjQ,972 -openai/types/beta/realtime/conversation_item_create_event.py,sha256=jYXYdmqJh_znzcAgDuCxJXo5shf-t_DwmsyFkaDVnAE,1081 -openai/types/beta/realtime/conversation_item_create_event_param.py,sha256=vxTag6TrOLu1bf46F3mUmRkl5dd1Kb6bUp65gBDVmhM,1101 -openai/types/beta/realtime/conversation_item_created_event.py,sha256=cZBm_uKk5dkQXLlbF0Aetg4NJge3Ftz9kwRu2kCI3m4,817 -openai/types/beta/realtime/conversation_item_delete_event.py,sha256=p-O6R1Ku5pxZvaxhSi4YTPqLXS1SHhdLGgJuPQyPcHY,549 -openai/types/beta/realtime/conversation_item_delete_event_param.py,sha256=a17h8Hd8MxUbXT6NQg8YpTr1ICt1ztRecpfukHw4g34,569 -openai/types/beta/realtime/conversation_item_deleted_event.py,sha256=uWHSqX5ig550romSdhtROwrdQmdeN31Oz1Vpr9IuQFI,492 -openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=FwZHHO4ZGMKoeQ80snCh_S-7anNUQtRLOhGjb8ScGOQ,2538 -openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py,sha256=5kjLmnRJug7L5fHxSSWWbhB70jGwNaMwbdENEwz9Xek,1143 -openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=xYNSBIyERQJ4P-5YoFF1VptfPa8JnJ0sWaH6LGsPow0,1077 -openai/types/beta/realtime/conversation_item_param.py,sha256=HMB7MFR6WkztV1vMCFdIYNv8qOY4jzI2MIDtr9y8nEo,2207 -openai/types/beta/realtime/conversation_item_retrieve_event.py,sha256=5Cc7f0fM8ujwER0eIcQRwz0rmc6hdCUrAqiAvRNn9Zc,559 -openai/types/beta/realtime/conversation_item_retrieve_event_param.py,sha256=TRYaZ3btNaywRPaMOVRzK5VT7wh4taIGjbUdhkZ7gFc,579 -openai/types/beta/realtime/conversation_item_truncate_event.py,sha256=1c2_BamaTkgD26eyGZJU5xwbz7lRHupqU2HqcK0VniI,943 
-openai/types/beta/realtime/conversation_item_truncate_event_param.py,sha256=hSnVOSMMtLf16nn4ISHkevYCfEsiN9kNcgxXRtHa8Kc,983 -openai/types/beta/realtime/conversation_item_truncated_event.py,sha256=K4S35U85J-UNRba9nkm-7G1ReZu8gA8Sa1z0-Vlozc0,704 -openai/types/beta/realtime/conversation_item_with_reference.py,sha256=NDMfbnG0YKLqWJskFSHRIMkN2ISs8yNRxP6d6sZshws,3288 -openai/types/beta/realtime/conversation_item_with_reference_param.py,sha256=X0iEdjijFkoGtZtp0viB8bAFqMn4fNNSvJiCZbgJ-3Q,3079 -openai/types/beta/realtime/error_event.py,sha256=goNkorKXUHKiYVsVunEsnaRa6_3dsDKVtrxXQtzZCmk,877 -openai/types/beta/realtime/input_audio_buffer_append_event.py,sha256=lTKWd_WFbtDAy6AdaCjeQYBV0dgHuVNNt_PbrtPB8tg,662 -openai/types/beta/realtime/input_audio_buffer_append_event_param.py,sha256=XmN2bE6jBRrkKGVPJdnPjJql5dqMPqwbmFnxo-z22JE,682 -openai/types/beta/realtime/input_audio_buffer_clear_event.py,sha256=7AfCQfMxZQ-UoQXF9edYKw5GcTELPcfvvJWWpuLS41c,489 -openai/types/beta/realtime/input_audio_buffer_clear_event_param.py,sha256=y-zfWqJsh1n6r2i0MgLDpnNC4g1dq3GCS66Twfkng38,499 -openai/types/beta/realtime/input_audio_buffer_cleared_event.py,sha256=j9gpm7aGVmrUt48wqtvBMN8NOgtvqHciegjXjOnWm7A,429 -openai/types/beta/realtime/input_audio_buffer_commit_event.py,sha256=SLZR2xxRd6uO3IQL6-LuozkjROXiGyblKoHYQjwXk4I,493 -openai/types/beta/realtime/input_audio_buffer_commit_event_param.py,sha256=B8agXC-rUl-D-RijJ5MeTLgw43qVYzmf2_2oAVokhLY,503 -openai/types/beta/realtime/input_audio_buffer_committed_event.py,sha256=76XHl3ETfG5YiYce2OCUsv0wNfSiaabLzYVjGtBwux0,733 -openai/types/beta/realtime/input_audio_buffer_speech_started_event.py,sha256=NVp60RUsLFtte9Ilknmu_5lRk2dZp_1fXCgGHd4EvSM,861 -openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py,sha256=gszRuYQtAW8upIhd7CJZ7pxboDk-K7sqidjqxgf47q4,779 -openai/types/beta/realtime/rate_limits_updated_event.py,sha256=kBnf_p-49Q_LNdJsj0R1Szi8R4TGYAAJ_KifLuuyFZw,949 
-openai/types/beta/realtime/realtime_client_event.py,sha256=0c48JcJH5yruF52zl0Sanm_dd2W5ZHV5GocRG0Xm6m4,1839 -openai/types/beta/realtime/realtime_client_event_param.py,sha256=xBeZ60Q-OWuZxstPQaoqE0DUTDOPOwrL8LWMmDJI2rM,1887 -openai/types/beta/realtime/realtime_connect_params.py,sha256=AvTypkFCYmDn9qMeektVqij6cqzgovr3PpgpMalJoJ4,290 -openai/types/beta/realtime/realtime_response.py,sha256=iUOItlPQv6-okCuiTsloe0LDVyJ0MUQ64ug8ZaLePnw,3567 -openai/types/beta/realtime/realtime_response_status.py,sha256=gU-59Pr_58TRfMZqFzdCloc53e1qOnU4aaHY3yURUK8,1326 -openai/types/beta/realtime/realtime_response_usage.py,sha256=6XOFjCjPWioHoICZ0Q8KXuUzktQugx6WuTz0O5UvzZg,1541 -openai/types/beta/realtime/realtime_server_event.py,sha256=-PpqZpg-DL_C_wseLMRQHWdBvxnVGRAfOF7x13Qr34E,5408 -openai/types/beta/realtime/response_audio_delta_event.py,sha256=UjbnK4u_WSNTOColZj8SmJgHnAc2H8iRXD76ZnPbz7E,742 -openai/types/beta/realtime/response_audio_done_event.py,sha256=1XEWBPh1JiOgyr6V03mRt_3sLm0YFUq5ft1AhfFlNEg,679 -openai/types/beta/realtime/response_audio_transcript_delta_event.py,sha256=HEVNQ_R2_Nyo6BvNvsliMnN__b17eVd2Jx5udRHg0Hg,773 -openai/types/beta/realtime/response_audio_transcript_done_event.py,sha256=Cn5l4mJnKK3LeSN9qFL4LLqs1WOWg4kt1SaYThB-5c0,787 -openai/types/beta/realtime/response_cancel_event.py,sha256=EKx8IZUISJHdl-_3tCdHtz2BINQ85Tq_ocadnsEGPSk,637 -openai/types/beta/realtime/response_cancel_event_param.py,sha256=nidzBL83liHwyImiNGiz9Ad0V34EtFAQDw1utqcF6ns,630 -openai/types/beta/realtime/response_content_part_added_event.py,sha256=a8-rm1NAwX685fk7GdT6Xi0Yr-JfeAkyUr94-RoFe34,1232 -openai/types/beta/realtime/response_content_part_done_event.py,sha256=jO2TZygxPabbnEG9E1AfNP-JYJv1QtCMnCzgcZ_3n18,1190 -openai/types/beta/realtime/response_create_event.py,sha256=46i-O9wwvhr1CzHNMDzhs2SGVwHiFJDOkQfOZZRfAWo,4763 -openai/types/beta/realtime/response_create_event_param.py,sha256=IPJlTWH0HzsknpSRrFgrQ3bfxsFZVRdQ6IYEsiGSZOk,4619 
-openai/types/beta/realtime/response_created_event.py,sha256=zZtHx-1YjehXxX6aNE88SFINDaKOBzpzejo6sTNjq9g,506 -openai/types/beta/realtime/response_done_event.py,sha256=_yUPoECCli89iHLtV3NQkXQOW6Lc1JlxVPFw04ziBGY,494 -openai/types/beta/realtime/response_function_call_arguments_delta_event.py,sha256=Yh2mQZDucfnTLiO8LRyG9r7zeS1sjwLcMF1JPMdTFJc,793 -openai/types/beta/realtime/response_function_call_arguments_done_event.py,sha256=kxSPK6nbNWL6pxveY7zaNGgCkCXqyBFJPVYJrw9cbOw,793 -openai/types/beta/realtime/response_output_item_added_event.py,sha256=-_BZjvAqcgv3NIz-EMhvYMxIwvcXTt68FVNp0pw09dI,713 -openai/types/beta/realtime/response_output_item_done_event.py,sha256=0ClNVMZmeIxKghlEid9VGoWiZ97wp00hIdNnev4qBD8,709 -openai/types/beta/realtime/response_text_delta_event.py,sha256=B1yyuc6iMOMoG5Wh6W5KoQNYtVD1vEm2cKqHnl2CuFQ,721 -openai/types/beta/realtime/response_text_done_event.py,sha256=mPgVG6nWxwkZ3aZOX-JkVF7CpaWP5-bvtbxFrr4fK7g,724 -openai/types/beta/realtime/session.py,sha256=RZMR4d09k0erHFefzbYwQNyw0V6M5ESEPJ0qoO90lCU,10183 -openai/types/beta/realtime/session_create_params.py,sha256=PTFBt7w7fTrz2KWZIz5GjJqLoQkyv7qEIspFscs6zy8,10251 -openai/types/beta/realtime/session_create_response.py,sha256=HfCFE46q3IEfvLFEdU06DAg5GKIPlJjaU9DtKzKcr2U,6574 -openai/types/beta/realtime/session_created_event.py,sha256=rTElnBlE7z1htmkdmpdPN4q_dUYS6Su4BkmsqO65hUc,489 -openai/types/beta/realtime/session_update_event.py,sha256=w4SVhFjcoasgL1yyyNfykarnD-BzmxDt_0kED8pN8pw,11237 -openai/types/beta/realtime/session_update_event_param.py,sha256=Wu_4oa0R1YUTyI6_7AtOwy07qJf1YSCUZpdqO8CKkd0,10749 -openai/types/beta/realtime/session_updated_event.py,sha256=HyR-Pz3U9finVO-bUCvnmeqsANw-fceNvVqEIF6ey10,489 -openai/types/beta/realtime/transcription_session.py,sha256=Soo2LuEMJtkUD2oPJ1E23GUcoUrYBiSu_UtbLUKemfw,3184 -openai/types/beta/realtime/transcription_session_create_params.py,sha256=BVwSY41UX2njXAJpWynMJtC5XuKv6sNs7kp2Y8KSjnk,5976 
-openai/types/beta/realtime/transcription_session_update.py,sha256=YMP9OB9P5FaSwaicXtYELjm4hD1gDSvKFq9YtF2sq64,6694 -openai/types/beta/realtime/transcription_session_update_param.py,sha256=b99v4yKnB2lC_cnYGiaxKnQuHB4eUW-v3eKT2UDsamk,6453 -openai/types/beta/realtime/transcription_session_updated_event.py,sha256=CKAS98QL7CuOVEWF6qGcC9qhTktdG2CPPJXbrW75GIM,833 -openai/types/beta/thread.py,sha256=RrArSK1-_prQY_YBexgD_SU87y_k2rmRq_tti66i7s4,2132 -openai/types/beta/thread_create_and_run_params.py,sha256=MPLDgaZ69PR-WZRPN_zwwF1--Cg9d99oD3rC2aWiCCk,14875 -openai/types/beta/thread_create_params.py,sha256=T0ok3yJ6ZXqPbX5dqwpQp7YFWCXoAhz_zbroMo6rPDQ,6561 -openai/types/beta/thread_deleted.py,sha256=MaYG_jZIjSiB9h_ZBiTtpMsRSwFKkCY83ziM5GO_oUk,292 -openai/types/beta/thread_update_params.py,sha256=eN1VyP4lk6puJseydX9Ac9CLZLobYQJcijEWk1RlHKc,1928 -openai/types/beta/threads/__init__.py,sha256=0WsJo0tXp08CgayozR7Tqc3b8sqzotWzvBun19CEIWc,3066 -openai/types/beta/threads/annotation.py,sha256=Ce3Y0mSodmYRkoqyhtyIdep6WfWew6KJJgtrENOnfek,462 -openai/types/beta/threads/annotation_delta.py,sha256=iNsE-1Gn1yU0TlTHoxqKbOvPRUxWuXsF72qY_mMnWGY,510 -openai/types/beta/threads/file_citation_annotation.py,sha256=0Rs1Sr-eCLQpLsu8-WwHG7kv5Ihud4kiHO1NL7xHO0s,595 -openai/types/beta/threads/file_citation_delta_annotation.py,sha256=R87tcXkJ0RiH5UJo0Qknwk7X_c4qF1qvGsu2spOPx-I,873 -openai/types/beta/threads/file_path_annotation.py,sha256=hNc4ebprJynqMG1yk0gLvgzTpjtVzgEbXriMZftkgew,552 -openai/types/beta/threads/file_path_delta_annotation.py,sha256=RW9dgDF9Ggf357fPZ-vUu2ge3U-Hf11DVTr-ecklsBY,755 -openai/types/beta/threads/image_file.py,sha256=QVXLiplb-CigZqdMZtXlmebXKt6tF74kI-3vHxe_qUE,707 -openai/types/beta/threads/image_file_content_block.py,sha256=31I5trSERP2qLZpJ4ugZtIyta4DDoBhBvxkM4LovL3w,363 -openai/types/beta/threads/image_file_content_block_param.py,sha256=3ryZ6AV-DLwWYVP2XSK11UHkvutTUollxn6z8BZ4rSA,445 
-openai/types/beta/threads/image_file_delta.py,sha256=nUJoSuP-3YyqqwBsmPJ0AqiQydz2FymVDCXQVkNYwOk,734 -openai/types/beta/threads/image_file_delta_block.py,sha256=XJ2YVX_cq0OiNcGbNmXO0_dca1IvPockOvvoM7pDvbI,492 -openai/types/beta/threads/image_file_param.py,sha256=BaKD31JPxQ5CjRfZ_0RcOG3lDTZeW_k85XCvwyctD54,717 -openai/types/beta/threads/image_url.py,sha256=EzEK-CYoO0YyqFmejIPu7pMfTEgMmp5NFscsRd2pCos,592 -openai/types/beta/threads/image_url_content_block.py,sha256=_sg3BWrtVGw-8XtAh15Rs4co6NCBB9Y3zCp_XOAz4U8,365 -openai/types/beta/threads/image_url_content_block_param.py,sha256=RWzo5KkBiwvgJSviZl6JUlsfv3VQKIFr6cp9lhkLu8E,447 -openai/types/beta/threads/image_url_delta.py,sha256=MXCp-OmuNT4njbWA9DWAbocP7pD3VpdcUy2wgeOjwm4,582 -openai/types/beta/threads/image_url_delta_block.py,sha256=Jjdfub4g9ceNKF8GuuTIghOmYba2vEeX3320mg5PWIA,484 -openai/types/beta/threads/image_url_param.py,sha256=VRLaxZf-wxnvAOcKGwyF_o6KEvwktBfE3B6KmYE5LZo,602 -openai/types/beta/threads/message.py,sha256=vk5lEpeA_aykADtn9GB8sLye7TByWZmV3ghauCh2s3c,3414 -openai/types/beta/threads/message_content.py,sha256=b8IC_EG28hcXk28z09EABfJwPkYZ7U-lTp_9ykdoxvU,630 -openai/types/beta/threads/message_content_delta.py,sha256=o4Edlx9BtdH2Z4OMwGWWXex8wiijknNRihJ-wu8PDUQ,615 -openai/types/beta/threads/message_content_part_param.py,sha256=RXrnoDP2-UMQHoR2jJvaT3JHrCeffLi6WzXzH05cDGI,550 -openai/types/beta/threads/message_create_params.py,sha256=7fXlNyqy7tzuLgMsCYfJegL2sZcjKwYNLihwteODyg0,2083 -openai/types/beta/threads/message_deleted.py,sha256=DNnrSfGZ3kWEazmo4mVTdLhiKlIHxs-D8Ef5sNdHY1o,303 -openai/types/beta/threads/message_delta.py,sha256=-kaRyvnIA8Yr2QV5jKRn15BU2Ni068a_WtWJ4PqlLfE,570 -openai/types/beta/threads/message_delta_event.py,sha256=7SpE4Dd3Lrc_cm97SzBwZzGGhfLqiFViDeTRQz-5YmQ,579 -openai/types/beta/threads/message_list_params.py,sha256=iuwzDccnViooUxHlq-WoE1FEJArNy5-zrYCoaNgVS8k,1296 -openai/types/beta/threads/message_update_params.py,sha256=XNCSLfRkk531F8mNbUB9bRYcCzJfW8NiFQ9c0Aq75Dk,757 
-openai/types/beta/threads/refusal_content_block.py,sha256=qB9jrS2Wv9UQ7XXaIVKe62dTAU1WOnN3qenR_E43mhg,310 -openai/types/beta/threads/refusal_delta_block.py,sha256=ZhgFC8KqA9LIwo_CQIX-w3VVg3Vj0h71xC1Hh1bwmnU,423 -openai/types/beta/threads/required_action_function_tool_call.py,sha256=XsR4OBbxI-RWteLvhcLEDBan6eUUGvhLORFRKjPbsLg,888 -openai/types/beta/threads/run.py,sha256=cFOL77mXgELKefaRVN9Ds2jKoxYtBYwE6-82iegarcA,8338 -openai/types/beta/threads/run_create_params.py,sha256=3gwHxxLbQP91yjQSprGKC9-RlO_cUL4KfPUGDReGMh8,10401 -openai/types/beta/threads/run_list_params.py,sha256=TgepSLrupUUtuQV2kbVcoGH1YA0FVUX9ESkszKuwyHY,1210 -openai/types/beta/threads/run_status.py,sha256=OU1hzoyYXaRJ3lupX4YcZ-HZkTpctNE4tzAcp6X8Q9U,351 -openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=cKiyD374BsZN_Oih5o5n5gOf_DYsxErVrbgxveNhmPI,1643 -openai/types/beta/threads/run_update_params.py,sha256=sVjkl6ayjU75Tk8t69r6xgIg80OlTikyRdS0sa2Gavg,749 -openai/types/beta/threads/runs/__init__.py,sha256=mg_roY9yL1bClJ8isizkQgHOAkN17iSdVr2m65iyBrs,1653 -openai/types/beta/threads/runs/code_interpreter_logs.py,sha256=7wXZpUE9I-oZJ0K3mFG0Nwmfm2bKGiSpWJyBeo7txwo,482 -openai/types/beta/threads/runs/code_interpreter_output_image.py,sha256=8o99k0ZHMHpqH0taXkOkYR9WaDUpCN-G0Ifd5XsJpb8,613 -openai/types/beta/threads/runs/code_interpreter_tool_call.py,sha256=ekiIuH1kVCN51hCzY3AYr5i3_a4vlgUiZHJ59pl17oY,1810 -openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py,sha256=Qr2cen-bKyXTW2NDEUHnmJRE0jY-nkLcnO4NzCbBPDo,1479 -openai/types/beta/threads/runs/file_search_tool_call.py,sha256=R5mYh5W2qbVnz-fkAAqLlSqBQ2Gint1gSE_UBGum5-E,1962 -openai/types/beta/threads/runs/file_search_tool_call_delta.py,sha256=Gx8c7GSgGYuOvGadcAr3ZIspEFMZS3e2OY7vBo_MYnM,655 -openai/types/beta/threads/runs/function_tool_call.py,sha256=aOq5yOtKOi6C5Q1FIQRxqtJJR1AcSW_K5PvRiKISNCI,920 -openai/types/beta/threads/runs/function_tool_call_delta.py,sha256=VFRtCJkj4PHX97upM1cXpJAk9-JvJSgyngie06fBIjQ,1076 
-openai/types/beta/threads/runs/message_creation_step_details.py,sha256=tRFMNF2Rf4DekVliUKkoujItiOjjAE9EG9bbxJvpVPA,506 -openai/types/beta/threads/runs/run_step.py,sha256=zTSlNBowJx507-oo6QJ7A30BFXdUt9k3lTZ4o34L1wI,3589 -openai/types/beta/threads/runs/run_step_delta.py,sha256=FNYDTddRrTO3PT_fgi7AsJ1PeMtyWsVzcxoihjbBzAw,663 -openai/types/beta/threads/runs/run_step_delta_event.py,sha256=rkDyvHSXt-hc1LngB41f9vglkn6t03kS62bsn0iGaxU,585 -openai/types/beta/threads/runs/run_step_delta_message_delta.py,sha256=UIo6oPH8STLjPHiWL-A4CtKfYe49uptvIAHWNnZ3Ums,564 -openai/types/beta/threads/runs/run_step_include.py,sha256=u-9Cw1hruRiWr70f_hw4XG0w1cwOAYfRJYKva2dEacs,264 -openai/types/beta/threads/runs/step_list_params.py,sha256=zorF5juogCzLMsZLjzMZTs_iIBcPj9WUur5HcrXuH8M,1752 -openai/types/beta/threads/runs/step_retrieve_params.py,sha256=aJ7l8RDJLPyEmqjfO4XsTV54VZOOqyb_gKSUvqp33ZI,815 -openai/types/beta/threads/runs/tool_call.py,sha256=1rwq4IbLgjQAQ-ORXYkNpmJyi9SREDnqA57nJbj_NiU,537 -openai/types/beta/threads/runs/tool_call_delta.py,sha256=t5wF8ndW3z99lHF981FL-IN5xXBS9p7eonH9bxvKu_c,600 -openai/types/beta/threads/runs/tool_call_delta_object.py,sha256=eK20VsIswEyT48XbkGu60HUrE7OD3fhpn1fbXrVauM4,615 -openai/types/beta/threads/runs/tool_calls_step_details.py,sha256=bDa-yybVF3a8H6VqhDGmFZMkpn-0gtPQM2jWWsmUvYo,574 -openai/types/beta/threads/text.py,sha256=9gjmDCqoptnxQ8Jhym87pECyd6m1lB3daCxKNzSFp4Y,319 -openai/types/beta/threads/text_content_block.py,sha256=pdGlKYM1IF9PjTvxjxo1oDg1XeGCFdJdl0kJVpZ7jIs,319 -openai/types/beta/threads/text_content_block_param.py,sha256=feQr0muF845tc1q3FJrzgYOhXeuKLU3x1x5DGFTN2Q0,407 -openai/types/beta/threads/text_delta.py,sha256=2EFeQCkg_cc8nYEJ6BtYAA3_TqgMTbmEXoMvLjzaB34,389 -openai/types/beta/threads/text_delta_block.py,sha256=pkHkVBgNsmHi9JURzs5ayPqxQXSkex3F0jH0MqJXik0,448 -openai/types/chat/__init__.py,sha256=wyA0EWb0utj19dX0tCeGh4Jg5GrO3TGjmfQkR9HVxxE,6102 -openai/types/chat/chat_completion.py,sha256=cQQEYFoF1Cs3Xy_nskiwo0nrDGmdu5t4TtiQ03xA8T4,3488 
-openai/types/chat/chat_completion_allowed_tool_choice_param.py,sha256=kQgAzwedjhFLqSzkhI59rJ2ZtfyMIhBQf09I9oJvpII,636 -openai/types/chat/chat_completion_allowed_tools_param.py,sha256=q7PeluUYm0xA9EbwwHdbbk72obyFyuChFDfG4zwIBto,1010 -openai/types/chat/chat_completion_assistant_message_param.py,sha256=2hLVB-u2C20B2QBREBEymXJfFSwiGnnzv3jbKyHdSrY,2441 -openai/types/chat/chat_completion_audio.py,sha256=ioAcuhkIdk1TSZK1LqTXYcjTPxoaM2b0RhGJekyCABY,655 -openai/types/chat/chat_completion_audio_param.py,sha256=FLcawzQQpYPC4_yC7h3hOvFa1NyvLECKGSAoKB1d-Mk,811 -openai/types/chat/chat_completion_chunk.py,sha256=zySvwbuKEVrPLuKVZgPUsIqq1D4nRjSp1U6nCkVFxGI,6006 -openai/types/chat/chat_completion_content_part_image.py,sha256=G51SQ-Pjc2FO8vtq_DizAlPe7WhloVZMK7L84Y2kECI,753 -openai/types/chat/chat_completion_content_part_image_param.py,sha256=Gqv98qyD8jB81THZp49c8v2tHrId_iQp4NzciT9SKI0,797 -openai/types/chat/chat_completion_content_part_input_audio_param.py,sha256=r1EXNEtjJo5oJ9AnP3omaJzACE1gSfdmob5Q0HKsOm4,704 -openai/types/chat/chat_completion_content_part_param.py,sha256=0S9iFE1p93HG_Yx7Wj_TR2CmBNK_i7TaWE7HuE-tLc4,1259 -openai/types/chat/chat_completion_content_part_refusal_param.py,sha256=TV1vu-IgrvKa5IBlPSIdBxUaW8g1zDhMOOBOEmhU2w0,467 -openai/types/chat/chat_completion_content_part_text.py,sha256=A9WfAYjt-8fbCzEn8kC9pTpK9e2G0aua58FqssXXfrY,363 -openai/types/chat/chat_completion_content_part_text_param.py,sha256=4IpiXMKM9AuTyop5PRptPBbBhh9s93xy2vjg4Yw6NIw,429 -openai/types/chat/chat_completion_custom_tool_param.py,sha256=n-ThsvnkdKvRePzRdHEjikeXtju4K9Uc-ueB4LnByyM,1638 -openai/types/chat/chat_completion_deleted.py,sha256=O7oRuPI6YDa_h7uKnEubsjtw8raTcyVmVk95hoDfo74,470 -openai/types/chat/chat_completion_developer_message_param.py,sha256=OCFKdTWkff94VtgY7AaDUUFiZLT8LBn7WWxjbcIq2OM,830 -openai/types/chat/chat_completion_function_call_option_param.py,sha256=M-IqWHyBLkvYBcwFxxp4ydCIxbPDaMlNl4bik9UoFd4,365 
-openai/types/chat/chat_completion_function_message_param.py,sha256=jIaZbBHHbt4v4xHCIyvYtYLst_X4jOznRjYNcTf0MF0,591 -openai/types/chat/chat_completion_function_tool.py,sha256=Yw3wlkMQPjs-j2JQaBEcbxtXv9b0w2FJryRPegWknjc,445 -openai/types/chat/chat_completion_function_tool_param.py,sha256=isNPdszq2CXOZB6a-ALjTBRaX8T-BeToe2tApMepmto,519 -openai/types/chat/chat_completion_message.py,sha256=iC4SMjhTCVVO1Xueb_iAObMB_nLRc_PFxasfZK0A1kM,2521 -openai/types/chat/chat_completion_message_custom_tool_call.py,sha256=fbnL3fERlW4E9hd5EoCcb43zgCoaPc11tZ0AlBjoegM,643 -openai/types/chat/chat_completion_message_custom_tool_call_param.py,sha256=OvZxmUFfz7SDl55gvfscHaKPHUe8DmV83JzkQhJQplo,752 -openai/types/chat/chat_completion_message_function_tool_call.py,sha256=9KJxJ6T40mFBtznBnPE3wfHlzhQtNG_ayrn3ZYuIlyA,916 -openai/types/chat/chat_completion_message_function_tool_call_param.py,sha256=V09BFjYcP2pYigtrBfFtg6PfEPKbD0E6MAUxrDWyn_g,1025 -openai/types/chat/chat_completion_message_param.py,sha256=aLrz_cX_CYymFdW9cMIPZpv0Z4zM50RECV3SH6QNZsc,1019 -openai/types/chat/chat_completion_message_tool_call.py,sha256=aWpKcV6NZZfx_-aGEwPz99IDWNCdRuwoYpUChs0Uvvc,738 -openai/types/chat/chat_completion_message_tool_call_param.py,sha256=rE_TbdN3N6JGzHecykgdFHZgI66p2lsl0loPpz5TxW0,458 -openai/types/chat/chat_completion_message_tool_call_union_param.py,sha256=L8IoSHXgIFxYyHSfXQJNN7FJlp31ez8X4l5eSPKGmYM,602 -openai/types/chat/chat_completion_modality.py,sha256=8Ga0kruwJc43WD2OIqNudn7KrVRTPDQaalVkh_8bp9I,236 -openai/types/chat/chat_completion_named_tool_choice_custom_param.py,sha256=K7LbF_AYWRohfzsVj8iCYNYePdAmqsqWmWoQBw_nsXk,565 -openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=bS9rzU0SzIZCQCfOlEoRaRtFr10oIUV9HRQ_-iv6W0M,559 -openai/types/chat/chat_completion_prediction_content_param.py,sha256=Xw4K_4F379LsXENOpZvREDn55cCnbmZ69xa4fw9w3bg,868 -openai/types/chat/chat_completion_reasoning_effort.py,sha256=9sAGlM21dgRNOQRSsL_znZf9ruXcmvVriWeex0fRgMk,235 
-openai/types/chat/chat_completion_role.py,sha256=LW6-tqXaqpD7H53PiSXrjvIo6g4RfHhWityDm6Nfvig,275 -openai/types/chat/chat_completion_store_message.py,sha256=krUE7xzu6DWc64_yAOABOGfM8-aFeE59HDF1QLoOgek,916 -openai/types/chat/chat_completion_stream_options_param.py,sha256=5didkVskgUUcVH6BjfCnA6hG4lp9LOiBU7cDnx3abh0,1311 -openai/types/chat/chat_completion_system_message_param.py,sha256=WYtzmsNP8ZI3Ie8cd-oU7RuNoaBF6-bBR3mOzST9hMw,815 -openai/types/chat/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769 -openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=wPIjU-eeybPjRFr28mx8Njp2OCrKw3Xpu0231z4Kz1A,758 -openai/types/chat/chat_completion_tool_message_param.py,sha256=5K7jfKpwTuKNi1PTFabq_LHH-7wun8CUsLDh90U8zQE,730 -openai/types/chat/chat_completion_tool_param.py,sha256=5hFt0Izat_o50JMJzspCYeB0gubilRDB3a6yIfGHoN8,431 -openai/types/chat/chat_completion_tool_union_param.py,sha256=smpIoekwuuXKQx9jRRB2cqc3L7_fmN5lB4IIJHlKhys,504 -openai/types/chat/chat_completion_user_message_param.py,sha256=mik-MRkwb543C5FSJ52LtTkeA2E_HdLUgtoHEdO73XQ,792 -openai/types/chat/completion_create_params.py,sha256=EQc4KD_erAgdA5G-J9QxlIpFk3iNVbRCidco754tbfw,17340 -openai/types/chat/completion_list_params.py,sha256=jOAiZ6vYSrxyD-3qzIXvXofJkejl6bet9_yNsC9p5ws,1154 -openai/types/chat/completion_update_params.py,sha256=VRDF28qoonjrveHhw8BT4Yo_NlLsV2Qzd_KUUQ6AEG8,742 -openai/types/chat/completions/__init__.py,sha256=nmKlohYbZmr7Pzv1qCDMSDbthcH6ySPFIgvXpHZtxK8,195 -openai/types/chat/completions/message_list_params.py,sha256=IArlye40xGlMVIDHxsK9RX_5usPL71wXPMgdwI7_wYU,583 -openai/types/chat/parsed_chat_completion.py,sha256=KwcwCtj0yexl6gB7yuOnyETRW-uUvNRYbVzPMkwCe5Q,1437 -openai/types/chat/parsed_function_tool_call.py,sha256=JDWYo1XhTDQ8CxssbgjpzBhUw8jeXAmEd5Tr_CqFrVA,945 -openai/types/chat_model.py,sha256=yFvzwm6VJXCn6jN21FS-utN6bcBBzRIpKYk1VTP8sdo,177 -openai/types/completion.py,sha256=yuYVEVkJcMVUINNLglkxOJqCx097HKCYFeJun3Js73A,1172 
-openai/types/completion_choice.py,sha256=PUk77T3Cp34UJSXoMfSzTKGWDK0rQQwq84X_PSlOUJo,965 -openai/types/completion_create_params.py,sha256=UqgYjUpYbQYPdYETVxhkwgbGRKTQCBoyeSFtrB8iuAo,7652 -openai/types/completion_usage.py,sha256=uf5n0vzlCkGAU67BBn_h7yhjd_G4OHpQbJnvzz0eO2A,1735 -openai/types/container_create_params.py,sha256=119x8wG_Cz_IC-N1ha02h9IlHBjh8DPHOkr2o6FiMa8,814 -openai/types/container_create_response.py,sha256=5tItbVA4xiJRcJMvqPbSoIIO49n-Hmtq_MnLBz_ww-w,1129 -openai/types/container_list_params.py,sha256=7RiUMBOEJj9QH9LYtPiwUrIufx8czF6kk2JcfO_LP_s,893 -openai/types/container_list_response.py,sha256=LVxHagc20cMD9brVMMJaQ-LTO-9uACqF8nUupsO1bsY,1125 -openai/types/container_retrieve_response.py,sha256=mqPMgQXPBDm72O2eLj8CdZgcdX0uVH28cCUL6g6sqtg,1133 -openai/types/containers/__init__.py,sha256=SCdMa4GNxw-I23CwW03iVOoHRfDybyKEMmpDkdVuUcI,480 -openai/types/containers/file_create_params.py,sha256=KXoZNG4DpiD7NDeQixdKJsuOv-iCZAlSN4sz7AQm49k,412 -openai/types/containers/file_create_response.py,sha256=Dh1OWf86XNMfmvVwfRGezfihdDuuAcdiQxT_3iefBzw,722 -openai/types/containers/file_list_params.py,sha256=9bU7uKeXPk7adFzwvKHFitFOV4phnIbbfFx5u6n1OFY,883 -openai/types/containers/file_list_response.py,sha256=xwvdMIUafkHSXJGQT1_mxt6T_8nJo-isp9M_5YTq-J8,718 -openai/types/containers/file_retrieve_response.py,sha256=wGPU9o5SKkg8s4aUJXhwC38u8KfTFKmIUk1ItUdYxJg,726 -openai/types/containers/files/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122 -openai/types/conversations/__init__.py,sha256=N7GRumNq1KeGR4X9STSKWLM1axUntyaMI_OwPihZmjI,1854 -openai/types/conversations/computer_screenshot_content.py,sha256=yJ-i6Z9VxHt21iuz2K9i0saVWOsMzpLjqjjDoob1AAk,632 -openai/types/conversations/conversation.py,sha256=BVpec4hLHle_8iRf6v5y4CPYHtMhEntP0m8PDG_5GSY,886 -openai/types/conversations/conversation_create_params.py,sha256=dtgIXlZj1yKP3oJUYdFCb2MKIk6BH8e4QbKIPPGjHf8,976 
-openai/types/conversations/conversation_deleted_resource.py,sha256=HagMTsOrDL7QYQSeZqMbBMfRzhWAgnrxtinGT5uhog4,326 -openai/types/conversations/conversation_item.py,sha256=KvE4Mn0d2yfVUhwbdMhg894PuMoUBSU3wXbqeDEb9Wo,6660 -openai/types/conversations/conversation_item_list.py,sha256=FvZW9mcZsKpaWNAI1PRuBtnKWt8vB1PEbDLmKN7ZF5o,667 -openai/types/conversations/conversation_update_params.py,sha256=YMyThjw2ObnqY-dhI4iy2pqf0cZW7rNV0TcxpBMs1bs,746 -openai/types/conversations/input_file_content.py,sha256=xxG8_PMhnjH1F6jXs6vZyj_T1HdO--48fTYFrvWCPzk,219 -openai/types/conversations/input_file_content_param.py,sha256=ATFOU1VRdw8SDRvwdC1KEamfAMna-mIfpER5bLpGIeg,244 -openai/types/conversations/input_image_content.py,sha256=LKKWx1y5Gi0nu34a8CFbDUaXUWQACeQ80lwJtukOx3U,224 -openai/types/conversations/input_image_content_param.py,sha256=AceRCBW-WuXG5rI4uDF2w0n_eaa8DzpCmbdWm3ofVMg,248 -openai/types/conversations/input_text_content.py,sha256=G5L4ln3tkWqSzaZlAkFuzkFOpjYqPVnE3QyXafiA6YU,219 -openai/types/conversations/input_text_content_param.py,sha256=HPl92LQHoA3_2azNJcVF1FD6OTJY200uwbCodF7_xPg,244 -openai/types/conversations/item_create_params.py,sha256=TRAsvDuMBjLeL5DzqC-WyqmorZTnu4qZRt9eE13SJ8E,874 -openai/types/conversations/item_list_params.py,sha256=nMzeK_XkVTWsa5pMQDGDuRPfGwiXFBDcdZ4NYwYV7H4,1896 -openai/types/conversations/item_retrieve_params.py,sha256=lHK-Sqbd7DXWQKuXGRBUvu_a7LxYNAT_tBQqLP-OC5A,690 -openai/types/conversations/message.py,sha256=6rgMphWrnp4S3WmKEEVsnk2dhgMXtZhrYrgcMci4NtI,2033 -openai/types/conversations/output_text_content.py,sha256=bFDVfODBGMwRLcKeo0OZzZumZdZwHzHkG1B_Bw43vWA,224 -openai/types/conversations/output_text_content_param.py,sha256=8NlazI-VuJ9DgQ-ZGt9xJ8su2-CZ1mb_ebI9O19YC7Q,248 -openai/types/conversations/refusal_content.py,sha256=ThoHeemlqaKlUf7oVYOTUwnHuqph-4RXS4Ud_kGbGg0,227 -openai/types/conversations/refusal_content_param.py,sha256=hWb2AoU0oTKCNLRZs5kzxY2Uk7HkeHhEy5leL29Uy64,254 
-openai/types/conversations/summary_text_content.py,sha256=M7-_fLUFx_L7hOcn4i4e8jyNNWKwHmU8bSMWGhyuAj4,405 -openai/types/conversations/text_content.py,sha256=SV7snTCpe8X3CJy1T1uOMiFn0IyZjWzj7GCtPJRezv8,259 -openai/types/create_embedding_response.py,sha256=lTAu_Pym76kFljDnnDRoDB2GNQSzWmwwlqf5ff7FNPM,798 -openai/types/embedding.py,sha256=2pV6RTSf5UV6E86Xeud5ZwmjQjMS93m_4LrQ0GN3fho,637 -openai/types/embedding_create_params.py,sha256=asahWWNcMvXGDfbTMz4uDy7DU9g6OJ9wowqZByghzw8,2039 -openai/types/embedding_model.py,sha256=0dDL87len4vZ4DR6eCp7JZJCJpgwWphRmJhMK3Se8f4,281 -openai/types/eval_create_params.py,sha256=EMEE1XtHP_AGF_R3ptJe55-uNbfvThBmKzN-sEq49mo,6703 -openai/types/eval_create_response.py,sha256=h8o7zz_pat94dmryy2QDMOK3Lz-szPkmD52faYtBK0c,3531 -openai/types/eval_custom_data_source_config.py,sha256=-39Cjr1v2C1Fer4PLl7rfA-bDK08I-bM4cqlp9Z_mzE,589 -openai/types/eval_delete_response.py,sha256=iCMGN0JG5kFIYNPSCOMSWlTu0FDkd2lbAw1VLO73-bQ,245 -openai/types/eval_list_params.py,sha256=WmIJa3O9wyuDKXXwE3tSnQv1XOTe1hngttSvvhbtf28,754 -openai/types/eval_list_response.py,sha256=mTm1vQbqAfG9u2rfUH8UkJC1vPi_1Z1snKPlYA1EKE4,3527 -openai/types/eval_retrieve_response.py,sha256=pn5FaZ5_dzhX3iiCTlu0iHa9w-bc7Gk1ZHvFllQWVA4,3535 -openai/types/eval_stored_completions_data_source_config.py,sha256=7CYy14MMLj6HBJULXploJPQLs-4wpzlXUazw7oJZAjo,1081 -openai/types/eval_update_params.py,sha256=Wooz-3SDznbC3ihrhOs-10y9cxpTKGQgobDLfZ-23c0,757 -openai/types/eval_update_response.py,sha256=D9ItfznRN1jwp_w48r-i4jvH1_h2uiSpleHePrVigJs,3531 -openai/types/evals/__init__.py,sha256=wiXRqdkT-SkjE0Sgv6MixeECZjF0xaoCPdSGFEh0rEs,1193 -openai/types/evals/create_eval_completions_run_data_source.py,sha256=cQn9zxSIlGOHc6Pnp3BPZOi0o7vJrdviyeddsXoOFaI,7911 -openai/types/evals/create_eval_completions_run_data_source_param.py,sha256=EKOPQZKM2th4g2NAOwlATGTVa7VafEzrbTGopj-6-Ww,7982 -openai/types/evals/create_eval_jsonl_run_data_source.py,sha256=GzE9S1AZy46LOooR61Nwmp5yGUMoFGU5yk4g18BP72E,1219 
-openai/types/evals/create_eval_jsonl_run_data_source_param.py,sha256=sM4-h4qDDkttGeaKgip8JZeuiaghPTBmwwxb5Xa6zhk,1285 -openai/types/evals/eval_api_error.py,sha256=VvRO-N9_tIxpRiSi17PXiMpleowg_Y-Rq2kqiRgmpC4,268 -openai/types/evals/run_cancel_response.py,sha256=SBBBM_lAO_LPsDWYSVVHOlnalqOInQ1TsdoBqHtkpUY,13857 -openai/types/evals/run_create_params.py,sha256=qYSMUdITG61p1BlqmxqCxxG14_3KQtW2nB-vbxWBHxI,12909 -openai/types/evals/run_create_response.py,sha256=WWrrYvKM0LI5YYg7uVPlSq2KVbFX0LfDzFospNnapNQ,13857 -openai/types/evals/run_delete_response.py,sha256=WSQpOlZu53eWBCXSRGkthFn_Yz5rDCcSomqoa4HpUrk,323 -openai/types/evals/run_list_params.py,sha256=vgbJMYybzCep7e9rxUVHlWy_o4GNy4tJyGTwNu4n4ys,758 -openai/types/evals/run_list_response.py,sha256=rB8DMBOkz4mYCjG_YEurKr55zg_-7XKtXuyHN4tQbjk,13853 -openai/types/evals/run_retrieve_response.py,sha256=MpNeNZdA5ERcfe-xG4ezS4EanY4ykCyo5tKkezkWgf0,13861 -openai/types/evals/runs/__init__.py,sha256=sltNV1VwseIVr09gQ5E4IKbRKJuWJSLY1xUvAuC97Ec,393 -openai/types/evals/runs/output_item_list_params.py,sha256=Lp1OQV1qXeEUwMS90_-BpOnO1jICwJOo9QgNC9OGJ2U,821 -openai/types/evals/runs/output_item_list_response.py,sha256=p-fc6-5-5VCvsaNuMefoOtf6htw8eqwKWj7qSjUEOfI,3836 -openai/types/evals/runs/output_item_retrieve_response.py,sha256=Yb-w50QBxucfI3D07pjqNIZ4rmUclbcLfmZGTFiTSBE,3844 -openai/types/file_chunking_strategy.py,sha256=oT5tAbwt3wJsFqSj2sjDPBcisegNwJOecxS_V7M4EdA,559 -openai/types/file_chunking_strategy_param.py,sha256=mOFh18BKAGkzVTrWv_3Iphzbs-EbT6hq-jChCA4HgAE,517 -openai/types/file_content.py,sha256=qLlM4J8kgu1BfrtlmYftPsQVCJu4VqYeiS1T28u8EQ8,184 -openai/types/file_create_params.py,sha256=Ame7qem1zNkBzHFLv5AOB1DnrIgAsIGdzOr6dr3NWZc,1394 -openai/types/file_deleted.py,sha256=H_r9U7XthT5xHAo_4ay1EGGkc21eURt8MkkIBRYiQcw,277 -openai/types/file_list_params.py,sha256=TmmqvM7droAJ49YlgpeFzrhPv5uVkSZDxqlG6hhumPo,960 -openai/types/file_object.py,sha256=Qu0rci3ec0iPh36ThAK4tiCN_BRmULnOFU8jzzFYhB4,1504 
-openai/types/file_purpose.py,sha256=aNd8G-GC1UVCL9bvTgtL4kfkiF0uEjfiimRS-eh8VrY,265 -openai/types/fine_tuning/__init__.py,sha256=f8GH2rKGcIU1Kjrfjw5J0QoqlsC4jRmH96bU6axGD64,1832 -openai/types/fine_tuning/alpha/__init__.py,sha256=e_Evj3xLs7o_SONlqoXDM75oZMbxuGWhxBW-azsXD_w,429 -openai/types/fine_tuning/alpha/grader_run_params.py,sha256=ECVczgghTZ8J9xfqAbNc_VvAHfhOpkaVzQw_wUmE4r8,1414 -openai/types/fine_tuning/alpha/grader_run_response.py,sha256=So-fvQMRvpccsSYb0jfKGQ_MNWdqqS71OcE9GbeLASs,1556 -openai/types/fine_tuning/alpha/grader_validate_params.py,sha256=Jd6m3DjIZAUNY-PlLUWDbH3ojm8ztnfjHmPjKw2DrLM,875 -openai/types/fine_tuning/alpha/grader_validate_response.py,sha256=nLldMLyNG-ICS3HwykDWdKuAPKu4gR2A2I0C79C4khs,773 -openai/types/fine_tuning/checkpoints/__init__.py,sha256=xA69SYwf79pe8QIq9u9vXPjjCw7lf3ZW2arzg9c_bus,588 -openai/types/fine_tuning/checkpoints/permission_create_params.py,sha256=TI90xY-4dv8vDKKZ0FBdbly9JTCrw4FgXkcXz_gTUlk,407 -openai/types/fine_tuning/checkpoints/permission_create_response.py,sha256=F-A0bNQ5iTNUDmtCbQwv1PUDrJWSsdymcbCqfiZ3TwE,636 -openai/types/fine_tuning/checkpoints/permission_delete_response.py,sha256=X_RuOvxa6i3wGLP5joHixv4tNLUpuK-2umiUf6P7Ha8,558 -openai/types/fine_tuning/checkpoints/permission_retrieve_params.py,sha256=3zVCOq1676MizKhKSba2OLmBSPlBx6Az2ZdxyVl580o,610 -openai/types/fine_tuning/checkpoints/permission_retrieve_response.py,sha256=Z2Iz3u9-BJ2dENhBz54d9qVarl0H67B8H268Ybz6lQE,848 -openai/types/fine_tuning/dpo_hyperparameters.py,sha256=RTcK6yOw8CgwKL6CHtxcvY1ucD37d0TXArBb5h_fShQ,1064 -openai/types/fine_tuning/dpo_hyperparameters_param.py,sha256=T3AX6qWEhl-vukTDj6h0cknhlHkiKY1bTsjzAORnWM0,1048 -openai/types/fine_tuning/dpo_method.py,sha256=i6jDyRNOxYb8c_YnsZa5qThpDPUBkO-rTFbpQT2hA5Q,377 -openai/types/fine_tuning/dpo_method_param.py,sha256=v3CD8Ywn-SuIFJyHJsRN3nF379d3MK8jwz1WUU_Q3O0,414 -openai/types/fine_tuning/fine_tuning_job.py,sha256=p1HKONRbL4cnXJaG6zQv_v8L6InFTz5cdmGH9yH1uTk,5238 
-openai/types/fine_tuning/fine_tuning_job_event.py,sha256=POxSD7-WxAtJV2KuEpA9EmZi7W_u0PikOUtUzxIXii4,854 -openai/types/fine_tuning/fine_tuning_job_integration.py,sha256=uNFfuBV87nUHQORNGVLP_HbotooR_e37Bgd0dyZ4nUM,241 -openai/types/fine_tuning/fine_tuning_job_wandb_integration.py,sha256=YnBeiz14UuhUSpnD0KBj5V143qLvJbDIMcUVWOCBLXY,1026 -openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py,sha256=7vEc2uEV2c_DENBjhq0Qy5X8B-rzxsKvGECjnvF1Wdw,804 -openai/types/fine_tuning/job_create_params.py,sha256=p42ebOzvo_ghEitjITP4Qg-mhUvQchreeDrd_FR5YKA,6178 -openai/types/fine_tuning/job_list_events_params.py,sha256=4xOED4H2ky2mI9sIDytjmfJz5bNAdNWb70WIb_0bBWs,400 -openai/types/fine_tuning/job_list_params.py,sha256=wUGXsQ4UDCKvAjHDZAZ-JDU6XAouiTGThb0Jo_9XX08,623 -openai/types/fine_tuning/jobs/__init__.py,sha256=nuWhOUsmsoVKTKMU35kknmr8sfpTF-kkIzyuOlRbJj0,295 -openai/types/fine_tuning/jobs/checkpoint_list_params.py,sha256=XoDLkkKCWmf5an5rnoVEpNK8mtQHq1fHw9EqmezfrXM,415 -openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py,sha256=Z_sUhebJY9nWSssZU7QoOJwe5sez76sCAuVeSO63XhY,1347 -openai/types/fine_tuning/reinforcement_hyperparameters.py,sha256=Iu2MstoQBzXTmifW_jyWfomBT6nOUA6COO3_m0ufm2Q,1426 -openai/types/fine_tuning/reinforcement_hyperparameters_param.py,sha256=_NwYZjJ1bKN_ePxITeB0rgLMhO8Xpm8xNoYQ9aB_c_8,1357 -openai/types/fine_tuning/reinforcement_method.py,sha256=pGc_df_gFfIvCfqFeYH7vlrMBhw44LZt70L0s18EK6I,958 -openai/types/fine_tuning/reinforcement_method_param.py,sha256=FpgTPJewfFQB9ZU0IrmHWyEAmcQ8cxDqmEu15xwOAhg,1090 -openai/types/fine_tuning/supervised_hyperparameters.py,sha256=F3fY--I2O4cBOHflfn09aeHW8iZKA7cIhAMdMzPqc4I,865 -openai/types/fine_tuning/supervised_hyperparameters_param.py,sha256=WogLPJmKhsqgj6YMGxXQ3mY8BusZgCx45StftqNTayg,862 -openai/types/fine_tuning/supervised_method.py,sha256=p9lV9DCi7KbkfOuZdytm1Sguqt-0AWtRiNawxxSuCgA,408 -openai/types/fine_tuning/supervised_method_param.py,sha256=LNvDK4FdDWflr7KQHYBDcWP9UB5UBcGP3YohVsnvi7s,445 
-openai/types/graders/__init__.py,sha256=GiHbVTKVpfAqbbzZrtF-N00Njkr28cNG26wd_EDLPGI,1019 -openai/types/graders/label_model_grader.py,sha256=z7YmiMn7suYk5CbIFAn8MLTnYeJgxhJNiDcI5S4rDGQ,2026 -openai/types/graders/label_model_grader_param.py,sha256=21MydaUGP5Y5zBW61ShSxwtpzY-NcC3gGJaaCWka1KU,2310 -openai/types/graders/multi_grader.py,sha256=QyTkY28D7_DyZHOdlTCpLHHyzWFYDs8KT4-30_XgSLY,1018 -openai/types/graders/multi_grader_param.py,sha256=6-AOnwpdJt5yGBqdtSu7fPOIav0GuipZMg5ZnDskYtc,1191 -openai/types/graders/python_grader.py,sha256=WnZ24W9dtfqX8ZEPgVArYNkyAQElz2j-6no03u1wcU0,534 -openai/types/graders/python_grader_param.py,sha256=ss-fnK1MZe9eDLvFd2sz1AayD3cbuIMBn3mXCDUZMb8,565 -openai/types/graders/score_model_grader.py,sha256=h5rIYQ4dKTFwo6s_fUPgVhFaHl2b3e3yRTUMcvivIqQ,3128 -openai/types/graders/score_model_grader_param.py,sha256=Nte2LuanCMfsfnBlNkcc2OR9W_a0YhrGLwcqh2WBrAo,3293 -openai/types/graders/string_check_grader.py,sha256=Ofmiv6cZw6Le42M-XQ2p_IJqazRLN626xf_zie5LVKE,675 -openai/types/graders/string_check_grader_param.py,sha256=gwIhLOMY4xyI6lKLwGTrTlorb98mODRATC1Ei2KbvrY,771 -openai/types/graders/text_similarity_grader.py,sha256=SYoSbN81qi3_Q-y_l7H4B_ATbwfLlx_RnzY2J11f1FQ,887 -openai/types/graders/text_similarity_grader_param.py,sha256=fWPcnMC6Qp0NjsaQOm7wJ0eCumyXex99MX7URGm2ja4,1045 -openai/types/image.py,sha256=cWbI4EZxZ_etXKGl0u-7sr3_fJEaWwP0RpJ2fSIDYfc,766 -openai/types/image_create_variation_params.py,sha256=Xeka4vp5V0o8R_6vnLsqiQhWH5O6tUSCyO3FKGVmAeU,1426 -openai/types/image_edit_completed_event.py,sha256=E19lxYAYTACjUME298BXryQdQZ0DnzWZPbzM636el6k,1736 -openai/types/image_edit_params.py,sha256=eviBpqYKwM0btam2nDwQV-0mM4KDdXfFkc-HUT8_L4Q,5374 -openai/types/image_edit_partial_image_event.py,sha256=kgMb_9JveHjePvhZFhUnj5-us1mdZhgzFaoOUPmFBLU,1095 -openai/types/image_edit_stream_event.py,sha256=GtHKc8VdumW5RnQtIiyMqhwIIaqYogKXZF1QNuq9Bd4,516 -openai/types/image_gen_completed_event.py,sha256=sA2Ezhl-Gwh0cPq3VFmDSZDD8yiO1i5mkB-BziIdqd8,1745 
-openai/types/image_gen_partial_image_event.py,sha256=vTArcJ1v00opWXiP8iUr9L886cg1VUCtoJLL8NCUH8I,1077 -openai/types/image_gen_stream_event.py,sha256=gVzdE6qzBPpK3kEFM7EdoUzBa4DgCaS3AdF9gjd0pUs,508 -openai/types/image_generate_params.py,sha256=Jl4jufBZAvsw0fGzOnjjoXng7VGiplQN6fhAOecEtQM,5327 -openai/types/image_model.py,sha256=FbWk4MBzMBGTxq4yXJT817FfwV12qD7LyUmNZ3inDoY,254 -openai/types/images_response.py,sha256=cpbt5tKIax5JIDM4FSj3hjo2RO7AFN2pJPNQm4AWqeM,1905 -openai/types/model.py,sha256=DMw8KwQx8B6S6sAI038D0xdzkmYdY5-r0oMhCUG4l6w,532 -openai/types/model_deleted.py,sha256=ntKUfq9nnKB6esFmLBla1hYU29KjmFElr_i14IcWIUA,228 -openai/types/moderation.py,sha256=6mV-unXrz5mA47tFzMNPiB--ilWRpOXlCtT5HKZE7vg,6840 -openai/types/moderation_create_params.py,sha256=bv5qr2y_MQ1MYBhWWUiCET2L18ypWtQpaIKzYTrl9xs,1032 -openai/types/moderation_create_response.py,sha256=e6SVfWX2_JX25Za0C6KojcnbMTtDB2A7cjUm6cFMKcs,484 -openai/types/moderation_image_url_input_param.py,sha256=t1r9WD3c-CK2Al1lpB4-DjfzLFSwgETR0g8nsRdoL0Y,622 -openai/types/moderation_model.py,sha256=BFeqSyel2My2WKC6MCa_mAIHJx4uXU3-p8UNudJANeM,319 -openai/types/moderation_multi_modal_input_param.py,sha256=RFdiEPsakWIscutX896ir5_rnEA2TLX5xQkjO5QR2vs,483 -openai/types/moderation_text_input_param.py,sha256=ardCbBcdaULf8bkFuzkSKukV9enrINSjNWvb7m0LjZg,406 -openai/types/other_file_chunking_strategy_object.py,sha256=Hf9XBL1RpF9ySZDchijlsJQ59wXghbVa0jp8MaEoC-4,310 -openai/types/realtime/__init__.py,sha256=vIt8a8hMBc42EcWBP9hIqwzotPt2737yxjJY0vjSt-E,16937 -openai/types/realtime/audio_transcription.py,sha256=uEUysrLR-RFQ4-3X5M7b1d0H6SbJMioil5xOG2LzO_0,1332 -openai/types/realtime/audio_transcription_param.py,sha256=FMAYpRHVNVCkKwmUGRSFQsGaCmKM3qP-g1aQwtC3Dwg,1275 -openai/types/realtime/call_accept_params.py,sha256=up5XX0MDcN14v9KK6MyAqYdvDfGDVwWqZHp8HNC-gSg,5171 -openai/types/realtime/call_create_params.py,sha256=r0vyhcjvDAKZF8DSbLP7bEQAVh92hgzNBnXBQWJ56no,544 
-openai/types/realtime/call_refer_params.py,sha256=Zhy_H0Jv0leRL6HS_WH7Oca7HUlZ0feINePxN-hms0s,422 -openai/types/realtime/call_reject_params.py,sha256=cyz55zIN5DzSL74uhzeKQOSecl4V0UkpySt7zykoGIA,374 -openai/types/realtime/client_secret_create_params.py,sha256=g1pj1BB4T5ZvUltoj6BgfAEqzL2zCtMYV5Ai_ZJioLM,1674 -openai/types/realtime/client_secret_create_response.py,sha256=maHTZ6A_YogizgdV4jy5xOakvVMRUc6NRyWxzC9hObY,932 -openai/types/realtime/conversation_created_event.py,sha256=dJiXF9qKzTyPGFjURZYRrtu0np1ZtDpSYUpQgXPzrRo,751 -openai/types/realtime/conversation_item.py,sha256=BGqZp9UpybVbEyr6enYqdleryy4NMbXpzkUPX03cvoI,1437 -openai/types/realtime/conversation_item_added.py,sha256=3cMQ_vYbEUlnPTYFZmayW4dqkt-gpbzNxDHI0RJhWL8,742 -openai/types/realtime/conversation_item_create_event.py,sha256=-42Pp6Kswz74lpWr7sHbDI3FO4byz5TJvD3VLMNRwhg,1089 -openai/types/realtime/conversation_item_create_event_param.py,sha256=14RaZ7n5CRh6cKaOsOsN6n94MLLijFzY9KmltHnH8xk,1110 -openai/types/realtime/conversation_item_created_event.py,sha256=2err9ZwNCqt9oxy-jvp5y_T8C0_OkHl_KxJCwyHesaY,825 -openai/types/realtime/conversation_item_delete_event.py,sha256=Ao3zKkKF_JQyBwFK1fGojKw96cZjIfHziwvRyLPpgMQ,548 -openai/types/realtime/conversation_item_delete_event_param.py,sha256=a17h8Hd8MxUbXT6NQg8YpTr1ICt1ztRecpfukHw4g34,569 -openai/types/realtime/conversation_item_deleted_event.py,sha256=7dZc3PmGyeSwNGwHCvQgoHwYK4QN9kcv9kRPL4QfSak,491 -openai/types/realtime/conversation_item_done.py,sha256=2dlfFQgk0mSVCoOPUdXKbKShbzsesucxkk84bob_R1A,738 -openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=1GEybZ5q1L8lH0p0lA-grhCmm8F8WN3mUcLAC-FG-vg,2440 -openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py,sha256=xy20zUa5uSj0HtefAbOq5ZgG_N4o-HkAbxecbIhvOhc,1349 -openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=Gwm8rry9Tsv2eNkfrjsjDE69K9qmc27KXcps3zdCTGA,1076 
-openai/types/realtime/conversation_item_input_audio_transcription_segment.py,sha256=hBP5di6OQ9u5fhKjKz6XKmy7T-An8orAlZfboIYukHo,990 -openai/types/realtime/conversation_item_param.py,sha256=yrtdTZDIfAsJGwSXDY7v-_e9GtOPqRNXQAM2LWjxOOI,1483 -openai/types/realtime/conversation_item_retrieve_event.py,sha256=qGlMQI_0YfoO11d0VgV5iVFLHMCjHErPWN516xn0h9s,558 -openai/types/realtime/conversation_item_retrieve_event_param.py,sha256=TRYaZ3btNaywRPaMOVRzK5VT7wh4taIGjbUdhkZ7gFc,579 -openai/types/realtime/conversation_item_truncate_event.py,sha256=IcWi21tiuaduW2S_-w5qSYZIIYEY5c-mRvpb54In_pM,944 -openai/types/realtime/conversation_item_truncate_event_param.py,sha256=-rMZ2Y1TJ-naH6g7Ht8dipjQRnOnSW8xWHrzT9Up4P4,985 -openai/types/realtime/conversation_item_truncated_event.py,sha256=W2L6YmRG-YQ3YZd0knL-EUL3_qPColjJj-DzdECYwv0,703 -openai/types/realtime/input_audio_buffer_append_event.py,sha256=iY7_Acz5Lu6Ul_2d-Ho0Tnjo4b8y-eZuztjsgJtqVPQ,661 -openai/types/realtime/input_audio_buffer_append_event_param.py,sha256=XmN2bE6jBRrkKGVPJdnPjJql5dqMPqwbmFnxo-z22JE,682 -openai/types/realtime/input_audio_buffer_clear_event.py,sha256=8qKqJLRpEhiMcGiuosO5TRx6e0qCIZq3F-TF-pWqIcU,488 -openai/types/realtime/input_audio_buffer_clear_event_param.py,sha256=y-zfWqJsh1n6r2i0MgLDpnNC4g1dq3GCS66Twfkng38,499 -openai/types/realtime/input_audio_buffer_cleared_event.py,sha256=L4tqLyaTqQGdBoZudMxF0BJNqT5-lUVWKuSudIFKA6U,428 -openai/types/realtime/input_audio_buffer_commit_event.py,sha256=gXFJz3MRKaOcjMB5MpmzPSM3tj5HHPxWSScpGJCVEpE,492 -openai/types/realtime/input_audio_buffer_commit_event_param.py,sha256=B8agXC-rUl-D-RijJ5MeTLgw43qVYzmf2_2oAVokhLY,503 -openai/types/realtime/input_audio_buffer_committed_event.py,sha256=_u1WZzg0jTPr4NEwav7dVpYJNDPjv0sI-4XzFSFlJj0,732 -openai/types/realtime/input_audio_buffer_speech_started_event.py,sha256=e7EScZ7HN3GL9jvQVMKz7qRZ6vPIsRO6yPGkHlFBvIs,860 -openai/types/realtime/input_audio_buffer_speech_stopped_event.py,sha256=VAHzgW0UzAQz2cA_j3VgVmfbiRJeH7GzhugeyAXGfC0,778 
-openai/types/realtime/input_audio_buffer_timeout_triggered.py,sha256=Q8Og1NhtzVz_YLVm2VWk7ZqxB00lVn5H7anwvG12wVo,853 -openai/types/realtime/log_prob_properties.py,sha256=ADUI2Bprv-PM8EGaMLOo77UpbYQKttIOyLuR1lsEJd0,452 -openai/types/realtime/mcp_list_tools_completed.py,sha256=jc7_Cz3ZPrxrHFIoRaesudFHm7XLwEfbxASknRBR-1w,473 -openai/types/realtime/mcp_list_tools_failed.py,sha256=do32WvGDKI15Mcwp86_eUU1Yj3JGs7KOctwrddchlwM,461 -openai/types/realtime/mcp_list_tools_in_progress.py,sha256=4nOZiQCY6DAAxpST0K2wQGSvqsffgWczyyxaFgoPOJI,479 -openai/types/realtime/noise_reduction_type.py,sha256=lrAb7YhMM-anRYzzOha8hcVloNJR_zWuFrO2SccrcIo,238 -openai/types/realtime/output_audio_buffer_clear_event.py,sha256=VP4gqG3Mkc4n3uo_AuGzC9062yRAVc5h-wpRk0bga1g,493 -openai/types/realtime/output_audio_buffer_clear_event_param.py,sha256=gUnmGZhwaMW5tpfYkEjafmcjuxe6eVk0CwQsq_od0Pc,504 -openai/types/realtime/rate_limits_updated_event.py,sha256=lPYwNoaEopdkfIr5KDOz0ns1OJKEn-2HI209zpDzeuQ,948 -openai/types/realtime/realtime_audio_config.py,sha256=TXlcVjt8PwthyneFJ0kUqF4j2nwG-ubJyHENzrcd0gU,467 -openai/types/realtime/realtime_audio_config_input.py,sha256=48ANMlwtkmcH04CvcDXJxJYF4VIhaBzJSdXn3a3eV1Y,2874 -openai/types/realtime/realtime_audio_config_input_param.py,sha256=grQocM_NhSJlByguflJ7jc4H1O6CknI5Im_A85_NbDg,2926 -openai/types/realtime/realtime_audio_config_output.py,sha256=DdlfBuf2quGvKikEcNFRx8C41B-fO6iENK9L5Y6DCHA,1389 -openai/types/realtime/realtime_audio_config_output_param.py,sha256=elX8xA49gDbbFjvJv24dquBwJVyxtOJHIwHN6XWR2Vg,1371 -openai/types/realtime/realtime_audio_config_param.py,sha256=RcizKdhyXCLKrykVY3pQx_z_w4Oe1Xk5p2IqcHgvZu8,503 -openai/types/realtime/realtime_audio_formats.py,sha256=YTBxJ-D1AHA0EoaH1s-N99489Y57beSn7RiA6SjxIuQ,926 -openai/types/realtime/realtime_audio_formats_param.py,sha256=jtxa21eFt-2XkhMMadEvZ7MTv-itqCRWqDi4VEmMjwI,817 -openai/types/realtime/realtime_audio_input_turn_detection.py,sha256=b3xLXLZ7tsLoTKE2Ex-dqK5mir0QhA0ohpKEgErwDZg,3449 
-openai/types/realtime/realtime_audio_input_turn_detection_param.py,sha256=ulsMAl_sDB6In9I9aGI1XERH-cVUtEyQPU9uyOtF0rk,3280 -openai/types/realtime/realtime_client_event.py,sha256=4_lYEyK-wj25VTh8GTaV0mZ0t17KhkfJrQ0yUF0mCYU,1473 -openai/types/realtime/realtime_client_event_param.py,sha256=YPveu8tNyKmZkK24qEJv8js5l5NNygDyAcsza2iOmKw,1543 -openai/types/realtime/realtime_connect_params.py,sha256=Zd5FnP-6nEzAPiWTckSdVGQsA_8GqhwELCpQXt22J8A,288 -openai/types/realtime/realtime_conversation_item_assistant_message.py,sha256=g67lu3x-Z3zw9RdXyEOWTbmsDKlmRNZErtE510jMsy8,1715 -openai/types/realtime/realtime_conversation_item_assistant_message_param.py,sha256=vlSO9xgZHh099lbQU4FqngPEIgkNDB9AsFwatJeFR0I,1683 -openai/types/realtime/realtime_conversation_item_function_call.py,sha256=7HTj4l_AtGBPxRZqQ9JlY9uuBLrOIDatyBE_JVji9YU,1202 -openai/types/realtime/realtime_conversation_item_function_call_output.py,sha256=E5BtjqP6anIi9XpdVKtpd8pFh8SXoersKOpn6hbrS5o,1103 -openai/types/realtime/realtime_conversation_item_function_call_output_param.py,sha256=45NvbyGoO4V6lbeQn5mKck8SQJGHQb3xtgTy2GmnuqE,1100 -openai/types/realtime/realtime_conversation_item_function_call_param.py,sha256=hxeYcWk09Lota1TqIZvg5kXMu_0S0y9iDGJxPlzHmVA,1182 -openai/types/realtime/realtime_conversation_item_system_message.py,sha256=mq0tDiLi7r4bMRqI83lgnSF1uJwGsFUfhKr2181ELYI,1224 -openai/types/realtime/realtime_conversation_item_system_message_param.py,sha256=0iLyhkIE6xLzjDI7vqa-bbs73kWnaCQz8rHBujMY6nA,1226 -openai/types/realtime/realtime_conversation_item_user_message.py,sha256=N7jJ9WlJMabAyvldcGJzfVL1w2Nw-wDcBJma3lyIJeQ,2111 -openai/types/realtime/realtime_conversation_item_user_message_param.py,sha256=b6KnyeTZty254f5A2GCCoiH-cvIXffs9UrLJprlRSFQ,2045 -openai/types/realtime/realtime_error.py,sha256=1pg3if_lIqzP7Ow23UGQyqs8x0PLdiLIC-Ax79TLe6Y,625 -openai/types/realtime/realtime_error_event.py,sha256=fAosJOL7vMbG5JYMwzg8yrRBaT0INz4W_1XCxIUFzTw,466 
-openai/types/realtime/realtime_function_tool.py,sha256=3CDiCZCM0A1VLRxOFdG4teFXr8dx0JFU94KbSn-JgGc,734 -openai/types/realtime/realtime_function_tool_param.py,sha256=-vDBSmMWNdbABC8dxVckkNeRdEujAKeff6icJvYrM0I,674 -openai/types/realtime/realtime_mcp_approval_request.py,sha256=Li-i-Sa7tfiI5nWA4Dyz4ac3_KTWd_qLc3u7KNOcMjM,621 -openai/types/realtime/realtime_mcp_approval_request_param.py,sha256=zdoRzHIrSzhfa3DTO4XyYQ4P1hNq4J3XesJFQmuD-9Q,717 -openai/types/realtime/realtime_mcp_approval_response.py,sha256=3GcWB31Mg2pWynk3-IqflayLAD6QRt_UXB2-4sKxgOU,676 -openai/types/realtime/realtime_mcp_approval_response_param.py,sha256=CU8G-jv5aYbTrts4JQuZeLHf3RZ2HgIrsCDtwkqSxk8,755 -openai/types/realtime/realtime_mcp_list_tools.py,sha256=MzGc-pTTKpBqweIMwvz5BOzBtDQGmqXFkY0En81l1Xw,889 -openai/types/realtime/realtime_mcp_list_tools_param.py,sha256=8L8i5K1xUxvT2Op4B5hN-x9YoclR9Wlb9vNi2q1TQo4,975 -openai/types/realtime/realtime_mcp_protocol_error.py,sha256=4jqkfl6h7tFT5kQy40VW24LrokpKe6X4VROYlNmOHDQ,313 -openai/types/realtime/realtime_mcp_protocol_error_param.py,sha256=jlufPTMU_9JuYtqzQGTmb0o978gDiOFxkNx0yJAvwx8,389 -openai/types/realtime/realtime_mcp_tool_call.py,sha256=s1sx6konXKS340J7oI01BOKnwm2uP2aOlmOmBwKdnnQ,1325 -openai/types/realtime/realtime_mcp_tool_call_param.py,sha256=9-ks4Mhu91HzzgZBjq1zHqO1zP4ae4ezdbYgI367O34,1339 -openai/types/realtime/realtime_mcp_tool_execution_error.py,sha256=swcOrTKO5cx1kkfGS_5PhBPEQx_Vf_ZW04HbA5eRa0g,314 -openai/types/realtime/realtime_mcp_tool_execution_error_param.py,sha256=3IuPmvy52n_VByGYqfCr87kciEQdJMTcwGWj4__PiX8,380 -openai/types/realtime/realtime_mcphttp_error.py,sha256=-Zqz0xr2gPs6peG_wC3S8qVgtEUJNrZm4Mm5BIvmZw0,301 -openai/types/realtime/realtime_mcphttp_error_param.py,sha256=GcmAMBvZVNrN9p_tneHPu_pyN7D8wCytaAKruFtMfwI,377 -openai/types/realtime/realtime_response.py,sha256=IvGy_VZPIRVCD4-mLElif7bOVMFJglR0tvU1zpfz6ys,3826 -openai/types/realtime/realtime_response_create_audio_output.py,sha256=gnMvrt0BR440zNDOmYB-j_Eh9WcaDExnZE8P68ptmdc,1004 
-openai/types/realtime/realtime_response_create_audio_output_param.py,sha256=u1kCAMUjCRFoM402IZbfvRxvQLzrKN66PLqKG-yD2i4,999 -openai/types/realtime/realtime_response_create_mcp_tool.py,sha256=OhQM73g8gqOgsWphIb6Jw31ZaaucbG9BKDu7qk6mc2Y,4512 -openai/types/realtime/realtime_response_create_mcp_tool_param.py,sha256=2kxSDx7qzMPwB-pizGyqlr6QA2EnaSoEI3U_3RE0Ykg,4415 -openai/types/realtime/realtime_response_create_params.py,sha256=VECIsK9brymU5sgjGwDtTbb8-X_jYvcVEHo1QMLIFE4,4284 -openai/types/realtime/realtime_response_create_params_param.py,sha256=85nEALuSKC4TBGSj6qlZEUrqnNHEkhKsZuFtxTIqh-w,4316 -openai/types/realtime/realtime_response_status.py,sha256=bSeFcCy9c4jyf12ZzJFcxpCYKrSwMEgpNipOE1SNqcA,1325 -openai/types/realtime/realtime_response_usage.py,sha256=rxUW5DM1d4BY3F74KaImcADVnWasSv_Zj_febO30Vms,1429 -openai/types/realtime/realtime_response_usage_input_token_details.py,sha256=YcOrEtHj9QjJ-s3fmNqGMJ2nJUcJO_J9yXbCueppqZo,1244 -openai/types/realtime/realtime_response_usage_output_token_details.py,sha256=9wWB5tRft0LQsIgsIBsSaAhv4rDGgTl9Y5svpGU4ooE,459 -openai/types/realtime/realtime_server_event.py,sha256=5XfW7BkJMsJJUGXq0hCd7AtCa2uPPKnQbqkrorx_LYk,6578 -openai/types/realtime/realtime_session_client_secret.py,sha256=hjco-0FnTvhnMSLezczUBDz739hbvZSbxB4BeZCeark,583 -openai/types/realtime/realtime_session_create_request.py,sha256=1DNHKmhpYZLU5ENwoomUh6IRZ2lt03mcpSpmQjdKiPo,5219 -openai/types/realtime/realtime_session_create_request_param.py,sha256=U4Lq3X2XKrq6V1WabMMtegOWkP8eTuCUfWVrM_68X6Y,5205 -openai/types/realtime/realtime_session_create_response.py,sha256=zZbfse_KKlKlZwZ6Lx_4BaL0G-5yp3_ZjV2OVBblasE,17868 -openai/types/realtime/realtime_tool_choice_config.py,sha256=DV0uuyfK59paj5NC9adQskUF2km5TRSiHAlMDu1Fmdo,472 -openai/types/realtime/realtime_tool_choice_config_param.py,sha256=0vqYNM4MkU5d8GXfitT6AoE9AubKeLZOSHGOH8q73QU,550 -openai/types/realtime/realtime_tools_config.py,sha256=JSxehiQnA_tJUeXvi2h9H6wlYsnhhtRWB_o5S20V-oQ,318 
-openai/types/realtime/realtime_tools_config_param.py,sha256=0jxEaIIHOdhLLAN2zQqsx8hrHSjWWeVvTW-896ye3gs,4708 -openai/types/realtime/realtime_tools_config_union.py,sha256=FbA6HwGnNC9AKBNh-3vjb7yzag5Snc88RY18gim-fY8,4769 -openai/types/realtime/realtime_tools_config_union_param.py,sha256=Wkxn6uvJDWi1IadV_DjbPmYeyThJlB50S4iizpw5Xvk,4595 -openai/types/realtime/realtime_tracing_config.py,sha256=TzKfoTJuLjPBG-qozwigXQv1uAZszgVX_K-U6HaiEjY,871 -openai/types/realtime/realtime_tracing_config_param.py,sha256=SqfUQ8RO0Re28Lb2AF2HlaJj7LS_3OK3kHXrUsKPcDc,840 -openai/types/realtime/realtime_transcription_session_audio.py,sha256=yGDcdMTaxGZKIgmDKnKQeEtgEH5SVYJfPXpr_zAr03c,414 -openai/types/realtime/realtime_transcription_session_audio_input.py,sha256=IXUUnr2WbKCeqPyd9VTge1Ho0MQvy0FZMh2l0awdTZs,3003 -openai/types/realtime/realtime_transcription_session_audio_input_param.py,sha256=sCvGim5THVMJ1c1a5ipyiht85tcrkgt75OsLIUp8ncs,3055 -openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py,sha256=nLF5DpguSg4-ectSCSSvbV7t7X2Z_yUvSCNQEdEuFEM,3489 -openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py,sha256=VL4MchzWUsCShFXvTnfJOKUqOh71mtZ_0YmEBrJ_ofQ,3320 -openai/types/realtime/realtime_transcription_session_audio_param.py,sha256=IdEgpkEbtPrEHJ-KkvEcV_8aSvCBzxBQDUQhB6ehQgI,451 -openai/types/realtime/realtime_transcription_session_create_request.py,sha256=-hJUbNd0rR0pbMnCzXvCylhOSLWUG42RNweAk_KhpXw,899 -openai/types/realtime/realtime_transcription_session_create_request_param.py,sha256=kP35GihtGw5L6T1okdSRJ9rJrs7FDBURy7htgKPDMR0,928 -openai/types/realtime/realtime_transcription_session_create_response.py,sha256=dpnCsv19sMo4aQ3oYIcStplpKft1EFRxQFLzLvaCaUM,2434 -openai/types/realtime/realtime_transcription_session_turn_detection.py,sha256=hFAIILzs1QaQ8JvX8PoHBExUm3eNZKWnJQfjQKnGBfE,1040 -openai/types/realtime/realtime_truncation.py,sha256=lnr1Uq9kSs6OfJb_TcvQrs7jx92UuSKaIhGNvwUK-qU,380 
-openai/types/realtime/realtime_truncation_param.py,sha256=wBXHiAPS_HA6MWBqhRGEtqZxu6RdIrgnTVRYgUljwq4,442 -openai/types/realtime/realtime_truncation_retention_ratio.py,sha256=bP3nvGYdj2ssc1e-rfrL8VFksVTvuA4HjD50CwxSR98,1380 -openai/types/realtime/realtime_truncation_retention_ratio_param.py,sha256=K_iW_qPUxsnWIXpgCUpdlUxlngAcWZKwFZKXenOjyIQ,1397 -openai/types/realtime/response_audio_delta_event.py,sha256=9-CcYOY4JeBiFYVkGwQ1uOVHrtRNxsMg43M3csgaOw4,755 -openai/types/realtime/response_audio_done_event.py,sha256=Kuc7DYWSIcNfCH8M2HIl80phHyYnHnChfSdp30qXqUA,692 -openai/types/realtime/response_audio_transcript_delta_event.py,sha256=Pr0dP0Up-jY-QQiyL07q9kByaOMkV0WIaYrkDOCLhXY,786 -openai/types/realtime/response_audio_transcript_done_event.py,sha256=IEbDxwWpjCIoMpT5-iu3gTSAqbmqvOcjsKsj3PuYKvQ,800 -openai/types/realtime/response_cancel_event.py,sha256=WCXDsVwgkgyb3L8Nh-bPaaiDnifXjLyPbxvoIkN7YA8,636 -openai/types/realtime/response_cancel_event_param.py,sha256=nidzBL83liHwyImiNGiz9Ad0V34EtFAQDw1utqcF6ns,630 -openai/types/realtime/response_content_part_added_event.py,sha256=CSsdmclKPRCclNpUixYg54tUmJG3Dy1fgKe2-D7E8fs,1231 -openai/types/realtime/response_content_part_done_event.py,sha256=ws8nIPrUln5ue45ID_UdR8AjgYQiL6F0imrv7TMRsfc,1189 -openai/types/realtime/response_create_event.py,sha256=GlWcb2kLyq9oDcsJQ4nkwWjfGjleMQWy6HmDKztCXU4,654 -openai/types/realtime/response_create_event_param.py,sha256=_NQArkqOHZCFJsxq26HjHGS2IgVh8cy4VcjN9M80cj8,665 -openai/types/realtime/response_created_event.py,sha256=7LMTqoVE0WiqlAdEYMN0weSTBBhU_4CyD3gFxLAeKcg,505 -openai/types/realtime/response_done_event.py,sha256=u44ZBOYbzqiC8VqqDp8YuA9qBmVHWLXMJZGvOqJOIks,493 -openai/types/realtime/response_function_call_arguments_delta_event.py,sha256=8mQkxsj6MEUMtVutdqQG3ERqL4u1qNY55WKSXMol-0s,792 -openai/types/realtime/response_function_call_arguments_done_event.py,sha256=iIDsECFP-jj_fkcDGa1ZHstjkBVYxdbFeNvZV3_z0sk,792 
-openai/types/realtime/response_mcp_call_arguments_delta.py,sha256=wXMighxToTIFK2ElkOrIYKvxqN9i-tZDR3iUdTFvRFc,831 -openai/types/realtime/response_mcp_call_arguments_done.py,sha256=3Hlq2bJW31yvvPOin3IOf1XSRMLMwPoZL5Kn9uTm1-o,708 -openai/types/realtime/response_mcp_call_completed.py,sha256=OlfSjKJmHn77tdwgt5wVGbGXL8HHQWhYQzpFf8QuOWg,563 -openai/types/realtime/response_mcp_call_failed.py,sha256=m3AZktEUCOzRp6UKCLPhLDhgYYkaWmO3hUTwTRERBmA,551 -openai/types/realtime/response_mcp_call_in_progress.py,sha256=PjME9TvWMBBR5gnEgrOf8zbuliR3eYW1h48RDsRgPfA,569 -openai/types/realtime/response_output_item_added_event.py,sha256=B_H6V9ijObo-JOUFIEH1JqMdKhhSuY24K5I9rly1j6c,721 -openai/types/realtime/response_output_item_done_event.py,sha256=64b5NILItLOAi9IxkYEhfAkmJSzoYjDLo4WJaL-zdOY,717 -openai/types/realtime/response_text_delta_event.py,sha256=Al1GXaZ55DQbrEyou48U8IfP2525e9G7277YfFM9VSU,734 -openai/types/realtime/response_text_done_event.py,sha256=bhm59wW9hARgdl55rzzEvx33Xajy5gQ9Fr11RUWOeno,737 -openai/types/realtime/session_created_event.py,sha256=nPM98I--WtKuzs3Srofj6kptYbRYt9x5LBMxxL7j9mQ,770 -openai/types/realtime/session_update_event.py,sha256=TmxL9PYD7GD_MAhRoGsor7mhAp2PZl4JaWl_APLteew,1088 -openai/types/realtime/session_update_event_param.py,sha256=7uKlFHpoKcvYMCeAJ2cwCWe-dDPW6XeMV-zNgRvtX1E,1160 -openai/types/realtime/session_updated_event.py,sha256=a5zFzk9iBCpB6TOjb_x_KjTdOhIPOSlW3cpx3nGNiKk,770 -openai/types/responses/__init__.py,sha256=kBlksNyIvsmL1rbaVsPGiblwMNuMbSxIHSysKKeYYfM,16300 -openai/types/responses/computer_tool.py,sha256=bigJ0RyhP6jKtAB7YM-oP2sPtL1isCnZufTue80u9vg,607 -openai/types/responses/computer_tool_param.py,sha256=7SJn4rXdQeAt-DiMiXfdPI6Q_X6S7Wfxrc1Am8nPZeg,693 -openai/types/responses/custom_tool.py,sha256=WcsLiBUJbnMhjFF3hAFCP8SsCzzcbJh4BhC3NiVIl0c,736 -openai/types/responses/custom_tool_param.py,sha256=cAbh_D2pQa0SPEFrrRVPXXoQCndExjjqKrwRaBghWZk,748 -openai/types/responses/easy_input_message.py,sha256=4rPo04A1WVaCxLpPn3e_gJNgdNuAKlH9k6ijLK3-Bdc,817 
-openai/types/responses/easy_input_message_param.py,sha256=8kM4AkSoiUOspuDTQPfdLjkgydQ9yHmo-FCfjdthtgU,873 -openai/types/responses/file_search_tool.py,sha256=2icXjeM09toOtIDIQP2n3fgHoIWJSpVS2O5LMUEya4Y,1870 -openai/types/responses/file_search_tool_param.py,sha256=bWqRblYEALYn4DjJfZR0JnOURieh-IhjmRHPcuMMrqk,1940 -openai/types/responses/function_tool.py,sha256=gpcLwRIXSp92jVJcIXBUnsSH_FzJrlH-jLIo-IbE1IY,796 -openai/types/responses/function_tool_param.py,sha256=ZDGBcqx-T24wgum2YHr3kBzk-P8lH-lCkuAHxyzKxGI,861 -openai/types/responses/input_item_list_params.py,sha256=wazm2tELpbpBWdAQrXGBq88Bm5RsxWXmlVJAV3f_k-I,964 -openai/types/responses/input_token_count_params.py,sha256=rsM-e-quCSgO1gylCh8lz3HJtnvGejTkKASzJsr7RGU,5314 -openai/types/responses/input_token_count_response.py,sha256=w8LogkiqiItGuty0-bXts8xy1Ug-bbSYKEm7DDKmoP0,310 -openai/types/responses/parsed_response.py,sha256=1rKsrhTtF8LhoRt_SHtBtQcRbztxAvPgZvTqGB9AMsY,3315 -openai/types/responses/response.py,sha256=WZcotgnNu3UrRgrClMXNYJAisVlGC12MX9hF-F6YFYQ,11617 -openai/types/responses/response_audio_delta_event.py,sha256=mXPosLnDn72HLG-Lk3EdyOw7isLm3HgpqQoYkG6XrJY,515 -openai/types/responses/response_audio_done_event.py,sha256=26KUM9PJlWIQi80FKo5TSD9lKJh7JnPHnUCD5cqIcrg,414 -openai/types/responses/response_audio_transcript_delta_event.py,sha256=Q3nSbPpT5Ij3iIvpweMF9KCct20B8MWJWOFV5pVqC8k,533 -openai/types/responses/response_audio_transcript_done_event.py,sha256=92_yKmcs8ILjaA6NeoZR1wuzUS0VXLzCfMNcdRji6-o,457 -openai/types/responses/response_code_interpreter_call_code_delta_event.py,sha256=mPveF26pvu_3esV1tMUnqfsT_NnZ1HWeqNM4F38NqUU,840 -openai/types/responses/response_code_interpreter_call_code_done_event.py,sha256=M5bmLyCJX8YFJv4GPtPBQZuXvt-ObQE9fztWnMli9rU,806 -openai/types/responses/response_code_interpreter_call_completed_event.py,sha256=STgdlJ5gQFLJeDwJGTGgvKKaJ_Ihz3qMNWWVjC9Wu4E,759 -openai/types/responses/response_code_interpreter_call_in_progress_event.py,sha256=4G7za-MHwtjkSILQeV_oQ6LEIzK35ak5HE3oi1pYFzA,767 
-openai/types/responses/response_code_interpreter_call_interpreting_event.py,sha256=n8gNOqoJf47KE1T7kYE7q9bCeFnIUeODuFHmlGZcYkE,774 -openai/types/responses/response_code_interpreter_tool_call.py,sha256=Zv5gnGOcZpAd1Nj542JVyMNBObv175YKAR2DRxZ3pxM,1650 -openai/types/responses/response_code_interpreter_tool_call_param.py,sha256=QNucxgTL8laa3Fh8NjoWFdzRnFyaplIb0kGlyk8oZV8,1723 -openai/types/responses/response_completed_event.py,sha256=lpsi8GcuDN1Jk624y6TsUjpxRO39-Pt_QeuVtU8g-QA,517 -openai/types/responses/response_computer_tool_call.py,sha256=SKagtciWz5JZq8kzHZ44tmxOZ5C1CC1sfEgm0XDyPuQ,4671 -openai/types/responses/response_computer_tool_call_output_item.py,sha256=c7s0_T4GC_03P-ywObgbgsFzsqfRILWnf9FyqrZMceQ,1527 -openai/types/responses/response_computer_tool_call_output_screenshot.py,sha256=HVkJ_VJx1L9-sdIVzfdlk1EkrA3QSGJU24rcwqfvGzo,662 -openai/types/responses/response_computer_tool_call_output_screenshot_param.py,sha256=YJ3_l0_Z_sAbhIVMnBeCriUn1Izql404_YEQHLbt2Xg,656 -openai/types/responses/response_computer_tool_call_param.py,sha256=WKaBaoeXYZex1XCQU8d61gyYF47z1QsvnjAvBQqcaq4,5082 -openai/types/responses/response_content_part_added_event.py,sha256=P2CBuVxxxAgFzTP481h5hGa4IsfxYguwAQnbJLZQpcs,1337 -openai/types/responses/response_content_part_done_event.py,sha256=PosTBej2YbmUZmBWOZrxivITJIUryLWaT-jc65ZA0QY,1329 -openai/types/responses/response_conversation_param.py,sha256=sEhOVnULPS7_ZFHZ81YkLcF9yzlWd4OxWTuOvDdOcgE,340 -openai/types/responses/response_create_params.py,sha256=D3xjGayQxBOX_m7I7jZb46NWrjTEdAV92G1QRGQRbzo,13382 -openai/types/responses/response_created_event.py,sha256=YfL3CDI_3OJ18RqU898KtZyrf0Z9x8PdKJF2DSXgZrc,502 -openai/types/responses/response_custom_tool_call.py,sha256=3OFPImUjDkZPRnyf1anPoUD_UedOotTAF3wAeVs-BUM,730 -openai/types/responses/response_custom_tool_call_input_delta_event.py,sha256=AuKmvk_LEcZGNS3G8MwfAlGgizrPD5T-WwPV5XcwH7s,695 
-openai/types/responses/response_custom_tool_call_input_done_event.py,sha256=6sVGqvbECYHfrb1pqbg0zPSO6aFu4BfG5fwI-EkCHOA,681 -openai/types/responses/response_custom_tool_call_output.py,sha256=1bAznnitN1IlSI8VfRlh5yA-rq_74JAbPckOb7m4eUI,1217 -openai/types/responses/response_custom_tool_call_output_param.py,sha256=lLjueo4lDdf9ayD5tK3bqgcODwb0p4LYSO_ZOMXsoiU,1224 -openai/types/responses/response_custom_tool_call_param.py,sha256=bNJuc1YiF8SToRWjP0GiVgmttQieNPW0G5cfuKpvRhQ,771 -openai/types/responses/response_error.py,sha256=k6GX4vV8zgqJaW6Z15ij0N0Yammcgbxv3NyMxZeJsdQ,915 -openai/types/responses/response_error_event.py,sha256=695pQwl1Z2Ig7-NaicKxmOnhBDQKAcM44OiYCwl3bRc,576 -openai/types/responses/response_failed_event.py,sha256=Y0g4NnAuY3ESLzrkJ6VUqQ2CuQYBQ3gCK5ioqj4r9Rg,492 -openai/types/responses/response_file_search_call_completed_event.py,sha256=6gpE8B-RMbcnniTAZOaXG8Aaphy4A0--lbzKc7mwojg,671 -openai/types/responses/response_file_search_call_in_progress_event.py,sha256=wM-A66CcIlOiZL-78U76IjlrQo2DWEuR6Ce-vlRlNLQ,677 -openai/types/responses/response_file_search_call_searching_event.py,sha256=wdDdm9zEPEFx6dNZx1omfN4Qlchf92vXh6s6AojYWM8,671 -openai/types/responses/response_file_search_tool_call.py,sha256=DE3NhTc7hR5ZcTfHHV7FddimfuMIu5bjLIWJPRe0_9E,1664 -openai/types/responses/response_file_search_tool_call_param.py,sha256=uNt3RQNJtRIhuyJ6iEadR_1KQ_urwzszo8hdCbuof30,1737 -openai/types/responses/response_format_text_config.py,sha256=Z1uv9YytZAXaMtD_faYD6SL9Q8kOjSvRQXFkSZc0_hY,647 -openai/types/responses/response_format_text_config_param.py,sha256=T6cMHds5NYojK9fZMMldWYBypWwVmywIIbkRm5e4pMc,625 -openai/types/responses/response_format_text_json_schema_config.py,sha256=Bg7fRMlXuBz95kDZnee3cTNavvZNbPganIL4QI-rPLg,1414 -openai/types/responses/response_format_text_json_schema_config_param.py,sha256=7Uaoc1Uj60cVFL6_XRtErwi5veXJO-v_T3KIpS6XTdE,1396 -openai/types/responses/response_function_call_arguments_delta_event.py,sha256=qXcRpMudoAGWOHo-SaBDq9V9ZrIm1qtiCbBU0pPbj04,792 
-openai/types/responses/response_function_call_arguments_done_event.py,sha256=6RK5IPlbNKeEWbf87tdUaf1qdKanu2GLmTpivyCv5wA,639 -openai/types/responses/response_function_call_output_item.py,sha256=mkRJ2mCmrrmjRbvijxt-zHw9eLU6-aLjM6___SmTiTw,633 -openai/types/responses/response_function_call_output_item_list.py,sha256=5NYWEt_FNPCyQVRMiIJDJt4fYV6GWUwbW9S8hCucIpw,367 -openai/types/responses/response_function_call_output_item_list_param.py,sha256=y6vpVbdTGurYpDVbg0UFp4GhSMtwYRium9Z5bbiyyuE,774 -openai/types/responses/response_function_call_output_item_param.py,sha256=VEe_wQ8z9PN0qJbLuCwfg9n4Lwe5-WNljzmNJ-fqnwM,629 -openai/types/responses/response_function_tool_call.py,sha256=SNaR7XXA6x5hFWMVjB2gsa-VBViodKSDb72eNdbHp8Q,917 -openai/types/responses/response_function_tool_call_item.py,sha256=Xbkpq2_-OQ70p-yA---inPz6YaRU8x1R4E6eTiWN7Zs,340 -openai/types/responses/response_function_tool_call_output_item.py,sha256=F8lyiugcU6q6tG9kJUpTnF-g_xGBWgIje6mTYMErXL4,1394 -openai/types/responses/response_function_tool_call_param.py,sha256=k153-Qo1k-VPZidjuBPp2VcB6RGYGEQjGbZO2_RJ6ZY,941 -openai/types/responses/response_function_web_search.py,sha256=72x-qMcenYqDZfLRuFa3wA19jsXZn6UDnB0uRRtNi18,1794 -openai/types/responses/response_function_web_search_param.py,sha256=d_STmLPRRJki_Q533r2RlW_g3FktylWVxCvG3UI2Z5M,1937 -openai/types/responses/response_image_gen_call_completed_event.py,sha256=sOYW6800BE6U2JnP-mEU3HjubGd-KkiPwZ7jisDT_7Y,671 -openai/types/responses/response_image_gen_call_generating_event.py,sha256=1mjodLwyfkMBzcgQQhTix_EzQFNAWKnL6aycczObXJI,706 -openai/types/responses/response_image_gen_call_in_progress_event.py,sha256=DxvV9tMMGGcu5lTgIuHTL7Kbt3bO40NKg6Qd8kATvkQ,708 -openai/types/responses/response_image_gen_call_partial_image_event.py,sha256=xN3hw_RbEiD9ZoSZCf3TJZcL3JUIWCVzd5cha20s_7I,971 -openai/types/responses/response_in_progress_event.py,sha256=uvYzRXq4v6LuXY8fNyGbzbTt4tySoSskzz_hUFWc-64,518 
-openai/types/responses/response_includable.py,sha256=tkia-hgFh4ttgy53H5lJtoBWsSQh6G2DzCXj-14o8Ko,505 -openai/types/responses/response_incomplete_event.py,sha256=0EP3BJzI2E6VXcpEvaPenBKHGocEZbFjToSMMktUo7U,516 -openai/types/responses/response_input_audio.py,sha256=OUNcmcb1VfKnxNIuDRunZNGp564UHOHUreiWhmQzOUE,574 -openai/types/responses/response_input_audio_param.py,sha256=-B87XBc8ndNEgOkm7U5ZI380fEmkDcAPa9fIzIPb7q4,673 -openai/types/responses/response_input_content.py,sha256=MaZ-MNnZvhM2stSUKdhofXrdM9BzFjSJQal7UDVAQaI,542 -openai/types/responses/response_input_content_param.py,sha256=1q_4oG8Q0DAGnQlS-OBNZxMD7k69jfra7AnXkkqfyr4,537 -openai/types/responses/response_input_file.py,sha256=Sp8QjnKF3XgUbPXRRpOhJAnlpbyVdAFM8AY-9Xa3JZo,717 -openai/types/responses/response_input_file_content.py,sha256=dS2mFZFvkcuV6dau_TQHN6CnBhnmypRAzlW28gjvK3M,743 -openai/types/responses/response_input_file_content_param.py,sha256=ubBzg8iwjHRjh4uZCKCuDAUVzV20mfx8qkWqgqdjHyY,771 -openai/types/responses/response_input_file_param.py,sha256=1v_0w7IsMTeasMI97k5RtWF2XsqJGEgoV7Urzm7_Rio,715 -openai/types/responses/response_input_image.py,sha256=zHA7iFssu0aFcivwzyurAJgGpFdmzxq1BooVp5magsI,778 -openai/types/responses/response_input_image_content.py,sha256=GcoVxWL3dPW9hCjZg5VUOuXBCVC1b25SBrcXvsmhvzg,809 -openai/types/responses/response_input_image_content_param.py,sha256=PHVEiMvucEH2CClIXA6H9tdKKdvp0BA0Q-cHbTrMmpE,844 -openai/types/responses/response_input_image_param.py,sha256=5qhS_nF1GH3buGga8HSz9Ds2gVqQ8OqhfhkvssciIHE,830 -openai/types/responses/response_input_item.py,sha256=8dzWkkQ6wlCFYb_zciLP_q9pVE3Rji1rM06r4aQBAhI,9703 -openai/types/responses/response_input_item_param.py,sha256=IEeVlp_5LD79iGEOHoCu_arzmX4QdX4jFZkZXQ0fUh4,10236 -openai/types/responses/response_input_message_content_list.py,sha256=LEaQ_x6dRt3w5Sl7R-Ewu89KlLyGFhMf31OHAHPD3U8,329 -openai/types/responses/response_input_message_content_list_param.py,sha256=cbbqvs4PcK8CRsNCQqoA4w6stJCRNOQSiJozwC18urs,666 
-openai/types/responses/response_input_message_item.py,sha256=_zXthGtO0zstLvIHg9XesNAme6yNa8JOejkBYLwXm70,1029 -openai/types/responses/response_input_param.py,sha256=oMK5yD1JhNjmSbjtAObkC5FKsi39lVRiKbvO75qFjCY,10330 -openai/types/responses/response_input_text.py,sha256=L7ikIc1qFUSjB9FLeKiy6uwa2y-TkN1bMMgq7PpGOuE,375 -openai/types/responses/response_input_text_content.py,sha256=vnRLSvQcZdsIUpRqZVPLgve5CLPE36uveVv48PlfGek,389 -openai/types/responses/response_input_text_content_param.py,sha256=wcnUVntg8i7r23EOgWI_0UYZDrPg1wmKKit_RCkrmMA,455 -openai/types/responses/response_input_text_param.py,sha256=N9k0QajI4grRD44GKOz4qG4nrU_au1kVZWmwX3o0koU,441 -openai/types/responses/response_item.py,sha256=IfZyW7_vAI169DKoZFTgI3NWX5SOenQnmvenNW3Mcww,6389 -openai/types/responses/response_item_list.py,sha256=uGGJlbBtuaNadG9PjebjngvtKdXTcI7MIvF05m7qtjc,665 -openai/types/responses/response_mcp_call_arguments_delta_event.py,sha256=dq4_Z156rwK6F9_97sgEOZJHNNPxt6ZfGHX8b_MSWS8,778 -openai/types/responses/response_mcp_call_arguments_done_event.py,sha256=16ETbPuAreyAapg7rKMLWtSOlu6-mxfrkJUfVKiV9dM,752 -openai/types/responses/response_mcp_call_completed_event.py,sha256=ylzTH1FOI2Ha8PABzWOF_ais1_GgMsBmUklaTkR18bU,600 -openai/types/responses/response_mcp_call_failed_event.py,sha256=BmPnCvz72x-lgUK6x8Svmxo1y4ep0FJWYh5ROgYyuCU,582 -openai/types/responses/response_mcp_call_in_progress_event.py,sha256=Em1Xni2Ah6m7pF4wsvI_7Q0UMIlHsd75uF0r2Z6RI14,638 -openai/types/responses/response_mcp_list_tools_completed_event.py,sha256=3tLqKFzakR7H9_gPdYBzyLlKmIOrjtWuULex2069EY0,637 -openai/types/responses/response_mcp_list_tools_failed_event.py,sha256=NhjpRJ5jTrsc7qhQYL9aKTdL6FT6LClZB03G25WySQM,604 -openai/types/responses/response_mcp_list_tools_in_progress_event.py,sha256=_mfZNKGLIVvEmvmfBie4Q5QMUmzAiSyjdHQdORfcqWY,646 -openai/types/responses/response_output_item.py,sha256=k33Sd8vc5em2oSohmaOkI9RmWLrDbksf1bKI0qmygMk,5140 
-openai/types/responses/response_output_item_added_event.py,sha256=ct7JDhk7EzyD7oDFVFx1X8T2hblAuDQea3GPXY61Fzw,644 -openai/types/responses/response_output_item_done_event.py,sha256=adnds7wknAbha4-USAUosKuQTMFwA58pZC842VUrJO0,652 -openai/types/responses/response_output_message.py,sha256=FXVWYe6pptTXvCxwadX602dL4xNjl1GKugTOrlFCBuU,1104 -openai/types/responses/response_output_message_param.py,sha256=VfnkR1ClDhUq3uoGsrp-HmmYoDmkY6X3wNcdXC7NHjU,1148 -openai/types/responses/response_output_refusal.py,sha256=oraX9ZXcD4B7w8t9jcbZPACp-8puytJX_1SSQfTAy_M,388 -openai/types/responses/response_output_refusal_param.py,sha256=kCxtRvVJ6PF75Svmd3JUXyV_W-m9VqV-SpjSe6VUt3Y,454 -openai/types/responses/response_output_text.py,sha256=dZwIefV0zZmQJZ-7jfbgQwu6BJRHuFlG3V_AjxNRy3s,2810 -openai/types/responses/response_output_text_annotation_added_event.py,sha256=xGlSoFd2n9hjLeVKCQPh-yBtI2uS-d3ckJBHHmEoXg4,963 -openai/types/responses/response_output_text_param.py,sha256=H9Hq_D5Unp1Y1m4QDblzpcJiZ-5yDuhCtQSvIYSVddY,3113 -openai/types/responses/response_prompt.py,sha256=hIkV3qs1eSvczvxif_w-QSAIRuUjNc-Iukl447udRQ4,936 -openai/types/responses/response_prompt_param.py,sha256=SC4_UYJudF-inMfJ-PBNRGPOO0gNE9IbQ3ZO0loqzVY,1027 -openai/types/responses/response_queued_event.py,sha256=EDgtn58yhHg9784KjOwIK5_qRxZOnRdX25gKNMCt958,508 -openai/types/responses/response_reasoning_item.py,sha256=UOuGhZL0BqW4-HGD5PKCgWqFCNFUXUIobRDbPBNrytQ,1426 -openai/types/responses/response_reasoning_item_param.py,sha256=NuF_7zDG8x2yhyq-7A8u0oIlIzgZ3bZQotCqHZq0upk,1539 -openai/types/responses/response_reasoning_summary_part_added_event.py,sha256=wFecLMHuG4cmznOQvr9lD31qg9ebU8E6T7IVXxTR3EM,1006 -openai/types/responses/response_reasoning_summary_part_done_event.py,sha256=VhU-pOK6fGfCsarOUZ5PD-GTHIvKspOuiWqG709_KMM,997 -openai/types/responses/response_reasoning_summary_text_delta_event.py,sha256=GtOuch2QaTXItNJR9hk0Y9TD5s_INjc22a9-e52KfBM,846 
-openai/types/responses/response_reasoning_summary_text_done_event.py,sha256=_fPOh7N6naMEHcRv42nUlb9vKC9lI8BJ0ll20T1ejzg,833 -openai/types/responses/response_reasoning_text_delta_event.py,sha256=Bv6wVhRCIve81iyl8287xssRVbg1SRZA8__GCx3Lrec,841 -openai/types/responses/response_reasoning_text_done_event.py,sha256=4F30ObYxJKBjjoXbz5Vsij4PVWo_5M3FjPlMTT8Q29Q,788 -openai/types/responses/response_refusal_delta_event.py,sha256=ss7m9NX5doTFE6g79k3iBK_z5gXstGFeM2Z2gcO-cPo,770 -openai/types/responses/response_refusal_done_event.py,sha256=0iI5jIbuDuHAPnzSK0zWVf8RdjiXTt1HoYEVy4ngIKI,775 -openai/types/responses/response_retrieve_params.py,sha256=Y_4UacCQ7xUYXc7_QTCJt-zLzIuv-PWocNQ1k0RnPsw,2372 -openai/types/responses/response_status.py,sha256=289NTnFcyk0195A2E15KDILXNLpHbfo6q4tcvezYWgs,278 -openai/types/responses/response_stream_event.py,sha256=uPEbNTxXOaiEFRVt_PbdeecyfS9rgjaYU7m15NIvSbo,6916 -openai/types/responses/response_text_config.py,sha256=dM28UJfEjLSKBcRHNmBQJjkZSVdZ-vDFccPTVmXYs00,1352 -openai/types/responses/response_text_config_param.py,sha256=348GrnnGUF8fGEfRSW-Vw1wFoqTqQw7FfcgIvc1usCg,1381 -openai/types/responses/response_text_delta_event.py,sha256=e96nx3l-1Q3r9jCGyGgiH-siauP5Ka4LJ8THgUrkEXk,1374 -openai/types/responses/response_text_done_event.py,sha256=PDENYq1-kdZD19eps5qY3-Ih96obk75iUSVO-XUmkig,1380 -openai/types/responses/response_usage.py,sha256=DFA8WjqKGl7iGCmZl2G18y48xT82UTZ_NCKm0MAuRDY,945 -openai/types/responses/response_web_search_call_completed_event.py,sha256=gWv2xgDeGbvN0oqm96uuecGBy1SkbF_yNA56h5hMlOE,698 -openai/types/responses/response_web_search_call_in_progress_event.py,sha256=XxOSK7EI1d0WDkfG5jgU_LIXz72CGixqp4uYW88-dY8,704 -openai/types/responses/response_web_search_call_searching_event.py,sha256=sYr9K30DjDeD_h5Jj41OwoTrvUkF--dCQGnQuEnggcw,698 -openai/types/responses/tool.py,sha256=65BfcGZ-bh-qjN7Fjl8xK2pq0JTPHow7kbcvN9mfL2o,8569 -openai/types/responses/tool_choice_allowed.py,sha256=I0bB6Gq7aIswr3mWH3TN6aOgtun01Kaopa72AhZJG9I,1023 
-openai/types/responses/tool_choice_allowed_param.py,sha256=PMokbtPLR48_b_ZNe0AMyZx-C-OrcwPsbeX31DpoIwE,1107 -openai/types/responses/tool_choice_custom.py,sha256=xi7cPj8VJn4qYXXSkZwFoV_WdYbyGwEVTDIcdHL9AQo,382 -openai/types/responses/tool_choice_custom_param.py,sha256=0ZHVrSkRkVFuCC27k6TQKy2hBoCDt6NB2f8fVnLNrXM,448 -openai/types/responses/tool_choice_function.py,sha256=X51PqYW8HMrJcxSkaTCF-uDG_KetD_6WqU1TgmCPR-k,384 -openai/types/responses/tool_choice_function_param.py,sha256=UzIJgiqJV7fj0nRDWyzwxpwJmZd0czZVciq4ffvfl_4,450 -openai/types/responses/tool_choice_mcp.py,sha256=iq6CwniC-hOQ9TmH4D4Wo6hT5V0J_4XbZ1TTtf0xEf8,481 -openai/types/responses/tool_choice_mcp_param.py,sha256=E4VcW1YhjYJgYaSw74NuluyM9WylELUZIs7-s4u-N1A,540 -openai/types/responses/tool_choice_options.py,sha256=gJHrNT72mRECrN7hQKRHAOA-OS0JJo51YnXvUcMfqMQ,237 -openai/types/responses/tool_choice_types.py,sha256=-3FM-g4h0122Aq2CxEqiNt2A4hjYWPrJJ9MKh_hEROs,740 -openai/types/responses/tool_choice_types_param.py,sha256=_EqjVdOTy8bjKho3ZGdwYAgc11PaXp804jkBvj9dCz4,838 -openai/types/responses/tool_param.py,sha256=0nPCOu3JjSf_onMbVIDSNIuXI4KZZ7MS6U1JBOy4y14,8516 -openai/types/responses/web_search_preview_tool.py,sha256=jIoIdmR4tzsIjT2a_5II0tHCnJsea4HTirBR2u00hFk,1469 -openai/types/responses/web_search_preview_tool_param.py,sha256=W64kS2h1cm2lY9ODnp_YoLojRyjei9SZq2UU7X2AJ48,1496 -openai/types/responses/web_search_tool.py,sha256=WuPSLv-W8j8LQvUyHA7S6gGtJrQmGP_t0QCrbh6qPYI,1821 -openai/types/responses/web_search_tool_param.py,sha256=6iMdaKKYaO7bTUzSfmfw3owAjiQGh55qgjr8E1geCPc,1862 -openai/types/shared/__init__.py,sha256=EVk-X1P3R7YWmlYmrbpMrjAeZEfVfudF-Tw7fbOC90o,1267 -openai/types/shared/all_models.py,sha256=vRGGY6__Y_2koZ0uqHZGA4GKvV0yHj5cEIFPZMt7uFQ,687 -openai/types/shared/chat_model.py,sha256=6VpDw8bZPrezzjN8UfBwKpIWokakgU-12rdLzQulLHo,1731 -openai/types/shared/comparison_filter.py,sha256=mLGAFzMffGvjnzp63nlUrZTJbEeyI6xZJ2s1PNatLWg,854 
-openai/types/shared/compound_filter.py,sha256=QhKPeKKdtWvMDDO85YLKUGgdxBQfrYiFimjadAM31Bs,581 -openai/types/shared/custom_tool_input_format.py,sha256=cO7pX1O0k8J6FgERYUqNjafjjYiwS7GCmIw3E_xSiVQ,773 -openai/types/shared/error_object.py,sha256=G7SGPZ9Qw3gewTKbi3fK69eM6L2Ur0C2D57N8iEapJA,305 -openai/types/shared/function_definition.py,sha256=2F07J5Q7r2Iwg74dC5rarhwWTnt579Y5LUrNc8OdqSc,1475 -openai/types/shared/function_parameters.py,sha256=Dkc_pm98zCKyouQmYrl934cK8ZWX7heY_IIyunW8x7c,236 -openai/types/shared/metadata.py,sha256=DC0SFof2EeVvFK0EsmQH8W5b_HnpI_bdp47s51E5LKw,213 -openai/types/shared/reasoning.py,sha256=fXs23Decys71ETmmMWf5wP5HpN4YXl3eb6gtSNEpq-Y,1403 -openai/types/shared/reasoning_effort.py,sha256=oK9lKsN8e2SZ8jV49MZ7PBxbnCP1MxGUQDLYMxlGQYE,279 -openai/types/shared/response_format_json_object.py,sha256=E1KGMUZnaj8fLnQXQC8_m9rMp8F6vIqeR9T1RmFNvE4,352 -openai/types/shared/response_format_json_schema.py,sha256=SsiLtgrudK4Dvxi2Kx0qUFiBQt26y5uGw_33te7L0Gg,1568 -openai/types/shared/response_format_text.py,sha256=p_JASD-xQ4ZveWnAtSoB8a19kVYc9vOZeg6WRMYHKDE,326 -openai/types/shared/response_format_text_grammar.py,sha256=PvmYxTEH_2r2nJsacTs6_Yw88ED1VbBuQJy_jZVbZwo,418 -openai/types/shared/response_format_text_python.py,sha256=Rfkd4jhzndD0Nw5H6LLnR4Y3MySyTz331MwoxcBL-Ek,342 -openai/types/shared/responses_model.py,sha256=qW98KTljofKYY-AiIF_KKpwyGLL5Uk8iQMidmR5FEGc,697 -openai/types/shared_params/__init__.py,sha256=Jtx94DUXqIaXTb7Sgsx3MPoB9nViBlYEy0DlQ3VcOJU,976 -openai/types/shared_params/chat_model.py,sha256=S0JO3lMtaZ7CG8ZvjYcRls-CF5qLL7AUUDuj1peeKDE,1767 -openai/types/shared_params/comparison_filter.py,sha256=TmykbxNA41bzNZVXET9T5oBn3hkEXo-1q-k6DnxOAFk,962 -openai/types/shared_params/compound_filter.py,sha256=dJrqaoOVY8QBEZPCjjD3hhf4qwcJLJ26jgK4N85bEFc,646 -openai/types/shared_params/custom_tool_input_format.py,sha256=ifDywFgUir2J2CPm1vyNcGnwl6nJFQsMFF1-qOvAdJA,769 
-openai/types/shared_params/function_definition.py,sha256=6JjuRmXIofTv76GCC4XFssqgZw-iKbBazjWqKerfq6Q,1510 -openai/types/shared_params/function_parameters.py,sha256=UvxKz_3b9b5ECwWr8RFrIH511htbU2JZsp9Z9BMkF-o,272 -openai/types/shared_params/metadata.py,sha256=YCb9eFyy17EuLwtVHjUBUjW2FU8SbWp4NV-aEr_it54,249 -openai/types/shared_params/reasoning.py,sha256=K0ZpgWvM7S5ZEKX5jzno0Qu0YyqcBmkLLjTw3eWmR-g,1417 -openai/types/shared_params/reasoning_effort.py,sha256=d_oflloFU0aeSyJrEZKwpwi0kZNUsg8rEZ4XUU-5eoE,315 -openai/types/shared_params/response_format_json_object.py,sha256=aEdVMoEkiEVE_YX6pfj5VqRVqfRIPju5hU-lqNubhVE,398 -openai/types/shared_params/response_format_json_schema.py,sha256=iCr7oU2jaHmVAi60mG90uksfv1QQjtvrVT9Vd3paE0k,1529 -openai/types/shared_params/response_format_text.py,sha256=N3-JNmbAjreYMj8KBkYb5kZhbblR9ds_6vwYLzUAWDA,372 -openai/types/shared_params/responses_model.py,sha256=Kknm1-mp7yNtRcqvQet16nNZR-3F55Chb_H0S7Sj3SE,741 -openai/types/static_file_chunking_strategy.py,sha256=JmAzT2-9eaG9ZTH8X0jS1IVCOE3Jgi1PzE11oMST3Fc,595 -openai/types/static_file_chunking_strategy_object.py,sha256=MTwQ1olGZHoC26xxCKw0U0RvWORIJLgWzNWRQ1V0KmA,424 -openai/types/static_file_chunking_strategy_object_param.py,sha256=OwAOs1PT2ygBm4RpzHVVsr-93-Uqjg_IcCoNhtEPT7I,508 -openai/types/static_file_chunking_strategy_param.py,sha256=kCMmgyOxO0XIF2wjCWjUXtyn9S6q_7mNmyUCauqrjsg,692 -openai/types/upload.py,sha256=lFrEOsbVJwQ6jzzhn307AvBVjyF85lYHdig5ZvQQypE,1207 -openai/types/upload_complete_params.py,sha256=PW5mCxJt7eg7F5sttX5LCE43m9FX8oZs3P5i9HvjRoU,527 -openai/types/upload_create_params.py,sha256=n9BNQ7GasHGCQf7poS5NKSEQM8eUCzb6rRBVFqylmlw,1507 -openai/types/uploads/__init__.py,sha256=fDsmd3L0nIWbFldbViOLvcQavsFA4SL3jsXDfAueAck,242 -openai/types/uploads/part_create_params.py,sha256=pBByUzngaj70ov1knoSo_gpeBjaWP9D5EdiHwiG4G7U,362 -openai/types/uploads/upload_part.py,sha256=U9953cr9lJJLWEfhTiwHphRzLKARq3gWAWqrjxbhTR4,590 
-openai/types/vector_store.py,sha256=hS30tSgL_s1BC04nIHfZL95-uD60t5Oe44JUQnVD8T8,2470 -openai/types/vector_store_create_params.py,sha256=92Z1F3rAUnsniQ2lRHAZHpEE0pSVAsbQCWhCGBvEXxc,1894 -openai/types/vector_store_deleted.py,sha256=BbtnlZ0Z5f4ncDyHLKrEfmY6Uuc0xOg3WBxvMoR8Wxk,307 -openai/types/vector_store_list_params.py,sha256=KeSeQaEdqO2EiPEVtq1Nun-uRRdkfwW0P8aHeCmL5zA,1226 -openai/types/vector_store_search_params.py,sha256=EnYfNFP4dgovZeLLPeGofA3TCJatJDYt4aoppMOto9g,1262 -openai/types/vector_store_search_response.py,sha256=qlhdAjqLPZg_JQmsqQCzAgT2Pxc2C-vGZmh64kR8y-M,1156 -openai/types/vector_store_update_params.py,sha256=RJm0qkqLOsHjhPIiOWPNwkrEIqHjDukyZT52mle4gWc,1240 -openai/types/vector_stores/__init__.py,sha256=F_DyW6EqxOJTBPKE5LUSzgTibcZM6axMo-irysr52ro,818 -openai/types/vector_stores/file_batch_create_params.py,sha256=rHysxuqX1vfxUqsIfaLYJMi4CkmMSJEmDWBjTb_ntdg,2707 -openai/types/vector_stores/file_batch_list_files_params.py,sha256=FPpQvCQI2skyLB8YCuwdCj7RbO9ba1UjaHAtvrWxAbs,1451 -openai/types/vector_stores/file_content_response.py,sha256=uAFvFDE_NVRzg0xm1fLJ2zEd62qzq8rPYko7xpDjbaU,367 -openai/types/vector_stores/file_create_params.py,sha256=nTHWG0OMqqLRjWFH2qbif89fpCJQCzGGdXDjCqPbq1Y,1229 -openai/types/vector_stores/file_list_params.py,sha256=AIzmNH1oFuy-qlpRhj9eXu9yyTA-2z_IppLYFclMtZw,1385 -openai/types/vector_stores/file_update_params.py,sha256=NGah01luDW_W3psfsYa3ShlswH8pAhC_EebLMvd925I,781 -openai/types/vector_stores/vector_store_file.py,sha256=O-9uanENp8lAUVv_wmXzAwEHi-VVzIKgLoX5s0YfFpM,2261 -openai/types/vector_stores/vector_store_file_batch.py,sha256=MnRehH5Mc0VOhSCZtniMDz8eH72syy2RScmECR_BEhE,1456 -openai/types/vector_stores/vector_store_file_deleted.py,sha256=sOds3FSmDBFhe25zoSAz2vHsmG2bo4s2PASgB_M6UU0,321 -openai/types/video.py,sha256=V6kjO9wReYBZK4-Py315LhzMHEXjM74rlJjYOft3iWs,1630 -openai/types/video_create_error.py,sha256=LeUNv9jRmwiBvpinM8oXZfS2aLaKwPhZYBRzctCvj1c,220 
-openai/types/video_create_params.py,sha256=u1b0NOeaIcwTV45Nevs0TSblUidhuFTX8p9rexqtnhw,852 -openai/types/video_delete_response.py,sha256=OBDgodTKpz3knKlFr9yVEkT_pjNTxGLO1Ol2iYjZSzI,464 -openai/types/video_download_content_params.py,sha256=MXcSQOL67hzODH__CRf7g6i74hjXJG9I0zPIqqBjnlU,405 -openai/types/video_list_params.py,sha256=pa8Nd6-hrc2fF8ZQRf4udebbMXpMDEKDrAAH9niSlgk,550 -openai/types/video_model.py,sha256=Yn91GHbYqN9mAOyWLM_QdXfzFZT_oRH0f2qqDHhDMnw,219 -openai/types/video_remix_params.py,sha256=cFh9Tuaa1HH-cWyScfHPlw7N8nU-fg_AW0BL7S1yjR4,346 -openai/types/video_seconds.py,sha256=HyRb-NR4sVEGe2DoYZIQGig4kOrbbFfRYiqVejAgFbg,215 -openai/types/video_size.py,sha256=H1o0EhMbmicXdvaTC3wL-DnghhXzB7EkBChHL-gqdbI,243 -openai/types/webhooks/__init__.py,sha256=T8XC8KrJNXiNUPevxpO4PJi__C-HZgd0TMg7D2bRPh4,1828 -openai/types/webhooks/batch_cancelled_webhook_event.py,sha256=9eadXH42hNN8ZEnkvT1xP4-tXJSSU1EnFo0407UphUU,770 -openai/types/webhooks/batch_completed_webhook_event.py,sha256=HTcSImBaYwlnm8wQdvjPaWzyFIS-KBSSA_E2WkQ1uqg,770 -openai/types/webhooks/batch_expired_webhook_event.py,sha256=fbrvrZrbQZNf_aPBm08HSD99NFaAHVjv4nQg3pNmh9w,756 -openai/types/webhooks/batch_failed_webhook_event.py,sha256=WRxFObJMtp7zPJTl_pa4ppVhKSxHwNMvQdqyR0CqdV8,751 -openai/types/webhooks/eval_run_canceled_webhook_event.py,sha256=hLoN9c6C5QDPJEOLpOInSiGRgqsrtZmwE3NIOjiowtM,757 -openai/types/webhooks/eval_run_failed_webhook_event.py,sha256=rMoiy66aVGgyA2Fxu3ypg1Q1moIj0yDyMsL4ZVJAe6s,743 -openai/types/webhooks/eval_run_succeeded_webhook_event.py,sha256=GFRFtx7JxtUGeWEoQRpbeE3oPoOhPhW1BskJOxuaFI8,758 -openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py,sha256=kFx4imcbFxTD4L4G6h6kSINfX7yLpo4GQDAuYBGd9wM,802 -openai/types/webhooks/fine_tuning_job_failed_webhook_event.py,sha256=YjfTRr2mvpiJB4IZkzcFNNLwnhrUKVKkLP7RpPgHTnA,783 -openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py,sha256=wxUg8-llqFJ6K--LI3JHXgTJ1IY2vCD7rO1eq8RWoYo,798 
-openai/types/webhooks/realtime_call_incoming_webhook_event.py,sha256=E7mD7ZO6_1v_SAn60-8pDzR5q2WRM0cFygkJ1I-pUpo,1019 -openai/types/webhooks/response_cancelled_webhook_event.py,sha256=60u91Tcsy_qNaPDqQM_tqWQHXVoSB0-rodF3Llkzzmk,776 -openai/types/webhooks/response_completed_webhook_event.py,sha256=OGSfVNA6Vgugplf4LxXhSkk-ScVvElekoQeksT93z_Q,776 -openai/types/webhooks/response_failed_webhook_event.py,sha256=SWMK_kc1o8WKeQPZudQx7VwU25oAHf_yLR6fKdXKd2E,757 -openai/types/webhooks/response_incomplete_webhook_event.py,sha256=O0LrpnzzxClQf0vQOwF6s_5EAUxM4TdTfEd8uc84iLs,782 -openai/types/webhooks/unwrap_webhook_event.py,sha256=KrfVL0-NsOuWHtRGiJfGMYwI8blUr09vUqUVJdZNpDQ,2039 -openai/types/websocket_connection_options.py,sha256=4cAWpv1KKp_9pvnez7pGYzO3s8zh1WvX2xpBhpe-96k,1840 -openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62 diff --git a/portkey_ai/_vendor/openai/_base_client.py b/portkey_ai/_vendor/openai/_base_client.py index b477980d..94c2d1b5 100644 --- a/portkey_ai/_vendor/openai/_base_client.py +++ b/portkey_ai/_vendor/openai/_base_client.py @@ -9,6 +9,7 @@ import inspect import logging import platform +import warnings import email.utils from types import TracebackType from random import random @@ -51,9 +52,11 @@ ResponseT, AnyMapping, PostParser, + BinaryTypes, RequestFiles, HttpxSendArgs, RequestOptions, + AsyncBinaryTypes, HttpxRequestFiles, ModelBuilderProtocol, not_given, @@ -479,8 +482,19 @@ def _build_request( retries_taken: int = 0, ) -> httpx.Request: if log.isEnabledFor(logging.DEBUG): - log.debug("Request options: %s", model_dump(options, exclude_unset=True)) - + log.debug( + "Request options: %s", + model_dump( + options, + exclude_unset=True, + # Pydantic v1 can't dump every type we support in content, so we exclude it for now. 
+ exclude={ + "content", + } + if PYDANTIC_V1 + else {}, + ), + ) kwargs: dict[str, Any] = {} json_data = options.json_data @@ -534,7 +548,13 @@ def _build_request( is_body_allowed = options.method.lower() != "get" if is_body_allowed: - if isinstance(json_data, bytes): + if options.content is not None and json_data is not None: + raise TypeError("Passing both `content` and `json_data` is not supported") + if options.content is not None and files is not None: + raise TypeError("Passing both `content` and `files` is not supported") + if options.content is not None: + kwargs["content"] = options.content + elif isinstance(json_data, bytes): kwargs["content"] = json_data else: kwargs["json"] = json_data if is_given(json_data) else None @@ -763,41 +783,6 @@ def _should_retry(self, response: httpx.Response) -> bool: return False return True - - # # Note: CSG: Commenting this logic for reference to the original code - # # Note: this is not a standard header - # should_retry_header = response.headers.get("x-should-retry") - - # # If the server explicitly says whether or not to retry, obey. - # if should_retry_header == "true": - # log.debug("Retrying as header `x-should-retry` is set to `true`") - # return True - # if should_retry_header == "false": - # log.debug("Not retrying as header `x-should-retry` is set to `false`") - # return False - - # # Retry on request timeouts. - # if response.status_code == 408: - # log.debug("Retrying due to status code %i", response.status_code) - # return True - - # # Retry on lock timeouts. - # if response.status_code == 409: - # log.debug("Retrying due to status code %i", response.status_code) - # return True - - # # Retry on rate limits. - # if response.status_code == 429: - # log.debug("Retrying due to status code %i", response.status_code) - # return True - - # # Retry internal errors. 
- # if response.status_code >= 500: - # log.debug("Retrying due to status code %i", response.status_code) - # return True - - # log.debug("Not retrying") - # return False def _idempotency_key(self) -> str: return f"stainless-python-retry-{uuid.uuid4()}" @@ -1228,6 +1213,7 @@ def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: BinaryTypes | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: Literal[False] = False, @@ -1240,6 +1226,7 @@ def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: BinaryTypes | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: Literal[True], @@ -1253,6 +1240,7 @@ def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: BinaryTypes | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: bool, @@ -1265,13 +1253,25 @@ def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: BinaryTypes | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: bool = False, stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if files is not None and content is not None: + raise TypeError("Passing both `files` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. 
" + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) opts = FinalRequestOptions.construct( - method="post", url=path, json_data=body, files=to_httpx_files(files), **options + method="post", url=path, json_data=body, content=content, files=to_httpx_files(files), **options ) return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) @@ -1281,9 +1281,24 @@ def patch( *, cast_to: Type[ResponseT], body: Body | None = None, + content: BinaryTypes | None = None, + files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if files is not None and content is not None: + raise TypeError("Passing both `files` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. " + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) + opts = FinalRequestOptions.construct( + method="patch", url=path, json_data=body, content=content, files=to_httpx_files(files), **options + ) return self.request(cast_to, opts) def put( @@ -1292,11 +1307,23 @@ def put( *, cast_to: Type[ResponseT], body: Body | None = None, + content: BinaryTypes | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if files is not None and content is not None: + raise TypeError("Passing both `files` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. 
" + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) opts = FinalRequestOptions.construct( - method="put", url=path, json_data=body, files=to_httpx_files(files), **options + method="put", url=path, json_data=body, content=content, files=to_httpx_files(files), **options ) return self.request(cast_to, opts) @@ -1306,9 +1333,19 @@ def delete( *, cast_to: Type[ResponseT], body: Body | None = None, + content: BinaryTypes | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. " + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) + opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options) return self.request(cast_to, opts) def get_api_list( @@ -1763,6 +1800,7 @@ async def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: AsyncBinaryTypes | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, stream: Literal[False] = False, @@ -1775,6 +1813,7 @@ async def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: AsyncBinaryTypes | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, stream: Literal[True], @@ -1788,6 +1827,7 @@ async def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: AsyncBinaryTypes | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, stream: bool, @@ -1800,13 +1840,25 @@ async def post( *, cast_to: Type[ResponseT], body: Body | None = None, + content: AsyncBinaryTypes | None = None, files: RequestFiles | None = None, 
options: RequestOptions = {}, stream: bool = False, stream_cls: type[_AsyncStreamT] | None = None, ) -> ResponseT | _AsyncStreamT: + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if files is not None and content is not None: + raise TypeError("Passing both `files` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. " + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) opts = FinalRequestOptions.construct( - method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options + method="post", url=path, json_data=body, content=content, files=await async_to_httpx_files(files), **options ) return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) @@ -1816,9 +1868,29 @@ async def patch( *, cast_to: Type[ResponseT], body: Body | None = None, + content: AsyncBinaryTypes | None = None, + files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if files is not None and content is not None: + raise TypeError("Passing both `files` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. 
" + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) + opts = FinalRequestOptions.construct( + method="patch", + url=path, + json_data=body, + content=content, + files=await async_to_httpx_files(files), + **options, + ) return await self.request(cast_to, opts) async def put( @@ -1827,11 +1899,23 @@ async def put( *, cast_to: Type[ResponseT], body: Body | None = None, + content: AsyncBinaryTypes | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if files is not None and content is not None: + raise TypeError("Passing both `files` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. " + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) opts = FinalRequestOptions.construct( - method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options + method="put", url=path, json_data=body, content=content, files=await async_to_httpx_files(files), **options ) return await self.request(cast_to, opts) @@ -1841,9 +1925,19 @@ async def delete( *, cast_to: Type[ResponseT], body: Body | None = None, + content: AsyncBinaryTypes | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) + if body is not None and content is not None: + raise TypeError("Passing both `body` and `content` is not supported") + if isinstance(body, bytes): + warnings.warn( + "Passing raw bytes as `body` is deprecated and will be removed in a future version. 
" + "Please pass raw bytes via the `content` parameter instead.", + DeprecationWarning, + stacklevel=2, + ) + opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options) return await self.request(cast_to, opts) def get_api_list( diff --git a/portkey_ai/_vendor/openai/_models.py b/portkey_ai/_vendor/openai/_models.py index af71a918..5cca20c6 100644 --- a/portkey_ai/_vendor/openai/_models.py +++ b/portkey_ai/_vendor/openai/_models.py @@ -2,7 +2,22 @@ import os import inspect -from typing import TYPE_CHECKING, Any, Type, Tuple, Union, Generic, TypeVar, Callable, Optional, cast +import weakref +from typing import ( + IO, + TYPE_CHECKING, + Any, + Type, + Tuple, + Union, + Generic, + TypeVar, + Callable, + Iterable, + Optional, + AsyncIterable, + cast, +) from datetime import date, datetime from typing_extensions import ( List, @@ -281,15 +296,16 @@ def model_dump( mode: Literal["json", "python"] | str = "python", include: IncEx | None = None, exclude: IncEx | None = None, + context: Any | None = None, by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal["none", "warn", "error"] = True, - context: dict[str, Any] | None = None, - serialize_as_any: bool = False, fallback: Callable[[Any], Any] | None = None, + serialize_as_any: bool = False, ) -> dict[str, Any]: """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump @@ -297,16 +313,24 @@ def model_dump( Args: mode: The mode in which `to_python` should run. - If mode is 'json', the dictionary will only contain JSON serializable types. - If mode is 'python', the dictionary may contain any Python objects. - include: A list of fields to include in the output. - exclude: A list of fields to exclude from the output. + If mode is 'json', the output will only contain JSON serializable types. 
+ If mode is 'python', the output may contain non-JSON-serializable Python objects. + include: A set of fields to include in the output. + exclude: A set of fields to exclude from the output. + context: Additional context to pass to the serializer. by_alias: Whether to use the field's alias in the dictionary key if defined. - exclude_unset: Whether to exclude fields that are unset or None from the output. - exclude_defaults: Whether to exclude fields that are set to their default value from the output. - exclude_none: Whether to exclude fields that have a value of `None` from the output. - round_trip: Whether to enable serialization and deserialization round-trip support. - warnings: Whether to log warnings when invalid fields are encountered. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that are set to their default value. + exclude_none: Whether to exclude fields that have a value of `None`. + exclude_computed_fields: Whether to exclude computed fields. + While this can be useful for round-tripping, it is usually recommended to use the dedicated + `round_trip` parameter instead. + round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T]. + warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, + "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. + fallback: A function to call when an unknown value is encountered. If not provided, + a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised. + serialize_as_any: Whether to serialize fields with duck-typing serialization behavior. Returns: A dictionary representation of the model. 
@@ -323,6 +347,8 @@ def model_dump( raise ValueError("serialize_as_any is only supported in Pydantic v2") if fallback is not None: raise ValueError("fallback is only supported in Pydantic v2") + if exclude_computed_fields != False: + raise ValueError("exclude_computed_fields is only supported in Pydantic v2") dumped = super().dict( # pyright: ignore[reportDeprecated] include=include, exclude=exclude, @@ -339,15 +365,17 @@ def model_dump_json( self, *, indent: int | None = None, + ensure_ascii: bool = False, include: IncEx | None = None, exclude: IncEx | None = None, + context: Any | None = None, by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal["none", "warn", "error"] = True, - context: dict[str, Any] | None = None, fallback: Callable[[Any], Any] | None = None, serialize_as_any: bool = False, ) -> str: @@ -379,6 +407,10 @@ def model_dump_json( raise ValueError("serialize_as_any is only supported in Pydantic v2") if fallback is not None: raise ValueError("fallback is only supported in Pydantic v2") + if ensure_ascii != False: + raise ValueError("ensure_ascii is only supported in Pydantic v2") + if exclude_computed_fields != False: + raise ValueError("exclude_computed_fields is only supported in Pydantic v2") return super().json( # type: ignore[reportDeprecated] indent=indent, include=include, @@ -598,6 +630,9 @@ class CachedDiscriminatorType(Protocol): __discriminator__: DiscriminatorDetails +DISCRIMINATOR_CACHE: weakref.WeakKeyDictionary[type, DiscriminatorDetails] = weakref.WeakKeyDictionary() + + class DiscriminatorDetails: field_name: str """The name of the discriminator field in the variant class, e.g. 
@@ -640,8 +675,9 @@ def __init__( def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None: - if isinstance(union, CachedDiscriminatorType): - return union.__discriminator__ + cached = DISCRIMINATOR_CACHE.get(union) + if cached is not None: + return cached discriminator_field_name: str | None = None @@ -694,7 +730,7 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, discriminator_field=discriminator_field_name, discriminator_alias=discriminator_alias, ) - cast(CachedDiscriminatorType, union).__discriminator__ = details + DISCRIMINATOR_CACHE.setdefault(union, details) return details @@ -805,6 +841,7 @@ class FinalRequestOptionsInput(TypedDict, total=False): timeout: float | Timeout | None files: HttpxRequestFiles | None idempotency_key: str + content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None] json_data: Body extra_json: AnyMapping follow_redirects: bool @@ -823,6 +860,7 @@ class FinalRequestOptions(pydantic.BaseModel): post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() follow_redirects: Union[bool, None] = None + content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None] = None # It should be noted that we cannot use `json` here as that would override # a BaseModel method in an incompatible fashion. 
json_data: Union[Body, None] = None diff --git a/portkey_ai/_vendor/openai/_streaming.py b/portkey_ai/_vendor/openai/_streaming.py index 05c284a2..61a74266 100644 --- a/portkey_ai/_vendor/openai/_streaming.py +++ b/portkey_ai/_vendor/openai/_streaming.py @@ -55,49 +55,51 @@ def __stream__(self) -> Iterator[_T]: process_data = self._client._process_response_data iterator = self._iter_events() - for sse in iterator: - if sse.data.startswith("[DONE]"): - break - - # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data - if sse.event and sse.event.startswith("thread."): - data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) - else: - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) - - # As we might not fully consume the response stream, we need to close it explicitly - response.close() + try: + for sse in iterator: + if sse.data.startswith("[DONE]"): + break + + # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data + if sse.event and sse.event.startswith("thread."): + data = sse.json() + + if sse.event == "error" and is_mapping(data) and data.get("error"): + message 
= None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) + else: + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) + + finally: + # Ensure the response is closed even if the consumer doesn't read all data + response.close() def __enter__(self) -> Self: return self @@ -156,49 +158,51 @@ async def __stream__(self) -> AsyncIterator[_T]: process_data = self._client._process_response_data iterator = self._iter_events() - async for sse in iterator: - if sse.data.startswith("[DONE]"): - break - - # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data - if sse.event and sse.event.startswith("thread."): - data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) - else: - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if 
is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) - - # As we might not fully consume the response stream, we need to close it explicitly - await response.aclose() + try: + async for sse in iterator: + if sse.data.startswith("[DONE]"): + break + + # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data + if sse.event and sse.event.startswith("thread."): + data = sse.json() + + if sse.event == "error" and is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) + else: + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) + + finally: + # Ensure the response is closed even if the consumer doesn't read all data + await response.aclose() async def __aenter__(self) -> Self: return self diff --git a/portkey_ai/_vendor/openai/_types.py b/portkey_ai/_vendor/openai/_types.py index 2387d7e0..42f9df23 100644 --- a/portkey_ai/_vendor/openai/_types.py +++ b/portkey_ai/_vendor/openai/_types.py @@ -13,9 +13,11 @@ 
Mapping, TypeVar, Callable, + Iterable, Iterator, Optional, Sequence, + AsyncIterable, ) from typing_extensions import ( Set, @@ -57,6 +59,13 @@ else: Base64FileInput = Union[IO[bytes], PathLike] FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8. + + +# Used for sending raw binary data / streaming data in request bodies +# e.g. for file uploads without multipart encoding +BinaryTypes = Union[bytes, bytearray, IO[bytes], Iterable[bytes]] +AsyncBinaryTypes = Union[bytes, bytearray, IO[bytes], AsyncIterable[bytes]] + FileTypes = Union[ # file (or bytes) FileContent, @@ -247,6 +256,9 @@ class HttpxSendArgs(TypedDict, total=False): if TYPE_CHECKING: # This works because str.__contains__ does not accept object (either in typeshed or at runtime) # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285 + # + # Note: index() and count() methods are intentionally omitted to allow pyright to properly + # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr. class SequenceNotStr(Protocol[_T_co]): @overload def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... @@ -255,8 +267,6 @@ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... def __contains__(self, value: object, /) -> bool: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[_T_co]: ... - def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ... - def count(self, value: Any, /) -> int: ... def __reversed__(self) -> Iterator[_T_co]: ... 
else: # just point this to a normal `Sequence` at runtime to avoid having to special case diff --git a/portkey_ai/_vendor/openai/_utils/_sync.py b/portkey_ai/_vendor/openai/_utils/_sync.py index ad7ec71b..f6027c18 100644 --- a/portkey_ai/_vendor/openai/_utils/_sync.py +++ b/portkey_ai/_vendor/openai/_utils/_sync.py @@ -1,10 +1,8 @@ from __future__ import annotations -import sys import asyncio import functools -import contextvars -from typing import Any, TypeVar, Callable, Awaitable +from typing import TypeVar, Callable, Awaitable from typing_extensions import ParamSpec import anyio @@ -15,34 +13,11 @@ T_ParamSpec = ParamSpec("T_ParamSpec") -if sys.version_info >= (3, 9): - _asyncio_to_thread = asyncio.to_thread -else: - # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread - # for Python 3.8 support - async def _asyncio_to_thread( - func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs - ) -> Any: - """Asynchronously run function *func* in a separate thread. - - Any *args and **kwargs supplied for this function are directly passed - to *func*. Also, the current :class:`contextvars.Context` is propagated, - allowing context variables from the main thread to be accessed in the - separate thread. - - Returns a coroutine that can be awaited to get the eventual result of *func*. 
- """ - loop = asyncio.events.get_running_loop() - ctx = contextvars.copy_context() - func_call = functools.partial(ctx.run, func, *args, **kwargs) - return await loop.run_in_executor(None, func_call) - - async def to_thread( func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs ) -> T_Retval: if sniffio.current_async_library() == "asyncio": - return await _asyncio_to_thread(func, *args, **kwargs) + return await asyncio.to_thread(func, *args, **kwargs) return await anyio.to_thread.run_sync( functools.partial(func, *args, **kwargs), @@ -53,10 +28,7 @@ async def to_thread( def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: """ Take a blocking function and create an async one that receives the same - positional and keyword arguments. For python version 3.9 and above, it uses - asyncio.to_thread to run the function in a separate thread. For python version - 3.8, it uses locally defined copy of the asyncio.to_thread function which was - introduced in python 3.9. + positional and keyword arguments. Usage: diff --git a/portkey_ai/_vendor/openai/_version.py b/portkey_ai/_vendor/openai/_version.py index 9fb4c23d..eb61bdd2 100644 --- a/portkey_ai/_vendor/openai/_version.py +++ b/portkey_ai/_vendor/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "2.7.1" # x-release-please-version +__version__ = "2.16.0" # x-release-please-version diff --git a/portkey_ai/_vendor/openai/lib/_parsing/_responses.py b/portkey_ai/_vendor/openai/lib/_parsing/_responses.py index 8a1bf3cf..4bed171d 100644 --- a/portkey_ai/_vendor/openai/lib/_parsing/_responses.py +++ b/portkey_ai/_vendor/openai/lib/_parsing/_responses.py @@ -103,11 +103,16 @@ def parse_response( or output.type == "file_search_call" or output.type == "web_search_call" or output.type == "reasoning" + or output.type == "compaction" or output.type == "mcp_call" or output.type == "mcp_approval_request" or output.type == "image_generation_call" or output.type == "code_interpreter_call" or output.type == "local_shell_call" + or output.type == "shell_call" + or output.type == "shell_call_output" + or output.type == "apply_patch_call" + or output.type == "apply_patch_call_output" or output.type == "mcp_list_tools" or output.type == "exec" or output.type == "custom_tool_call" diff --git a/portkey_ai/_vendor/openai/lib/_realtime.py b/portkey_ai/_vendor/openai/lib/_realtime.py index d3c56a45..c3a98994 100644 --- a/portkey_ai/_vendor/openai/lib/_realtime.py +++ b/portkey_ai/_vendor/openai/lib/_realtime.py @@ -34,7 +34,7 @@ def create( extra_headers = {"Accept": "application/sdp", "Content-Type": "application/sdp", **(extra_headers or {})} return self._post( "/realtime/calls", - body=sdp.encode("utf-8"), + content=sdp.encode("utf-8"), options=make_request_options(extra_headers=extra_headers, extra_query=extra_query, timeout=timeout), cast_to=_legacy_response.HttpxBinaryResponseContent, ) @@ -71,7 +71,7 @@ async def create( extra_headers = {"Accept": "application/sdp", "Content-Type": "application/sdp", **(extra_headers or {})} return await self._post( "/realtime/calls", - body=sdp.encode("utf-8"), + content=sdp.encode("utf-8"), options=make_request_options(extra_headers=extra_headers, extra_query=extra_query, timeout=timeout), 
cast_to=_legacy_response.HttpxBinaryResponseContent, ) diff --git a/portkey_ai/_vendor/openai/resources/audio/speech.py b/portkey_ai/_vendor/openai/resources/audio/speech.py index 992fb597..f2c8d635 100644 --- a/portkey_ai/_vendor/openai/resources/audio/speech.py +++ b/portkey_ai/_vendor/openai/resources/audio/speech.py @@ -72,11 +72,12 @@ def create( model: One of the available [TTS models](https://platform.openai.com/docs/models#tts): - `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`. - voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - `verse`. Previews of the voices are available in the + voice: The voice to use when generating the audio. Supported built-in voices are + `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available + in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). instructions: Control the voice of your generated audio with additional instructions. Does not @@ -168,11 +169,12 @@ async def create( model: One of the available [TTS models](https://platform.openai.com/docs/models#tts): - `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`. - voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - `verse`. Previews of the voices are available in the + voice: The voice to use when generating the audio. Supported built-in voices are + `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + `shimmer`, `verse`, `marin`, and `cedar`. 
Previews of the voices are available + in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). instructions: Control the voice of your generated audio with additional instructions. Does not diff --git a/portkey_ai/_vendor/openai/resources/audio/transcriptions.py b/portkey_ai/_vendor/openai/resources/audio/transcriptions.py index a5c86146..59953485 100644 --- a/portkey_ai/_vendor/openai/resources/audio/transcriptions.py +++ b/portkey_ai/_vendor/openai/resources/audio/transcriptions.py @@ -91,8 +91,9 @@ def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). + `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` + (which is powered by our open source Whisper V2 model), and + `gpt-4o-transcribe-diarize`. chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server first normalizes loudness and then uses voice activity detection (VAD) to choose @@ -102,8 +103,9 @@ def create( include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. + response_format set to `json` and only with the models `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is + not supported when using `gpt-4o-transcribe-diarize`. language: The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -239,8 +241,9 @@ def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. 
The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source - Whisper V2 model), and `gpt-4o-transcribe-diarize`. + `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` + (which is powered by our open source Whisper V2 model), and + `gpt-4o-transcribe-diarize`. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -261,9 +264,9 @@ def create( include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. This field is not supported when using - `gpt-4o-transcribe-diarize`. + response_format set to `json` and only with the models `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is + not supported when using `gpt-4o-transcribe-diarize`. known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in `known_speaker_references[]`. Each entry should be a short identifier (for @@ -346,8 +349,9 @@ def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source - Whisper V2 model), and `gpt-4o-transcribe-diarize`. + `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` + (which is powered by our open source Whisper V2 model), and + `gpt-4o-transcribe-diarize`. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -368,9 +372,9 @@ def create( include: Additional information to include in the transcription response. 
`logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. This field is not supported when using - `gpt-4o-transcribe-diarize`. + response_format set to `json` and only with the models `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is + not supported when using `gpt-4o-transcribe-diarize`. known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in `known_speaker_references[]`. Each entry should be a short identifier (for @@ -535,8 +539,9 @@ async def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source - Whisper V2 model), and `gpt-4o-transcribe-diarize`. + `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` + (which is powered by our open source Whisper V2 model), and + `gpt-4o-transcribe-diarize`. chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server first normalizes loudness and then uses voice activity detection (VAD) to choose @@ -548,9 +553,9 @@ async def create( include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. This field is not supported when using - `gpt-4o-transcribe-diarize`. + response_format set to `json` and only with the models `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. 
This field is + not supported when using `gpt-4o-transcribe-diarize`. known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in `known_speaker_references[]`. Each entry should be a short identifier (for @@ -679,8 +684,9 @@ async def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source - Whisper V2 model), and `gpt-4o-transcribe-diarize`. + `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` + (which is powered by our open source Whisper V2 model), and + `gpt-4o-transcribe-diarize`. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -701,9 +707,9 @@ async def create( include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. This field is not supported when using - `gpt-4o-transcribe-diarize`. + response_format set to `json` and only with the models `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is + not supported when using `gpt-4o-transcribe-diarize`. known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in `known_speaker_references[]`. Each entry should be a short identifier (for @@ -786,8 +792,9 @@ async def create( flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source - Whisper V2 model), and `gpt-4o-transcribe-diarize`. 
+ `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` + (which is powered by our open source Whisper V2 model), and + `gpt-4o-transcribe-diarize`. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -808,9 +815,9 @@ async def create( include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. This field is not supported when using - `gpt-4o-transcribe-diarize`. + response_format set to `json` and only with the models `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is + not supported when using `gpt-4o-transcribe-diarize`. known_speaker_names: Optional list of speaker names that correspond to the audio samples provided in `known_speaker_references[]`. Each entry should be a short identifier (for diff --git a/portkey_ai/_vendor/openai/resources/batches.py b/portkey_ai/_vendor/openai/resources/batches.py index afc7fa6e..80400839 100644 --- a/portkey_ai/_vendor/openai/resources/batches.py +++ b/portkey_ai/_vendor/openai/resources/batches.py @@ -46,7 +46,9 @@ def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + endpoint: Literal[ + "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations" + ], input_file_id: str, metadata: Optional[Metadata] | Omit = omit, output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit, @@ -65,9 +67,10 @@ def create( is supported. endpoint: The endpoint to be used for all requests in the batch. 
Currently - `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - are supported. Note that `/v1/embeddings` batches are also restricted to a - maximum of 50,000 embedding inputs across all requests in the batch. + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + restricted to a maximum of 50,000 embedding inputs across all requests in the + batch. input_file_id: The ID of an uploaded file that contains requests for the new batch. @@ -261,7 +264,9 @@ async def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + endpoint: Literal[ + "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations" + ], input_file_id: str, metadata: Optional[Metadata] | Omit = omit, output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit, @@ -280,9 +285,10 @@ async def create( is supported. endpoint: The endpoint to be used for all requests in the batch. Currently - `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - are supported. Note that `/v1/embeddings` batches are also restricted to a - maximum of 50,000 embedding inputs across all requests in the batch. + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`, + and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also + restricted to a maximum of 50,000 embedding inputs across all requests in the + batch. input_file_id: The ID of an uploaded file that contains requests for the new batch. 
diff --git a/portkey_ai/_vendor/openai/resources/beta/assistants.py b/portkey_ai/_vendor/openai/resources/beta/assistants.py index a958c0ca..8c697000 100644 --- a/portkey_ai/_vendor/openai/resources/beta/assistants.py +++ b/portkey_ai/_vendor/openai/resources/beta/assistants.py @@ -2,6 +2,7 @@ from __future__ import annotations +import typing_extensions from typing import Union, Iterable, Optional from typing_extensions import Literal @@ -51,6 +52,7 @@ def with_streaming_response(self) -> AssistantsWithStreamingResponse: """ return AssistantsWithStreamingResponse(self) + @typing_extensions.deprecated("deprecated") def create( self, *, @@ -98,12 +100,17 @@ def create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -178,6 +185,7 @@ def create( cast_to=Assistant, ) + @typing_extensions.deprecated("deprecated") def retrieve( self, assistant_id: str, @@ -212,6 +220,7 @@ def retrieve( cast_to=Assistant, ) + @typing_extensions.deprecated("deprecated") def update( self, assistant_id: str, @@ -308,12 +317,17 @@ def update( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -390,6 +404,7 @@ def update( cast_to=Assistant, ) + @typing_extensions.deprecated("deprecated") def list( self, *, @@ -455,6 +470,7 @@ def list( model=Assistant, ) + @typing_extensions.deprecated("deprecated") def delete( self, assistant_id: str, @@ -510,6 +526,7 @@ def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse: """ return AsyncAssistantsWithStreamingResponse(self) + @typing_extensions.deprecated("deprecated") async def create( self, *, @@ -557,12 +574,17 @@ async def create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -637,6 +659,7 @@ async def create( cast_to=Assistant, ) + @typing_extensions.deprecated("deprecated") async def retrieve( self, assistant_id: str, @@ -671,6 +694,7 @@ async def retrieve( cast_to=Assistant, ) + @typing_extensions.deprecated("deprecated") async def update( self, assistant_id: str, @@ -767,12 +791,17 @@ async def update( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -849,6 +878,7 @@ async def update( cast_to=Assistant, ) + @typing_extensions.deprecated("deprecated") def list( self, *, @@ -914,6 +944,7 @@ def list( model=Assistant, ) + @typing_extensions.deprecated("deprecated") async def delete( self, assistant_id: str, @@ -953,20 +984,30 @@ class AssistantsWithRawResponse: def __init__(self, assistants: Assistants) -> None: self._assistants = assistants - self.create = _legacy_response.to_raw_response_wrapper( - assistants.create, + self.create = ( # pyright: ignore[reportDeprecated] + _legacy_response.to_raw_response_wrapper( + assistants.create, # pyright: ignore[reportDeprecated], + ) ) - self.retrieve = _legacy_response.to_raw_response_wrapper( - assistants.retrieve, + self.retrieve = ( # pyright: ignore[reportDeprecated] + _legacy_response.to_raw_response_wrapper( + assistants.retrieve, # pyright: ignore[reportDeprecated], + ) ) - self.update = _legacy_response.to_raw_response_wrapper( - assistants.update, + self.update = ( # pyright: ignore[reportDeprecated] + _legacy_response.to_raw_response_wrapper( + assistants.update, # pyright: ignore[reportDeprecated], + ) ) - self.list = _legacy_response.to_raw_response_wrapper( - assistants.list, + self.list = ( # pyright: ignore[reportDeprecated] + _legacy_response.to_raw_response_wrapper( + assistants.list, # pyright: ignore[reportDeprecated], + ) ) - self.delete = _legacy_response.to_raw_response_wrapper( - assistants.delete, + self.delete = ( # pyright: ignore[reportDeprecated] + _legacy_response.to_raw_response_wrapper( + assistants.delete, # pyright: ignore[reportDeprecated], + ) ) @@ -974,20 +1015,30 @@ class AsyncAssistantsWithRawResponse: def __init__(self, assistants: AsyncAssistants) -> None: self._assistants = assistants - self.create = _legacy_response.async_to_raw_response_wrapper( - assistants.create, + self.create = ( # pyright: ignore[reportDeprecated] + 
_legacy_response.async_to_raw_response_wrapper( + assistants.create, # pyright: ignore[reportDeprecated], + ) ) - self.retrieve = _legacy_response.async_to_raw_response_wrapper( - assistants.retrieve, + self.retrieve = ( # pyright: ignore[reportDeprecated] + _legacy_response.async_to_raw_response_wrapper( + assistants.retrieve, # pyright: ignore[reportDeprecated], + ) ) - self.update = _legacy_response.async_to_raw_response_wrapper( - assistants.update, + self.update = ( # pyright: ignore[reportDeprecated] + _legacy_response.async_to_raw_response_wrapper( + assistants.update, # pyright: ignore[reportDeprecated], + ) ) - self.list = _legacy_response.async_to_raw_response_wrapper( - assistants.list, + self.list = ( # pyright: ignore[reportDeprecated] + _legacy_response.async_to_raw_response_wrapper( + assistants.list, # pyright: ignore[reportDeprecated], + ) ) - self.delete = _legacy_response.async_to_raw_response_wrapper( - assistants.delete, + self.delete = ( # pyright: ignore[reportDeprecated] + _legacy_response.async_to_raw_response_wrapper( + assistants.delete, # pyright: ignore[reportDeprecated], + ) ) @@ -995,20 +1046,30 @@ class AssistantsWithStreamingResponse: def __init__(self, assistants: Assistants) -> None: self._assistants = assistants - self.create = to_streamed_response_wrapper( - assistants.create, + self.create = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + assistants.create, # pyright: ignore[reportDeprecated], + ) ) - self.retrieve = to_streamed_response_wrapper( - assistants.retrieve, + self.retrieve = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + assistants.retrieve, # pyright: ignore[reportDeprecated], + ) ) - self.update = to_streamed_response_wrapper( - assistants.update, + self.update = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + assistants.update, # pyright: ignore[reportDeprecated], + ) ) - self.list = to_streamed_response_wrapper( - assistants.list, + 
self.list = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + assistants.list, # pyright: ignore[reportDeprecated], + ) ) - self.delete = to_streamed_response_wrapper( - assistants.delete, + self.delete = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + assistants.delete, # pyright: ignore[reportDeprecated], + ) ) @@ -1016,18 +1077,28 @@ class AsyncAssistantsWithStreamingResponse: def __init__(self, assistants: AsyncAssistants) -> None: self._assistants = assistants - self.create = async_to_streamed_response_wrapper( - assistants.create, + self.create = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + assistants.create, # pyright: ignore[reportDeprecated], + ) ) - self.retrieve = async_to_streamed_response_wrapper( - assistants.retrieve, + self.retrieve = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + assistants.retrieve, # pyright: ignore[reportDeprecated], + ) ) - self.update = async_to_streamed_response_wrapper( - assistants.update, + self.update = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + assistants.update, # pyright: ignore[reportDeprecated], + ) ) - self.list = async_to_streamed_response_wrapper( - assistants.list, + self.list = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + assistants.list, # pyright: ignore[reportDeprecated], + ) ) - self.delete = async_to_streamed_response_wrapper( - assistants.delete, + self.delete = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + assistants.delete, # pyright: ignore[reportDeprecated], + ) ) diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py index 2753f581..8a58e91f 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py @@ -169,12 +169,17 @@ def 
create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -326,12 +331,17 @@ def create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. 
Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -479,12 +489,17 @@ def create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1608,12 +1623,17 @@ async def create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1765,12 +1785,17 @@ async def create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. 
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1918,12 +1943,17 @@ async def create( reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/portkey_ai/_vendor/openai/resources/chat/completions/completions.py b/portkey_ai/_vendor/openai/resources/chat/completions/completions.py index 4b73c69a..9c0b74b8 100644 --- a/portkey_ai/_vendor/openai/resources/chat/completions/completions.py +++ b/portkey_ai/_vendor/openai/resources/chat/completions/completions.py @@ -102,6 +102,7 @@ def parse( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, @@ -201,6 +202,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prediction": prediction, "presence_penalty": presence_penalty, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), "safety_identifier": safety_identifier, @@ -255,6 +257,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -401,14 +404,24 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. 
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: An object specifying the format that the model must output. @@ -547,6 +560,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -701,14 +715,24 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. 
Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: An object specifying the format that the model must output. @@ -838,6 +862,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -992,14 +1017,24 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). 
+ prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: An object specifying the format that the model must output. 
@@ -1128,6 +1163,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -1174,6 +1210,7 @@ def create( "prediction": prediction, "presence_penalty": presence_penalty, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning_effort": reasoning_effort, "response_format": response_format, "safety_identifier": safety_identifier, @@ -1407,6 +1444,7 @@ def stream( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, @@ -1477,6 +1515,7 @@ def stream( prediction=prediction, presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, reasoning_effort=reasoning_effort, safety_identifier=safety_identifier, seed=seed, @@ -1549,6 +1588,7 @@ async def parse( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, @@ -1648,6 +1688,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prediction": prediction, "presence_penalty": presence_penalty, 
"prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), "safety_identifier": safety_identifier, @@ -1702,6 +1743,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -1848,14 +1890,24 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. 
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: An object specifying the format that the model must output. @@ -1994,6 +2046,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -2148,14 +2201,24 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. 
Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: An object specifying the format that the model must output. @@ -2285,6 +2348,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -2439,14 +2503,24 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. 
The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. response_format: An object specifying the format that the model must output. @@ -2575,6 +2649,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -2621,6 +2696,7 @@ async def create( "prediction": prediction, "presence_penalty": presence_penalty, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning_effort": reasoning_effort, "response_format": response_format, "safety_identifier": safety_identifier, @@ -2854,6 +2930,7 @@ def stream( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, @@ -2925,6 +3002,7 @@ def stream( prediction=prediction, presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, reasoning_effort=reasoning_effort, safety_identifier=safety_identifier, seed=seed, diff --git a/portkey_ai/_vendor/openai/resources/containers/containers.py 
b/portkey_ai/_vendor/openai/resources/containers/containers.py index dcdc3e1a..0cbb400d 100644 --- a/portkey_ai/_vendor/openai/resources/containers/containers.py +++ b/portkey_ai/_vendor/openai/resources/containers/containers.py @@ -60,6 +60,7 @@ def create( name: str, expires_after: container_create_params.ExpiresAfter | Omit = omit, file_ids: SequenceNotStr[str] | Omit = omit, + memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -77,6 +78,8 @@ def create( file_ids: IDs of files to copy to the container. + memory_limit: Optional memory limit for the container. Defaults to "1g". + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -92,6 +95,7 @@ def create( "name": name, "expires_after": expires_after, "file_ids": file_ids, + "memory_limit": memory_limit, }, container_create_params.ContainerCreateParams, ), @@ -256,6 +260,7 @@ async def create( name: str, expires_after: container_create_params.ExpiresAfter | Omit = omit, file_ids: SequenceNotStr[str] | Omit = omit, + memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -273,6 +278,8 @@ async def create( file_ids: IDs of files to copy to the container. + memory_limit: Optional memory limit for the container. Defaults to "1g". 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -288,6 +295,7 @@ async def create( "name": name, "expires_after": expires_after, "file_ids": file_ids, + "memory_limit": memory_limit, }, container_create_params.ContainerCreateParams, ), diff --git a/portkey_ai/_vendor/openai/resources/files.py b/portkey_ai/_vendor/openai/resources/files.py index cc117e7f..964d6505 100644 --- a/portkey_ai/_vendor/openai/resources/files.py +++ b/portkey_ai/_vendor/openai/resources/files.py @@ -68,8 +68,8 @@ def create( """Upload a file that can be used across various endpoints. Individual files can be - up to 512 MB, and the size of all files uploaded by one organization can be up - to 1 TB. + up to 512 MB, and each project can store up to 2.5 TB of files in total. There + is no organization-wide storage limit. - The Assistants API supports files up to 2 million tokens and of specific file types. See the @@ -91,10 +91,15 @@ def create( Args: file: The File object (not file name) to be uploaded. - purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the - Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - Flexible file type for any purpose - `evals`: Used for eval data sets + purpose: + The intended purpose of the uploaded file. One of: + + - `assistants`: Used in the Assistants API + - `batch`: Used in the Batch API + - `fine-tune`: Used for fine-tuning + - `vision`: Images used for vision fine-tuning + - `user_data`: Flexible file type for any purpose + - `evals`: Used for eval data sets expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. @@ -384,8 +389,8 @@ async def create( """Upload a file that can be used across various endpoints. 
Individual files can be - up to 512 MB, and the size of all files uploaded by one organization can be up - to 1 TB. + up to 512 MB, and each project can store up to 2.5 TB of files in total. There + is no organization-wide storage limit. - The Assistants API supports files up to 2 million tokens and of specific file types. See the @@ -407,10 +412,15 @@ async def create( Args: file: The File object (not file name) to be uploaded. - purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the - Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - Flexible file type for any purpose - `evals`: Used for eval data sets + purpose: + The intended purpose of the uploaded file. One of: + + - `assistants`: Used in the Assistants API + - `batch`: Used in the Batch API + - `fine-tune`: Used for fine-tuning + - `vision`: Images used for vision fine-tuning + - `user_data`: Flexible file type for any purpose + - `evals`: Used for eval data sets expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. diff --git a/portkey_ai/_vendor/openai/resources/images.py b/portkey_ai/_vendor/openai/resources/images.py index 265be6f7..80582848 100644 --- a/portkey_ai/_vendor/openai/resources/images.py +++ b/portkey_ai/_vendor/openai/resources/images.py @@ -146,24 +146,26 @@ def edit( """Creates an edited or extended image given one or more source images and a prompt. - This endpoint only supports `gpt-image-1` and `dall-e-2`. + This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`, + and `gpt-image-1-mini`) and `dall-e-2`. Args: image: The image(s) to edit. Must be a supported image file or an array of images. 
- For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and + `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + characters for `dall-e-2`, and 32000 characters for the GPT image models. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -178,18 +180,18 @@ def edit( the mask will be applied on the first image. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are - supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` - is used. + model: The model to use for image generation. Only `dall-e-2` and the GPT image models + are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT + image models is used. n: The number of images to generate. Must be between 1 and 10. output_compression: The compression level (0-100%) for the generated images. 
This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. partial_images: The number of partial images to generate. This parameter is used for streaming @@ -200,17 +202,17 @@ def edit( are generated if the full image is generated more quickly. quality: The quality of the image that will be generated. `high`, `medium` and `low` are - only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. - Defaults to `auto`. + only supported for the GPT image models. `dall-e-2` only supports `standard` + quality. Defaults to `auto`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` - will always return base64-encoded images. + generated. This parameter is only supported for `dall-e-2`, as the GPT image + models always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. stream: Edit the image in streaming mode. Defaults to `false`. 
See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) @@ -259,28 +261,30 @@ def edit( """Creates an edited or extended image given one or more source images and a prompt. - This endpoint only supports `gpt-image-1` and `dall-e-2`. + This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`, + and `gpt-image-1-mini`) and `dall-e-2`. Args: image: The image(s) to edit. Must be a supported image file or an array of images. - For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and + `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + characters for `dall-e-2`, and 32000 characters for the GPT image models. stream: Edit the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) for more information. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -295,18 +299,18 @@ def edit( the mask will be applied on the first image. 
Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are - supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` - is used. + model: The model to use for image generation. Only `dall-e-2` and the GPT image models + are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT + image models is used. n: The number of images to generate. Must be between 1 and 10. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. partial_images: The number of partial images to generate. This parameter is used for streaming @@ -317,17 +321,17 @@ def edit( are generated if the full image is generated more quickly. quality: The quality of the image that will be generated. `high`, `medium` and `low` are - only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. - Defaults to `auto`. + only supported for the GPT image models. `dall-e-2` only supports `standard` + quality. Defaults to `auto`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` - will always return base64-encoded images. + generated. This parameter is only supported for `dall-e-2`, as the GPT image + models always return base64-encoded images. 
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. @@ -372,28 +376,30 @@ def edit( """Creates an edited or extended image given one or more source images and a prompt. - This endpoint only supports `gpt-image-1` and `dall-e-2`. + This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`, + and `gpt-image-1-mini`) and `dall-e-2`. Args: image: The image(s) to edit. Must be a supported image file or an array of images. - For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and + `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + characters for `dall-e-2`, and 32000 characters for the GPT image models. stream: Edit the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) for more information. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. 
+ parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -408,18 +414,18 @@ def edit( the mask will be applied on the first image. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are - supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` - is used. + model: The model to use for image generation. Only `dall-e-2` and the GPT image models + are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT + image models is used. n: The number of images to generate. Must be between 1 and 10. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. partial_images: The number of partial images to generate. This parameter is used for streaming @@ -430,17 +436,17 @@ def edit( are generated if the full image is generated more quickly. quality: The quality of the image that will be generated. `high`, `medium` and `low` are - only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. - Defaults to `auto`. + only supported for the GPT image models. `dall-e-2` only supports `standard` + quality. Defaults to `auto`. 
response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` - will always return base64-encoded images. + generated. This parameter is only supported for `dall-e-2`, as the GPT image + models always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. @@ -555,33 +561,34 @@ def generate( Args: prompt: A text description of the desired image(s). The maximum length is 32000 - characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters - for `dall-e-3`. + characters for the GPT image models, 1000 characters for `dall-e-2` and 4000 + characters for `dall-e-3`. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or - `gpt-image-1`. 
Defaults to `dall-e-2` unless a parameter specific to - `gpt-image-1` is used. + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT + image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to + `dall-e-2` unless a parameter specific to the GPT image models is used. - moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must - be either `low` for less restrictive filtering or `auto` (default value). + moderation: Control the content-moderation level for images generated by the GPT image + models. Must be either `low` for less restrictive filtering or `auto` (default + value). n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. partial_images: The number of partial images to generate. This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to @@ -594,23 +601,23 @@ def generate( - `auto` (default value) will automatically select the best quality for the given model. - - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `high`, `medium` and `low` are supported for the GPT image models. - `hd` and `standard` are supported for `dall-e-3`. - `standard` is the only option for `dall-e-2`. response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one of `url` or `b64_json`. 
URLs are only valid for 60 minutes - after the image has been generated. This parameter isn't supported for - `gpt-image-1` which will always return base64-encoded images. + after the image has been generated. This parameter isn't supported for the GPT + image models, which always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and - one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. stream: Generate the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. style: The style of the generated images. This parameter is only supported for `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean @@ -665,37 +672,38 @@ def generate( Args: prompt: A text description of the desired image(s). The maximum length is 32000 - characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters - for `dall-e-3`. + characters for the GPT image models, 1000 characters for `dall-e-2` and 4000 + characters for `dall-e-3`. stream: Generate the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. 
background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or - `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to - `gpt-image-1` is used. + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT + image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to + `dall-e-2` unless a parameter specific to the GPT image models is used. - moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must - be either `low` for less restrictive filtering or `auto` (default value). + moderation: Control the content-moderation level for images generated by the GPT image + models. Must be either `low` for less restrictive filtering or `auto` (default + value). n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. 
+ supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. partial_images: The number of partial images to generate. This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to @@ -708,19 +716,19 @@ def generate( - `auto` (default value) will automatically select the best quality for the given model. - - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `high`, `medium` and `low` are supported for the GPT image models. - `hd` and `standard` are supported for `dall-e-3`. - `standard` is the only option for `dall-e-2`. response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes - after the image has been generated. This parameter isn't supported for - `gpt-image-1` which will always return base64-encoded images. + after the image has been generated. This parameter isn't supported for the GPT + image models, which always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and - one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. style: The style of the generated images. This parameter is only supported for `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean @@ -775,37 +783,38 @@ def generate( Args: prompt: A text description of the desired image(s). The maximum length is 32000 - characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters - for `dall-e-3`. 
+ characters for the GPT image models, 1000 characters for `dall-e-2` and 4000 + characters for `dall-e-3`. stream: Generate the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or - `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to - `gpt-image-1` is used. + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT + image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to + `dall-e-2` unless a parameter specific to the GPT image models is used. - moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must - be either `low` for less restrictive filtering or `auto` (default value). + moderation: Control the content-moderation level for images generated by the GPT image + models. Must be either `low` for less restrictive filtering or `auto` (default + value). n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. 
output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. partial_images: The number of partial images to generate. This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to @@ -818,19 +827,19 @@ def generate( - `auto` (default value) will automatically select the best quality for the given model. - - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `high`, `medium` and `low` are supported for the GPT image models. - `hd` and `standard` are supported for `dall-e-3`. - `standard` is the only option for `dall-e-2`. response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes - after the image has been generated. This parameter isn't supported for - `gpt-image-1` which will always return base64-encoded images. + after the image has been generated. This parameter isn't supported for the GPT + image models, which always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and - one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. 
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. style: The style of the generated images. This parameter is only supported for `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean @@ -1033,24 +1042,26 @@ async def edit( """Creates an edited or extended image given one or more source images and a prompt. - This endpoint only supports `gpt-image-1` and `dall-e-2`. + This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`, + and `gpt-image-1-mini`) and `dall-e-2`. Args: image: The image(s) to edit. Must be a supported image file or an array of images. - For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and + `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + characters for `dall-e-2`, and 32000 characters for the GPT image models. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. 
If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -1065,18 +1076,18 @@ async def edit( the mask will be applied on the first image. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are - supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` - is used. + model: The model to use for image generation. Only `dall-e-2` and the GPT image models + are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT + image models is used. n: The number of images to generate. Must be between 1 and 10. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. partial_images: The number of partial images to generate. This parameter is used for streaming @@ -1087,17 +1098,17 @@ async def edit( are generated if the full image is generated more quickly. quality: The quality of the image that will be generated. `high`, `medium` and `low` are - only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. - Defaults to `auto`. + only supported for the GPT image models. `dall-e-2` only supports `standard` + quality. Defaults to `auto`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. 
This parameter is only supported for `dall-e-2`, as `gpt-image-1` - will always return base64-encoded images. + generated. This parameter is only supported for `dall-e-2`, as the GPT image + models always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. stream: Edit the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) @@ -1146,28 +1157,30 @@ async def edit( """Creates an edited or extended image given one or more source images and a prompt. - This endpoint only supports `gpt-image-1` and `dall-e-2`. + This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`, + and `gpt-image-1-mini`) and `dall-e-2`. Args: image: The image(s) to edit. Must be a supported image file or an array of images. - For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and + `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + characters for `dall-e-2`, and 32000 characters for the GPT image models. stream: Edit the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) for more information. 
background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -1182,18 +1195,18 @@ async def edit( the mask will be applied on the first image. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are - supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` - is used. + model: The model to use for image generation. Only `dall-e-2` and the GPT image models + are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT + image models is used. n: The number of images to generate. Must be between 1 and 10. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. partial_images: The number of partial images to generate. 
This parameter is used for streaming @@ -1204,17 +1217,17 @@ async def edit( are generated if the full image is generated more quickly. quality: The quality of the image that will be generated. `high`, `medium` and `low` are - only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. - Defaults to `auto`. + only supported for the GPT image models. `dall-e-2` only supports `standard` + quality. Defaults to `auto`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` - will always return base64-encoded images. + generated. This parameter is only supported for `dall-e-2`, as the GPT image + models always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. @@ -1259,28 +1272,30 @@ async def edit( """Creates an edited or extended image given one or more source images and a prompt. - This endpoint only supports `gpt-image-1` and `dall-e-2`. + This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`, + and `gpt-image-1-mini`) and `dall-e-2`. Args: image: The image(s) to edit. Must be a supported image file or an array of images. - For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and + `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than 50MB. 
You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a square `png` file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + characters for `dall-e-2`, and 32000 characters for the GPT image models. stream: Edit the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) for more information. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -1295,18 +1310,18 @@ async def edit( the mask will be applied on the first image. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are - supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` - is used. + model: The model to use for image generation. Only `dall-e-2` and the GPT image models + are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT + image models is used. n: The number of images to generate. Must be between 1 and 10. output_compression: The compression level (0-100%) for the generated images. 
This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. partial_images: The number of partial images to generate. This parameter is used for streaming @@ -1317,17 +1332,17 @@ async def edit( are generated if the full image is generated more quickly. quality: The quality of the image that will be generated. `high`, `medium` and `low` are - only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. - Defaults to `auto`. + only supported for the GPT image models. `dall-e-2` only supports `standard` + quality. Defaults to `auto`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` - will always return base64-encoded images. + generated. This parameter is only supported for `dall-e-2`, as the GPT image + models always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. @@ -1442,33 +1457,34 @@ async def generate( Args: prompt: A text description of the desired image(s). 
The maximum length is 32000 - characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters - for `dall-e-3`. + characters for the GPT image models, 1000 characters for `dall-e-2` and 4000 + characters for `dall-e-3`. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or - `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to - `gpt-image-1` is used. + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT + image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to + `dall-e-2` unless a parameter specific to the GPT image models is used. - moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must - be either `low` for less restrictive filtering or `auto` (default value). + moderation: Control the content-moderation level for images generated by the GPT image + models. Must be either `low` for less restrictive filtering or `auto` (default + value). n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. output_compression: The compression level (0-100%) for the generated images. 
This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. partial_images: The number of partial images to generate. This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to @@ -1481,23 +1497,23 @@ async def generate( - `auto` (default value) will automatically select the best quality for the given model. - - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `high`, `medium` and `low` are supported for the GPT image models. - `hd` and `standard` are supported for `dall-e-3`. - `standard` is the only option for `dall-e-2`. response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes - after the image has been generated. This parameter isn't supported for - `gpt-image-1` which will always return base64-encoded images. + after the image has been generated. This parameter isn't supported for the GPT + image models, which always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and - one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. stream: Generate the image in streaming mode. 
Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. style: The style of the generated images. This parameter is only supported for `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean @@ -1552,37 +1568,38 @@ async def generate( Args: prompt: A text description of the desired image(s). The maximum length is 32000 - characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters - for `dall-e-3`. + characters for the GPT image models, 1000 characters for `dall-e-2` and 4000 + characters for `dall-e-3`. stream: Generate the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or - `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to - `gpt-image-1` is used. + model: The model to use for image generation. 
One of `dall-e-2`, `dall-e-3`, or a GPT + image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to + `dall-e-2` unless a parameter specific to the GPT image models is used. - moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must - be either `low` for less restrictive filtering or `auto` (default value). + moderation: Control the content-moderation level for images generated by the GPT image + models. Must be either `low` for less restrictive filtering or `auto` (default + value). n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. partial_images: The number of partial images to generate. This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to @@ -1595,19 +1612,19 @@ async def generate( - `auto` (default value) will automatically select the best quality for the given model. - - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `high`, `medium` and `low` are supported for the GPT image models. - `hd` and `standard` are supported for `dall-e-3`. - `standard` is the only option for `dall-e-2`. response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes - after the image has been generated. 
This parameter isn't supported for - `gpt-image-1` which will always return base64-encoded images. + after the image has been generated. This parameter isn't supported for the GPT + image models, which always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and - one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. style: The style of the generated images. This parameter is only supported for `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean @@ -1662,37 +1679,38 @@ async def generate( Args: prompt: A text description of the desired image(s). The maximum length is 32000 - characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters - for `dall-e-3`. + characters for the GPT image models, 1000 characters for `dall-e-2` and 4000 + characters for `dall-e-3`. stream: Generate the image in streaming mode. Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. background: Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. 
Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or - `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to - `gpt-image-1` is used. + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT + image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to + `dall-e-2` unless a parameter specific to the GPT image models is used. - moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must - be either `low` for less restrictive filtering or `auto` (default value). + moderation: Control the content-moderation level for images generated by the GPT image + models. Must be either `low` for less restrictive filtering or `auto` (default + value). n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. output_compression: The compression level (0-100%) for the generated images. This parameter is only - supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + supported for the GPT image models with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. partial_images: The number of partial images to generate. This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to @@ -1705,19 +1723,19 @@ async def generate( - `auto` (default value) will automatically select the best quality for the given model. 
- - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `high`, `medium` and `low` are supported for the GPT image models. - `hd` and `standard` are supported for `dall-e-3`. - `standard` is the only option for `dall-e-2`. response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes - after the image has been generated. This parameter isn't supported for - `gpt-image-1` which will always return base64-encoded images. + after the image has been generated. This parameter isn't supported for the GPT + image models, which always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and - one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image + models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. style: The style of the generated images. This parameter is only supported for `dall-e-3`. Must be one of `vivid` or `natural`. 
Vivid causes the model to lean diff --git a/portkey_ai/_vendor/openai/resources/realtime/calls.py b/portkey_ai/_vendor/openai/resources/realtime/calls.py index 7d2c92fe..20a22fc3 100644 --- a/portkey_ai/_vendor/openai/resources/realtime/calls.py +++ b/portkey_ai/_vendor/openai/resources/realtime/calls.py @@ -125,8 +125,10 @@ def accept( "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", + "gpt-realtime-mini-2025-12-15", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", + "gpt-audio-mini-2025-12-15", ], ] | Omit = omit, @@ -199,15 +201,20 @@ def accept( limit, the conversation be truncated, meaning messages (starting from the oldest) will not be included in the model's context. A 32k context model with 4,096 max output tokens can only include 28,224 tokens in the context before - truncation occurs. Clients can configure truncation behavior to truncate with a - lower max token limit, which is an effective way to control token usage and - cost. Truncation will reduce the number of cached tokens on the next turn - (busting the cache), since messages are dropped from the beginning of the - context. However, clients can also configure truncation to retain messages up to - a fraction of the maximum context size, which will reduce the need for future - truncations and thus improve the cache rate. Truncation can be disabled - entirely, which means the server will never truncate but would instead return an - error if the conversation exceeds the model's input token limit. + truncation occurs. + + Clients can configure truncation behavior to truncate with a lower max token + limit, which is an effective way to control token usage and cost. + + Truncation will reduce the number of cached tokens on the next turn (busting the + cache), since messages are dropped from the beginning of the context. 
However, + clients can also configure truncation to retain messages up to a fraction of the + maximum context size, which will reduce the need for future truncations and thus + improve the cache rate. + + Truncation can be disabled entirely, which means the server will never truncate + but would instead return an error if the conversation exceeds the model's input + token limit. extra_headers: Send extra headers @@ -445,8 +452,10 @@ async def accept( "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", + "gpt-realtime-mini-2025-12-15", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", + "gpt-audio-mini-2025-12-15", ], ] | Omit = omit, @@ -519,15 +528,20 @@ async def accept( limit, the conversation be truncated, meaning messages (starting from the oldest) will not be included in the model's context. A 32k context model with 4,096 max output tokens can only include 28,224 tokens in the context before - truncation occurs. Clients can configure truncation behavior to truncate with a - lower max token limit, which is an effective way to control token usage and - cost. Truncation will reduce the number of cached tokens on the next turn - (busting the cache), since messages are dropped from the beginning of the - context. However, clients can also configure truncation to retain messages up to - a fraction of the maximum context size, which will reduce the need for future - truncations and thus improve the cache rate. Truncation can be disabled - entirely, which means the server will never truncate but would instead return an - error if the conversation exceeds the model's input token limit. + truncation occurs. + + Clients can configure truncation behavior to truncate with a lower max token + limit, which is an effective way to control token usage and cost. + + Truncation will reduce the number of cached tokens on the next turn (busting the + cache), since messages are dropped from the beginning of the context. 
However, + clients can also configure truncation to retain messages up to a fraction of the + maximum context size, which will reduce the need for future truncations and thus + improve the cache rate. + + Truncation can be disabled entirely, which means the server will never truncate + but would instead return an error if the conversation exceeds the model's input + token limit. extra_headers: Send extra headers diff --git a/portkey_ai/_vendor/openai/resources/realtime/realtime.py b/portkey_ai/_vendor/openai/resources/realtime/realtime.py index 6e692586..44f14cd3 100644 --- a/portkey_ai/_vendor/openai/resources/realtime/realtime.py +++ b/portkey_ai/_vendor/openai/resources/realtime/realtime.py @@ -232,7 +232,7 @@ def calls(self) -> AsyncCallsWithStreamingResponse: class AsyncRealtimeConnection: - """Represents a live websocket connection to the Realtime API""" + """Represents a live WebSocket connection to the Realtime API""" session: AsyncRealtimeSessionResource response: AsyncRealtimeResponseResource @@ -421,7 +421,7 @@ async def __aexit__( class RealtimeConnection: - """Represents a live websocket connection to the Realtime API""" + """Represents a live WebSocket connection to the Realtime API""" session: RealtimeSessionResource response: RealtimeResponseResource @@ -829,7 +829,7 @@ def retrieve(self, *, item_id: str, event_id: str | Omit = omit) -> None: class RealtimeOutputAudioBufferResource(BaseRealtimeConnectionResource): def clear(self, *, event_id: str | Omit = omit) -> None: - """**WebRTC Only:** Emit to cut off the current audio response. + """**WebRTC/SIP Only:** Emit to cut off the current audio response. This will trigger the server to stop generating audio and emit a `output_audio_buffer.cleared` event. 
This @@ -1066,7 +1066,7 @@ async def retrieve(self, *, item_id: str, event_id: str | Omit = omit) -> None: class AsyncRealtimeOutputAudioBufferResource(BaseAsyncRealtimeConnectionResource): async def clear(self, *, event_id: str | Omit = omit) -> None: - """**WebRTC Only:** Emit to cut off the current audio response. + """**WebRTC/SIP Only:** Emit to cut off the current audio response. This will trigger the server to stop generating audio and emit a `output_audio_buffer.cleared` event. This diff --git a/portkey_ai/_vendor/openai/resources/responses/input_tokens.py b/portkey_ai/_vendor/openai/resources/responses/input_tokens.py index 0f47955f..86641646 100644 --- a/portkey_ai/_vendor/openai/resources/responses/input_tokens.py +++ b/portkey_ai/_vendor/openai/resources/responses/input_tokens.py @@ -102,9 +102,7 @@ def count( - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a - response. See the `tools` parameter to see how to specify which tools the model - can call. + tool_choice: Controls which tool the model should use, if any. tools: An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. @@ -227,9 +225,7 @@ async def count( - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a - response. See the `tools` parameter to see how to specify which tools the model - can call. + tool_choice: Controls which tool the model should use, if any. tools: An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. 
diff --git a/portkey_ai/_vendor/openai/resources/responses/responses.py b/portkey_ai/_vendor/openai/resources/responses/responses.py index 439cf8d3..8e80f679 100644 --- a/portkey_ai/_vendor/openai/resources/responses/responses.py +++ b/portkey_ai/_vendor/openai/resources/responses/responses.py @@ -2,6 +2,7 @@ from __future__ import annotations +from copy import copy from typing import Any, List, Type, Union, Iterable, Optional, cast from functools import partial from typing_extensions import Literal, overload @@ -33,7 +34,11 @@ AsyncInputTokensWithStreamingResponse, ) from ..._base_client import make_request_options -from ...types.responses import response_create_params, response_retrieve_params +from ...types.responses import ( + response_create_params, + response_compact_params, + response_retrieve_params, +) from ...lib._parsing._responses import ( TextFormatT, parse_response, @@ -45,11 +50,13 @@ from ...types.shared_params.reasoning import Reasoning from ...types.responses.parsed_response import ParsedResponse from ...lib.streaming.responses._responses import ResponseStreamManager, AsyncResponseStreamManager +from ...types.responses.compacted_response import CompactedResponse from ...types.responses.response_includable import ResponseIncludable from ...types.shared_params.responses_model import ResponsesModel from ...types.responses.response_input_param import ResponseInputParam from ...types.responses.response_prompt_param import ResponsePromptParam from ...types.responses.response_stream_event import ResponseStreamEvent +from ...types.responses.response_input_item_param import ResponseInputItemParam from ...types.responses.response_text_config_param import ResponseTextConfigParam __all__ = ["Responses", "AsyncResponses"] @@ -100,6 +107,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | 
Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -215,6 +223,11 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning: **gpt-5 and o-series models only** Configuration options for @@ -340,6 +353,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -461,6 +475,11 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
+ reasoning: **gpt-5 and o-series models only** Configuration options for @@ -579,6 +598,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -700,6 +720,11 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning: **gpt-5 and o-series models only** Configuration options for @@ -816,6 +841,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -854,6 +880,7 @@ def create( "previous_response_id": previous_response_id, "prompt": prompt, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning": reasoning, "safety_identifier": safety_identifier, "service_tier": service_tier, @@ -915,6 +942,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: 
Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -954,6 +982,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -987,6 +1016,7 @@ def stream( "previous_response_id": previous_response_id, "prompt": prompt, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning": reasoning, "safety_identifier": safety_identifier, "service_tier": service_tier, @@ -1023,6 +1053,7 @@ def stream( if "format" in text: raise TypeError("Cannot mix and match text.format with text_format") + text = copy(text) text["format"] = _type_to_text_format_param(text_format) api_request: partial[Stream[ResponseStreamEvent]] = partial( @@ -1040,6 +1071,7 @@ def stream( previous_response_id=previous_response_id, prompt=prompt, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, store=store, stream_options=stream_options, stream=True, @@ -1098,6 +1130,7 @@ def parse( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -1126,7 +1159,7 @@ def parse( if "format" in text: raise TypeError("Cannot mix and match text.format with text_format") - + text = copy(text) text["format"] = 
_type_to_text_format_param(text_format) tools = _make_tools(tools) @@ -1155,6 +1188,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "previous_response_id": previous_response_id, "prompt": prompt, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning": reasoning, "safety_identifier": safety_identifier, "service_tier": service_tier, @@ -1489,6 +1523,158 @@ def cancel( cast_to=Response, ) + def compact( + self, + *, + model: Union[ + Literal[ + "gpt-5.2", + "gpt-5.2-2025-12-11", + "gpt-5.2-chat-latest", + "gpt-5.2-pro", + "gpt-5.2-pro-2025-12-11", + "gpt-5.1", + "gpt-5.1-2025-11-13", + "gpt-5.1-codex", + "gpt-5.1-mini", + "gpt-5.1-chat-latest", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4.1-2025-04-14", + "gpt-4.1-mini-2025-04-14", + "gpt-4.1-nano-2025-04-14", + "o4-mini", + "o4-mini-2025-04-16", + "o3", + "o3-2025-04-16", + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-audio-preview-2025-06-03", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "codex-mini-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + 
"gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + "o1-pro", + "o1-pro-2025-03-19", + "o3-pro", + "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", + "computer-use-preview", + "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", + "gpt-5.1-codex-max", + ], + str, + None, + ], + input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, + instructions: Optional[str] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CompactedResponse: + """ + Compact conversation + + Args: + model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + input: Text, image, or file inputs to the model, used to generate a response + + instructions: A system (or developer) message inserted into the model's context. When used + along with `previous_response_id`, the instructions from a previous response + will not be carried over to the next response. This makes it simple to swap out + system (or developer) messages in new responses. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. 
Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/responses/compact", + body=maybe_transform( + { + "model": model, + "input": input, + "instructions": instructions, + "previous_response_id": previous_response_id, + }, + response_compact_params.ResponseCompactParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompactedResponse, + ) + class AsyncResponses(AsyncAPIResource): @cached_property @@ -1535,6 +1721,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -1650,6 +1837,11 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
+ reasoning: **gpt-5 and o-series models only** Configuration options for @@ -1775,6 +1967,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -1896,6 +2089,11 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + reasoning: **gpt-5 and o-series models only** Configuration options for @@ -2014,6 +2212,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2135,6 +2334,11 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended + prompt caching, which keeps cached prefixes active for longer, up to a maximum + of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). 
+ reasoning: **gpt-5 and o-series models only** Configuration options for @@ -2251,6 +2455,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2289,6 +2494,7 @@ async def create( "previous_response_id": previous_response_id, "prompt": prompt, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning": reasoning, "safety_identifier": safety_identifier, "service_tier": service_tier, @@ -2350,6 +2556,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2389,6 +2596,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2422,6 +2630,7 @@ def stream( "previous_response_id": previous_response_id, "prompt": prompt, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning": reasoning, "safety_identifier": safety_identifier, "service_tier": service_tier, @@ -2458,7 +2667,7 @@ def stream( if "format" in 
text: raise TypeError("Cannot mix and match text.format with text_format") - + text = copy(text) text["format"] = _type_to_text_format_param(text_format) api_request = self.create( @@ -2476,6 +2685,7 @@ def stream( previous_response_id=previous_response_id, prompt=prompt, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, store=store, stream_options=stream_options, temperature=temperature, @@ -2538,6 +2748,7 @@ async def parse( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2566,7 +2777,7 @@ async def parse( if "format" in text: raise TypeError("Cannot mix and match text.format with text_format") - + text = copy(text) text["format"] = _type_to_text_format_param(text_format) tools = _make_tools(tools) @@ -2595,6 +2806,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "previous_response_id": previous_response_id, "prompt": prompt, "prompt_cache_key": prompt_cache_key, + "prompt_cache_retention": prompt_cache_retention, "reasoning": reasoning, "safety_identifier": safety_identifier, "service_tier": service_tier, @@ -2929,6 +3141,158 @@ async def cancel( cast_to=Response, ) + async def compact( + self, + *, + model: Union[ + Literal[ + "gpt-5.2", + "gpt-5.2-2025-12-11", + "gpt-5.2-chat-latest", + "gpt-5.2-pro", + "gpt-5.2-pro-2025-12-11", + "gpt-5.1", + "gpt-5.1-2025-11-13", + "gpt-5.1-codex", + "gpt-5.1-mini", + "gpt-5.1-chat-latest", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4.1-2025-04-14", + 
"gpt-4.1-mini-2025-04-14", + "gpt-4.1-nano-2025-04-14", + "o4-mini", + "o4-mini-2025-04-16", + "o3", + "o3-2025-04-16", + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-audio-preview-2025-06-03", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "codex-mini-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + "o1-pro", + "o1-pro-2025-03-19", + "o3-pro", + "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", + "computer-use-preview", + "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", + "gpt-5.1-codex-max", + ], + str, + None, + ], + input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, + instructions: Optional[str] | Omit = omit, + previous_response_id: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> CompactedResponse: + """ + Compact conversation + + Args: + model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + + input: Text, image, or file inputs to the model, used to generate a response + + instructions: A system (or developer) message inserted into the model's context. When used + along with `previous_response_id`, the instructions from a previous response + will not be carried over to the next response. This makes it simple to swap out + system (or developer) messages in new responses. + + previous_response_id: The unique ID of the previous response to the model. Use this to create + multi-turn conversations. Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/responses/compact", + body=await async_maybe_transform( + { + "model": model, + "input": input, + "instructions": instructions, + "previous_response_id": previous_response_id, + }, + response_compact_params.ResponseCompactParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompactedResponse, + ) + class ResponsesWithRawResponse: def __init__(self, responses: Responses) -> None: @@ -2946,6 +3310,9 @@ def __init__(self, responses: Responses) -> None: self.cancel = _legacy_response.to_raw_response_wrapper( responses.cancel, ) + self.compact = _legacy_response.to_raw_response_wrapper( + responses.compact, + ) self.parse = _legacy_response.to_raw_response_wrapper( responses.parse, ) @@ -2975,6 +3342,9 @@ def __init__(self, responses: AsyncResponses) -> None: self.cancel = _legacy_response.async_to_raw_response_wrapper( responses.cancel, ) + self.compact = _legacy_response.async_to_raw_response_wrapper( + responses.compact, + ) self.parse = _legacy_response.async_to_raw_response_wrapper( responses.parse, ) @@ -3004,6 +3374,9 @@ def __init__(self, responses: Responses) -> None: self.cancel = to_streamed_response_wrapper( responses.cancel, ) + self.compact = to_streamed_response_wrapper( + responses.compact, + ) @cached_property def input_items(self) -> InputItemsWithStreamingResponse: @@ -3030,6 +3403,9 @@ def __init__(self, responses: AsyncResponses) -> None: self.cancel = async_to_streamed_response_wrapper( responses.cancel, ) + self.compact = async_to_streamed_response_wrapper( + responses.compact, + ) @cached_property def input_items(self) -> AsyncInputItemsWithStreamingResponse: diff 
--git a/portkey_ai/_vendor/openai/resources/videos.py b/portkey_ai/_vendor/openai/resources/videos.py index 4df5f020..9f74c942 100644 --- a/portkey_ai/_vendor/openai/resources/videos.py +++ b/portkey_ai/_vendor/openai/resources/videos.py @@ -10,7 +10,6 @@ from .. import _legacy_response from ..types import ( VideoSize, - VideoModel, VideoSeconds, video_list_params, video_remix_params, @@ -34,8 +33,8 @@ from .._base_client import AsyncPaginator, make_request_options from .._utils._utils import is_given from ..types.video_size import VideoSize -from ..types.video_model import VideoModel from ..types.video_seconds import VideoSeconds +from ..types.video_model_param import VideoModelParam from ..types.video_delete_response import VideoDeleteResponse __all__ = ["Videos", "AsyncVideos"] @@ -66,7 +65,7 @@ def create( *, prompt: str, input_reference: FileTypes | Omit = omit, - model: VideoModel | Omit = omit, + model: VideoModelParam | Omit = omit, seconds: VideoSeconds | Omit = omit, size: VideoSize | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -84,11 +83,13 @@ def create( input_reference: Optional image reference that guides generation. - model: The video generation model to use. Defaults to `sora-2`. + model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults + to `sora-2`. - seconds: Clip duration in seconds. Defaults to 4 seconds. + seconds: Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds. - size: Output resolution formatted as width x height. Defaults to 720x1280. + size: Output resolution formatted as width x height (allowed values: 720x1280, + 1280x720, 1024x1792, 1792x1024). Defaults to 720x1280. 
extra_headers: Send extra headers @@ -128,7 +129,7 @@ def create_and_poll( *, prompt: str, input_reference: FileTypes | Omit = omit, - model: VideoModel | Omit = omit, + model: VideoModelParam | Omit = omit, seconds: VideoSeconds | Omit = omit, size: VideoSize | Omit = omit, poll_interval_ms: int | Omit = omit, @@ -419,7 +420,7 @@ async def create( *, prompt: str, input_reference: FileTypes | Omit = omit, - model: VideoModel | Omit = omit, + model: VideoModelParam | Omit = omit, seconds: VideoSeconds | Omit = omit, size: VideoSize | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -437,11 +438,13 @@ async def create( input_reference: Optional image reference that guides generation. - model: The video generation model to use. Defaults to `sora-2`. + model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults + to `sora-2`. - seconds: Clip duration in seconds. Defaults to 4 seconds. + seconds: Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds. - size: Output resolution formatted as width x height. Defaults to 720x1280. + size: Output resolution formatted as width x height (allowed values: 720x1280, + 1280x720, 1024x1792, 1792x1024). Defaults to 720x1280. 
extra_headers: Send extra headers @@ -481,7 +484,7 @@ async def create_and_poll( *, prompt: str, input_reference: FileTypes | Omit = omit, - model: VideoModel | Omit = omit, + model: VideoModelParam | Omit = omit, seconds: VideoSeconds | Omit = omit, size: VideoSize | Omit = omit, poll_interval_ms: int | Omit = omit, diff --git a/portkey_ai/_vendor/openai/types/__init__.py b/portkey_ai/_vendor/openai/types/__init__.py index a98ca16e..5eb267e8 100644 --- a/portkey_ai/_vendor/openai/types/__init__.py +++ b/portkey_ai/_vendor/openai/types/__init__.py @@ -53,6 +53,7 @@ from .completion_choice import CompletionChoice as CompletionChoice from .image_edit_params import ImageEditParams as ImageEditParams from .video_list_params import VideoListParams as VideoListParams +from .video_model_param import VideoModelParam as VideoModelParam from .eval_create_params import EvalCreateParams as EvalCreateParams from .eval_list_response import EvalListResponse as EvalListResponse from .eval_update_params import EvalUpdateParams as EvalUpdateParams diff --git a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py index 634d7881..417df5b2 100644 --- a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py +++ b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py @@ -17,7 +17,7 @@ class SpeechCreateParams(TypedDict, total=False): model: Required[Union[str, SpeechModel]] """ One of the available [TTS models](https://platform.openai.com/docs/models#tts): - `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`. """ voice: Required[ @@ -25,9 +25,9 @@ class SpeechCreateParams(TypedDict, total=False): ] """The voice to use when generating the audio. - Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, - `nova`, `sage`, `shimmer`, and `verse`. 
Previews of the voices are available in - the + Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, + `fable`, `onyx`, `nova`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. + Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). """ diff --git a/portkey_ai/_vendor/openai/types/audio/speech_model.py b/portkey_ai/_vendor/openai/types/audio/speech_model.py index f004f805..31294a05 100644 --- a/portkey_ai/_vendor/openai/types/audio/speech_model.py +++ b/portkey_ai/_vendor/openai/types/audio/speech_model.py @@ -4,4 +4,4 @@ __all__ = ["SpeechModel"] -SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"] +SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts", "gpt-4o-mini-tts-2025-12-15"] diff --git a/portkey_ai/_vendor/openai/types/audio/transcription.py b/portkey_ai/_vendor/openai/types/audio/transcription.py index 4c588215..cbae8bf7 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription.py @@ -21,6 +21,8 @@ class Logprob(BaseModel): class UsageTokensInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -29,6 +31,8 @@ class UsageTokensInputTokenDetails(BaseModel): class UsageTokens(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -46,6 +50,8 @@ class UsageTokens(BaseModel): class UsageDuration(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -57,6 +63,10 @@ class UsageDuration(BaseModel): class Transcription(BaseModel): + """ + Represents a transcription response returned by model, based on the provided input. 
+ """ + text: str """The transcribed text.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py b/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py index adaef9f5..15540fc7 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py @@ -29,9 +29,9 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): model: Required[Union[str, AudioModel]] """ID of the model to use. - The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `whisper-1` - (which is powered by our open source Whisper V2 model), and - `gpt-4o-transcribe-diarize`. + The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, + `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` (which is powered by our open + source Whisper V2 model), and `gpt-4o-transcribe-diarize`. """ chunking_strategy: Optional[ChunkingStrategy] @@ -49,9 +49,9 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. This field is not supported when using - `gpt-4o-transcribe-diarize`. + response_format set to `json` and only with the models `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is + not supported when using `gpt-4o-transcribe-diarize`. 
""" known_speaker_names: SequenceNotStr[str] diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_diarized.py b/portkey_ai/_vendor/openai/types/audio/transcription_diarized.py index b7dd2b8e..07585fe2 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_diarized.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_diarized.py @@ -11,6 +11,8 @@ class UsageTokensInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -19,6 +21,8 @@ class UsageTokensInputTokenDetails(BaseModel): class UsageTokens(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -36,6 +40,8 @@ class UsageTokens(BaseModel): class UsageDuration(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -47,6 +53,10 @@ class UsageDuration(BaseModel): class TranscriptionDiarized(BaseModel): + """ + Represents a diarized transcription response returned by the model, including the combined transcript and speaker-segment annotations. 
+ """ + duration: float """Duration of the input audio in seconds.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_diarized_segment.py b/portkey_ai/_vendor/openai/types/audio/transcription_diarized_segment.py index fe87bb4f..fcfdb363 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_diarized_segment.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_diarized_segment.py @@ -8,6 +8,8 @@ class TranscriptionDiarizedSegment(BaseModel): + """A segment of diarized transcript text with speaker metadata.""" + id: str """Unique identifier for the segment.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_text_delta_event.py b/portkey_ai/_vendor/openai/types/audio/transcription_text_delta_event.py index 363b6a63..a6e83133 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_text_delta_event.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_text_delta_event.py @@ -20,6 +20,11 @@ class Logprob(BaseModel): class TranscriptionTextDeltaEvent(BaseModel): + """Emitted when there is an additional text delta. + + This is also the first event emitted when the transcription starts. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`. 
+ """ + delta: str """The text delta that was additionally transcribed.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py b/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py index 9665edc5..c8f7fc07 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py @@ -20,6 +20,8 @@ class Logprob(BaseModel): class UsageInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -28,6 +30,8 @@ class UsageInputTokenDetails(BaseModel): class Usage(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -45,6 +49,11 @@ class Usage(BaseModel): class TranscriptionTextDoneEvent(BaseModel): + """Emitted when the transcription is complete. + + Contains the complete transcription text. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`. + """ + text: str """The text that was transcribed.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_text_segment_event.py b/portkey_ai/_vendor/openai/types/audio/transcription_text_segment_event.py index d4f76645..e95472e6 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_text_segment_event.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_text_segment_event.py @@ -8,6 +8,10 @@ class TranscriptionTextSegmentEvent(BaseModel): + """ + Emitted when a diarized transcription returns a completed segment with speaker information. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with `stream` set to `true` and `response_format` set to `diarized_json`. 
+ """ + id: str """Unique identifier for the segment.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py b/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py index addda71e..b1a95e9c 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py @@ -11,6 +11,8 @@ class Usage(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -19,6 +21,10 @@ class Usage(BaseModel): class TranscriptionVerbose(BaseModel): + """ + Represents a verbose json transcription response returned by model, based on the provided input. + """ + duration: float """The duration of the input audio.""" diff --git a/portkey_ai/_vendor/openai/types/audio_model.py b/portkey_ai/_vendor/openai/types/audio_model.py index 68031a21..8acada6d 100644 --- a/portkey_ai/_vendor/openai/types/audio_model.py +++ b/portkey_ai/_vendor/openai/types/audio_model.py @@ -4,4 +4,10 @@ __all__ = ["AudioModel"] -AudioModel: TypeAlias = Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe", "gpt-4o-transcribe-diarize"] +AudioModel: TypeAlias = Literal[ + "whisper-1", + "gpt-4o-transcribe", + "gpt-4o-mini-transcribe", + "gpt-4o-mini-transcribe-2025-12-15", + "gpt-4o-transcribe-diarize", +] diff --git a/portkey_ai/_vendor/openai/types/auto_file_chunking_strategy_param.py b/portkey_ai/_vendor/openai/types/auto_file_chunking_strategy_param.py index 6f17836b..db7cbf59 100644 --- a/portkey_ai/_vendor/openai/types/auto_file_chunking_strategy_param.py +++ b/portkey_ai/_vendor/openai/types/auto_file_chunking_strategy_param.py @@ -8,5 +8,10 @@ class AutoFileChunkingStrategyParam(TypedDict, total=False): + """The default strategy. + + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. 
+ """ + type: Required[Literal["auto"]] """Always `auto`.""" diff --git a/portkey_ai/_vendor/openai/types/batch_create_params.py b/portkey_ai/_vendor/openai/types/batch_create_params.py index c0f9034d..1088aab3 100644 --- a/portkey_ai/_vendor/openai/types/batch_create_params.py +++ b/portkey_ai/_vendor/openai/types/batch_create_params.py @@ -17,13 +17,15 @@ class BatchCreateParams(TypedDict, total=False): Currently only `24h` is supported. """ - endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]] + endpoint: Required[ + Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"] + ] """The endpoint to be used for all requests in the batch. - Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and - `/v1/completions` are supported. Note that `/v1/embeddings` batches are also - restricted to a maximum of 50,000 embedding inputs across all requests in the - batch. + Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, + `/v1/completions`, and `/v1/moderations` are supported. Note that + `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding + inputs across all requests in the batch. """ input_file_id: Required[str] @@ -56,6 +58,10 @@ class BatchCreateParams(TypedDict, total=False): class OutputExpiresAfter(TypedDict, total=False): + """ + The expiration policy for the output and/or error file that are generated for a batch. + """ + anchor: Required[Literal["created_at"]] """Anchor timestamp after which the expiration policy applies. 
diff --git a/portkey_ai/_vendor/openai/types/batch_request_counts.py b/portkey_ai/_vendor/openai/types/batch_request_counts.py index 068b071a..64a57074 100644 --- a/portkey_ai/_vendor/openai/types/batch_request_counts.py +++ b/portkey_ai/_vendor/openai/types/batch_request_counts.py @@ -6,6 +6,8 @@ class BatchRequestCounts(BaseModel): + """The request counts for different statuses within the batch.""" + completed: int """Number of requests that have been completed successfully.""" diff --git a/portkey_ai/_vendor/openai/types/batch_usage.py b/portkey_ai/_vendor/openai/types/batch_usage.py index 578f64a5..d68d7110 100644 --- a/portkey_ai/_vendor/openai/types/batch_usage.py +++ b/portkey_ai/_vendor/openai/types/batch_usage.py @@ -6,6 +6,8 @@ class InputTokensDetails(BaseModel): + """A detailed breakdown of the input tokens.""" + cached_tokens: int """The number of tokens that were retrieved from the cache. @@ -14,11 +16,19 @@ class InputTokensDetails(BaseModel): class OutputTokensDetails(BaseModel): + """A detailed breakdown of the output tokens.""" + reasoning_tokens: int """The number of reasoning tokens.""" class BatchUsage(BaseModel): + """ + Represents token usage details including input tokens, output tokens, a + breakdown of output tokens, and the total tokens used. Only populated on + batches created after September 7, 2025. + """ + input_tokens: int """The number of input tokens.""" diff --git a/portkey_ai/_vendor/openai/types/beta/assistant.py b/portkey_ai/_vendor/openai/types/beta/assistant.py index 58421e0f..61344f85 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant.py @@ -31,12 +31,19 @@ class ToolResourcesFileSearch(BaseModel): class ToolResources(BaseModel): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: Optional[ToolResourcesCodeInterpreter] = None file_search: Optional[ToolResourcesFileSearch] = None class Assistant(BaseModel): + """Represents an `assistant` that can call the model and use tools.""" + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py index 6fb1551f..461d871a 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py @@ -62,12 +62,17 @@ class AssistantCreateParams(TypedDict, total=False): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" response_format: Optional[AssistantResponseFormatOptionParam] @@ -136,6 +141,11 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + """The default strategy. + + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -211,6 +221,11 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py b/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py index 41d3a0c5..87620a11 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py @@ -43,6 +43,10 @@ class ThreadCreated(BaseModel): + """ + Occurs when a new [thread](https://platform.openai.com/docs/api-reference/threads/object) is created. + """ + data: Thread """ Represents a thread that contains @@ -56,6 +60,10 @@ class ThreadCreated(BaseModel): class ThreadRunCreated(BaseModel): + """ + Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + """ + data: Run """ Represents an execution run on a @@ -66,6 +74,10 @@ class ThreadRunCreated(BaseModel): class ThreadRunQueued(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `queued` status. 
+ """ + data: Run """ Represents an execution run on a @@ -76,6 +88,10 @@ class ThreadRunQueued(BaseModel): class ThreadRunInProgress(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an `in_progress` status. + """ + data: Run """ Represents an execution run on a @@ -86,6 +102,10 @@ class ThreadRunInProgress(BaseModel): class ThreadRunRequiresAction(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `requires_action` status. + """ + data: Run """ Represents an execution run on a @@ -96,6 +116,10 @@ class ThreadRunRequiresAction(BaseModel): class ThreadRunCompleted(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed. + """ + data: Run """ Represents an execution run on a @@ -106,6 +130,10 @@ class ThreadRunCompleted(BaseModel): class ThreadRunIncomplete(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with status `incomplete`. + """ + data: Run """ Represents an execution run on a @@ -116,6 +144,10 @@ class ThreadRunIncomplete(BaseModel): class ThreadRunFailed(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails. + """ + data: Run """ Represents an execution run on a @@ -126,6 +158,10 @@ class ThreadRunFailed(BaseModel): class ThreadRunCancelling(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `cancelling` status. + """ + data: Run """ Represents an execution run on a @@ -136,6 +172,10 @@ class ThreadRunCancelling(BaseModel): class ThreadRunCancelled(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled. 
+ """ + data: Run """ Represents an execution run on a @@ -146,6 +186,10 @@ class ThreadRunCancelled(BaseModel): class ThreadRunExpired(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires. + """ + data: Run """ Represents an execution run on a @@ -156,6 +200,10 @@ class ThreadRunExpired(BaseModel): class ThreadRunStepCreated(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -163,6 +211,10 @@ class ThreadRunStepCreated(BaseModel): class ThreadRunStepInProgress(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -170,6 +222,10 @@ class ThreadRunStepInProgress(BaseModel): class ThreadRunStepDelta(BaseModel): + """ + Occurs when parts of a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being streamed. + """ + data: RunStepDeltaEvent """Represents a run step delta i.e. @@ -180,6 +236,10 @@ class ThreadRunStepDelta(BaseModel): class ThreadRunStepCompleted(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is completed. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -187,6 +247,10 @@ class ThreadRunStepCompleted(BaseModel): class ThreadRunStepFailed(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -194,6 +258,10 @@ class ThreadRunStepFailed(BaseModel): class ThreadRunStepCancelled(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is cancelled. 
+ """ + data: RunStep """Represents a step in execution of a run.""" @@ -201,6 +269,10 @@ class ThreadRunStepCancelled(BaseModel): class ThreadRunStepExpired(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -208,6 +280,10 @@ class ThreadRunStepExpired(BaseModel): class ThreadMessageCreated(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is created. + """ + data: Message """ Represents a message within a @@ -218,6 +294,10 @@ class ThreadMessageCreated(BaseModel): class ThreadMessageInProgress(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves to an `in_progress` state. + """ + data: Message """ Represents a message within a @@ -228,6 +308,10 @@ class ThreadMessageInProgress(BaseModel): class ThreadMessageDelta(BaseModel): + """ + Occurs when parts of a [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed. + """ + data: MessageDeltaEvent """Represents a message delta i.e. @@ -238,6 +322,10 @@ class ThreadMessageDelta(BaseModel): class ThreadMessageCompleted(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is completed. + """ + data: Message """ Represents a message within a @@ -248,6 +336,10 @@ class ThreadMessageCompleted(BaseModel): class ThreadMessageIncomplete(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends before it is completed. + """ + data: Message """ Represents a message within a @@ -258,6 +350,10 @@ class ThreadMessageIncomplete(BaseModel): class ErrorEvent(BaseModel): + """ + Occurs when an [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. 
+ """ + data: ErrorObject event: Literal["error"] diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice.py b/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice.py index d73439f0..cabded0b 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice.py @@ -10,6 +10,11 @@ class AssistantToolChoice(BaseModel): + """Specifies a tool the model should use. + + Use to force the model to call a specific tool. + """ + type: Literal["function", "code_interpreter", "file_search"] """The type of the tool. If type is `function`, the function name must be set""" diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_param.py b/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_param.py index 904f489e..05916bb6 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_param.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_tool_choice_param.py @@ -10,6 +10,11 @@ class AssistantToolChoiceParam(TypedDict, total=False): + """Specifies a tool the model should use. + + Use to force the model to call a specific tool. + """ + type: Required[Literal["function", "code_interpreter", "file_search"]] """The type of the tool. If type is `function`, the function name must be set""" diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py index 6d20b8e0..7896fcd9 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py @@ -97,12 +97,17 @@ class AssistantUpdateParams(TypedDict, total=False): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ response_format: Optional[AssistantResponseFormatOptionParam] @@ -182,6 +187,11 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
+ """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session.py index 82baea21..9db9fc93 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session.py @@ -12,6 +12,8 @@ class ChatSession(BaseModel): + """Represents a ChatKit session and its resolved configuration.""" + id: str """Identifier for the ChatKit session.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py index 4fa96a44..1d95255e 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py @@ -6,5 +6,7 @@ class ChatSessionAutomaticThreadTitling(BaseModel): + """Automatic thread title preferences for the session.""" + enabled: bool """Whether automatic thread titling is enabled.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration.py index 6205b172..f9fa0cef 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration.py @@ -9,6 +9,8 @@ class ChatSessionChatKitConfiguration(BaseModel): + """ChatKit configuration for the session.""" + automatic_thread_titling: ChatSessionAutomaticThreadTitling """Automatic thread titling preferences.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py index 0a5ae80a..834de71e 100644 
--- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py @@ -8,11 +8,21 @@ class AutomaticThreadTitling(TypedDict, total=False): + """Configuration for automatic thread titling. + + When omitted, automatic thread titling is enabled by default. + """ + enabled: bool """Enable automatic thread title generation. Defaults to true.""" class FileUpload(TypedDict, total=False): + """Configuration for upload enablement and limits. + + When omitted, uploads are disabled by default (max_files 10, max_file_size 512 MB). + """ + enabled: bool """Enable uploads for this session. Defaults to false.""" @@ -27,6 +37,11 @@ class FileUpload(TypedDict, total=False): class History(TypedDict, total=False): + """Configuration for chat history retention. + + When omitted, history is enabled by default with no limit on recent_threads (null). + """ + enabled: bool """Enables chat users to access previous ChatKit threads. Defaults to true.""" @@ -38,6 +53,8 @@ class History(TypedDict, total=False): class ChatSessionChatKitConfigurationParam(TypedDict, total=False): + """Optional per-session configuration settings for ChatKit behavior.""" + automatic_thread_titling: AutomaticThreadTitling """Configuration for automatic thread titling. diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_expires_after_param.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_expires_after_param.py index ceb5a984..c1de8a76 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_expires_after_param.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_expires_after_param.py @@ -8,6 +8,8 @@ class ChatSessionExpiresAfterParam(TypedDict, total=False): + """Controls when the session expires relative to an anchor timestamp.""" + anchor: Required[Literal["created_at"]] """Base timestamp used to calculate expiration. 
Currently fixed to `created_at`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_file_upload.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_file_upload.py index c63c7a01..0275859d 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_file_upload.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_file_upload.py @@ -8,6 +8,8 @@ class ChatSessionFileUpload(BaseModel): + """Upload permissions and limits applied to the session.""" + enabled: bool """Indicates if uploads are enabled for the session.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_history.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_history.py index 66ebe008..54690009 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_history.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_history.py @@ -8,6 +8,8 @@ class ChatSessionHistory(BaseModel): + """History retention preferences returned for the session.""" + enabled: bool """Indicates if chat history is persisted for the session.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits.py index 392225e3..7c5bd94e 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits.py @@ -6,5 +6,7 @@ class ChatSessionRateLimits(BaseModel): + """Active per-minute request limit for the session.""" + max_requests_per_1_minute: int """Maximum allowed requests per one-minute window.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits_param.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits_param.py index 7894c064..578f20b0 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits_param.py +++ 
b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_rate_limits_param.py @@ -8,5 +8,7 @@ class ChatSessionRateLimitsParam(TypedDict, total=False): + """Controls request rate limits for the session.""" + max_requests_per_1_minute: int """Maximum number of requests allowed per minute for the session. Defaults to 10.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_workflow_param.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_workflow_param.py index 55429221..abf52de5 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_workflow_param.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chat_session_workflow_param.py @@ -9,11 +9,18 @@ class Tracing(TypedDict, total=False): + """Optional tracing overrides for the workflow invocation. + + When omitted, tracing is enabled by default. + """ + enabled: bool """Whether tracing is enabled during the session. Defaults to true.""" class ChatSessionWorkflowParam(TypedDict, total=False): + """Workflow reference and overrides applied to the chat session.""" + id: Required[str] """Identifier for the workflow invoked by the session.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_attachment.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_attachment.py index 8d8ad3e1..7750925e 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_attachment.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_attachment.py @@ -9,6 +9,8 @@ class ChatKitAttachment(BaseModel): + """Attachment metadata included on thread items.""" + id: str """Identifier for the attachment.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_response_output_text.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_response_output_text.py index 116b797e..1348fed2 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_response_output_text.py +++ 
b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_response_output_text.py @@ -17,6 +17,8 @@ class AnnotationFileSource(BaseModel): + """File attachment referenced by the annotation.""" + filename: str """Filename referenced by the annotation.""" @@ -25,6 +27,8 @@ class AnnotationFileSource(BaseModel): class AnnotationFile(BaseModel): + """Annotation that references an uploaded file.""" + source: AnnotationFileSource """File attachment referenced by the annotation.""" @@ -33,6 +37,8 @@ class AnnotationFile(BaseModel): class AnnotationURLSource(BaseModel): + """URL referenced by the annotation.""" + type: Literal["url"] """Type discriminator that is always `url`.""" @@ -41,6 +47,8 @@ class AnnotationURLSource(BaseModel): class AnnotationURL(BaseModel): + """Annotation that references a URL.""" + source: AnnotationURLSource """URL referenced by the annotation.""" @@ -52,6 +60,8 @@ class AnnotationURL(BaseModel): class ChatKitResponseOutputText(BaseModel): + """Assistant response text accompanied by optional annotations.""" + annotations: List[Annotation] """Ordered list of annotations attached to the response text.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread.py index abd1a9ea..32075233 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread.py @@ -10,11 +10,15 @@ class StatusActive(BaseModel): + """Indicates that a thread is active.""" + type: Literal["active"] """Status discriminator that is always `active`.""" class StatusLocked(BaseModel): + """Indicates that a thread is locked and cannot accept new input.""" + reason: Optional[str] = None """Reason that the thread was locked. 
Defaults to null when no reason is recorded.""" @@ -23,6 +27,8 @@ class StatusLocked(BaseModel): class StatusClosed(BaseModel): + """Indicates that a thread has been closed.""" + reason: Optional[str] = None """Reason that the thread was closed. Defaults to null when no reason is recorded.""" @@ -34,6 +40,8 @@ class StatusClosed(BaseModel): class ChatKitThread(BaseModel): + """Represents a ChatKit thread and its current status.""" + id: str """Identifier of the thread.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py index f4afd053..337f53a8 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py @@ -10,6 +10,8 @@ class ChatKitThreadAssistantMessageItem(BaseModel): + """Assistant-authored message within a thread.""" + id: str """Identifier of the thread item.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_item_list.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_item_list.py index 173bd150..049ca544 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_item_list.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_item_list.py @@ -20,6 +20,8 @@ class DataChatKitClientToolCall(BaseModel): + """Record of a client side tool invocation initiated by the assistant.""" + id: str """Identifier of the thread item.""" @@ -55,6 +57,8 @@ class DataChatKitClientToolCall(BaseModel): class DataChatKitTask(BaseModel): + """Task emitted by the workflow to show progress and status updates.""" + id: str """Identifier of the thread item.""" @@ -81,6 +85,8 @@ class DataChatKitTask(BaseModel): class DataChatKitTaskGroupTask(BaseModel): + """Task entry that appears within a TaskGroup.""" + heading: Optional[str] = None """Optional 
heading for the grouped task. Defaults to null when not provided.""" @@ -95,6 +101,8 @@ class DataChatKitTaskGroupTask(BaseModel): class DataChatKitTaskGroup(BaseModel): + """Collection of workflow tasks grouped together in the thread.""" + id: str """Identifier of the thread item.""" @@ -128,6 +136,8 @@ class DataChatKitTaskGroup(BaseModel): class ChatKitThreadItemList(BaseModel): + """A paginated list of thread items rendered for the ChatKit API.""" + data: List[Data] """A list of items""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_user_message_item.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_user_message_item.py index 233d0723..d7552c4f 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_user_message_item.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_thread_user_message_item.py @@ -18,6 +18,8 @@ class ContentInputText(BaseModel): + """Text block that a user contributed to the thread.""" + text: str """Plain-text content supplied by the user.""" @@ -26,6 +28,8 @@ class ContentInputText(BaseModel): class ContentQuotedText(BaseModel): + """Quoted snippet that the user referenced in their message.""" + text: str """Quoted text content.""" @@ -37,11 +41,15 @@ class ContentQuotedText(BaseModel): class InferenceOptionsToolChoice(BaseModel): + """Preferred tool to invoke. Defaults to null when ChatKit should auto-select.""" + id: str """Identifier of the requested tool.""" class InferenceOptions(BaseModel): + """Inference overrides applied to the message. Defaults to null when unset.""" + model: Optional[str] = None """Model name that generated the response. 
@@ -53,6 +61,8 @@ class InferenceOptions(BaseModel): class ChatKitThreadUserMessageItem(BaseModel): + """User-authored messages within a thread.""" + id: str """Identifier of the thread item.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_widget_item.py b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_widget_item.py index c7f18225..a269c736 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_widget_item.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/chatkit_widget_item.py @@ -8,6 +8,8 @@ class ChatKitWidgetItem(BaseModel): + """Thread item that renders a widget payload.""" + id: str """Identifier of the thread item.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit/thread_delete_response.py b/portkey_ai/_vendor/openai/types/beta/chatkit/thread_delete_response.py index 03fdec9c..45b686bf 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit/thread_delete_response.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit/thread_delete_response.py @@ -8,6 +8,8 @@ class ThreadDeleteResponse(BaseModel): + """Confirmation payload returned after deleting a thread.""" + id: str """Identifier of the deleted thread.""" diff --git a/portkey_ai/_vendor/openai/types/beta/chatkit_workflow.py b/portkey_ai/_vendor/openai/types/beta/chatkit_workflow.py index 00fbcf41..b6f5b55b 100644 --- a/portkey_ai/_vendor/openai/types/beta/chatkit_workflow.py +++ b/portkey_ai/_vendor/openai/types/beta/chatkit_workflow.py @@ -8,11 +8,15 @@ class Tracing(BaseModel): + """Tracing settings applied to the workflow.""" + enabled: bool """Indicates whether tracing is enabled.""" class ChatKitWorkflow(BaseModel): + """Workflow metadata and state returned for the session.""" + id: str """Identifier of the workflow backing the session.""" diff --git a/portkey_ai/_vendor/openai/types/beta/file_search_tool.py b/portkey_ai/_vendor/openai/types/beta/file_search_tool.py index 89fc16c0..9e33249e 100644 --- 
a/portkey_ai/_vendor/openai/types/beta/file_search_tool.py +++ b/portkey_ai/_vendor/openai/types/beta/file_search_tool.py @@ -9,6 +9,13 @@ class FileSearchRankingOptions(BaseModel): + """The ranking options for the file search. + + If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + """ + score_threshold: float """The score threshold for the file search. @@ -23,6 +30,8 @@ class FileSearchRankingOptions(BaseModel): class FileSearch(BaseModel): + """Overrides for the file search tool.""" + max_num_results: Optional[int] = None """The maximum number of results the file search tool should output. diff --git a/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py b/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py index c73d0af7..9906b4b2 100644 --- a/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py +++ b/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py @@ -8,6 +8,13 @@ class FileSearchRankingOptions(TypedDict, total=False): + """The ranking options for the file search. + + If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + """ + score_threshold: Required[float] """The score threshold for the file search. @@ -22,6 +29,8 @@ class FileSearchRankingOptions(TypedDict, total=False): class FileSearch(TypedDict, total=False): + """Overrides for the file search tool.""" + max_num_results: int """The maximum number of results the file search tool should output. 
diff --git a/portkey_ai/_vendor/openai/types/beta/thread.py b/portkey_ai/_vendor/openai/types/beta/thread.py index 789f66e4..83d90551 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread.py +++ b/portkey_ai/_vendor/openai/types/beta/thread.py @@ -29,12 +29,20 @@ class ToolResourcesFileSearch(BaseModel): class ToolResources(BaseModel): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: Optional[ToolResourcesCodeInterpreter] = None file_search: Optional[ToolResourcesFileSearch] = None class Thread(BaseModel): + """ + Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages). + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py index 734e5e2a..c0aee3e9 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py @@ -227,6 +227,11 @@ class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + """The default strategy. + + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -303,12 +308,22 @@ class ThreadToolResourcesFileSearch(TypedDict, total=False): class ThreadToolResources(TypedDict, total=False): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ThreadToolResourcesCodeInterpreter file_search: ThreadToolResourcesFileSearch class Thread(TypedDict, total=False): + """Options to create a new thread. + + If no thread is provided when running a + request, an empty thread will be created. + """ + messages: Iterable[ThreadMessage] """ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to @@ -354,12 +369,22 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch class TruncationStrategy(TypedDict, total=False): + """Controls for how a thread will be truncated prior to the run. + + Use this to control the initial context window of the run. + """ + type: Required[Literal["auto", "last_messages"]] """The truncation strategy to use for the thread. diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py index 8fd9f38d..ef83e3d4 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py @@ -106,6 +106,11 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + """The default strategy. + + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. 
+ """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -181,6 +186,10 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/portkey_ai/_vendor/openai/types/beta/thread_update_params.py b/portkey_ai/_vendor/openai/types/beta/thread_update_params.py index 464ea8d7..e000edc0 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_update_params.py @@ -51,6 +51,10 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/portkey_ai/_vendor/openai/types/beta/threads/file_citation_annotation.py b/portkey_ai/_vendor/openai/types/beta/threads/file_citation_annotation.py index c3085aed..929da0ac 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/file_citation_annotation.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/file_citation_annotation.py @@ -13,6 +13,10 @@ class FileCitation(BaseModel): class FileCitationAnnotation(BaseModel): + """ + A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. 
+ """ + end_index: int file_citation: FileCitation diff --git a/portkey_ai/_vendor/openai/types/beta/threads/file_citation_delta_annotation.py b/portkey_ai/_vendor/openai/types/beta/threads/file_citation_delta_annotation.py index b40c0d12..591e3223 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/file_citation_delta_annotation.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/file_citation_delta_annotation.py @@ -17,6 +17,10 @@ class FileCitation(BaseModel): class FileCitationDeltaAnnotation(BaseModel): + """ + A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + """ + index: int """The index of the annotation in the text content part.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/file_path_annotation.py b/portkey_ai/_vendor/openai/types/beta/threads/file_path_annotation.py index 9812737e..d3c144c2 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/file_path_annotation.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/file_path_annotation.py @@ -13,6 +13,10 @@ class FilePath(BaseModel): class FilePathAnnotation(BaseModel): + """ + A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + """ + end_index: int file_path: FilePath diff --git a/portkey_ai/_vendor/openai/types/beta/threads/file_path_delta_annotation.py b/portkey_ai/_vendor/openai/types/beta/threads/file_path_delta_annotation.py index 0cbb445e..54168747 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/file_path_delta_annotation.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/file_path_delta_annotation.py @@ -14,6 +14,10 @@ class FilePath(BaseModel): class FilePathDeltaAnnotation(BaseModel): + """ + A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. 
+ """ + index: int """The index of the annotation in the text content part.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block.py b/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block.py index a9099990..5a082cd4 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block.py @@ -9,6 +9,10 @@ class ImageFileContentBlock(BaseModel): + """ + References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. + """ + image_file: ImageFile type: Literal["image_file"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block_param.py b/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block_param.py index 48d94bee..da095a5f 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block_param.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/image_file_content_block_param.py @@ -10,6 +10,10 @@ class ImageFileContentBlockParam(TypedDict, total=False): + """ + References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. + """ + image_file: Required[ImageFileParam] type: Required[Literal["image_file"]] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/image_file_delta_block.py b/portkey_ai/_vendor/openai/types/beta/threads/image_file_delta_block.py index 0a5a2e8a..ed17f7ff 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/image_file_delta_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/image_file_delta_block.py @@ -10,6 +10,10 @@ class ImageFileDeltaBlock(BaseModel): + """ + References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. 
+ """ + index: int """The index of the content part in the message.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block.py b/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block.py index 40a16c1d..8dc1f16a 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block.py @@ -9,6 +9,8 @@ class ImageURLContentBlock(BaseModel): + """References an image URL in the content of a message.""" + image_url: ImageURL type: Literal["image_url"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block_param.py b/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block_param.py index 585b926c..a5c59e02 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block_param.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/image_url_content_block_param.py @@ -10,6 +10,8 @@ class ImageURLContentBlockParam(TypedDict, total=False): + """References an image URL in the content of a message.""" + image_url: Required[ImageURLParam] type: Required[Literal["image_url"]] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/image_url_delta_block.py b/portkey_ai/_vendor/openai/types/beta/threads/image_url_delta_block.py index 5252da12..3128d8e7 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/image_url_delta_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/image_url_delta_block.py @@ -10,6 +10,8 @@ class ImageURLDeltaBlock(BaseModel): + """References an image URL in the content of a message.""" + index: int """The index of the content part in the message.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/message.py b/portkey_ai/_vendor/openai/types/beta/threads/message.py index 4a05a128..fc7f73f0 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/message.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/message.py @@ -34,11 +34,17 @@ class 
Attachment(BaseModel): class IncompleteDetails(BaseModel): + """On an incomplete message, details about why the message is incomplete.""" + reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"] """The reason the message is incomplete.""" class Message(BaseModel): + """ + Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads). + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/message_delta.py b/portkey_ai/_vendor/openai/types/beta/threads/message_delta.py index ecd0dfe3..fdeebb3a 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/message_delta.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/message_delta.py @@ -10,6 +10,8 @@ class MessageDelta(BaseModel): + """The delta containing the fields that have changed on the Message.""" + content: Optional[List[MessageContentDelta]] = None """The content of the message in array of text and/or images.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/message_delta_event.py b/portkey_ai/_vendor/openai/types/beta/threads/message_delta_event.py index 3811cef6..d5ba1e17 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/message_delta_event.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/message_delta_event.py @@ -9,6 +9,11 @@ class MessageDeltaEvent(BaseModel): + """Represents a message delta i.e. + + any changed fields on a message during streaming. 
+ """ + id: str """The identifier of the message, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/refusal_content_block.py b/portkey_ai/_vendor/openai/types/beta/threads/refusal_content_block.py index d54f9485..b4512b3c 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/refusal_content_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/refusal_content_block.py @@ -8,6 +8,8 @@ class RefusalContentBlock(BaseModel): + """The refusal content generated by the assistant.""" + refusal: str type: Literal["refusal"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/refusal_delta_block.py b/portkey_ai/_vendor/openai/types/beta/threads/refusal_delta_block.py index dbd8e626..85a1f08d 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/refusal_delta_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/refusal_delta_block.py @@ -9,6 +9,8 @@ class RefusalDeltaBlock(BaseModel): + """The refusal content that is part of a message.""" + index: int """The index of the refusal part in the message.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/required_action_function_tool_call.py b/portkey_ai/_vendor/openai/types/beta/threads/required_action_function_tool_call.py index a24dfd06..3cec8514 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/required_action_function_tool_call.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/required_action_function_tool_call.py @@ -8,6 +8,8 @@ class Function(BaseModel): + """The function definition.""" + arguments: str """The arguments that the model expects you to pass to the function.""" @@ -16,6 +18,8 @@ class Function(BaseModel): class RequiredActionFunctionToolCall(BaseModel): + """Tool call objects""" + id: str """The ID of the tool call. 
diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run.py b/portkey_ai/_vendor/openai/types/beta/threads/run.py index c545cc37..8a88fa16 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run.py @@ -23,6 +23,11 @@ class IncompleteDetails(BaseModel): + """Details on why the run is incomplete. + + Will be `null` if the run is not incomplete. + """ + reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None """The reason why the run is incomplete. @@ -32,6 +37,8 @@ class IncompleteDetails(BaseModel): class LastError(BaseModel): + """The last error associated with this run. Will be `null` if there are no errors.""" + code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"] """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.""" @@ -40,11 +47,18 @@ class LastError(BaseModel): class RequiredActionSubmitToolOutputs(BaseModel): + """Details on the tool outputs needed for this run to continue.""" + tool_calls: List[RequiredActionFunctionToolCall] """A list of the relevant tool calls.""" class RequiredAction(BaseModel): + """Details on the action required to continue the run. + + Will be `null` if no action is required. + """ + submit_tool_outputs: RequiredActionSubmitToolOutputs """Details on the tool outputs needed for this run to continue.""" @@ -53,6 +67,11 @@ class RequiredAction(BaseModel): class TruncationStrategy(BaseModel): + """Controls for how a thread will be truncated prior to the run. + + Use this to control the initial context window of the run. + """ + type: Literal["auto", "last_messages"] """The truncation strategy to use for the thread. @@ -70,6 +89,11 @@ class TruncationStrategy(BaseModel): class Usage(BaseModel): + """Usage statistics related to the run. + + This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). 
+ """ + completion_tokens: int """Number of completion tokens used over the course of the run.""" @@ -81,6 +105,10 @@ class Usage(BaseModel): class Run(BaseModel): + """ + Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads). + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py b/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py index 3190c8b3..376afc9a 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py @@ -111,12 +111,17 @@ class RunCreateParamsBase(TypedDict, total=False): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" response_format: Optional[AssistantResponseFormatOptionParam] @@ -227,6 +232,11 @@ class AdditionalMessage(TypedDict, total=False): class TruncationStrategy(TypedDict, total=False): + """Controls for how a thread will be truncated prior to the run. + + Use this to control the initial context window of the run. + """ + type: Required[Literal["auto", "last_messages"]] """The truncation strategy to use for the thread. diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_logs.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_logs.py index 0bf8c1da..722fd2b4 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_logs.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_logs.py @@ -9,6 +9,8 @@ class CodeInterpreterLogs(BaseModel): + """Text output from the Code Interpreter tool call as part of a run step.""" + index: int """The index of the output in the outputs array.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call.py index e7df4e19..bc78b5fa 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call.py @@ -17,6 +17,8 @@ class CodeInterpreterOutputLogs(BaseModel): + """Text output from the Code Interpreter tool call as part of a run step.""" + logs: str """The text output from the Code Interpreter tool call.""" @@ -45,6 +47,8 @@ class CodeInterpreterOutputImage(BaseModel): class CodeInterpreter(BaseModel): + """The Code Interpreter tool call definition.""" + input: str """The input to the Code Interpreter tool call.""" @@ -57,6 +61,8 @@ class CodeInterpreter(BaseModel): class CodeInterpreterToolCall(BaseModel): + """Details of the Code Interpreter tool call the run step was involved in.""" + id: str """The ID of the tool call.""" 
diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py index 9d7a1563..efedac79 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py @@ -16,6 +16,8 @@ class CodeInterpreter(BaseModel): + """The Code Interpreter tool call definition.""" + input: Optional[str] = None """The input to the Code Interpreter tool call.""" @@ -28,6 +30,8 @@ class CodeInterpreter(BaseModel): class CodeInterpreterToolCallDelta(BaseModel): + """Details of the Code Interpreter tool call the run step was involved in.""" + index: int """The index of the tool call in the tool calls array.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py index a2068daa..291a93ec 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py @@ -15,6 +15,8 @@ class FileSearchRankingOptions(BaseModel): + """The ranking options for the file search.""" + ranker: Literal["auto", "default_2024_08_21"] """The ranker to use for the file search. 
@@ -37,6 +39,8 @@ class FileSearchResultContent(BaseModel): class FileSearchResult(BaseModel): + """A result instance of the file search.""" + file_id: str """The ID of the file that result was found in.""" @@ -57,6 +61,8 @@ class FileSearchResult(BaseModel): class FileSearch(BaseModel): + """For now, this is always going to be an empty object.""" + ranking_options: Optional[FileSearchRankingOptions] = None """The ranking options for the file search.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call.py index b1d354f8..dd0e22cf 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call.py @@ -9,6 +9,8 @@ class Function(BaseModel): + """The definition of the function that was called.""" + arguments: str """The arguments passed to the function.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call_delta.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call_delta.py index faaf026f..4107e1b8 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call_delta.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/function_tool_call_delta.py @@ -9,6 +9,8 @@ class Function(BaseModel): + """The definition of the function that was called.""" + arguments: Optional[str] = None """The arguments passed to the function.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/message_creation_step_details.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/message_creation_step_details.py index 73439079..cd925b57 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/message_creation_step_details.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/message_creation_step_details.py @@ -13,6 +13,8 @@ class MessageCreation(BaseModel): class 
MessageCreationStepDetails(BaseModel): + """Details of the message creation by the run step.""" + message_creation: MessageCreation type: Literal["message_creation"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py index b5f380c7..97451229 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py @@ -13,6 +13,11 @@ class LastError(BaseModel): + """The last error associated with this run step. + + Will be `null` if there are no errors. + """ + code: Literal["server_error", "rate_limit_exceeded"] """One of `server_error` or `rate_limit_exceeded`.""" @@ -26,6 +31,11 @@ class LastError(BaseModel): class Usage(BaseModel): + """Usage statistics related to the run step. + + This value will be `null` while the run step's status is `in_progress`. + """ + completion_tokens: int """Number of completion tokens used over the course of the run step.""" @@ -37,6 +47,8 @@ class Usage(BaseModel): class RunStep(BaseModel): + """Represents a step in execution of a run.""" + id: str """The identifier of the run step, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta.py index 1139088f..2ccb770d 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta.py @@ -16,5 +16,7 @@ class RunStepDelta(BaseModel): + """The delta containing the fields that have changed on the run step.""" + step_details: Optional[StepDetails] = None """The details of the run step.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_event.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_event.py index 7f3f92aa..8f1c095a 100644 --- 
a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_event.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_event.py @@ -9,6 +9,11 @@ class RunStepDeltaEvent(BaseModel): + """Represents a run step delta i.e. + + any changed fields on a run step during streaming. + """ + id: str """The identifier of the run step, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_message_delta.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_message_delta.py index f58ed3d9..4b18277c 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_message_delta.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_delta_message_delta.py @@ -14,6 +14,8 @@ class MessageCreation(BaseModel): class RunStepDeltaMessageDelta(BaseModel): + """Details of the message creation by the run step.""" + type: Literal["message_creation"] """Always `message_creation`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_call_delta_object.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_call_delta_object.py index 189dce77..dbd1096a 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_call_delta_object.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_call_delta_object.py @@ -10,6 +10,8 @@ class ToolCallDeltaObject(BaseModel): + """Details of the tool call.""" + type: Literal["tool_calls"] """Always `tool_calls`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_calls_step_details.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_calls_step_details.py index a084d387..1f54a6aa 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_calls_step_details.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/tool_calls_step_details.py @@ -10,6 +10,8 @@ class ToolCallsStepDetails(BaseModel): + """Details of the tool call.""" + tool_calls: 
List[ToolCall] """An array of tool calls the run step was involved in. diff --git a/portkey_ai/_vendor/openai/types/beta/threads/text_content_block.py b/portkey_ai/_vendor/openai/types/beta/threads/text_content_block.py index 3706d6b9..b9b1368a 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/text_content_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/text_content_block.py @@ -9,6 +9,8 @@ class TextContentBlock(BaseModel): + """The text content that is part of a message.""" + text: Text type: Literal["text"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/text_content_block_param.py b/portkey_ai/_vendor/openai/types/beta/threads/text_content_block_param.py index 6313de32..22c86443 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/text_content_block_param.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/text_content_block_param.py @@ -8,6 +8,8 @@ class TextContentBlockParam(TypedDict, total=False): + """The text content that is part of a message.""" + text: Required[str] """Text content to be sent to the model""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/text_delta_block.py b/portkey_ai/_vendor/openai/types/beta/threads/text_delta_block.py index 586116e0..a3d339cc 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/text_delta_block.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/text_delta_block.py @@ -10,6 +10,8 @@ class TextDeltaBlock(BaseModel): + """The text content that is part of a message.""" + index: int """The index of the content part in the message.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion.py b/portkey_ai/_vendor/openai/types/chat/chat_completion.py index 6bc4bafe..31219aa8 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion.py @@ -12,6 +12,8 @@ class ChoiceLogprobs(BaseModel): + """Log probability information for the choice.""" + content: Optional[List[ChatCompletionTokenLogprob]] 
= None """A list of message content tokens with log probability information.""" @@ -41,6 +43,10 @@ class Choice(BaseModel): class ChatCompletion(BaseModel): + """ + Represents a chat completion response returned by model, based on the provided input. + """ + id: str """A unique identifier for the chat completion.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tool_choice_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tool_choice_param.py index 813e6293..c5ba2162 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tool_choice_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tool_choice_param.py @@ -10,6 +10,8 @@ class ChatCompletionAllowedToolChoiceParam(TypedDict, total=False): + """Constrains the tools available to the model to a pre-defined set.""" + allowed_tools: Required[ChatCompletionAllowedToolsParam] """Constrains the tools available to the model to a pre-defined set.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tools_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tools_param.py index d9b72d8f..ac31fcb5 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tools_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tools_param.py @@ -9,6 +9,8 @@ class ChatCompletionAllowedToolsParam(TypedDict, total=False): + """Constrains the tools available to the model to a pre-defined set.""" + mode: Required[Literal["auto", "required"]] """Constrains the tools available to the model to a pre-defined set. 
diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py index 1a08a959..16a21843 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py @@ -13,6 +13,11 @@ class Audio(TypedDict, total=False): + """ + Data about a previous audio response from the model. + [Learn more](https://platform.openai.com/docs/guides/audio). + """ + id: Required[str] """Unique identifier for a previous audio response from the model.""" @@ -21,6 +26,11 @@ class Audio(TypedDict, total=False): class FunctionCall(TypedDict, total=False): + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the model. + """ + arguments: Required[str] """ The arguments to call the function with, as generated by the model in JSON @@ -34,6 +44,8 @@ class FunctionCall(TypedDict, total=False): class ChatCompletionAssistantMessageParam(TypedDict, total=False): + """Messages sent by the model in response to user messages.""" + role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio.py index 232d6056..df346d8c 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio.py @@ -6,6 +6,11 @@ class ChatCompletionAudio(BaseModel): + """ + If the audio output modality is requested, this object contains data + about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio). 
+ """ + id: str """Unique identifier for this audio response.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py index b1576b41..1a73bb0c 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py @@ -9,6 +9,12 @@ class ChatCompletionAudioParam(TypedDict, total=False): + """Parameters for audio output. + + Required when audio output is requested with + `modalities: ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). + """ + format: Required[Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]] """Specifies the output audio format. @@ -20,6 +26,6 @@ class ChatCompletionAudioParam(TypedDict, total=False): ] """The voice the model uses to respond. - Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, - `onyx`, `sage`, and `shimmer`. + Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, + `fable`, `nova`, `onyx`, `sage`, `shimmer`, `marin`, and `cedar`. """ diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py index ea32d157..ecbfd0a5 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py @@ -19,6 +19,11 @@ class ChoiceDeltaFunctionCall(BaseModel): + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the model. 
+ """ + arguments: Optional[str] = None """ The arguments to call the function with, as generated by the model in JSON @@ -57,6 +62,8 @@ class ChoiceDeltaToolCall(BaseModel): class ChoiceDelta(BaseModel): + """A chat completion delta generated by streamed model responses.""" + content: Optional[str] = None """The contents of the chunk message.""" @@ -77,6 +84,8 @@ class ChoiceDelta(BaseModel): class ChoiceLogprobs(BaseModel): + """Log probability information for the choice.""" + content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" @@ -106,6 +115,12 @@ class Choice(BaseModel): class ChatCompletionChunk(BaseModel): + """ + Represents a streamed chunk of a chat completion response returned + by the model, based on the provided input. + [Learn more](https://platform.openai.com/docs/guides/streaming-responses). + """ + id: str """A unique identifier for the chat completion. Each chunk has the same ID.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image.py index c1386b9d..a636c51f 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image.py @@ -21,6 +21,8 @@ class ImageURL(BaseModel): class ChatCompletionContentPartImage(BaseModel): + """Learn about [image inputs](https://platform.openai.com/docs/guides/vision).""" + image_url: ImageURL type: Literal["image_url"] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py index 9d407324..a230a340 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py @@ -20,6 +20,8 @@ class 
ImageURL(TypedDict, total=False): class ChatCompletionContentPartImageParam(TypedDict, total=False): + """Learn about [image inputs](https://platform.openai.com/docs/guides/vision).""" + image_url: Required[ImageURL] type: Required[Literal["image_url"]] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_input_audio_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_input_audio_param.py index 0b1b1a80..98d9e3c5 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_input_audio_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_input_audio_param.py @@ -16,6 +16,8 @@ class InputAudio(TypedDict, total=False): class ChatCompletionContentPartInputAudioParam(TypedDict, total=False): + """Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).""" + input_audio: Required[InputAudio] type: Required[Literal["input_audio"]] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py index cbedc853..b8c710a9 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py @@ -27,6 +27,10 @@ class FileFile(TypedDict, total=False): class File(TypedDict, total=False): + """ + Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text generation. 
+ """ + file: Required[FileFile] type: Required[Literal["file"]] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py index f09f35f7..e6d1bf1e 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py @@ -8,6 +8,10 @@ class ChatCompletionContentPartText(BaseModel): + """ + Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). + """ + text: str """The text content.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text_param.py index a2707444..be69bf66 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text_param.py @@ -8,6 +8,10 @@ class ChatCompletionContentPartTextParam(TypedDict, total=False): + """ + Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). + """ + text: Required[str] """The text content.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_custom_tool_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_custom_tool_param.py index 14959ee4..d4f21ba0 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_custom_tool_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_custom_tool_param.py @@ -16,11 +16,15 @@ class CustomFormatText(TypedDict, total=False): + """Unconstrained free-form text.""" + type: Required[Literal["text"]] """Unconstrained text format. 
Always `text`.""" class CustomFormatGrammarGrammar(TypedDict, total=False): + """Your chosen grammar.""" + definition: Required[str] """The grammar definition.""" @@ -29,6 +33,8 @@ class CustomFormatGrammarGrammar(TypedDict, total=False): class CustomFormatGrammar(TypedDict, total=False): + """A grammar defined by the user.""" + grammar: Required[CustomFormatGrammarGrammar] """Your chosen grammar.""" @@ -40,6 +46,8 @@ class CustomFormatGrammar(TypedDict, total=False): class Custom(TypedDict, total=False): + """Properties of the custom tool.""" + name: Required[str] """The name of the custom tool, used to identify it in tool calls.""" @@ -51,6 +59,8 @@ class Custom(TypedDict, total=False): class ChatCompletionCustomToolParam(TypedDict, total=False): + """A custom tool that processes input using a specified format.""" + custom: Required[Custom] """Properties of the custom tool.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_developer_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_developer_message_param.py index 01e4fdb6..94fb3359 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_developer_message_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_developer_message_param.py @@ -11,6 +11,12 @@ class ChatCompletionDeveloperMessageParam(TypedDict, total=False): + """ + Developer-provided instructions that the model should follow, regardless of + messages sent by the user. With o1 models and newer, `developer` messages + replace the previous `system` messages. 
+ """ + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the developer message.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_function_call_option_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_call_option_param.py index 2bc014af..b1ca37bf 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_function_call_option_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_call_option_param.py @@ -8,5 +8,9 @@ class ChatCompletionFunctionCallOptionParam(TypedDict, total=False): + """ + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + """ + name: Required[str] """The name of the function to call.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool.py index 641568ac..5d43a1e8 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool.py @@ -9,6 +9,8 @@ class ChatCompletionFunctionTool(BaseModel): + """A function tool that can be used to generate a response.""" + function: FunctionDefinition type: Literal["function"] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py index a39feea5..d336e8c0 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py @@ -10,6 +10,8 @@ class ChatCompletionFunctionToolParam(TypedDict, total=False): + """A function tool that can be used to generate a response.""" + function: Required[FunctionDefinition] type: Required[Literal["function"]] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py 
b/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py index 5bb153fe..3f88f776 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py @@ -11,6 +11,8 @@ class AnnotationURLCitation(BaseModel): + """A URL citation when using web search.""" + end_index: int """The index of the last character of the URL citation in the message.""" @@ -25,6 +27,8 @@ class AnnotationURLCitation(BaseModel): class Annotation(BaseModel): + """A URL citation when using web search.""" + type: Literal["url_citation"] """The type of the URL citation. Always `url_citation`.""" @@ -33,6 +37,11 @@ class Annotation(BaseModel): class FunctionCall(BaseModel): + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the model. + """ + arguments: str """ The arguments to call the function with, as generated by the model in JSON @@ -46,6 +55,8 @@ class FunctionCall(BaseModel): class ChatCompletionMessage(BaseModel): + """A chat completion message generated by the model.""" + content: Optional[str] = None """The contents of the message.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call.py index b13c176a..9542d8b9 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call.py @@ -8,6 +8,8 @@ class Custom(BaseModel): + """The custom tool that the model called.""" + input: str """The input for the custom tool call generated by the model.""" @@ -16,6 +18,8 @@ class Custom(BaseModel): class ChatCompletionMessageCustomToolCall(BaseModel): + """A call to a custom tool created by the model.""" + id: str """The ID of the tool call.""" diff --git 
a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call_param.py index 3753e0f2..3d03f0a9 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call_param.py @@ -8,6 +8,8 @@ class Custom(TypedDict, total=False): + """The custom tool that the model called.""" + input: Required[str] """The input for the custom tool call generated by the model.""" @@ -16,6 +18,8 @@ class Custom(TypedDict, total=False): class ChatCompletionMessageCustomToolCallParam(TypedDict, total=False): + """A call to a custom tool created by the model.""" + id: Required[str] """The ID of the tool call.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call.py index d056d9af..e7278b92 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call.py @@ -8,6 +8,8 @@ class Function(BaseModel): + """The function that the model called.""" + arguments: str """ The arguments to call the function with, as generated by the model in JSON @@ -21,6 +23,8 @@ class Function(BaseModel): class ChatCompletionMessageFunctionToolCall(BaseModel): + """A call to a function tool created by the model.""" + id: str """The ID of the tool call.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call_param.py index 7c827edd..a8094ea6 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call_param.py +++ 
b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call_param.py @@ -8,6 +8,8 @@ class Function(TypedDict, total=False): + """The function that the model called.""" + arguments: Required[str] """ The arguments to call the function with, as generated by the model in JSON @@ -21,6 +23,8 @@ class Function(TypedDict, total=False): class ChatCompletionMessageFunctionToolCallParam(TypedDict, total=False): + """A call to a function tool created by the model.""" + id: Required[str] """The ID of the tool call.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py index 1c123c0a..147fb879 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py @@ -13,6 +13,11 @@ class Custom(TypedDict, total=False): class ChatCompletionNamedToolChoiceCustomParam(TypedDict, total=False): + """Specifies a tool the model should use. + + Use to force the model to call a specific custom tool. + """ + custom: Required[Custom] type: Required[Literal["custom"]] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py index ae1acfb9..f684fcea 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py @@ -13,6 +13,11 @@ class Function(TypedDict, total=False): class ChatCompletionNamedToolChoiceParam(TypedDict, total=False): + """Specifies a tool the model should use. + + Use to force the model to call a specific function. 
+ """ + function: Required[Function] type: Required[Literal["function"]] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_prediction_content_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_prediction_content_param.py index c44e6e36..6184a314 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_prediction_content_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_prediction_content_param.py @@ -11,6 +11,11 @@ class ChatCompletionPredictionContentParam(TypedDict, total=False): + """ + Static predicted output content, such as the content of a text file that is + being regenerated. + """ + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """ The content that should be matched when generating a model response. If diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py index 66134271..6a805cce 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py @@ -13,6 +13,8 @@ class ChatCompletionStoreMessage(ChatCompletionMessage): + """A chat completion message generated by the model.""" + id: str """The identifier of the chat message.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py index fc3191d2..9b881fff 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py @@ -8,6 +8,8 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False): + """Options for streaming response. Only set this when you set `stream: true`.""" + include_obfuscation: bool """When true, stream obfuscation will be enabled. 
diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_system_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_system_message_param.py index 172ccea0..9dcc5e07 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_system_message_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_system_message_param.py @@ -11,6 +11,12 @@ class ChatCompletionSystemMessageParam(TypedDict, total=False): + """ + Developer-provided instructions that the model should follow, regardless of + messages sent by the user. With o1 models and newer, use `developer` messages + for this purpose instead. + """ + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the system message.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_user_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_user_message_param.py index 5c15322a..c97ba535 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_user_message_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_user_message_param.py @@ -11,6 +11,11 @@ class ChatCompletionUserMessageParam(TypedDict, total=False): + """ + Messages sent by an end user, containing prompts or additional context + information. + """ + content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]] """The contents of the user message.""" diff --git a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py index 8b0fdd04..49cefb95 100644 --- a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py +++ b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py @@ -185,16 +185,29 @@ class CompletionCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). 
""" + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] + """The retention policy for the prompt cache. + + Set to `24h` to enable extended prompt caching, which keeps cached prefixes + active for longer, up to a maximum of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + """ + reasoning_effort: Optional[ReasoningEffort] """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ response_format: ResponseFormat @@ -369,6 +382,8 @@ class Function(TypedDict, total=False): class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): + """Approximate location parameters for the search.""" + city: str """Free text input for the city of the user, e.g. 
`San Francisco`.""" @@ -389,6 +404,8 @@ class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): class WebSearchOptionsUserLocation(TypedDict, total=False): + """Approximate location parameters for the search.""" + approximate: Required[WebSearchOptionsUserLocationApproximate] """Approximate location parameters for the search.""" @@ -397,6 +414,11 @@ class WebSearchOptionsUserLocation(TypedDict, total=False): class WebSearchOptions(TypedDict, total=False): + """ + This tool searches the web for relevant results to use in a response. + Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + """ + search_context_size: Literal["low", "medium", "high"] """ High level guidance for the amount of context window space to use for the diff --git a/portkey_ai/_vendor/openai/types/completion.py b/portkey_ai/_vendor/openai/types/completion.py index d3b3102a..ee59b2e2 100644 --- a/portkey_ai/_vendor/openai/types/completion.py +++ b/portkey_ai/_vendor/openai/types/completion.py @@ -11,6 +11,11 @@ class Completion(BaseModel): + """Represents a completion response from the API. + + Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). 
+ """ + id: str """A unique identifier for the completion.""" diff --git a/portkey_ai/_vendor/openai/types/completion_usage.py b/portkey_ai/_vendor/openai/types/completion_usage.py index d8c4e84c..9b5202da 100644 --- a/portkey_ai/_vendor/openai/types/completion_usage.py +++ b/portkey_ai/_vendor/openai/types/completion_usage.py @@ -8,6 +8,8 @@ class CompletionTokensDetails(BaseModel): + """Breakdown of tokens used in a completion.""" + accepted_prediction_tokens: Optional[int] = None """ When using Predicted Outputs, the number of tokens in the prediction that @@ -30,6 +32,8 @@ class CompletionTokensDetails(BaseModel): class PromptTokensDetails(BaseModel): + """Breakdown of tokens used in the prompt.""" + audio_tokens: Optional[int] = None """Audio input tokens present in the prompt.""" @@ -38,6 +42,8 @@ class PromptTokensDetails(BaseModel): class CompletionUsage(BaseModel): + """Usage statistics for the completion request.""" + completion_tokens: int """Number of tokens in the generated completion.""" diff --git a/portkey_ai/_vendor/openai/types/container_create_params.py b/portkey_ai/_vendor/openai/types/container_create_params.py index 01a48ac4..47101ecd 100644 --- a/portkey_ai/_vendor/openai/types/container_create_params.py +++ b/portkey_ai/_vendor/openai/types/container_create_params.py @@ -19,8 +19,13 @@ class ContainerCreateParams(TypedDict, total=False): file_ids: SequenceNotStr[str] """IDs of files to copy to the container.""" + memory_limit: Literal["1g", "4g", "16g", "64g"] + """Optional memory limit for the container. Defaults to "1g".""" + class ExpiresAfter(TypedDict, total=False): + """Container expiration time in seconds relative to the 'anchor' time.""" + anchor: Required[Literal["last_active_at"]] """Time anchor for the expiration time. 
diff --git a/portkey_ai/_vendor/openai/types/container_create_response.py b/portkey_ai/_vendor/openai/types/container_create_response.py index c0ccc45a..0ebcc040 100644 --- a/portkey_ai/_vendor/openai/types/container_create_response.py +++ b/portkey_ai/_vendor/openai/types/container_create_response.py @@ -9,6 +9,12 @@ class ExpiresAfter(BaseModel): + """ + The container will expire after this time period. + The anchor is the reference point for the expiration. + The minutes is the number of minutes after the anchor before the container expires. + """ + anchor: Optional[Literal["last_active_at"]] = None """The reference point for the expiration.""" @@ -38,3 +44,9 @@ class ContainerCreateResponse(BaseModel): point for the expiration. The minutes is the number of minutes after the anchor before the container expires. """ + + last_active_at: Optional[int] = None + """Unix timestamp (in seconds) when the container was last active.""" + + memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None + """The memory limit configured for the container.""" diff --git a/portkey_ai/_vendor/openai/types/container_list_response.py b/portkey_ai/_vendor/openai/types/container_list_response.py index 2d9c11d8..8f395482 100644 --- a/portkey_ai/_vendor/openai/types/container_list_response.py +++ b/portkey_ai/_vendor/openai/types/container_list_response.py @@ -9,6 +9,12 @@ class ExpiresAfter(BaseModel): + """ + The container will expire after this time period. + The anchor is the reference point for the expiration. + The minutes is the number of minutes after the anchor before the container expires. + """ + anchor: Optional[Literal["last_active_at"]] = None """The reference point for the expiration.""" @@ -38,3 +44,9 @@ class ContainerListResponse(BaseModel): point for the expiration. The minutes is the number of minutes after the anchor before the container expires. 
""" + + last_active_at: Optional[int] = None + """Unix timestamp (in seconds) when the container was last active.""" + + memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None + """The memory limit configured for the container.""" diff --git a/portkey_ai/_vendor/openai/types/container_retrieve_response.py b/portkey_ai/_vendor/openai/types/container_retrieve_response.py index eab291b3..9ba3e18c 100644 --- a/portkey_ai/_vendor/openai/types/container_retrieve_response.py +++ b/portkey_ai/_vendor/openai/types/container_retrieve_response.py @@ -9,6 +9,12 @@ class ExpiresAfter(BaseModel): + """ + The container will expire after this time period. + The anchor is the reference point for the expiration. + The minutes is the number of minutes after the anchor before the container expires. + """ + anchor: Optional[Literal["last_active_at"]] = None """The reference point for the expiration.""" @@ -38,3 +44,9 @@ class ContainerRetrieveResponse(BaseModel): point for the expiration. The minutes is the number of minutes after the anchor before the container expires. 
""" + + last_active_at: Optional[int] = None + """Unix timestamp (in seconds) when the container was last active.""" + + memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None + """The memory limit configured for the container.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py b/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py index 897b7ada..e42096eb 100644 --- a/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py +++ b/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py @@ -9,6 +9,8 @@ class ComputerScreenshotContent(BaseModel): + """A screenshot of a computer.""" + file_id: Optional[str] = None """The identifier of an uploaded file that contains the screenshot.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_item.py b/portkey_ai/_vendor/openai/types/conversations/conversation_item.py index 9e9fb400..46268d38 100644 --- a/portkey_ai/_vendor/openai/types/conversations/conversation_item.py +++ b/portkey_ai/_vendor/openai/types/conversations/conversation_item.py @@ -10,12 +10,16 @@ from ..responses.response_custom_tool_call import ResponseCustomToolCall from ..responses.response_computer_tool_call import ResponseComputerToolCall from ..responses.response_function_web_search import ResponseFunctionWebSearch +from ..responses.response_apply_patch_tool_call import ResponseApplyPatchToolCall from ..responses.response_file_search_tool_call import ResponseFileSearchToolCall from ..responses.response_custom_tool_call_output import ResponseCustomToolCallOutput from ..responses.response_function_tool_call_item import ResponseFunctionToolCallItem +from ..responses.response_function_shell_tool_call import ResponseFunctionShellToolCall from ..responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from ..responses.response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput 
from ..responses.response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem from ..responses.response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem +from ..responses.response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput __all__ = [ "ConversationItem", @@ -32,6 +36,8 @@ class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" + id: str """The unique ID of the image generation call.""" @@ -46,6 +52,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -66,6 +74,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -83,6 +93,8 @@ class LocalShellCall(BaseModel): class LocalShellCallOutput(BaseModel): + """The output of a local shell tool call.""" + id: str """The unique ID of the local shell tool call generated by the model.""" @@ -97,6 +109,8 @@ class LocalShellCallOutput(BaseModel): class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -111,6 +125,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -128,6 +144,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" @@ -145,6 +163,8 @@ class McpApprovalRequest(BaseModel): class McpApprovalResponse(BaseModel): + """A response to an MCP approval request.""" + id: str """The unique ID of the approval response""" @@ -162,6 +182,8 @@ class McpApprovalResponse(BaseModel): class McpCall(BaseModel): + """An 
invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" @@ -211,6 +233,10 @@ class McpCall(BaseModel): ResponseCodeInterpreterToolCall, LocalShellCall, LocalShellCallOutput, + ResponseFunctionShellToolCall, + ResponseFunctionShellToolCallOutput, + ResponseApplyPatchToolCall, + ResponseApplyPatchToolCallOutput, McpListTools, McpApprovalRequest, McpApprovalResponse, diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_item_list.py b/portkey_ai/_vendor/openai/types/conversations/conversation_item_list.py index 20091102..74d945d8 100644 --- a/portkey_ai/_vendor/openai/types/conversations/conversation_item_list.py +++ b/portkey_ai/_vendor/openai/types/conversations/conversation_item_list.py @@ -10,6 +10,8 @@ class ConversationItemList(BaseModel): + """A list of Conversation items.""" + data: List[ConversationItem] """A list of conversation items.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/message.py b/portkey_ai/_vendor/openai/types/conversations/message.py index dbf5a146..86c8860d 100644 --- a/portkey_ai/_vendor/openai/types/conversations/message.py +++ b/portkey_ai/_vendor/openai/types/conversations/message.py @@ -18,6 +18,8 @@ class ContentReasoningText(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -42,6 +44,8 @@ class ContentReasoningText(BaseModel): class Message(BaseModel): + """A message to or from the model.""" + id: str """The unique ID of the message.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/summary_text_content.py b/portkey_ai/_vendor/openai/types/conversations/summary_text_content.py index d357b157..6464a365 100644 --- a/portkey_ai/_vendor/openai/types/conversations/summary_text_content.py +++ b/portkey_ai/_vendor/openai/types/conversations/summary_text_content.py @@ -8,6 +8,8 @@ class SummaryTextContent(BaseModel): + """A summary text from the model.""" + text: str """A summary of the reasoning output from 
the model so far.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/text_content.py b/portkey_ai/_vendor/openai/types/conversations/text_content.py index f1ae0795..e602466c 100644 --- a/portkey_ai/_vendor/openai/types/conversations/text_content.py +++ b/portkey_ai/_vendor/openai/types/conversations/text_content.py @@ -8,6 +8,8 @@ class TextContent(BaseModel): + """A text content.""" + text: str type: Literal["text"] diff --git a/portkey_ai/_vendor/openai/types/create_embedding_response.py b/portkey_ai/_vendor/openai/types/create_embedding_response.py index eff247a1..314a7f9a 100644 --- a/portkey_ai/_vendor/openai/types/create_embedding_response.py +++ b/portkey_ai/_vendor/openai/types/create_embedding_response.py @@ -10,6 +10,8 @@ class Usage(BaseModel): + """The usage information for the request.""" + prompt_tokens: int """The number of tokens used by the prompt.""" diff --git a/portkey_ai/_vendor/openai/types/embedding.py b/portkey_ai/_vendor/openai/types/embedding.py index 769b1d16..fbffec01 100644 --- a/portkey_ai/_vendor/openai/types/embedding.py +++ b/portkey_ai/_vendor/openai/types/embedding.py @@ -9,6 +9,8 @@ class Embedding(BaseModel): + """Represents an embedding vector returned by embedding endpoint.""" + embedding: List[float] """The embedding vector, which is a list of floats. 
diff --git a/portkey_ai/_vendor/openai/types/eval_create_params.py b/portkey_ai/_vendor/openai/types/eval_create_params.py index eb7f86cd..a1d5ea53 100644 --- a/portkey_ai/_vendor/openai/types/eval_create_params.py +++ b/portkey_ai/_vendor/openai/types/eval_create_params.py @@ -7,6 +7,7 @@ from .._types import SequenceNotStr from .shared_params.metadata import Metadata +from .graders.grader_inputs_param import GraderInputsParam from .graders.python_grader_param import PythonGraderParam from .graders.score_model_grader_param import ScoreModelGraderParam from .graders.string_check_grader_param import StringCheckGraderParam @@ -64,6 +65,13 @@ class EvalCreateParams(TypedDict, total=False): class DataSourceConfigCustom(TypedDict, total=False): + """ + A CustomDataSourceConfig object that defines the schema for the data source used for the evaluation runs. + This schema is used to define the shape of the data that will be: + - Used to define your testing criteria and + - What data is required when creating a run + """ + item_schema: Required[Dict[str, object]] """The json schema for each row in the data source.""" @@ -78,6 +86,11 @@ class DataSourceConfigCustom(TypedDict, total=False): class DataSourceConfigLogs(TypedDict, total=False): + """ + A data source config which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + """ + type: Required[Literal["logs"]] """The type of data source. Always `logs`.""" @@ -86,6 +99,8 @@ class DataSourceConfigLogs(TypedDict, total=False): class DataSourceConfigStoredCompletions(TypedDict, total=False): + """Deprecated in favor of LogsDataSourceConfig.""" + type: Required[Literal["stored_completions"]] """The type of data source. 
Always `stored_completions`.""" @@ -105,6 +120,8 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False): class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -113,6 +130,8 @@ class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total= class TestingCriterionLabelModelInputEvalItemContentInputImage(TypedDict, total=False): + """An image input block used within EvalItem content arrays.""" + image_url: Required[str] """The URL of the image input.""" @@ -132,13 +151,25 @@ class TestingCriterionLabelModelInputEvalItemContentInputImage(TypedDict, total= TestingCriterionLabelModelInputEvalItemContentOutputText, TestingCriterionLabelModelInputEvalItemContentInputImage, ResponseInputAudioParam, - Iterable[object], + GraderInputsParam, ] class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[TestingCriterionLabelModelInputEvalItemContent] - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. @@ -156,6 +187,11 @@ class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): class TestingCriterionLabelModel(TypedDict, total=False): + """ + A LabelModelGrader object which uses a model to assign labels to each item + in the evaluation. 
+ """ + input: Required[Iterable[TestingCriterionLabelModelInput]] """A list of chat messages forming the prompt or context. @@ -179,16 +215,22 @@ class TestingCriterionLabelModel(TypedDict, total=False): class TestingCriterionTextSimilarity(TextSimilarityGraderParam, total=False): + """A TextSimilarityGrader object which grades text based on similarity metrics.""" + pass_threshold: Required[float] """The threshold for the score.""" class TestingCriterionPython(PythonGraderParam, total=False): + """A PythonGrader object that runs a python script on the input.""" + pass_threshold: float """The threshold for the score.""" class TestingCriterionScoreModel(ScoreModelGraderParam, total=False): + """A ScoreModelGrader object that uses a model to assign a score to the input.""" + pass_threshold: float """The threshold for the score.""" diff --git a/portkey_ai/_vendor/openai/types/eval_create_response.py b/portkey_ai/_vendor/openai/types/eval_create_response.py index 20b0e312..f3166422 100644 --- a/portkey_ai/_vendor/openai/types/eval_create_response.py +++ b/portkey_ai/_vendor/openai/types/eval_create_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. 
Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalCreateResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. + Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/portkey_ai/_vendor/openai/types/eval_custom_data_source_config.py b/portkey_ai/_vendor/openai/types/eval_custom_data_source_config.py index d99701cc..6234c4f4 100644 --- a/portkey_ai/_vendor/openai/types/eval_custom_data_source_config.py +++ b/portkey_ai/_vendor/openai/types/eval_custom_data_source_config.py @@ -11,6 +11,13 @@ class EvalCustomDataSourceConfig(BaseModel): + """ + A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces. 
+ The response schema defines the shape of the data that will be: + - Used to define your testing criteria and + - What data is required when creating a run + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. Learn how to build JSON schemas diff --git a/portkey_ai/_vendor/openai/types/eval_list_response.py b/portkey_ai/_vendor/openai/types/eval_list_response.py index 5ac4997c..7cd92c5a 100644 --- a/portkey_ai/_vendor/openai/types/eval_list_response.py +++ b/portkey_ai/_vendor/openai/types/eval_list_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. 
Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalListResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. + Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/portkey_ai/_vendor/openai/types/eval_retrieve_response.py b/portkey_ai/_vendor/openai/types/eval_retrieve_response.py index 758f9cc0..56db7d6b 100644 --- a/portkey_ai/_vendor/openai/types/eval_retrieve_response.py +++ b/portkey_ai/_vendor/openai/types/eval_retrieve_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. 
+ """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalRetrieveResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. + Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/portkey_ai/_vendor/openai/types/eval_stored_completions_data_source_config.py b/portkey_ai/_vendor/openai/types/eval_stored_completions_data_source_config.py index 98f86a47..d11f6ae1 100644 --- a/portkey_ai/_vendor/openai/types/eval_stored_completions_data_source_config.py +++ b/portkey_ai/_vendor/openai/types/eval_stored_completions_data_source_config.py @@ -12,6 +12,8 @@ class EvalStoredCompletionsDataSourceConfig(BaseModel): + """Deprecated in favor of LogsDataSourceConfig.""" + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. 
Learn how to build JSON schemas diff --git a/portkey_ai/_vendor/openai/types/eval_update_response.py b/portkey_ai/_vendor/openai/types/eval_update_response.py index 3f0b90ae..30d4dbc3 100644 --- a/portkey_ai/_vendor/openai/types/eval_update_response.py +++ b/portkey_ai/_vendor/openai/types/eval_update_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalUpdateResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. 
+ Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py index a9f2fd08..726ae6ab 100644 --- a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py +++ b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py @@ -6,6 +6,7 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from ..shared.metadata import Metadata +from ..graders.grader_inputs import GraderInputs from ..shared.reasoning_effort import ReasoningEffort from ..shared.response_format_text import ResponseFormatText from ..responses.easy_input_message import EasyInputMessage @@ -58,6 +59,8 @@ class SourceFileID(BaseModel): class SourceStoredCompletions(BaseModel): + """A StoredCompletionsRunDataSource configuration describing a set of filters""" + type: Literal["stored_completions"] """The type of source. 
Always `stored_completions`.""" @@ -90,6 +93,8 @@ class SourceStoredCompletions(BaseModel): class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -98,6 +103,8 @@ class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + image_url: str """The URL of the image input.""" @@ -117,13 +124,25 @@ class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): InputMessagesTemplateTemplateEvalItemContentOutputText, InputMessagesTemplateTemplateEvalItemContentInputImage, ResponseInputAudio, - List[object], + GraderInputs, ] class InputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: InputMessagesTemplateTemplateEvalItemContent - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Literal["user", "assistant", "system", "developer"] """The role of the message input. @@ -172,12 +191,17 @@ class SamplingParams(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. 
- - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ response_format: Optional[SamplingParamsResponseFormat] = None @@ -212,6 +236,8 @@ class SamplingParams(BaseModel): class CreateEvalCompletionsRunDataSource(BaseModel): + """A CompletionsRunDataSource object describing a model sampling configuration.""" + source: Source """Determines what populates the `item` namespace in this run's data source.""" diff --git a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py index e682e2db..6842f84a 100644 --- a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -7,6 +7,7 @@ from ..shared_params.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort +from ..graders.grader_inputs_param import GraderInputsParam from ..responses.easy_input_message_param import EasyInputMessageParam from ..shared_params.response_format_text import ResponseFormatText from ..responses.response_input_text_param import ResponseInputTextParam @@ -58,6 +59,8 @@ class SourceFileID(TypedDict, total=False): class 
SourceStoredCompletions(TypedDict, total=False): + """A StoredCompletionsRunDataSource configuration describing a set of filters""" + type: Required[Literal["stored_completions"]] """The type of source. Always `stored_completions`.""" @@ -88,6 +91,8 @@ class SourceStoredCompletions(TypedDict, total=False): class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -96,6 +101,8 @@ class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=Fa class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=False): + """An image input block used within EvalItem content arrays.""" + image_url: Required[str] """The URL of the image input.""" @@ -115,13 +122,25 @@ class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=Fa InputMessagesTemplateTemplateEvalItemContentOutputText, InputMessagesTemplateTemplateEvalItemContentInputImage, ResponseInputAudioParam, - Iterable[object], + GraderInputsParam, ] class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[InputMessagesTemplateTemplateEvalItemContent] - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. 
@@ -168,12 +187,17 @@ class SamplingParams(TypedDict, total=False): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" response_format: SamplingParamsResponseFormat @@ -208,6 +232,8 @@ class SamplingParams(TypedDict, total=False): class CreateEvalCompletionsRunDataSourceParam(TypedDict, total=False): + """A CompletionsRunDataSource object describing a model sampling configuration.""" + source: Required[Source] """Determines what populates the `item` namespace in this run's data source.""" diff --git a/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source.py b/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source.py index ae36f8c5..36ede2d9 100644 --- a/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source.py +++ b/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source.py @@ -35,6 +35,10 @@ class SourceFileID(BaseModel): class CreateEvalJSONLRunDataSource(BaseModel): + """ + A JsonlRunDataSource object with that specifies a JSONL file that matches the eval + """ + source: Source """Determines what populates the `item` namespace in the data source.""" diff --git a/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source_param.py b/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source_param.py index 217ee363..b87ba9c5 100644 --- a/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source_param.py +++ b/portkey_ai/_vendor/openai/types/evals/create_eval_jsonl_run_data_source_param.py @@ -40,6 +40,10 @@ class SourceFileID(TypedDict, total=False): class CreateEvalJSONLRunDataSourceParam(TypedDict, total=False): + """ + A JsonlRunDataSource object with that specifies a JSONL file that matches the eval + """ + source: Required[Source] """Determines what populates the `item` namespace in the data source.""" diff --git a/portkey_ai/_vendor/openai/types/evals/eval_api_error.py b/portkey_ai/_vendor/openai/types/evals/eval_api_error.py index fe768710..9b2c1871 100644 --- a/portkey_ai/_vendor/openai/types/evals/eval_api_error.py +++ 
b/portkey_ai/_vendor/openai/types/evals/eval_api_error.py @@ -6,6 +6,8 @@ class EvalAPIError(BaseModel): + """An object representing an error response from the Eval API.""" + code: str """The error code.""" diff --git a/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py b/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py index 084dd6ce..ea4797ee 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py @@ -10,6 +10,7 @@ from .eval_api_error import EvalAPIError from ..responses.tool import Tool from ..shared.metadata import Metadata +from ..graders.grader_inputs import GraderInputs from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio @@ -66,6 +67,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -103,12 +106,17 @@ class DataSourceResponsesSourceResponses(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. 
Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ temperature: Optional[float] = None @@ -139,6 +147,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -147,6 +157,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + image_url: str """The URL of the image input.""" @@ -166,13 +178,25 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, ResponseInputAudio, - List[object], + GraderInputs, ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. 
+ """ role: Literal["user", "assistant", "system", "developer"] """The role of the message input. @@ -216,6 +240,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. @@ -241,12 +273,17 @@ class DataSourceResponsesSamplingParams(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" seed: Optional[int] = None @@ -287,6 +324,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -345,6 +384,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -359,6 +400,8 @@ class ResultCounts(BaseModel): class RunCancelResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/portkey_ai/_vendor/openai/types/evals/run_create_params.py b/portkey_ai/_vendor/openai/types/evals/run_create_params.py index f114fae6..02804c30 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_create_params.py +++ b/portkey_ai/_vendor/openai/types/evals/run_create_params.py @@ -9,6 +9,7 @@ from ..responses.tool_param import ToolParam from ..shared_params.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort +from ..graders.grader_inputs_param import GraderInputsParam from ..responses.response_input_text_param import ResponseInputTextParam from ..responses.response_input_audio_param import ResponseInputAudioParam from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam @@ -79,6 +80,8 @@ class DataSourceCreateEvalResponsesRunDataSourceSourceFileID(TypedDict, total=Fa class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total=False): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Required[Literal["responses"]] """The type of run data source. 
Always `responses`.""" @@ -116,12 +119,17 @@ class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" temperature: Optional[float] @@ -155,6 +163,8 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateCha class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText( TypedDict, total=False ): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -165,6 +175,8 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEva class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage( TypedDict, total=False ): + """An image input block used within EvalItem content arrays.""" + image_url: Required[str] """The URL of the image input.""" @@ -184,13 +196,25 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEva DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText, DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage, ResponseInputAudioParam, - Iterable[object], + GraderInputsParam, ] class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent] - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. 
@@ -234,6 +258,14 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(Typed class DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText(TypedDict, total=False): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: ResponseFormatTextConfigParam """An object specifying the format that the model must output. @@ -259,12 +291,17 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total= """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" seed: int @@ -305,6 +342,8 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total= class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: Required[DataSourceCreateEvalResponsesRunDataSourceSource] """Determines what populates the `item` namespace in this run's data source.""" diff --git a/portkey_ai/_vendor/openai/types/evals/run_create_response.py b/portkey_ai/_vendor/openai/types/evals/run_create_response.py index 1343335e..2cb856de 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_create_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_create_response.py @@ -10,6 +10,7 @@ from .eval_api_error import EvalAPIError from ..responses.tool import Tool from ..shared.metadata import Metadata +from ..graders.grader_inputs import GraderInputs from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio @@ -66,6 +67,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -103,12 +106,17 @@ class DataSourceResponsesSourceResponses(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. 
+ Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ temperature: Optional[float] = None @@ -139,6 +147,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -147,6 +157,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + image_url: str """The URL of the image input.""" @@ -166,13 +178,25 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, ResponseInputAudio, - List[object], + GraderInputs, ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. 
+ """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Literal["user", "assistant", "system", "developer"] """The role of the message input. @@ -216,6 +240,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. @@ -241,12 +273,17 @@ class DataSourceResponsesSamplingParams(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. 
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ seed: Optional[int] = None @@ -287,6 +324,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -345,6 +384,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -359,6 +400,8 @@ class ResultCounts(BaseModel): class RunCreateResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/portkey_ai/_vendor/openai/types/evals/run_list_response.py b/portkey_ai/_vendor/openai/types/evals/run_list_response.py index 7c32ce54..defd4aa6 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_list_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_list_response.py @@ -10,6 +10,7 @@ from .eval_api_error import EvalAPIError from ..responses.tool import Tool from ..shared.metadata import Metadata +from ..graders.grader_inputs import GraderInputs from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio @@ -66,6 +67,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. 
Always `responses`.""" @@ -103,12 +106,17 @@ class DataSourceResponsesSourceResponses(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" temperature: Optional[float] = None @@ -139,6 +147,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -147,6 +157,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + image_url: str """The URL of the image input.""" @@ -166,13 +178,25 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, ResponseInputAudio, - List[object], + GraderInputs, ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Literal["user", "assistant", "system", "developer"] """The role of the message input. @@ -216,6 +240,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. 
Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. @@ -241,12 +273,17 @@ class DataSourceResponsesSamplingParams(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. 
""" seed: Optional[int] = None @@ -287,6 +324,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -345,6 +384,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -359,6 +400,8 @@ class ResultCounts(BaseModel): class RunListResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py b/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py index f1212c16..4c218a05 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py @@ -10,6 +10,7 @@ from .eval_api_error import EvalAPIError from ..responses.tool import Tool from ..shared.metadata import Metadata +from ..graders.grader_inputs import GraderInputs from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio @@ -66,6 +67,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -103,12 +106,17 @@ class DataSourceResponsesSourceResponses(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ temperature: Optional[float] = None @@ -139,6 +147,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -147,6 +157,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + image_url: str """The URL of the image input.""" @@ -166,13 +178,25 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, ResponseInputAudio, - List[object], + GraderInputs, ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. 
Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Literal["user", "assistant", "system", "developer"] """The role of the message input. @@ -216,6 +240,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. @@ -241,12 +273,17 @@ class DataSourceResponsesSamplingParams(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. 
Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ seed: Optional[int] = None @@ -287,6 +324,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -345,6 +384,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -359,6 +400,8 @@ class ResultCounts(BaseModel): class RunRetrieveResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/portkey_ai/_vendor/openai/types/evals/runs/output_item_list_response.py b/portkey_ai/_vendor/openai/types/evals/runs/output_item_list_response.py index e88c2176..a906a29d 100644 --- a/portkey_ai/_vendor/openai/types/evals/runs/output_item_list_response.py +++ b/portkey_ai/_vendor/openai/types/evals/runs/output_item_list_response.py @@ -12,6 +12,8 @@ class Result(BaseModel): + """A single grader result for an evaluation run output item.""" + name: str """The name of the grader.""" @@ -41,6 +43,8 @@ def __getattr__(self, attr: str) -> object: ... 
class SampleInput(BaseModel): + """An input message.""" + content: str """The content of the message.""" @@ -57,6 +61,8 @@ class SampleOutput(BaseModel): class SampleUsage(BaseModel): + """Token usage details for the sample.""" + cached_tokens: int """The number of tokens retrieved from cache.""" @@ -71,6 +77,8 @@ class SampleUsage(BaseModel): class Sample(BaseModel): + """A sample containing the input and output of the evaluation run.""" + error: EvalAPIError """An object representing an error response from the Eval API.""" @@ -103,6 +111,8 @@ class Sample(BaseModel): class OutputItemListResponse(BaseModel): + """A schema representing an evaluation run output item.""" + id: str """Unique identifier for the evaluation run output item.""" diff --git a/portkey_ai/_vendor/openai/types/evals/runs/output_item_retrieve_response.py b/portkey_ai/_vendor/openai/types/evals/runs/output_item_retrieve_response.py index c728629b..42ba4b28 100644 --- a/portkey_ai/_vendor/openai/types/evals/runs/output_item_retrieve_response.py +++ b/portkey_ai/_vendor/openai/types/evals/runs/output_item_retrieve_response.py @@ -12,6 +12,8 @@ class Result(BaseModel): + """A single grader result for an evaluation run output item.""" + name: str """The name of the grader.""" @@ -41,6 +43,8 @@ def __getattr__(self, attr: str) -> object: ... 
class SampleInput(BaseModel): + """An input message.""" + content: str """The content of the message.""" @@ -57,6 +61,8 @@ class SampleOutput(BaseModel): class SampleUsage(BaseModel): + """Token usage details for the sample.""" + cached_tokens: int """The number of tokens retrieved from cache.""" @@ -71,6 +77,8 @@ class SampleUsage(BaseModel): class Sample(BaseModel): + """A sample containing the input and output of the evaluation run.""" + error: EvalAPIError """An object representing an error response from the Eval API.""" @@ -103,6 +111,8 @@ class Sample(BaseModel): class OutputItemRetrieveResponse(BaseModel): + """A schema representing an evaluation run output item.""" + id: str """Unique identifier for the evaluation run output item.""" diff --git a/portkey_ai/_vendor/openai/types/file_create_params.py b/portkey_ai/_vendor/openai/types/file_create_params.py index f4583b16..5e2afc06 100644 --- a/portkey_ai/_vendor/openai/types/file_create_params.py +++ b/portkey_ai/_vendor/openai/types/file_create_params.py @@ -15,12 +15,14 @@ class FileCreateParams(TypedDict, total=False): """The File object (not file name) to be uploaded.""" purpose: Required[FilePurpose] - """The intended purpose of the uploaded file. - - One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch - API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision - fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used - for eval data sets + """The intended purpose of the uploaded file. One of: + + - `assistants`: Used in the Assistants API + - `batch`: Used in the Batch API + - `fine-tune`: Used for fine-tuning + - `vision`: Images used for vision fine-tuning + - `user_data`: Flexible file type for any purpose + - `evals`: Used for eval data sets """ expires_after: ExpiresAfter @@ -32,6 +34,11 @@ class FileCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a file. 
+ + By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. + """ + anchor: Required[Literal["created_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/portkey_ai/_vendor/openai/types/file_object.py b/portkey_ai/_vendor/openai/types/file_object.py index 883c2de0..4a9901fd 100644 --- a/portkey_ai/_vendor/openai/types/file_object.py +++ b/portkey_ai/_vendor/openai/types/file_object.py @@ -9,6 +9,8 @@ class FileObject(BaseModel): + """The `File` object represents a document that has been uploaded to OpenAI.""" + id: str """The file identifier, which can be referenced in the API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_response.py b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_response.py index 9bc14c00..459fa9de 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_response.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_response.py @@ -8,6 +8,10 @@ class PermissionCreateResponse(BaseModel): + """ + The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint. + """ + id: str """The permission identifier, which can be referenced in the API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py index 14c73b55..34208958 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py @@ -9,6 +9,10 @@ class Data(BaseModel): + """ + The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint. 
+ """ + id: str """The permission identifier, which can be referenced in the API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters.py b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters.py index b0b3f058..cd39f308 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters.py @@ -9,6 +9,8 @@ class DpoHyperparameters(BaseModel): + """The hyperparameters used for the DPO fine-tuning job.""" + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters_param.py b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters_param.py index 87c6ee80..12b2c41c 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters_param.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_hyperparameters_param.py @@ -9,6 +9,8 @@ class DpoHyperparametersParam(TypedDict, total=False): + """The hyperparameters used for the DPO fine-tuning job.""" + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. 
diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method.py b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method.py index 3e20f360..452c1820 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method.py @@ -9,5 +9,7 @@ class DpoMethod(BaseModel): + """Configuration for the DPO fine-tuning method.""" + hyperparameters: Optional[DpoHyperparameters] = None """The hyperparameters used for the DPO fine-tuning job.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method_param.py b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method_param.py index ce6b6510..6bd74d97 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method_param.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/dpo_method_param.py @@ -10,5 +10,7 @@ class DpoMethodParam(TypedDict, total=False): + """Configuration for the DPO fine-tuning method.""" + hyperparameters: DpoHyperparametersParam """The hyperparameters used for the DPO fine-tuning job.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py index f626fbba..bb8a4d59 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py @@ -14,6 +14,10 @@ class Error(BaseModel): + """ + For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + """ + code: str """A machine-readable error code.""" @@ -28,6 +32,11 @@ class Error(BaseModel): class Hyperparameters(BaseModel): + """The hyperparameters used for the fine-tuning job. + + This value will only be returned when running `supervised` jobs. + """ + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. 
@@ -49,6 +58,8 @@ class Hyperparameters(BaseModel): class Method(BaseModel): + """The method used for fine-tuning.""" + type: Literal["supervised", "dpo", "reinforcement"] """The type of method. Is either `supervised`, `dpo`, or `reinforcement`.""" @@ -63,6 +74,10 @@ class Method(BaseModel): class FineTuningJob(BaseModel): + """ + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + """ + id: str """The object identifier, which can be referenced in the API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py index 1d728bd7..7452b818 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py @@ -10,6 +10,8 @@ class FineTuningJobEvent(BaseModel): + """Fine-tuning job event object""" + id: str """The object identifier.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py index 4ac282eb..0e33aa84 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py @@ -8,6 +8,13 @@ class FineTuningJobWandbIntegration(BaseModel): + """The settings for your integration with Weights and Biases. + + This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. 
+ """ + project: str """The name of the project that the new run will be created under.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py b/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py index 351d4e0e..181bede2 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py @@ -100,6 +100,11 @@ class JobCreateParams(TypedDict, total=False): class Hyperparameters(TypedDict, total=False): + """ + The hyperparameters used for the fine-tuning job. + This value is now deprecated in favor of `method`, and should be passed in under the `method` parameter. + """ + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. @@ -121,6 +126,13 @@ class Hyperparameters(TypedDict, total=False): class IntegrationWandb(TypedDict, total=False): + """The settings for your integration with Weights and Biases. + + This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + """ + project: Required[str] """The name of the project that the new run will be created under.""" @@ -163,6 +175,8 @@ class Integration(TypedDict, total=False): class Method(TypedDict, total=False): + """The method used for fine-tuning.""" + type: Required[Literal["supervised", "dpo", "reinforcement"]] """The type of method. 
Is either `supervised`, `dpo`, or `reinforcement`.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py b/portkey_ai/_vendor/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py index bd07317a..f8a04b63 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py @@ -9,6 +9,8 @@ class Metrics(BaseModel): + """Metrics at the step number during the fine-tuning job.""" + full_valid_loss: Optional[float] = None full_valid_mean_token_accuracy: Optional[float] = None @@ -25,6 +27,10 @@ class Metrics(BaseModel): class FineTuningJobCheckpoint(BaseModel): + """ + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + """ + id: str """The checkpoint identifier, which can be referenced in the API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters.py b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters.py index 7c1762d3..4c289fd6 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters.py @@ -9,6 +9,8 @@ class ReinforcementHyperparameters(BaseModel): + """The hyperparameters used for the reinforcement fine-tuning job.""" + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. 
diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters_param.py b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters_param.py index 0cc12fcb..7be716f1 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters_param.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_hyperparameters_param.py @@ -9,6 +9,8 @@ class ReinforcementHyperparametersParam(TypedDict, total=False): + """The hyperparameters used for the reinforcement fine-tuning job.""" + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method.py b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method.py index 9b65c410..a8a36851 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method.py @@ -17,6 +17,8 @@ class ReinforcementMethod(BaseModel): + """Configuration for the reinforcement fine-tuning method.""" + grader: Grader """The grader used for the fine-tuning job.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method_param.py b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method_param.py index 00d50605..ea75bfeb 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method_param.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/reinforcement_method_param.py @@ -20,6 +20,8 @@ class ReinforcementMethodParam(TypedDict, total=False): + """Configuration for the reinforcement fine-tuning method.""" + grader: Required[Grader] """The grader used for the fine-tuning job.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters.py b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters.py index 3955ecf4..1231bbdd 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters.py +++ 
b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters.py @@ -9,6 +9,8 @@ class SupervisedHyperparameters(BaseModel): + """The hyperparameters used for the fine-tuning job.""" + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters_param.py b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters_param.py index bd37d9b2..de0e021d 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters_param.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_hyperparameters_param.py @@ -9,6 +9,8 @@ class SupervisedHyperparametersParam(TypedDict, total=False): + """The hyperparameters used for the fine-tuning job.""" + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method.py b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method.py index 3a32bf27..96e10258 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method.py @@ -9,5 +9,7 @@ class SupervisedMethod(BaseModel): + """Configuration for the supervised fine-tuning method.""" + hyperparameters: Optional[SupervisedHyperparameters] = None """The hyperparameters used for the fine-tuning job.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method_param.py b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method_param.py index ba277853..4381cd18 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method_param.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/supervised_method_param.py @@ -10,5 +10,7 @@ class SupervisedMethodParam(TypedDict, total=False): + """Configuration for the supervised fine-tuning method.""" + hyperparameters: SupervisedHyperparametersParam """The hyperparameters used for the 
fine-tuning job.""" diff --git a/portkey_ai/_vendor/openai/types/graders/__init__.py b/portkey_ai/_vendor/openai/types/graders/__init__.py index e0a90912..4f70eb6c 100644 --- a/portkey_ai/_vendor/openai/types/graders/__init__.py +++ b/portkey_ai/_vendor/openai/types/graders/__init__.py @@ -3,10 +3,12 @@ from __future__ import annotations from .multi_grader import MultiGrader as MultiGrader +from .grader_inputs import GraderInputs as GraderInputs from .python_grader import PythonGrader as PythonGrader from .label_model_grader import LabelModelGrader as LabelModelGrader from .multi_grader_param import MultiGraderParam as MultiGraderParam from .score_model_grader import ScoreModelGrader as ScoreModelGrader +from .grader_inputs_param import GraderInputsParam as GraderInputsParam from .python_grader_param import PythonGraderParam as PythonGraderParam from .string_check_grader import StringCheckGrader as StringCheckGrader from .text_similarity_grader import TextSimilarityGrader as TextSimilarityGrader diff --git a/portkey_ai/_vendor/openai/types/graders/grader_inputs.py b/portkey_ai/_vendor/openai/types/graders/grader_inputs.py new file mode 100644 index 00000000..edc966d8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/graders/grader_inputs.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio + +__all__ = ["GraderInputs", "GraderInputItem", "GraderInputItemOutputText", "GraderInputItemInputImage"] + + +class GraderInputItemOutputText(BaseModel): + """A text output from the model.""" + + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. 
Always `output_text`.""" + + +class GraderInputItemInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +GraderInputItem: TypeAlias = Union[ + str, ResponseInputText, GraderInputItemOutputText, GraderInputItemInputImage, ResponseInputAudio +] + +GraderInputs: TypeAlias = List[GraderInputItem] diff --git a/portkey_ai/_vendor/openai/types/graders/grader_inputs_param.py b/portkey_ai/_vendor/openai/types/graders/grader_inputs_param.py new file mode 100644 index 00000000..7d8341eb --- /dev/null +++ b/portkey_ai/_vendor/openai/types/graders/grader_inputs_param.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..responses.response_input_text_param import ResponseInputTextParam +from ..responses.response_input_audio_param import ResponseInputAudioParam + +__all__ = [ + "GraderInputsParam", + "GraderInputsParamItem", + "GraderInputsParamItemOutputText", + "GraderInputsParamItemInputImage", +] + + +class GraderInputsParamItemOutputText(TypedDict, total=False): + """A text output from the model.""" + + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" + + +class GraderInputsParamItemInputImage(TypedDict, total=False): + """An image input block used within EvalItem content arrays.""" + + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. 
Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +GraderInputsParamItem: TypeAlias = Union[ + str, + ResponseInputTextParam, + GraderInputsParamItemOutputText, + GraderInputsParamItemInputImage, + ResponseInputAudioParam, +] + +GraderInputsParam: TypeAlias = List[GraderInputsParamItem] diff --git a/portkey_ai/_vendor/openai/types/graders/label_model_grader.py b/portkey_ai/_vendor/openai/types/graders/label_model_grader.py index 0929349c..d3c94223 100644 --- a/portkey_ai/_vendor/openai/types/graders/label_model_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/label_model_grader.py @@ -4,6 +4,7 @@ from typing_extensions import Literal, TypeAlias from ..._models import BaseModel +from .grader_inputs import GraderInputs from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio @@ -11,6 +12,8 @@ class InputContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -19,6 +22,8 @@ class InputContentOutputText(BaseModel): class InputContentInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + image_url: str """The URL of the image input.""" @@ -33,13 +38,25 @@ class InputContentInputImage(BaseModel): InputContent: TypeAlias = Union[ - str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, List[object] + str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, GraderInputs ] class Input(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. 
Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: InputContent - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Literal["user", "assistant", "system", "developer"] """The role of the message input. @@ -52,6 +69,11 @@ class Input(BaseModel): class LabelModelGrader(BaseModel): + """ + A LabelModelGrader object which uses a model to assign labels to each item + in the evaluation. + """ + input: List[Input] labels: List[str] diff --git a/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py b/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py index 7bd6fdb4..a5b6959c 100644 --- a/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..._types import SequenceNotStr +from .grader_inputs_param import GraderInputsParam from ..responses.response_input_text_param import ResponseInputTextParam from ..responses.response_input_audio_param import ResponseInputAudioParam @@ -13,6 +14,8 @@ class InputContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -21,6 +24,8 @@ class InputContentOutputText(TypedDict, total=False): class InputContentInputImage(TypedDict, total=False): + """An image input block used within EvalItem content arrays.""" + image_url: Required[str] """The URL of the image input.""" @@ -40,13 +45,25 @@ class InputContentInputImage(TypedDict, total=False): InputContentOutputText, InputContentInputImage, ResponseInputAudioParam, - Iterable[object], + GraderInputsParam, ] class Input(TypedDict, 
total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[InputContent] - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. @@ -59,6 +76,11 @@ class Input(TypedDict, total=False): class LabelModelGraderParam(TypedDict, total=False): + """ + A LabelModelGrader object which uses a model to assign labels to each item + in the evaluation. + """ + input: Required[Iterable[Input]] labels: Required[SequenceNotStr[str]] diff --git a/portkey_ai/_vendor/openai/types/graders/multi_grader.py b/portkey_ai/_vendor/openai/types/graders/multi_grader.py index 7539c68e..022ddb40 100644 --- a/portkey_ai/_vendor/openai/types/graders/multi_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/multi_grader.py @@ -16,6 +16,10 @@ class MultiGrader(BaseModel): + """ + A MultiGrader object combines the output of multiple graders to produce a single score. 
+ """ + calculate_output: str """A formula to calculate the output based on grader results.""" diff --git a/portkey_ai/_vendor/openai/types/graders/multi_grader_param.py b/portkey_ai/_vendor/openai/types/graders/multi_grader_param.py index 28a6705b..064267a5 100644 --- a/portkey_ai/_vendor/openai/types/graders/multi_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/multi_grader_param.py @@ -19,6 +19,10 @@ class MultiGraderParam(TypedDict, total=False): + """ + A MultiGrader object combines the output of multiple graders to produce a single score. + """ + calculate_output: Required[str] """A formula to calculate the output based on grader results.""" diff --git a/portkey_ai/_vendor/openai/types/graders/python_grader.py b/portkey_ai/_vendor/openai/types/graders/python_grader.py index faa10b1e..81aafdae 100644 --- a/portkey_ai/_vendor/openai/types/graders/python_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/python_grader.py @@ -9,6 +9,8 @@ class PythonGrader(BaseModel): + """A PythonGrader object that runs a python script on the input.""" + name: str """The name of the grader.""" diff --git a/portkey_ai/_vendor/openai/types/graders/python_grader_param.py b/portkey_ai/_vendor/openai/types/graders/python_grader_param.py index efb92375..3be7bab4 100644 --- a/portkey_ai/_vendor/openai/types/graders/python_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/python_grader_param.py @@ -8,6 +8,8 @@ class PythonGraderParam(TypedDict, total=False): + """A PythonGrader object that runs a python script on the input.""" + name: Required[str] """The name of the grader.""" diff --git a/portkey_ai/_vendor/openai/types/graders/score_model_grader.py b/portkey_ai/_vendor/openai/types/graders/score_model_grader.py index 35e2dc14..85d11e86 100644 --- a/portkey_ai/_vendor/openai/types/graders/score_model_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/score_model_grader.py @@ -4,6 +4,7 @@ from typing_extensions import Literal, TypeAlias from 
..._models import BaseModel +from .grader_inputs import GraderInputs from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio @@ -19,6 +20,8 @@ class InputContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -27,6 +30,8 @@ class InputContentOutputText(BaseModel): class InputContentInputImage(BaseModel): + """An image input block used within EvalItem content arrays.""" + image_url: str """The URL of the image input.""" @@ -41,13 +46,25 @@ class InputContentInputImage(BaseModel): InputContent: TypeAlias = Union[ - str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, List[object] + str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, GraderInputs ] class Input(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: InputContent - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. + + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Literal["user", "assistant", "system", "developer"] """The role of the message input. 
@@ -60,6 +77,8 @@ class Input(BaseModel): class SamplingParams(BaseModel): + """The sampling parameters for the model.""" + max_completions_tokens: Optional[int] = None """The maximum number of tokens the grader model may generate in its response.""" @@ -67,12 +86,17 @@ class SamplingParams(BaseModel): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ seed: Optional[int] = None @@ -86,8 +110,14 @@ class SamplingParams(BaseModel): class ScoreModelGrader(BaseModel): + """A ScoreModelGrader object that uses a model to assign a score to the input.""" + input: List[Input] - """The input text. This may include template strings.""" + """The input messages evaluated by the grader. + + Supports text, output text, input image, and input audio content blocks, and may + include template strings. 
+ """ model: str """The model to use for the evaluation.""" diff --git a/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py b/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py index 168feeae..9f1c42e0 100644 --- a/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py @@ -5,6 +5,7 @@ from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from .grader_inputs_param import GraderInputsParam from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text_param import ResponseInputTextParam from ..responses.response_input_audio_param import ResponseInputAudioParam @@ -20,6 +21,8 @@ class InputContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -28,6 +31,8 @@ class InputContentOutputText(TypedDict, total=False): class InputContentInputImage(TypedDict, total=False): + """An image input block used within EvalItem content arrays.""" + image_url: Required[str] """The URL of the image input.""" @@ -47,13 +52,25 @@ class InputContentInputImage(TypedDict, total=False): InputContentOutputText, InputContentInputImage, ResponseInputAudioParam, - Iterable[object], + GraderInputsParam, ] class Input(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[InputContent] - """Inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings. 
+ + Supports text, output text, input images, and input audio, either as a single + item or an array of items. + """ role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. @@ -66,6 +83,8 @@ class Input(TypedDict, total=False): class SamplingParams(TypedDict, total=False): + """The sampling parameters for the model.""" + max_completions_tokens: Optional[int] """The maximum number of tokens the grader model may generate in its response.""" @@ -73,12 +92,17 @@ class SamplingParams(TypedDict, total=False): """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ seed: Optional[int] @@ -92,8 +116,14 @@ class SamplingParams(TypedDict, total=False): class ScoreModelGraderParam(TypedDict, total=False): + """A ScoreModelGrader object that uses a model to assign a score to the input.""" + input: Required[Iterable[Input]] - """The input text. This may include template strings.""" + """The input messages evaluated by the grader. 
+ + Supports text, output text, input image, and input audio content blocks, and may + include template strings. + """ model: Required[str] """The model to use for the evaluation.""" diff --git a/portkey_ai/_vendor/openai/types/graders/string_check_grader.py b/portkey_ai/_vendor/openai/types/graders/string_check_grader.py index 3bf0b8c8..efd3679d 100644 --- a/portkey_ai/_vendor/openai/types/graders/string_check_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/string_check_grader.py @@ -8,6 +8,10 @@ class StringCheckGrader(BaseModel): + """ + A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + """ + input: str """The input text. This may include template strings.""" diff --git a/portkey_ai/_vendor/openai/types/graders/string_check_grader_param.py b/portkey_ai/_vendor/openai/types/graders/string_check_grader_param.py index 27b204ce..da9e9615 100644 --- a/portkey_ai/_vendor/openai/types/graders/string_check_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/string_check_grader_param.py @@ -8,6 +8,10 @@ class StringCheckGraderParam(TypedDict, total=False): + """ + A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + """ + input: Required[str] """The input text. 
This may include template strings.""" diff --git a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py index 9082ac89..a9d39a2f 100644 --- a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py @@ -8,6 +8,8 @@ class TextSimilarityGrader(BaseModel): + """A TextSimilarityGrader object which grades text based on similarity metrics.""" + evaluation_metric: Literal[ "cosine", "fuzzy_match", diff --git a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py index 1646afc8..0907c3c2 100644 --- a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py @@ -8,6 +8,8 @@ class TextSimilarityGraderParam(TypedDict, total=False): + """A TextSimilarityGrader object which grades text based on similarity metrics.""" + evaluation_metric: Required[ Literal[ "cosine", diff --git a/portkey_ai/_vendor/openai/types/image.py b/portkey_ai/_vendor/openai/types/image.py index ecaef3fd..dcbdb2ac 100644 --- a/portkey_ai/_vendor/openai/types/image.py +++ b/portkey_ai/_vendor/openai/types/image.py @@ -8,11 +8,13 @@ class Image(BaseModel): + """Represents the content or the URL of an image generated by the OpenAI API.""" + b64_json: Optional[str] = None """The base64-encoded JSON of the generated image. - Default value for `gpt-image-1`, and only present if `response_format` is set to - `b64_json` for `dall-e-2` and `dall-e-3`. + Returned by default for the GPT image models, and only present if + `response_format` is set to `b64_json` for `dall-e-2` and `dall-e-3`. 
""" revised_prompt: Optional[str] = None @@ -21,6 +23,6 @@ class Image(BaseModel): url: Optional[str] = None """ When using `dall-e-2` or `dall-e-3`, the URL of the generated image if - `response_format` is set to `url` (default value). Unsupported for - `gpt-image-1`. + `response_format` is set to `url` (default value). Unsupported for the GPT image + models. """ diff --git a/portkey_ai/_vendor/openai/types/image_edit_completed_event.py b/portkey_ai/_vendor/openai/types/image_edit_completed_event.py index a40682da..e2e19341 100644 --- a/portkey_ai/_vendor/openai/types/image_edit_completed_event.py +++ b/portkey_ai/_vendor/openai/types/image_edit_completed_event.py @@ -8,6 +8,8 @@ class UsageInputTokensDetails(BaseModel): + """The input tokens detailed information for the image generation.""" + image_tokens: int """The number of image tokens in the input prompt.""" @@ -16,6 +18,10 @@ class UsageInputTokensDetails(BaseModel): class Usage(BaseModel): + """ + For the GPT image models only, the token usage information for the image generation. + """ + input_tokens: int """The number of tokens (images and text) in the input prompt.""" @@ -30,6 +36,8 @@ class Usage(BaseModel): class ImageEditCompletedEvent(BaseModel): + """Emitted when image editing has completed and the final image is available.""" + b64_json: str """Base64-encoded final edited image data, suitable for rendering as an image.""" @@ -52,4 +60,7 @@ class ImageEditCompletedEvent(BaseModel): """The type of the event. Always `image_edit.completed`.""" usage: Usage - """For `gpt-image-1` only, the token usage information for the image generation.""" + """ + For the GPT image models only, the token usage information for the image + generation. 
+ """ diff --git a/portkey_ai/_vendor/openai/types/image_edit_params.py b/portkey_ai/_vendor/openai/types/image_edit_params.py index 2a8fab0f..0bd5f39f 100644 --- a/portkey_ai/_vendor/openai/types/image_edit_params.py +++ b/portkey_ai/_vendor/openai/types/image_edit_params.py @@ -15,7 +15,8 @@ class ImageEditParamsBase(TypedDict, total=False): image: Required[Union[FileTypes, SequenceNotStr[FileTypes]]] """The image(s) to edit. Must be a supported image file or an array of images. - For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and + `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than 50MB. You can provide up to 16 images. For `dall-e-2`, you can only provide one image, and it should be a square `png` @@ -26,15 +27,15 @@ class ImageEditParamsBase(TypedDict, total=False): """A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for - `gpt-image-1`. + the GPT image models. """ background: Optional[Literal["transparent", "opaque", "auto"]] """ Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -59,8 +60,8 @@ class ImageEditParamsBase(TypedDict, total=False): model: Union[str, ImageModel, None] """The model to use for image generation. - Only `dall-e-2` and `gpt-image-1` are supported. 
Defaults to `dall-e-2` unless a - parameter specific to `gpt-image-1` is used. + Only `dall-e-2` and the GPT image models are supported. Defaults to `dall-e-2` + unless a parameter specific to the GPT image models is used. """ n: Optional[int] @@ -69,14 +70,14 @@ class ImageEditParamsBase(TypedDict, total=False): output_compression: Optional[int] """The compression level (0-100%) for the generated images. - This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` - output formats, and defaults to 100. + This parameter is only supported for the GPT image models with the `webp` or + `jpeg` output formats, and defaults to 100. """ output_format: Optional[Literal["png", "jpeg", "webp"]] """The format in which the generated images are returned. - This parameter is only supported for `gpt-image-1`. Must be one of `png`, + This parameter is only supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. """ @@ -94,8 +95,8 @@ class ImageEditParamsBase(TypedDict, total=False): quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] """The quality of the image that will be generated. - `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only - supports `standard` quality. Defaults to `auto`. + `high`, `medium` and `low` are only supported for the GPT image models. + `dall-e-2` only supports `standard` quality. Defaults to `auto`. """ response_format: Optional[Literal["url", "b64_json"]] @@ -103,15 +104,15 @@ class ImageEditParamsBase(TypedDict, total=False): Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. This parameter is only supported for `dall-e-2`, as - `gpt-image-1` will always return base64-encoded images. + the GPT image models always return base64-encoded images. """ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] """The size of the generated images. 
Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or - `auto` (default value) for `gpt-image-1`, and one of `256x256`, `512x512`, or - `1024x1024` for `dall-e-2`. + `auto` (default value) for the GPT image models, and one of `256x256`, + `512x512`, or `1024x1024` for `dall-e-2`. """ user: str diff --git a/portkey_ai/_vendor/openai/types/image_edit_partial_image_event.py b/portkey_ai/_vendor/openai/types/image_edit_partial_image_event.py index 20da45ef..7bbd8c9b 100644 --- a/portkey_ai/_vendor/openai/types/image_edit_partial_image_event.py +++ b/portkey_ai/_vendor/openai/types/image_edit_partial_image_event.py @@ -8,6 +8,8 @@ class ImageEditPartialImageEvent(BaseModel): + """Emitted when a partial image is available during image editing streaming.""" + b64_json: str """Base64-encoded partial image data, suitable for rendering as an image.""" diff --git a/portkey_ai/_vendor/openai/types/image_gen_completed_event.py b/portkey_ai/_vendor/openai/types/image_gen_completed_event.py index e78da842..813ed889 100644 --- a/portkey_ai/_vendor/openai/types/image_gen_completed_event.py +++ b/portkey_ai/_vendor/openai/types/image_gen_completed_event.py @@ -8,6 +8,8 @@ class UsageInputTokensDetails(BaseModel): + """The input tokens detailed information for the image generation.""" + image_tokens: int """The number of image tokens in the input prompt.""" @@ -16,6 +18,10 @@ class UsageInputTokensDetails(BaseModel): class Usage(BaseModel): + """ + For the GPT image models only, the token usage information for the image generation. + """ + input_tokens: int """The number of tokens (images and text) in the input prompt.""" @@ -30,6 +36,8 @@ class Usage(BaseModel): class ImageGenCompletedEvent(BaseModel): + """Emitted when image generation has completed and the final image is available.""" + b64_json: str """Base64-encoded image data, suitable for rendering as an image.""" @@ -52,4 +60,7 @@ class ImageGenCompletedEvent(BaseModel): """The type of the event. 
Always `image_generation.completed`.""" usage: Usage - """For `gpt-image-1` only, the token usage information for the image generation.""" + """ + For the GPT image models only, the token usage information for the image + generation. + """ diff --git a/portkey_ai/_vendor/openai/types/image_gen_partial_image_event.py b/portkey_ai/_vendor/openai/types/image_gen_partial_image_event.py index 965d4506..df29c00a 100644 --- a/portkey_ai/_vendor/openai/types/image_gen_partial_image_event.py +++ b/portkey_ai/_vendor/openai/types/image_gen_partial_image_event.py @@ -8,6 +8,8 @@ class ImageGenPartialImageEvent(BaseModel): + """Emitted when a partial image is available during image generation streaming.""" + b64_json: str """Base64-encoded partial image data, suitable for rendering as an image.""" diff --git a/portkey_ai/_vendor/openai/types/image_generate_params.py b/portkey_ai/_vendor/openai/types/image_generate_params.py index 3270ca1d..7a95b3dd 100644 --- a/portkey_ai/_vendor/openai/types/image_generate_params.py +++ b/portkey_ai/_vendor/openai/types/image_generate_params.py @@ -14,16 +14,16 @@ class ImageGenerateParamsBase(TypedDict, total=False): prompt: Required[str] """A text description of the desired image(s). - The maximum length is 32000 characters for `gpt-image-1`, 1000 characters for - `dall-e-2` and 4000 characters for `dall-e-3`. + The maximum length is 32000 characters for the GPT image models, 1000 characters + for `dall-e-2` and 4000 characters for `dall-e-3`. """ background: Optional[Literal["transparent", "opaque", "auto"]] """ Allows to set transparency for the background of the generated image(s). This - parameter is only supported for `gpt-image-1`. Must be one of `transparent`, - `opaque` or `auto` (default value). When `auto` is used, the model will - automatically determine the best background for the image. + parameter is only supported for the GPT image models. Must be one of + `transparent`, `opaque` or `auto` (default value). 
When `auto` is used, the + model will automatically determine the best background for the image. If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. @@ -32,14 +32,16 @@ class ImageGenerateParamsBase(TypedDict, total=False): model: Union[str, ImageModel, None] """The model to use for image generation. - One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`. Defaults to `dall-e-2` unless a - parameter specific to `gpt-image-1` is used. + One of `dall-e-2`, `dall-e-3`, or a GPT image model (`gpt-image-1`, + `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to `dall-e-2` unless a parameter + specific to the GPT image models is used. """ moderation: Optional[Literal["low", "auto"]] - """Control the content-moderation level for images generated by `gpt-image-1`. - - Must be either `low` for less restrictive filtering or `auto` (default value). + """ + Control the content-moderation level for images generated by the GPT image + models. Must be either `low` for less restrictive filtering or `auto` (default + value). """ n: Optional[int] @@ -51,14 +53,14 @@ class ImageGenerateParamsBase(TypedDict, total=False): output_compression: Optional[int] """The compression level (0-100%) for the generated images. - This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` - output formats, and defaults to 100. + This parameter is only supported for the GPT image models with the `webp` or + `jpeg` output formats, and defaults to 100. """ output_format: Optional[Literal["png", "jpeg", "webp"]] """The format in which the generated images are returned. - This parameter is only supported for `gpt-image-1`. Must be one of `png`, + This parameter is only supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. """ @@ -78,7 +80,7 @@ class ImageGenerateParamsBase(TypedDict, total=False): - `auto` (default value) will automatically select the best quality for the given model. 
- - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `high`, `medium` and `low` are supported for the GPT image models. - `hd` and `standard` are supported for `dall-e-3`. - `standard` is the only option for `dall-e-2`. """ @@ -88,8 +90,8 @@ class ImageGenerateParamsBase(TypedDict, total=False): returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the - image has been generated. This parameter isn't supported for `gpt-image-1` which - will always return base64-encoded images. + image has been generated. This parameter isn't supported for the GPT image + models, which always return base64-encoded images. """ size: Optional[ @@ -98,7 +100,7 @@ class ImageGenerateParamsBase(TypedDict, total=False): """The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or - `auto` (default value) for `gpt-image-1`, one of `256x256`, `512x512`, or + `auto` (default value) for the GPT image models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. """ @@ -126,7 +128,7 @@ class ImageGenerateParamsNonStreaming(ImageGenerateParamsBase, total=False): Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. """ @@ -136,7 +138,7 @@ class ImageGenerateParamsStreaming(ImageGenerateParamsBase): Defaults to `false`. See the [Image generation guide](https://platform.openai.com/docs/guides/image-generation) - for more information. This parameter is only supported for `gpt-image-1`. + for more information. This parameter is only supported for the GPT image models. 
""" diff --git a/portkey_ai/_vendor/openai/types/image_model.py b/portkey_ai/_vendor/openai/types/image_model.py index 22b1281f..8ea486fb 100644 --- a/portkey_ai/_vendor/openai/types/image_model.py +++ b/portkey_ai/_vendor/openai/types/image_model.py @@ -4,4 +4,4 @@ __all__ = ["ImageModel"] -ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1", "gpt-image-1-mini"] +ImageModel: TypeAlias = Literal["gpt-image-1.5", "dall-e-2", "dall-e-3", "gpt-image-1", "gpt-image-1-mini"] diff --git a/portkey_ai/_vendor/openai/types/images_response.py b/portkey_ai/_vendor/openai/types/images_response.py index 89cc71df..3e832aad 100644 --- a/portkey_ai/_vendor/openai/types/images_response.py +++ b/portkey_ai/_vendor/openai/types/images_response.py @@ -6,10 +6,12 @@ from .image import Image from .._models import BaseModel -__all__ = ["ImagesResponse", "Usage", "UsageInputTokensDetails"] +__all__ = ["ImagesResponse", "Usage", "UsageInputTokensDetails", "UsageOutputTokensDetails"] class UsageInputTokensDetails(BaseModel): + """The input tokens detailed information for the image generation.""" + image_tokens: int """The number of image tokens in the input prompt.""" @@ -17,7 +19,19 @@ class UsageInputTokensDetails(BaseModel): """The number of text tokens in the input prompt.""" +class UsageOutputTokensDetails(BaseModel): + """The output token details for the image generation.""" + + image_tokens: int + """The number of image output tokens generated by the model.""" + + text_tokens: int + """The number of text output tokens generated by the model.""" + + class Usage(BaseModel): + """For `gpt-image-1` only, the token usage information for the image generation.""" + input_tokens: int """The number of tokens (images and text) in the input prompt.""" @@ -30,8 +44,13 @@ class Usage(BaseModel): total_tokens: int """The total number of tokens (images and text) used for the image generation.""" + output_tokens_details: Optional[UsageOutputTokensDetails] = None + """The output 
token details for the image generation.""" + class ImagesResponse(BaseModel): + """The response from the image generation endpoint.""" + created: int """The Unix timestamp (in seconds) of when the image was created.""" diff --git a/portkey_ai/_vendor/openai/types/model.py b/portkey_ai/_vendor/openai/types/model.py index 2631ee8d..6506224a 100644 --- a/portkey_ai/_vendor/openai/types/model.py +++ b/portkey_ai/_vendor/openai/types/model.py @@ -8,6 +8,8 @@ class Model(BaseModel): + """Describes an OpenAI model offering that can be used with the API.""" + id: str """The model identifier, which can be referenced in the API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/moderation.py b/portkey_ai/_vendor/openai/types/moderation.py index 608f5622..a6acc26d 100644 --- a/portkey_ai/_vendor/openai/types/moderation.py +++ b/portkey_ai/_vendor/openai/types/moderation.py @@ -11,6 +11,8 @@ class Categories(BaseModel): + """A list of the categories, and whether they are flagged or not.""" + harassment: bool """ Content that expresses, incites, or promotes harassing language towards any @@ -89,6 +91,10 @@ class Categories(BaseModel): class CategoryAppliedInputTypes(BaseModel): + """ + A list of the categories along with the input type(s) that the score applies to. 
+ """ + harassment: List[Literal["text"]] """The applied input type(s) for the category 'harassment'.""" @@ -130,6 +136,8 @@ class CategoryAppliedInputTypes(BaseModel): class CategoryScores(BaseModel): + """A list of the categories along with their scores as predicted by model.""" + harassment: float """The score for the category 'harassment'.""" diff --git a/portkey_ai/_vendor/openai/types/moderation_create_response.py b/portkey_ai/_vendor/openai/types/moderation_create_response.py index 79684f8a..23c03875 100644 --- a/portkey_ai/_vendor/openai/types/moderation_create_response.py +++ b/portkey_ai/_vendor/openai/types/moderation_create_response.py @@ -9,6 +9,8 @@ class ModerationCreateResponse(BaseModel): + """Represents if a given text input is potentially harmful.""" + id: str """The unique identifier for the moderation request.""" diff --git a/portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py b/portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py index 9a69a6a2..9c0fe256 100644 --- a/portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py +++ b/portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py @@ -8,11 +8,15 @@ class ImageURL(TypedDict, total=False): + """Contains either an image URL or a data URL for a base64 encoded image.""" + url: Required[str] """Either a URL of the image or the base64 encoded image data.""" class ModerationImageURLInputParam(TypedDict, total=False): + """An object describing an image to classify.""" + image_url: Required[ImageURL] """Contains either an image URL or a data URL for a base64 encoded image.""" diff --git a/portkey_ai/_vendor/openai/types/moderation_text_input_param.py b/portkey_ai/_vendor/openai/types/moderation_text_input_param.py index e5da5333..786ecbe6 100644 --- a/portkey_ai/_vendor/openai/types/moderation_text_input_param.py +++ b/portkey_ai/_vendor/openai/types/moderation_text_input_param.py @@ -8,6 +8,8 @@ class ModerationTextInputParam(TypedDict, 
total=False): + """An object describing text to classify.""" + text: Required[str] """A string of text to classify.""" diff --git a/portkey_ai/_vendor/openai/types/other_file_chunking_strategy_object.py b/portkey_ai/_vendor/openai/types/other_file_chunking_strategy_object.py index e4cd61a8..a5371425 100644 --- a/portkey_ai/_vendor/openai/types/other_file_chunking_strategy_object.py +++ b/portkey_ai/_vendor/openai/types/other_file_chunking_strategy_object.py @@ -8,5 +8,10 @@ class OtherFileChunkingStrategyObject(BaseModel): + """This is returned when the chunking strategy is unknown. + + Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + """ + type: Literal["other"] """Always `other`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/__init__.py b/portkey_ai/_vendor/openai/types/realtime/__init__.py index 83e81a03..c2a141d7 100644 --- a/portkey_ai/_vendor/openai/types/realtime/__init__.py +++ b/portkey_ai/_vendor/openai/types/realtime/__init__.py @@ -175,6 +175,9 @@ from .response_function_call_arguments_done_event import ( ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, ) +from .input_audio_buffer_dtmf_event_received_event import ( + InputAudioBufferDtmfEventReceivedEvent as InputAudioBufferDtmfEventReceivedEvent, +) from .realtime_conversation_item_assistant_message import ( RealtimeConversationItemAssistantMessage as RealtimeConversationItemAssistantMessage, ) diff --git a/portkey_ai/_vendor/openai/types/realtime/audio_transcription.py b/portkey_ai/_vendor/openai/types/realtime/audio_transcription.py index 3e5c8e0c..0a8c1371 100644 --- a/portkey_ai/_vendor/openai/types/realtime/audio_transcription.py +++ b/portkey_ai/_vendor/openai/types/realtime/audio_transcription.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Optional +from typing import Union, Optional from typing_extensions import Literal from ..._models import BaseModel @@ -17,13 +17,22 @@ class AudioTranscription(BaseModel): format will improve accuracy and latency. """ - model: Optional[ - Literal["whisper-1", "gpt-4o-mini-transcribe", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize"] + model: Union[ + str, + Literal[ + "whisper-1", + "gpt-4o-mini-transcribe", + "gpt-4o-mini-transcribe-2025-12-15", + "gpt-4o-transcribe", + "gpt-4o-transcribe-diarize", + ], + None, ] = None """The model to use for transcription. - Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, - and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need + Current options are `whisper-1`, `gpt-4o-mini-transcribe`, + `gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and + `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/audio_transcription_param.py b/portkey_ai/_vendor/openai/types/realtime/audio_transcription_param.py index 3b65e42c..7e60a003 100644 --- a/portkey_ai/_vendor/openai/types/realtime/audio_transcription_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/audio_transcription_param.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Union from typing_extensions import Literal, TypedDict __all__ = ["AudioTranscriptionParam"] @@ -16,11 +17,21 @@ class AudioTranscriptionParam(TypedDict, total=False): format will improve accuracy and latency. """ - model: Literal["whisper-1", "gpt-4o-mini-transcribe", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize"] + model: Union[ + str, + Literal[ + "whisper-1", + "gpt-4o-mini-transcribe", + "gpt-4o-mini-transcribe-2025-12-15", + "gpt-4o-transcribe", + "gpt-4o-transcribe-diarize", + ], + ] """The model to use for transcription. 
- Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, - and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need + Current options are `whisper-1`, `gpt-4o-mini-transcribe`, + `gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and + `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py b/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py index d6fc92b8..d950f59f 100644 --- a/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py +++ b/portkey_ai/_vendor/openai/types/realtime/call_accept_params.py @@ -65,8 +65,10 @@ class CallAcceptParams(TypedDict, total=False): "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", + "gpt-realtime-mini-2025-12-15", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", + "gpt-audio-mini-2025-12-15", ], ] """The Realtime model used for this session.""" @@ -110,13 +112,18 @@ class CallAcceptParams(TypedDict, total=False): limit, the conversation be truncated, meaning messages (starting from the oldest) will not be included in the model's context. A 32k context model with 4,096 max output tokens can only include 28,224 tokens in the context before - truncation occurs. Clients can configure truncation behavior to truncate with a - lower max token limit, which is an effective way to control token usage and - cost. Truncation will reduce the number of cached tokens on the next turn - (busting the cache), since messages are dropped from the beginning of the - context. However, clients can also configure truncation to retain messages up to - a fraction of the maximum context size, which will reduce the need for future - truncations and thus improve the cache rate. 
Truncation can be disabled - entirely, which means the server will never truncate but would instead return an - error if the conversation exceeds the model's input token limit. + truncation occurs. + + Clients can configure truncation behavior to truncate with a lower max token + limit, which is an effective way to control token usage and cost. + + Truncation will reduce the number of cached tokens on the next turn (busting the + cache), since messages are dropped from the beginning of the context. However, + clients can also configure truncation to retain messages up to a fraction of the + maximum context size, which will reduce the need for future truncations and thus + improve the cache rate. + + Truncation can be disabled entirely, which means the server will never truncate + but would instead return an error if the conversation exceeds the model's input + token limit. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/client_secret_create_params.py b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_params.py index 5f0b0d79..2297f3f6 100644 --- a/portkey_ai/_vendor/openai/types/realtime/client_secret_create_params.py +++ b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_params.py @@ -28,6 +28,14 @@ class ClientSecretCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """Configuration for the client secret expiration. + + Expiration refers to the time after which + a client secret will no longer be valid for creating sessions. The session itself may + continue after that time once started. A secret can be used to create multiple sessions + until it expires. 
+ """ + anchor: Literal["created_at"] """ The anchor point for the client secret expiration, meaning that `seconds` will diff --git a/portkey_ai/_vendor/openai/types/realtime/client_secret_create_response.py b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_response.py index 2aed66a2..3a30b105 100644 --- a/portkey_ai/_vendor/openai/types/realtime/client_secret_create_response.py +++ b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_response.py @@ -16,6 +16,8 @@ class ClientSecretCreateResponse(BaseModel): + """Response from creating a session and client secret for the Realtime API.""" + expires_at: int """Expiration timestamp for the client secret, in seconds since epoch.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_created_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_created_event.py index 6ec1dc8c..3026322e 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_created_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_created_event.py @@ -9,6 +9,8 @@ class Conversation(BaseModel): + """The conversation resource.""" + id: Optional[str] = None """The unique ID of the conversation.""" @@ -17,6 +19,8 @@ class Conversation(BaseModel): class ConversationCreatedEvent(BaseModel): + """Returned when a conversation is created. Emitted right after session creation.""" + conversation: Conversation """The conversation resource.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_added.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_added.py index ae9f6803..0e336a92 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_added.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_added.py @@ -10,6 +10,16 @@ class ConversationItemAdded(BaseModel): + """Sent by the server when an Item is added to the default Conversation. 
+ + This can happen in several cases: + - When the client sends a `conversation.item.create` event. + - When the input audio buffer is committed. In this case the item will be a user message containing the audio from the buffer. + - When the model is generating a Response. In this case the `conversation.item.added` event will be sent when the model starts generating a specific Item, and thus it will not yet have any content (and `status` will be `in_progress`). + + The event will include the full content of the Item (except when model is generating a Response) except for audio data, which can be retrieved separately with a `conversation.item.retrieve` event if necessary. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event.py index 8fa2dfe0..fd0fc00f 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event.py @@ -10,6 +10,16 @@ class ConversationItemCreateEvent(BaseModel): + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. + """ + item: ConversationItem """A single item within a Realtime conversation.""" @@ -22,8 +32,12 @@ class ConversationItemCreateEvent(BaseModel): previous_item_id: Optional[str] = None """The ID of the preceding item after which the new item will be inserted. - If not set, the new item will be appended to the end of the conversation. 
If set - to `root`, the new item will be added to the beginning of the conversation. If - set to an existing ID, it allows an item to be inserted mid-conversation. If the - ID cannot be found, an error will be returned and the item will not be added. + If not set, the new item will be appended to the end of the conversation. + + If set to `root`, the new item will be added to the beginning of the + conversation. + + If set to an existing ID, it allows an item to be inserted mid-conversation. If + the ID cannot be found, an error will be returned and the item will not be + added. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event_param.py index 8530dc72..e991e37c 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event_param.py @@ -10,6 +10,16 @@ class ConversationItemCreateEventParam(TypedDict, total=False): + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. + """ + item: Required[ConversationItemParam] """A single item within a Realtime conversation.""" @@ -22,8 +32,12 @@ class ConversationItemCreateEventParam(TypedDict, total=False): previous_item_id: str """The ID of the preceding item after which the new item will be inserted. - If not set, the new item will be appended to the end of the conversation. If set - to `root`, the new item will be added to the beginning of the conversation. 
If - set to an existing ID, it allows an item to be inserted mid-conversation. If the - ID cannot be found, an error will be returned and the item will not be added. + If not set, the new item will be appended to the end of the conversation. + + If set to `root`, the new item will be added to the beginning of the + conversation. + + If set to an existing ID, it allows an item to be inserted mid-conversation. If + the ID cannot be found, an error will be returned and the item will not be + added. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_created_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_created_event.py index 13f24ad3..6ae6f05f 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_created_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_created_event.py @@ -10,6 +10,19 @@ class ConversationItemCreatedEvent(BaseModel): + """Returned when a conversation item is created. + + There are several scenarios that produce this event: + - The server is generating a Response, which if successful will produce + either one or two Items, which will be of type `message` + (role `assistant`) or type `function_call`. + - The input audio buffer has been committed, either by the client or the + server (in `server_vad` mode). The server will take the content of the + input audio buffer and add it to a new user message Item. + - The client has sent a `conversation.item.create` event to add a new Item + to the Conversation. 
+ """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py index 3734f72e..c662f386 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py @@ -9,6 +9,14 @@ class ConversationItemDeleteEvent(BaseModel): + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + item_id: str """The ID of the item to delete.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py index c3f88d66..e79bb68c 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py @@ -8,6 +8,14 @@ class ConversationItemDeleteEventParam(TypedDict, total=False): + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. 
+ """ + item_id: Required[str] """The ID of the item to delete.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py index cfe6fe85..9826289e 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py @@ -8,6 +8,12 @@ class ConversationItemDeletedEvent(BaseModel): + """ + Returned when an item in the conversation is deleted by the client with a + `conversation.item.delete` event. This event is used to synchronize the + server's understanding of the conversation history with the client's view. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_done.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_done.py index a4c9b8a8..6a823c65 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_done.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_done.py @@ -10,6 +10,11 @@ class ConversationItemDone(BaseModel): + """Returned when a conversation item is finalized. + + The event will include the full content of the Item except for audio data, which can be retrieved separately with a `conversation.item.retrieve` event if needed. 
+ """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py index 09b20aa1..3304233f 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py @@ -16,6 +16,8 @@ class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -24,6 +26,8 @@ class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel): class UsageTranscriptTextUsageTokens(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -41,6 +45,8 @@ class UsageTranscriptTextUsageTokens(BaseModel): class UsageTranscriptTextUsageDuration(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -52,6 +58,19 @@ class UsageTranscriptTextUsageDuration(BaseModel): class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): + """ + This event is the output of audio transcription for user audio written to the + user audio buffer. Transcription begins when the input audio buffer is + committed by the client or server (when VAD is enabled). Transcription runs + asynchronously with Response creation, so this event may come before or after + the Response events. + + Realtime API models accept audio natively, and thus input transcription is a + separate process run on a separate ASR (Automatic Speech Recognition) model. 
+ The transcript may diverge somewhat from the model's interpretation, and + should be treated as a rough guide. + """ + content_index: int """The index of the content part containing the audio.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py index f49e6f63..5f3f5481 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py @@ -10,6 +10,10 @@ class ConversationItemInputAudioTranscriptionDeltaEvent(BaseModel): + """ + Returned when the text value of an input audio transcription content part is updated with incremental transcription results. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py index edb97bbf..e8ad05e4 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py @@ -9,6 +9,8 @@ class Error(BaseModel): + """Details of the transcription error.""" + code: Optional[str] = None """Error code, if any.""" @@ -23,6 +25,12 @@ class Error(BaseModel): class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel): + """ + Returned when input audio transcription is configured, and a transcription + request for a user message failed. These events are separate from other + `error` events so that the client can identify the related Item. 
+ """ + content_index: int """The index of the content part containing the audio.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_segment.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_segment.py index e2cbc9d2..dcc49165 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_segment.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_segment.py @@ -8,6 +8,8 @@ class ConversationItemInputAudioTranscriptionSegment(BaseModel): + """Returned when an input audio transcription segment is identified for an item.""" + id: str """The segment identifier.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py index 018c2ccc..e7d8eb6c 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py @@ -9,6 +9,13 @@ class ConversationItemRetrieveEvent(BaseModel): + """ + Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD. + The server will respond with a `conversation.item.retrieved` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. 
+ """ + item_id: str """The ID of the item to retrieve.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py index 71b3ffa4..59fdb6fb 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py @@ -8,6 +8,13 @@ class ConversationItemRetrieveEventParam(TypedDict, total=False): + """ + Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD. + The server will respond with a `conversation.item.retrieved` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + item_id: Required[str] """The ID of the item to retrieve.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event.py index d6c6779c..16c82183 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event.py @@ -9,6 +9,21 @@ class ConversationItemTruncateEvent(BaseModel): + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. + + Truncating audio will delete the server-side text transcript to ensure there + is not text in the context that hasn't been heard by the user. 
+ + If successful, the server will respond with a `conversation.item.truncated` + event. + """ + audio_end_ms: int """Inclusive duration up to which audio is truncated, in milliseconds. diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event_param.py index f5ab13a4..e9b41fc9 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event_param.py @@ -8,6 +8,21 @@ class ConversationItemTruncateEventParam(TypedDict, total=False): + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. + + Truncating audio will delete the server-side text transcript to ensure there + is not text in the context that hasn't been heard by the user. + + If successful, the server will respond with a `conversation.item.truncated` + event. + """ + audio_end_ms: Required[int] """Inclusive duration up to which audio is truncated, in milliseconds. diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncated_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncated_event.py index f56cabc3..c78a776d 100644 --- a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncated_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncated_event.py @@ -8,6 +8,15 @@ class ConversationItemTruncatedEvent(BaseModel): + """ + Returned when an earlier assistant audio message item is truncated by the + client with a `conversation.item.truncate` event. 
This event is used to + synchronize the server's understanding of the audio with the client's playback. + + This action will truncate the audio and remove the server-side text transcript + to ensure there is no text in the context that hasn't been heard by the user. + """ + audio_end_ms: int """The duration up to which the audio was truncated, in milliseconds.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event.py index 8562cf0a..4c9e9a54 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event.py @@ -9,6 +9,23 @@ class InputAudioBufferAppendEvent(BaseModel): + """Send this event to append audio bytes to the input audio buffer. + + The audio + buffer is temporary storage you can write to and later commit. A "commit" will create a new + user message item in the conversation history from the buffer content and clear the buffer. + Input audio transcription (if enabled) will be generated when the buffer is committed. + + If VAD is enabled the audio buffer is used to detect speech and the server will decide + when to commit. When Server VAD is disabled, you must commit the audio buffer + manually. Input audio noise reduction operates on writes to the audio buffer. + + The client may choose how much audio to place in each event up to a maximum + of 15 MiB, for example streaming smaller chunks from the client may allow the + VAD to be more responsive. Unlike most other client events, the server will + not send a confirmation response to this event. + """ + audio: str """Base64-encoded audio bytes. 
diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event_param.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event_param.py index 3ad0bc73..a0d308e4 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event_param.py @@ -8,6 +8,23 @@ class InputAudioBufferAppendEventParam(TypedDict, total=False): + """Send this event to append audio bytes to the input audio buffer. + + The audio + buffer is temporary storage you can write to and later commit. A "commit" will create a new + user message item in the conversation history from the buffer content and clear the buffer. + Input audio transcription (if enabled) will be generated when the buffer is committed. + + If VAD is enabled the audio buffer is used to detect speech and the server will decide + when to commit. When Server VAD is disabled, you must commit the audio buffer + manually. Input audio noise reduction operates on writes to the audio buffer. + + The client may choose how much audio to place in each event up to a maximum + of 15 MiB, for example streaming smaller chunks from the client may allow the + VAD to be more responsive. Unlike most other client events, the server will + not send a confirmation response to this event. + """ + audio: Required[str] """Base64-encoded audio bytes. diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event.py index 9922ff3b..5526bcbf 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event.py @@ -9,6 +9,12 @@ class InputAudioBufferClearEvent(BaseModel): + """Send this event to clear the audio bytes in the buffer. + + The server will + respond with an `input_audio_buffer.cleared` event. 
+ """ + type: Literal["input_audio_buffer.clear"] """The event type, must be `input_audio_buffer.clear`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py index 2bd6bc5a..8e0e9c55 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py @@ -8,6 +8,12 @@ class InputAudioBufferClearEventParam(TypedDict, total=False): + """Send this event to clear the audio bytes in the buffer. + + The server will + respond with an `input_audio_buffer.cleared` event. + """ + type: Required[Literal["input_audio_buffer.clear"]] """The event type, must be `input_audio_buffer.clear`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_cleared_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_cleared_event.py index af71844f..e4775567 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_cleared_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_cleared_event.py @@ -8,6 +8,11 @@ class InputAudioBufferClearedEvent(BaseModel): + """ + Returned when the input audio buffer is cleared by the client with a + `input_audio_buffer.clear` event. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py index 125c3ba1..fe2ec017 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py @@ -9,6 +9,12 @@ class InputAudioBufferCommitEvent(BaseModel): + """ + Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. 
This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically. + + Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event. + """ + type: Literal["input_audio_buffer.commit"] """The event type, must be `input_audio_buffer.commit`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py index c9c927ab..20342795 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py @@ -8,6 +8,12 @@ class InputAudioBufferCommitEventParam(TypedDict, total=False): + """ + Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically. + + Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event. 
+ """ + type: Required[Literal["input_audio_buffer.commit"]] """The event type, must be `input_audio_buffer.commit`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_committed_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_committed_event.py index 5ed1b4cc..15dc8254 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_committed_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_committed_event.py @@ -9,6 +9,13 @@ class InputAudioBufferCommittedEvent(BaseModel): + """ + Returned when an input audio buffer is committed, either by the client or + automatically in server VAD mode. The `item_id` property is the ID of the user + message item that will be created, thus a `conversation.item.created` event + will also be sent to the client. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py new file mode 100644 index 00000000..c2623cc7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferDtmfEventReceivedEvent"] + + +class InputAudioBufferDtmfEventReceivedEvent(BaseModel): + """**SIP Only:** Returned when an DTMF event is received. + + A DTMF event is a message that + represents a telephone keypad press (0–9, *, #, A–D). The `event` property + is the keypad that the user press. The `received_at` is the UTC Unix Timestamp + that the server received the event. 
+ """ + + event: str + """The telephone keypad that was pressed by the user.""" + + received_at: int + """UTC Unix Timestamp when DTMF Event was received by server.""" + + type: Literal["input_audio_buffer.dtmf_event_received"] + """The event type, must be `input_audio_buffer.dtmf_event_received`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_started_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_started_event.py index 865205d7..1bd4c74e 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_started_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_started_event.py @@ -8,6 +8,19 @@ class InputAudioBufferSpeechStartedEvent(BaseModel): + """ + Sent by the server when in `server_vad` mode to indicate that speech has been + detected in the audio buffer. This can happen any time audio is added to the + buffer (unless speech is already detected). The client may want to use this + event to interrupt audio playback or provide visual feedback to the user. + + The client should expect to receive a `input_audio_buffer.speech_stopped` event + when speech stops. The `item_id` property is the ID of the user message item + that will be created when speech stops and will also be included in the + `input_audio_buffer.speech_stopped` event (unless the client manually commits + the audio buffer during VAD activation). 
+ """ + audio_start_ms: int """ Milliseconds from the start of all audio written to the buffer during the diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_stopped_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_stopped_event.py index 6cb7845f..b3fb2092 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_stopped_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_stopped_event.py @@ -8,6 +8,12 @@ class InputAudioBufferSpeechStoppedEvent(BaseModel): + """ + Returned in `server_vad` mode when the server detects the end of speech in + the audio buffer. The server will also send an `conversation.item.created` + event with the user message item that is created from the audio buffer. + """ + audio_end_ms: int """Milliseconds since the session started when speech stopped. diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_timeout_triggered.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_timeout_triggered.py index 5c5dc5cf..72b107d5 100644 --- a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_timeout_triggered.py +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_timeout_triggered.py @@ -8,6 +8,23 @@ class InputAudioBufferTimeoutTriggered(BaseModel): + """Returned when the Server VAD timeout is triggered for the input audio buffer. + + This is configured + with `idle_timeout_ms` in the `turn_detection` settings of the session, and it indicates that + there hasn't been any speech detected for the configured duration. + + The `audio_start_ms` and `audio_end_ms` fields indicate the segment of audio after the last + model response up to the triggering time, as an offset from the beginning of audio written + to the input audio buffer. This means it demarcates the segment of audio that was silent and + the difference between the start and end values will roughly match the configured timeout. 
+ + The empty audio will be committed to the conversation as an `input_audio` item (there will be a + `input_audio_buffer.committed` event) and a model response will be generated. There may be speech + that didn't trigger VAD but is still detected by the model, so the model may respond with + something relevant to the conversation or a prompt to continue speaking. + """ + audio_end_ms: int """ Millisecond offset of audio written to the input audio buffer at the time the diff --git a/portkey_ai/_vendor/openai/types/realtime/log_prob_properties.py b/portkey_ai/_vendor/openai/types/realtime/log_prob_properties.py index 92477d67..423af1c4 100644 --- a/portkey_ai/_vendor/openai/types/realtime/log_prob_properties.py +++ b/portkey_ai/_vendor/openai/types/realtime/log_prob_properties.py @@ -8,6 +8,8 @@ class LogProbProperties(BaseModel): + """A log probability object.""" + token: str """The token that was used to generate the log probability.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_completed.py b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_completed.py index 941280f0..2fe64147 100644 --- a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_completed.py +++ b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_completed.py @@ -8,6 +8,8 @@ class McpListToolsCompleted(BaseModel): + """Returned when listing MCP tools has completed for an item.""" + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_failed.py b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_failed.py index 892eda21..8cad7c0a 100644 --- a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_failed.py +++ b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_failed.py @@ -8,6 +8,8 @@ class McpListToolsFailed(BaseModel): + """Returned when listing MCP tools has failed for an item.""" + event_id: str """The unique ID of the server event.""" diff --git 
a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_in_progress.py b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_in_progress.py index 4254b5fd..823bb875 100644 --- a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_in_progress.py +++ b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_in_progress.py @@ -8,6 +8,8 @@ class McpListToolsInProgress(BaseModel): + """Returned when listing MCP tools is in progress for an item.""" + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py index b4c95039..b3fa7620 100644 --- a/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py @@ -9,6 +9,15 @@ class OutputAudioBufferClearEvent(BaseModel): + """**WebRTC/SIP Only:** Emit to cut off the current audio response. + + This will trigger the server to + stop generating audio and emit a `output_audio_buffer.cleared` event. This + event should be preceded by a `response.cancel` client event to stop the + generation of the current response. + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). 
+ """ + type: Literal["output_audio_buffer.clear"] """The event type, must be `output_audio_buffer.clear`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py index a3205ebc..59f897a5 100644 --- a/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py @@ -8,6 +8,15 @@ class OutputAudioBufferClearEventParam(TypedDict, total=False): + """**WebRTC/SIP Only:** Emit to cut off the current audio response. + + This will trigger the server to + stop generating audio and emit a `output_audio_buffer.cleared` event. This + event should be preceded by a `response.cancel` client event to stop the + generation of the current response. + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). + """ + type: Required[Literal["output_audio_buffer.clear"]] """The event type, must be `output_audio_buffer.clear`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/rate_limits_updated_event.py b/portkey_ai/_vendor/openai/types/realtime/rate_limits_updated_event.py index 048a4028..951de103 100644 --- a/portkey_ai/_vendor/openai/types/realtime/rate_limits_updated_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/rate_limits_updated_event.py @@ -23,6 +23,14 @@ class RateLimit(BaseModel): class RateLimitsUpdatedEvent(BaseModel): + """Emitted at the beginning of a Response to indicate the updated rate limits. + + + When a Response is created some tokens will be "reserved" for the output + tokens, the rate limits shown here reflect that reservation, which is then + adjusted accordingly once the Response is completed. 
+ """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config.py index 72d7cc59..daa50358 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config.py @@ -10,6 +10,8 @@ class RealtimeAudioConfig(BaseModel): + """Configuration for input and output audio.""" + input: Optional[RealtimeAudioConfigInput] = None output: Optional[RealtimeAudioConfigOutput] = None diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input.py index cfcb7f22..08e1b146 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input.py @@ -12,6 +12,13 @@ class NoiseReduction(BaseModel): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: Optional[NoiseReductionType] = None """Type of noise reduction. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input_param.py index 730f46cf..73495e6c 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input_param.py @@ -14,6 +14,13 @@ class NoiseReduction(TypedDict, total=False): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. 
+ Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: NoiseReductionType """Type of noise reduction. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py index a8af237c..2922405f 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py @@ -29,8 +29,8 @@ class RealtimeAudioConfigOutput(BaseModel): ] = None """The voice the model uses to respond. - Voice cannot be changed during the session once the model has responded with - audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend + Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, + `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the + session once the model has responded with audio at least once. We recommend `marin` and `cedar` for best quality. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py index 8e887d34..d04fd3a3 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py @@ -28,8 +28,8 @@ class RealtimeAudioConfigOutputParam(TypedDict, total=False): voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]] """The voice the model uses to respond. 
- Voice cannot be changed during the session once the model has responded with - audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend + Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, + `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the + session once the model has responded with audio at least once. We recommend `marin` and `cedar` for best quality. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py index 2c41de35..7899fe35 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py @@ -11,6 +11,8 @@ class RealtimeAudioConfigParam(TypedDict, total=False): + """Configuration for input and output audio.""" + input: RealtimeAudioConfigInputParam output: RealtimeAudioConfigOutputParam diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats.py index 10f91883..fa10c9a7 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats.py @@ -10,6 +10,8 @@ class AudioPCM(BaseModel): + """The PCM audio format. Only a 24kHz sample rate is supported.""" + rate: Optional[Literal[24000]] = None """The sample rate of the audio. Always `24000`.""" @@ -18,11 +20,15 @@ class AudioPCM(BaseModel): class AudioPCMU(BaseModel): + """The G.711 μ-law format.""" + type: Optional[Literal["audio/pcmu"]] = None """The audio format. Always `audio/pcmu`.""" class AudioPCMA(BaseModel): + """The G.711 A-law format.""" + type: Optional[Literal["audio/pcma"]] = None """The audio format. 
Always `audio/pcma`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats_param.py index cf58577f..6392f632 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats_param.py @@ -9,6 +9,8 @@ class AudioPCM(TypedDict, total=False): + """The PCM audio format. Only a 24kHz sample rate is supported.""" + rate: Literal[24000] """The sample rate of the audio. Always `24000`.""" @@ -17,11 +19,15 @@ class AudioPCM(TypedDict, total=False): class AudioPCMU(TypedDict, total=False): + """The G.711 μ-law format.""" + type: Literal["audio/pcmu"] """The audio format. Always `audio/pcmu`.""" class AudioPCMA(TypedDict, total=False): + """The G.711 A-law format.""" + type: Literal["audio/pcma"] """The audio format. Always `audio/pcma`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection.py index d3f4e003..8d9aff35 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection.py @@ -10,13 +10,22 @@ class ServerVad(BaseModel): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Literal["server_vad"] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" create_response: Optional[bool] = None - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + If `interrupt_response` is set to `false` this may fail to create a response if + the model is already responding. 
+ + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ idle_timeout_ms: Optional[int] = None @@ -37,9 +46,13 @@ class ServerVad(BaseModel): interrupt_response: Optional[bool] = None """ - Whether or not to automatically interrupt any ongoing response with output to - the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. + Whether or not to automatically interrupt (cancel) any ongoing response with + output to the default conversation (i.e. `conversation` of `auto`) when a VAD + start event occurs. If `true` then the response will be cancelled, otherwise it + will continue until complete. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ prefix_padding_ms: Optional[int] = None @@ -67,6 +80,10 @@ class ServerVad(BaseModel): class SemanticVad(BaseModel): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. + """ + type: Literal["semantic_vad"] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection_param.py index 09b8cfd1..30522d74 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection_param.py @@ -9,13 +9,22 @@ class ServerVad(TypedDict, total=False): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. 
+ """ + type: Required[Literal["server_vad"]] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" create_response: bool - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + If `interrupt_response` is set to `false` this may fail to create a response if + the model is already responding. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ idle_timeout_ms: Optional[int] @@ -36,9 +45,13 @@ class ServerVad(TypedDict, total=False): interrupt_response: bool """ - Whether or not to automatically interrupt any ongoing response with output to - the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. + Whether or not to automatically interrupt (cancel) any ongoing response with + output to the default conversation (i.e. `conversation` of `auto`) when a VAD + start event occurs. If `true` then the response will be cancelled, otherwise it + will continue until complete. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ prefix_padding_ms: int @@ -66,6 +79,10 @@ class ServerVad(TypedDict, total=False): class SemanticVad(TypedDict, total=False): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. 
+ """ + type: Required[Literal["semantic_vad"]] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message.py index 6b0f86ee..207831a3 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message.py @@ -33,6 +33,8 @@ class Content(BaseModel): class RealtimeConversationItemAssistantMessage(BaseModel): + """An assistant message item in a Realtime conversation.""" + content: List[Content] """The content of the message.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message_param.py index 93699afb..abc78e7d 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message_param.py @@ -33,6 +33,8 @@ class Content(TypedDict, total=False): class RealtimeConversationItemAssistantMessageParam(TypedDict, total=False): + """An assistant message item in a Realtime conversation.""" + content: Required[Iterable[Content]] """The content of the message.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call.py index 279a2fcd..4e403948 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call.py @@ -9,6 +9,8 @@ class RealtimeConversationItemFunctionCall(BaseModel): + """A function call item in a Realtime conversation.""" + 
arguments: str """The arguments of the function call. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output.py index 4b6b15d0..cdbc352d 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output.py @@ -9,6 +9,8 @@ class RealtimeConversationItemFunctionCallOutput(BaseModel): + """A function call output item in a Realtime conversation.""" + call_id: str """The ID of the function call this output is for.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output_param.py index 56d62da5..2e56a81d 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output_param.py @@ -8,6 +8,8 @@ class RealtimeConversationItemFunctionCallOutputParam(TypedDict, total=False): + """A function call output item in a Realtime conversation.""" + call_id: Required[str] """The ID of the function call this output is for.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_param.py index 36a16a27..6467ce14 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_param.py @@ -8,6 +8,8 @@ class RealtimeConversationItemFunctionCallParam(TypedDict, total=False): + """A function call item in a Realtime conversation.""" + arguments: Required[str] """The arguments of the 
function call. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message.py index 7dac5c9f..f69bc039 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message.py @@ -17,6 +17,10 @@ class Content(BaseModel): class RealtimeConversationItemSystemMessage(BaseModel): + """ + A system message in a Realtime conversation can be used to provide additional context or instructions to the model. This is similar but distinct from the instruction prompt provided at the start of a conversation, as system messages can be added at any point in the conversation. For major changes to the conversation's behavior, use instructions, but for smaller updates (e.g. "the user is now asking about a different topic"), use system messages. + """ + content: List[Content] """The content of the message.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message_param.py index a2790fcf..93880198 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message_param.py @@ -17,6 +17,10 @@ class Content(TypedDict, total=False): class RealtimeConversationItemSystemMessageParam(TypedDict, total=False): + """ + A system message in a Realtime conversation can be used to provide additional context or instructions to the model. This is similar but distinct from the instruction prompt provided at the start of a conversation, as system messages can be added at any point in the conversation. For major changes to the conversation's behavior, use instructions, but for smaller updates (e.g. 
"the user is now asking about a different topic"), use system messages. + """ + content: Required[Iterable[Content]] """The content of the message.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message.py index 30d9bb10..20e9614e 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message.py @@ -44,6 +44,8 @@ class Content(BaseModel): class RealtimeConversationItemUserMessage(BaseModel): + """A user message item in a Realtime conversation.""" + content: List[Content] """The content of the message.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message_param.py index 7d3b9bc1..69a24692 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message_param.py @@ -44,6 +44,8 @@ class Content(TypedDict, total=False): class RealtimeConversationItemUserMessageParam(TypedDict, total=False): + """A user message item in a Realtime conversation.""" + content: Required[Iterable[Content]] """The content of the message.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_error.py b/portkey_ai/_vendor/openai/types/realtime/realtime_error.py index f1017d09..2aa5bc94 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_error.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_error.py @@ -8,6 +8,8 @@ class RealtimeError(BaseModel): + """Details of the error.""" + message: str """A human-readable error message.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_error_event.py 
b/portkey_ai/_vendor/openai/types/realtime/realtime_error_event.py index 8b501d6b..574464b2 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_error_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_error_event.py @@ -9,6 +9,12 @@ class RealtimeErrorEvent(BaseModel): + """ + Returned when an error occurs, which could be a client problem or a server + problem. Most errors are recoverable and the session will stay open, we + recommend to implementors to monitor and log error messages by default. + """ + error: RealtimeError """Details of the error.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request.py index bafc8d89..1744c900 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request.py @@ -8,6 +8,8 @@ class RealtimeMcpApprovalRequest(BaseModel): + """A Realtime item requesting human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request_param.py index 57c21a48..f7cb68d6 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request_param.py @@ -8,6 +8,8 @@ class RealtimeMcpApprovalRequestParam(TypedDict, total=False): + """A Realtime item requesting human approval of a tool invocation.""" + id: Required[str] """The unique ID of the approval request.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response.py index 2cb03bc6..f8525a12 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response.py 
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response.py @@ -9,6 +9,8 @@ class RealtimeMcpApprovalResponse(BaseModel): + """A Realtime item responding to an MCP approval request.""" + id: str """The unique ID of the approval response.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response_param.py index 19b63370..6a65f7ce 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response_param.py @@ -9,6 +9,8 @@ class RealtimeMcpApprovalResponseParam(TypedDict, total=False): + """A Realtime item responding to an MCP approval request.""" + id: Required[str] """The unique ID of the approval response.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools.py index aeb58a1f..669d1fb4 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools.py @@ -9,6 +9,8 @@ class Tool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -23,6 +25,8 @@ class Tool(BaseModel): class RealtimeMcpListTools(BaseModel): + """A Realtime item listing tools available on an MCP server.""" + server_label: str """The label of the MCP server.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools_param.py index eb8605a0..614fa533 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools_param.py @@ -9,6 +9,8 @@ class Tool(TypedDict, total=False): + """A tool available on an MCP server.""" + 
input_schema: Required[object] """The JSON schema describing the tool's input.""" @@ -23,6 +25,8 @@ class Tool(TypedDict, total=False): class RealtimeMcpListToolsParam(TypedDict, total=False): + """A Realtime item listing tools available on an MCP server.""" + server_label: Required[str] """The label of the MCP server.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call.py index 019aee25..f53ad0ea 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call.py @@ -18,6 +18,8 @@ class RealtimeMcpToolCall(BaseModel): + """A Realtime item representing an invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call_param.py index 0ba16d3d..8ccb5efc 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call_param.py @@ -15,6 +15,8 @@ class RealtimeMcpToolCallParam(TypedDict, total=False): + """A Realtime item representing an invocation of a tool on an MCP server.""" + id: Required[str] """The unique ID of the tool call.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response.py index 92d75491..a23edc48 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response.py @@ -30,10 +30,14 @@ class AudioOutput(BaseModel): class Audio(BaseModel): + """Configuration for audio output.""" + output: Optional[AudioOutput] = None class RealtimeResponse(BaseModel): + """The response resource.""" + id: Optional[str] = None """The unique ID of the response, will look like 
`resp_1234`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py index 48a5d67e..db02511a 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py @@ -18,12 +18,13 @@ class Output(BaseModel): ] = None """The voice the model uses to respond. - Voice cannot be changed during the session once the model has responded with - audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend - `marin` and `cedar` for best quality. + Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, + `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the + session once the model has responded with audio at least once. """ class RealtimeResponseCreateAudioOutput(BaseModel): + """Configuration for audio input and output.""" + output: Optional[Output] = None diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py index 9aa6d288..22787ad1 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py @@ -17,12 +17,13 @@ class Output(TypedDict, total=False): voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]] """The voice the model uses to respond. - Voice cannot be changed during the session once the model has responded with - audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. 
We recommend - `marin` and `cedar` for best quality. + Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, + `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the + session once the model has responded with audio at least once. """ class RealtimeResponseCreateAudioOutputParam(TypedDict, total=False): + """Configuration for audio input and output.""" + output: Output diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py index 119b4a45..72189e10 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py @@ -17,6 +17,8 @@ class AllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -33,6 +35,8 @@ class AllowedToolsMcpToolFilter(BaseModel): class RequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -46,6 +50,8 @@ class RequireApprovalMcpToolApprovalFilterAlways(BaseModel): class RequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -59,6 +65,13 @@ class RequireApprovalMcpToolApprovalFilterNever(BaseModel): class RequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. 
+ """ + always: Optional[RequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -70,6 +83,11 @@ class RequireApprovalMcpToolApprovalFilter(BaseModel): class RealtimeResponseCreateMcpTool(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py index 3b9cf047..68dd6bdb 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py @@ -19,6 +19,8 @@ class AllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -35,6 +37,8 @@ class AllowedToolsMcpToolFilter(TypedDict, total=False): class RequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -48,6 +52,8 @@ class RequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class RequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -61,6 +67,13 @@ class RequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class RequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. 
+ + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: RequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -72,6 +85,11 @@ class RequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class RealtimeResponseCreateMcpToolParam(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params.py index e8486220..deec8c92 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params.py @@ -22,6 +22,8 @@ class RealtimeResponseCreateParams(BaseModel): + """Create a new Realtime response with these parameters""" + audio: Optional[RealtimeResponseCreateAudioOutput] = None """Configuration for audio input and output.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params_param.py index 116384bd..caad5bc9 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params_param.py @@ -23,6 +23,8 @@ class RealtimeResponseCreateParamsParam(TypedDict, total=False): + """Create a new Realtime response with these parameters""" + audio: RealtimeResponseCreateAudioOutputParam """Configuration for audio input and output.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_status.py 
b/portkey_ai/_vendor/openai/types/realtime/realtime_response_status.py index 12999f61..26b272ae 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_status.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_status.py @@ -9,6 +9,11 @@ class Error(BaseModel): + """ + A description of the error that caused the response to fail, + populated when the `status` is `failed`. + """ + code: Optional[str] = None """Error code, if any.""" @@ -17,6 +22,8 @@ class Error(BaseModel): class RealtimeResponseStatus(BaseModel): + """Additional details about the status.""" + error: Optional[Error] = None """ A description of the error that caused the response to fail, populated when the diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage.py index fb8893b3..a5985d8a 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage.py @@ -10,6 +10,14 @@ class RealtimeResponseUsage(BaseModel): + """Usage statistics for the Response, this will correspond to billing. + + A + Realtime API session will maintain a conversation context and append new + Items to the Conversation, thus output from previous turns (text and + audio tokens) will become the input for later turns. + """ + input_token_details: Optional[RealtimeResponseUsageInputTokenDetails] = None """Details about the input tokens used in the Response. 
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_input_token_details.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_input_token_details.py index e14a74a8..0fc71749 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_input_token_details.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_input_token_details.py @@ -8,6 +8,8 @@ class CachedTokensDetails(BaseModel): + """Details about the cached tokens used as input for the Response.""" + audio_tokens: Optional[int] = None """The number of cached audio tokens used as input for the Response.""" @@ -19,6 +21,11 @@ class CachedTokensDetails(BaseModel): class RealtimeResponseUsageInputTokenDetails(BaseModel): + """Details about the input tokens used in the Response. + + Cached tokens are tokens from previous turns in the conversation that are included as context for the current response. Cached tokens here are counted as a subset of input tokens, meaning input tokens will include cached and uncached tokens. 
+ """ + audio_tokens: Optional[int] = None """The number of audio tokens used as input for the Response.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_output_token_details.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_output_token_details.py index dfa97a1f..2154c77d 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_output_token_details.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_output_token_details.py @@ -8,6 +8,8 @@ class RealtimeResponseUsageOutputTokenDetails(BaseModel): + """Details about the output tokens used in the Response.""" + audio_tokens: Optional[int] = None """The number of audio tokens used in the Response.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_server_event.py b/portkey_ai/_vendor/openai/types/realtime/realtime_server_event.py index 1605b81a..5de53d05 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_server_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_server_event.py @@ -42,6 +42,7 @@ from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent +from .input_audio_buffer_dtmf_event_received_event import InputAudioBufferDtmfEventReceivedEvent from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent from .conversation_item_input_audio_transcription_segment import ConversationItemInputAudioTranscriptionSegment from .conversation_item_input_audio_transcription_delta_event import ConversationItemInputAudioTranscriptionDeltaEvent @@ -60,6 +61,11 @@ class ConversationItemRetrieved(BaseModel): + """Returned when a conversation item is retrieved with `conversation.item.retrieve`. 
+ + This is provided as a way to fetch the server's representation of an item, for example to get access to the post-processed audio data after noise cancellation and VAD. It includes the full content of the Item, including audio data. + """ + event_id: str """The unique ID of the server event.""" @@ -71,6 +77,13 @@ class ConversationItemRetrieved(BaseModel): class OutputAudioBufferStarted(BaseModel): + """ + **WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This event is + emitted after an audio content part has been added (`response.content_part.added`) + to the response. + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). + """ + event_id: str """The unique ID of the server event.""" @@ -82,6 +95,13 @@ class OutputAudioBufferStarted(BaseModel): class OutputAudioBufferStopped(BaseModel): + """ + **WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on the server, + and no more audio is forthcoming. This event is emitted after the full response + data has been sent to the client (`response.done`). + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). + """ + event_id: str """The unique ID of the server event.""" @@ -93,6 +113,15 @@ class OutputAudioBufferStopped(BaseModel): class OutputAudioBufferCleared(BaseModel): + """**WebRTC/SIP Only:** Emitted when the output audio buffer is cleared. + + This happens either in VAD + mode when the user has interrupted (`input_audio_buffer.speech_started`), + or when the client has emitted the `output_audio_buffer.clear` event to manually + cut off the current audio response. + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). 
+ """ + event_id: str """The unique ID of the server event.""" @@ -116,6 +145,7 @@ class OutputAudioBufferCleared(BaseModel): RealtimeErrorEvent, InputAudioBufferClearedEvent, InputAudioBufferCommittedEvent, + InputAudioBufferDtmfEventReceivedEvent, InputAudioBufferSpeechStartedEvent, InputAudioBufferSpeechStoppedEvent, RateLimitsUpdatedEvent, diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py index a4998802..13a12f55 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py @@ -6,6 +6,8 @@ class RealtimeSessionClientSecret(BaseModel): + """Ephemeral key returned by the API.""" + expires_at: int """Timestamp for when the token expires. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py index 016ae45b..4a93c91c 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py @@ -15,6 +15,8 @@ class RealtimeSessionCreateRequest(BaseModel): + """Realtime session object configuration.""" + type: Literal["realtime"] """The type of session to create. Always `realtime` for the Realtime API.""" @@ -64,8 +66,10 @@ class RealtimeSessionCreateRequest(BaseModel): "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", + "gpt-realtime-mini-2025-12-15", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", + "gpt-audio-mini-2025-12-15", ], None, ] = None @@ -110,13 +114,18 @@ class RealtimeSessionCreateRequest(BaseModel): limit, the conversation be truncated, meaning messages (starting from the oldest) will not be included in the model's context. 
A 32k context model with 4,096 max output tokens can only include 28,224 tokens in the context before - truncation occurs. Clients can configure truncation behavior to truncate with a - lower max token limit, which is an effective way to control token usage and - cost. Truncation will reduce the number of cached tokens on the next turn - (busting the cache), since messages are dropped from the beginning of the - context. However, clients can also configure truncation to retain messages up to - a fraction of the maximum context size, which will reduce the need for future - truncations and thus improve the cache rate. Truncation can be disabled - entirely, which means the server will never truncate but would instead return an - error if the conversation exceeds the model's input token limit. + truncation occurs. + + Clients can configure truncation behavior to truncate with a lower max token + limit, which is an effective way to control token usage and cost. + + Truncation will reduce the number of cached tokens on the next turn (busting the + cache), since messages are dropped from the beginning of the context. However, + clients can also configure truncation to retain messages up to a fraction of the + maximum context size, which will reduce the need for future truncations and thus + improve the cache rate. + + Truncation can be disabled entirely, which means the server will never truncate + but would instead return an error if the conversation exceeds the model's input + token limit. 
""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py index 8c3998c1..dee63d09 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py @@ -16,6 +16,8 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False): + """Realtime session object configuration.""" + type: Required[Literal["realtime"]] """The type of session to create. Always `realtime` for the Realtime API.""" @@ -65,8 +67,10 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False): "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", + "gpt-realtime-mini-2025-12-15", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", + "gpt-audio-mini-2025-12-15", ], ] """The Realtime model used for this session.""" @@ -110,13 +114,18 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False): limit, the conversation be truncated, meaning messages (starting from the oldest) will not be included in the model's context. A 32k context model with 4,096 max output tokens can only include 28,224 tokens in the context before - truncation occurs. Clients can configure truncation behavior to truncate with a - lower max token limit, which is an effective way to control token usage and - cost. Truncation will reduce the number of cached tokens on the next turn - (busting the cache), since messages are dropped from the beginning of the - context. However, clients can also configure truncation to retain messages up to - a fraction of the maximum context size, which will reduce the need for future - truncations and thus improve the cache rate. Truncation can be disabled - entirely, which means the server will never truncate but would instead return an - error if the conversation exceeds the model's input token limit. 
+ truncation occurs. + + Clients can configure truncation behavior to truncate with a lower max token + limit, which is an effective way to control token usage and cost. + + Truncation will reduce the number of cached tokens on the next turn (busting the + cache), since messages are dropped from the beginning of the context. However, + clients can also configure truncation to retain messages up to a fraction of the + maximum context size, which will reduce the need for future truncations and thus + improve the cache rate. + + Truncation can be disabled entirely, which means the server will never truncate + but would instead return an error if the conversation exceeds the model's input + token limit. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py index c1336cd6..15a200ca 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py @@ -40,6 +40,13 @@ class AudioInputNoiseReduction(BaseModel): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: Optional[NoiseReductionType] = None """Type of noise reduction. @@ -49,13 +56,22 @@ class AudioInputNoiseReduction(BaseModel): class AudioInputTurnDetectionServerVad(BaseModel): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. 
+ """ + type: Literal["server_vad"] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" create_response: Optional[bool] = None - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + If `interrupt_response` is set to `false` this may fail to create a response if + the model is already responding. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ idle_timeout_ms: Optional[int] = None @@ -76,9 +92,13 @@ class AudioInputTurnDetectionServerVad(BaseModel): interrupt_response: Optional[bool] = None """ - Whether or not to automatically interrupt any ongoing response with output to - the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. + Whether or not to automatically interrupt (cancel) any ongoing response with + output to the default conversation (i.e. `conversation` of `auto`) when a VAD + start event occurs. If `true` then the response will be cancelled, otherwise it + will continue until complete. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ prefix_padding_ms: Optional[int] = None @@ -106,6 +126,10 @@ class AudioInputTurnDetectionServerVad(BaseModel): class AudioInputTurnDetectionSemanticVad(BaseModel): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. 
+ """ + type: Literal["semantic_vad"] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" @@ -209,6 +233,8 @@ class AudioOutput(BaseModel): class Audio(BaseModel): + """Configuration for input and output audio.""" + input: Optional[AudioInput] = None output: Optional[AudioOutput] = None @@ -218,6 +244,8 @@ class Audio(BaseModel): class ToolMcpToolAllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -234,6 +262,8 @@ class ToolMcpToolAllowedToolsMcpToolFilter(BaseModel): class ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -247,6 +277,8 @@ class ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways(BaseModel): class ToolMcpToolRequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -260,6 +292,13 @@ class ToolMcpToolRequireApprovalMcpToolApprovalFilterNever(BaseModel): class ToolMcpToolRequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -273,6 +312,11 @@ class ToolMcpToolRequireApprovalMcpToolApprovalFilter(BaseModel): class ToolMcpTool(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
+ """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" @@ -342,6 +386,8 @@ class ToolMcpTool(BaseModel): class TracingTracingConfiguration(BaseModel): + """Granular configuration for tracing.""" + group_id: Optional[str] = None """ The group id to attach to this trace to enable filtering and grouping in the @@ -365,6 +411,12 @@ class TracingTracingConfiguration(BaseModel): class RealtimeSessionCreateResponse(BaseModel): + """A new Realtime session configuration, with an ephemeral key. + + Default TTL + for keys is one minute. + """ + client_secret: RealtimeSessionClientSecret """Ephemeral key returned by the API.""" @@ -417,8 +469,10 @@ class RealtimeSessionCreateResponse(BaseModel): "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-realtime-mini", "gpt-realtime-mini-2025-10-06", + "gpt-realtime-mini-2025-12-15", "gpt-audio-mini", "gpt-audio-mini-2025-10-06", + "gpt-audio-mini-2025-12-15", ], None, ] = None @@ -463,13 +517,18 @@ class RealtimeSessionCreateResponse(BaseModel): limit, the conversation be truncated, meaning messages (starting from the oldest) will not be included in the model's context. A 32k context model with 4,096 max output tokens can only include 28,224 tokens in the context before - truncation occurs. Clients can configure truncation behavior to truncate with a - lower max token limit, which is an effective way to control token usage and - cost. Truncation will reduce the number of cached tokens on the next turn - (busting the cache), since messages are dropped from the beginning of the - context. However, clients can also configure truncation to retain messages up to - a fraction of the maximum context size, which will reduce the need for future - truncations and thus improve the cache rate. Truncation can be disabled - entirely, which means the server will never truncate but would instead return an - error if the conversation exceeds the model's input token limit. + truncation occurs. 
+ + Clients can configure truncation behavior to truncate with a lower max token + limit, which is an effective way to control token usage and cost. + + Truncation will reduce the number of cached tokens on the next turn (busting the + cache), since messages are dropped from the beginning of the context. However, + clients can also configure truncation to retain messages up to a fraction of the + maximum context size, which will reduce the need for future truncations and thus + improve the cache rate. + + Truncation can be disabled entirely, which means the server will never truncate + but would instead return an error if the conversation exceeds the model's input + token limit. """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py index 630fc746..3cc404fe 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py @@ -22,6 +22,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -38,6 +40,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -51,6 +55,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. 
@@ -64,6 +70,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: McpRequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -75,6 +88,11 @@ class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class Mcp(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py index e7126ed6..92aaee7f 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py @@ -20,6 +20,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -36,6 +38,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. 
@@ -49,6 +53,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -62,6 +68,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): class McpRequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -73,6 +86,11 @@ class McpRequireApprovalMcpToolApprovalFilter(BaseModel): class Mcp(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py index 9ee58fdb..6889b4c3 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py @@ -21,6 +21,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. 
@@ -37,6 +39,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -50,6 +54,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -63,6 +69,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: McpRequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -74,6 +87,11 @@ class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class Mcp(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
+ """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config.py index 1c46de79..37e3ce89 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config.py @@ -9,6 +9,8 @@ class TracingConfiguration(BaseModel): + """Granular configuration for tracing.""" + group_id: Optional[str] = None """ The group id to attach to this trace to enable filtering and grouping in the diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config_param.py index fd9e2662..74241289 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config_param.py @@ -9,6 +9,8 @@ class TracingConfiguration(TypedDict, total=False): + """Granular configuration for tracing.""" + group_id: str """ The group id to attach to this trace to enable filtering and grouping in the diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio.py index a5506947..7ec29afb 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio.py @@ -9,4 +9,6 @@ class RealtimeTranscriptionSessionAudio(BaseModel): + """Configuration for input and output audio.""" + input: Optional[RealtimeTranscriptionSessionAudioInput] = None diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input.py index efc321cb..80ff2235 
100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input.py @@ -14,6 +14,13 @@ class NoiseReduction(BaseModel): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: Optional[NoiseReductionType] = None """Type of noise reduction. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_param.py index c9153b68..dd908c72 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_param.py @@ -16,6 +16,13 @@ class NoiseReduction(TypedDict, total=False): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: NoiseReductionType """Type of noise reduction. 
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py index 7dc7a8f3..3d4ee779 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py @@ -10,13 +10,22 @@ class ServerVad(BaseModel): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Literal["server_vad"] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" create_response: Optional[bool] = None - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + If `interrupt_response` is set to `false` this may fail to create a response if + the model is already responding. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ idle_timeout_ms: Optional[int] = None @@ -37,9 +46,13 @@ class ServerVad(BaseModel): interrupt_response: Optional[bool] = None """ - Whether or not to automatically interrupt any ongoing response with output to - the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. + Whether or not to automatically interrupt (cancel) any ongoing response with + output to the default conversation (i.e. `conversation` of `auto`) when a VAD + start event occurs. If `true` then the response will be cancelled, otherwise it + will continue until complete. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. 
""" prefix_padding_ms: Optional[int] = None @@ -67,6 +80,10 @@ class ServerVad(BaseModel): class SemanticVad(BaseModel): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. + """ + type: Literal["semantic_vad"] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py index d899b8c5..0aca59ce 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py @@ -9,13 +9,22 @@ class ServerVad(TypedDict, total=False): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Required[Literal["server_vad"]] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" create_response: bool - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + If `interrupt_response` is set to `false` this may fail to create a response if + the model is already responding. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ idle_timeout_ms: Optional[int] @@ -36,9 +45,13 @@ class ServerVad(TypedDict, total=False): interrupt_response: bool """ - Whether or not to automatically interrupt any ongoing response with output to - the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. 
+ Whether or not to automatically interrupt (cancel) any ongoing response with + output to the default conversation (i.e. `conversation` of `auto`) when a VAD + start event occurs. If `true` then the response will be cancelled, otherwise it + will continue until complete. + + If both `create_response` and `interrupt_response` are set to `false`, the model + will never respond automatically but VAD events will still be emitted. """ prefix_padding_ms: int @@ -66,6 +79,10 @@ class ServerVad(TypedDict, total=False): class SemanticVad(TypedDict, total=False): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. + """ + type: Required[Literal["semantic_vad"]] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_param.py index 1503a606..6bf11179 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_param.py @@ -10,4 +10,6 @@ class RealtimeTranscriptionSessionAudioParam(TypedDict, total=False): + """Configuration for input and output audio.""" + input: RealtimeTranscriptionSessionAudioInputParam diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request.py index 102f2b14..f72a4ad9 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request.py @@ -10,6 +10,8 @@ class RealtimeTranscriptionSessionCreateRequest(BaseModel): + """Realtime transcription session object configuration.""" + type: Literal["transcription"] """The type of 
session to create. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request_param.py index 80cbe2d4..9b4d8ead 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request_param.py @@ -11,6 +11,8 @@ class RealtimeTranscriptionSessionCreateRequestParam(TypedDict, total=False): + """Realtime transcription session object configuration.""" + type: Required[Literal["transcription"]] """The type of session to create. diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_response.py index 301af1ac..6ca6c380 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_response.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_response.py @@ -13,6 +13,8 @@ class AudioInputNoiseReduction(BaseModel): + """Configuration for input audio noise reduction.""" + type: Optional[NoiseReductionType] = None """Type of noise reduction. 
@@ -41,10 +43,14 @@ class AudioInput(BaseModel): class Audio(BaseModel): + """Configuration for input audio for the session.""" + input: Optional[AudioInput] = None class RealtimeTranscriptionSessionCreateResponse(BaseModel): + """A Realtime transcription session configuration object.""" + id: str """Unique identifier for the session that looks like `sess_1234567890abcdef`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_turn_detection.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_turn_detection.py index f5da31ce..8dacd60a 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_turn_detection.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_turn_detection.py @@ -8,6 +8,13 @@ class RealtimeTranscriptionSessionTurnDetection(BaseModel): + """Configuration for turn detection. + + Can be set to `null` to turn off. Server + VAD means that the model will detect the start and end of speech based on + audio volume and respond at the end of user speech. + """ + prefix_padding_ms: Optional[int] = None """Amount of audio to include before the VAD detected speech (in milliseconds). diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py index e19ed648..72a93a56 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py @@ -9,6 +9,11 @@ class TokenLimits(BaseModel): + """Optional custom token limits for this truncation strategy. + + If not provided, the model's default token limits will be used. 
+ """ + post_instructions: Optional[int] = None """ Maximum tokens allowed in the conversation after instructions (which including @@ -20,6 +25,10 @@ class TokenLimits(BaseModel): class RealtimeTruncationRetentionRatio(BaseModel): + """ + Retain a fraction of the conversation tokens when the conversation exceeds the input token limit. This allows you to amortize truncations across multiple turns, which can help improve cached token usage. + """ + retention_ratio: float """ Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py index 4ea80fe4..4648fa66 100644 --- a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py @@ -8,6 +8,11 @@ class TokenLimits(TypedDict, total=False): + """Optional custom token limits for this truncation strategy. + + If not provided, the model's default token limits will be used. + """ + post_instructions: int """ Maximum tokens allowed in the conversation after instructions (which including @@ -19,6 +24,10 @@ class TokenLimits(TypedDict, total=False): class RealtimeTruncationRetentionRatioParam(TypedDict, total=False): + """ + Retain a fraction of the conversation tokens when the conversation exceeds the input token limit. This allows you to amortize truncations across multiple turns, which can help improve cached token usage. 
+ """ + retention_ratio: Required[float] """ Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_delta_event.py index d92c5462..ae870140 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_audio_delta_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioDeltaEvent(BaseModel): + """Returned when the model-generated audio is updated.""" + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_done_event.py index 5ea0f07e..98715aba 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_audio_done_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_done_event.py @@ -8,6 +8,12 @@ class ResponseAudioDoneEvent(BaseModel): + """Returned when the model-generated audio is done. + + Also emitted when a Response + is interrupted, incomplete, or cancelled. 
+ """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_delta_event.py index 4dd5feca..4ec1a820 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_delta_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioTranscriptDeltaEvent(BaseModel): + """Returned when the model-generated transcription of audio output is updated.""" + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_done_event.py index 2de913d2..c2a24163 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_done_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_done_event.py @@ -8,6 +8,12 @@ class ResponseAudioTranscriptDoneEvent(BaseModel): + """ + Returned when the model-generated transcription of audio output is done + streaming. Also emitted when a Response is interrupted, incomplete, or + cancelled. + """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_cancel_event.py b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event.py index 15dc141c..9c611399 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_cancel_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event.py @@ -9,6 +9,15 @@ class ResponseCancelEvent(BaseModel): + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.done` event with a status of `response.status=cancelled`. 
If + there is no response to cancel, the server will respond with an error. It's safe + to call `response.cancel` even if no response is in progress, an error will be + returned the session will remain unaffected. + """ + type: Literal["response.cancel"] """The event type, must be `response.cancel`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_cancel_event_param.py b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event_param.py index f3374073..b233b407 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_cancel_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event_param.py @@ -8,6 +8,15 @@ class ResponseCancelEventParam(TypedDict, total=False): + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.done` event with a status of `response.status=cancelled`. If + there is no response to cancel, the server will respond with an error. It's safe + to call `response.cancel` even if no response is in progress, an error will be + returned the session will remain unaffected. + """ + type: Required[Literal["response.cancel"]] """The event type, must be `response.cancel`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_content_part_added_event.py b/portkey_ai/_vendor/openai/types/realtime/response_content_part_added_event.py index aca965c3..e47c84af 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_content_part_added_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_content_part_added_event.py @@ -9,6 +9,8 @@ class Part(BaseModel): + """The content part that was added.""" + audio: Optional[str] = None """Base64-encoded audio data (if type is "audio").""" @@ -23,6 +25,11 @@ class Part(BaseModel): class ResponseContentPartAddedEvent(BaseModel): + """ + Returned when a new content part is added to an assistant message item during + response generation. 
+ """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_content_part_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_content_part_done_event.py index 59af808a..a6cb8559 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_content_part_done_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_content_part_done_event.py @@ -9,6 +9,8 @@ class Part(BaseModel): + """The content part that is done.""" + audio: Optional[str] = None """Base64-encoded audio data (if type is "audio").""" @@ -23,6 +25,11 @@ class Part(BaseModel): class ResponseContentPartDoneEvent(BaseModel): + """ + Returned when a content part is done streaming in an assistant message item. + Also emitted when a Response is interrupted, incomplete, or cancelled. + """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_create_event.py b/portkey_ai/_vendor/openai/types/realtime/response_create_event.py index 75a08ee4..3e98a8d8 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_create_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_create_event.py @@ -10,6 +10,34 @@ class ResponseCreateEvent(BaseModel): + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history by default. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions` and `tools`. 
If these are set, they will override the Session's + configuration for this Response only. + + Responses can be created out-of-band of the default Conversation, meaning that they can + have arbitrary input, and it's possible to disable writing the output to the Conversation. + Only one Response can write to the default Conversation at a time, but otherwise multiple + Responses can be created in parallel. The `metadata` field is a good way to disambiguate + multiple simultaneous Responses. + + Clients can set `conversation` to `none` to create a Response that does not write to the default + Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting + raw Items and references to existing Items. + """ + type: Literal["response.create"] """The event type, must be `response.create`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_create_event_param.py b/portkey_ai/_vendor/openai/types/realtime/response_create_event_param.py index e5dd46d9..9da89e14 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_create_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_create_event_param.py @@ -10,6 +10,34 @@ class ResponseCreateEventParam(TypedDict, total=False): + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history by default. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions` and `tools`. If these are set, they will override the Session's + configuration for this Response only. 
+ + Responses can be created out-of-band of the default Conversation, meaning that they can + have arbitrary input, and it's possible to disable writing the output to the Conversation. + Only one Response can write to the default Conversation at a time, but otherwise multiple + Responses can be created in parallel. The `metadata` field is a good way to disambiguate + multiple simultaneous Responses. + + Clients can set `conversation` to `none` to create a Response that does not write to the default + Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting + raw Items and references to existing Items. + """ + type: Required[Literal["response.create"]] """The event type, must be `response.create`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_created_event.py b/portkey_ai/_vendor/openai/types/realtime/response_created_event.py index 996bf26f..dc594126 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_created_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_created_event.py @@ -9,6 +9,12 @@ class ResponseCreatedEvent(BaseModel): + """Returned when a new Response is created. + + The first event of response creation, + where the response is in an initial state of `in_progress`. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_done_event.py index ce9a4b9f..9c31a2aa 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_done_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_done_event.py @@ -9,6 +9,19 @@ class ResponseDoneEvent(BaseModel): + """Returned when a Response is done streaming. + + Always emitted, no matter the + final state. The Response object included in the `response.done` event will + include all output Items in the Response but will omit the raw audio data. 
+ + Clients should check the `status` field of the Response to determine if it was successful + (`completed`) or if there was another outcome: `cancelled`, `failed`, or `incomplete`. + + A response will contain all output items that were generated during the response, excluding + any audio content. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_delta_event.py index 6d96e78b..a426c3f2 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_delta_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_delta_event.py @@ -8,6 +8,8 @@ class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + """Returned when the model-generated function call arguments are updated.""" + call_id: str """The ID of the function call.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py index be7fae9a..504f91d5 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py @@ -8,6 +8,11 @@ class ResponseFunctionCallArgumentsDoneEvent(BaseModel): + """ + Returned when the model-generated function call arguments are done streaming. + Also emitted when a Response is interrupted, incomplete, or cancelled. 
+ """ + arguments: str """The final arguments as a JSON string.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_delta.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_delta.py index 0a02a1a5..d890de05 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_delta.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_delta.py @@ -9,6 +9,8 @@ class ResponseMcpCallArgumentsDelta(BaseModel): + """Returned when MCP tool call arguments are updated during response generation.""" + delta: str """The JSON-encoded arguments delta.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_done.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_done.py index 5ec95f17..a7cb2d19 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_done.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_done.py @@ -8,6 +8,8 @@ class ResponseMcpCallArgumentsDone(BaseModel): + """Returned when MCP tool call arguments are finalized during response generation.""" + arguments: str """The final JSON-encoded arguments string.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py index e3fcec21..13026053 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py @@ -8,6 +8,8 @@ class ResponseMcpCallCompleted(BaseModel): + """Returned when an MCP tool call has completed successfully.""" + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py index b7adc8c2..1c08d1d4 100644 --- 
a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py @@ -8,6 +8,8 @@ class ResponseMcpCallFailed(BaseModel): + """Returned when an MCP tool call has failed.""" + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py index d0fcc761..4c0ad149 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py @@ -8,6 +8,8 @@ class ResponseMcpCallInProgress(BaseModel): + """Returned when an MCP tool call has started and is in progress.""" + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_output_item_added_event.py b/portkey_ai/_vendor/openai/types/realtime/response_output_item_added_event.py index 509dfcae..abec0d18 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_output_item_added_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_output_item_added_event.py @@ -9,6 +9,8 @@ class ResponseOutputItemAddedEvent(BaseModel): + """Returned when a new Item is created during Response generation.""" + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_output_item_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_output_item_done_event.py index 800e4ae8..63936b97 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_output_item_done_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_output_item_done_event.py @@ -9,6 +9,12 @@ class ResponseOutputItemDoneEvent(BaseModel): + """Returned when an Item is done streaming. + + Also emitted when a Response is + interrupted, incomplete, or cancelled. 
+ """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_text_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_text_delta_event.py index 493348aa..b251b763 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_text_delta_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_text_delta_event.py @@ -8,6 +8,8 @@ class ResponseTextDeltaEvent(BaseModel): + """Returned when the text value of an "output_text" content part is updated.""" + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_text_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_text_done_event.py index 83c6cf06..046e5202 100644 --- a/portkey_ai/_vendor/openai/types/realtime/response_text_done_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/response_text_done_event.py @@ -8,6 +8,12 @@ class ResponseTextDoneEvent(BaseModel): + """Returned when the text value of an "output_text" content part is done streaming. + + Also + emitted when a Response is interrupted, incomplete, or cancelled. + """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/session_created_event.py b/portkey_ai/_vendor/openai/types/realtime/session_created_event.py index b5caad35..1b8d4a4d 100644 --- a/portkey_ai/_vendor/openai/types/realtime/session_created_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/session_created_event.py @@ -13,6 +13,13 @@ class SessionCreatedEvent(BaseModel): + """Returned when a Session is created. + + Emitted automatically when a new + connection is established as the first server event. This event will contain + the default Session configuration. 
+ """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/session_update_event.py b/portkey_ai/_vendor/openai/types/realtime/session_update_event.py index 2e226162..a8422e4e 100644 --- a/portkey_ai/_vendor/openai/types/realtime/session_update_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/session_update_event.py @@ -13,6 +13,18 @@ class SessionUpdateEvent(BaseModel): + """ + Send this event to update the session’s configuration. + The client may send this event at any time to update any field + except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet. + + When the server receives a `session.update`, it will respond + with a `session.updated` event showing the full, effective configuration. + Only the fields that are present in the `session.update` are updated. To clear a field like + `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array. + To clear a field like `turn_detection`, pass `null`. + """ + session: Session """Update the Realtime session. diff --git a/portkey_ai/_vendor/openai/types/realtime/session_update_event_param.py b/portkey_ai/_vendor/openai/types/realtime/session_update_event_param.py index 59623614..910e89ca 100644 --- a/portkey_ai/_vendor/openai/types/realtime/session_update_event_param.py +++ b/portkey_ai/_vendor/openai/types/realtime/session_update_event_param.py @@ -14,6 +14,18 @@ class SessionUpdateEventParam(TypedDict, total=False): + """ + Send this event to update the session’s configuration. + The client may send this event at any time to update any field + except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet. + + When the server receives a `session.update`, it will respond + with a `session.updated` event showing the full, effective configuration. + Only the fields that are present in the `session.update` are updated. 
To clear a field like + `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array. + To clear a field like `turn_detection`, pass `null`. + """ + session: Required[Session] """Update the Realtime session. diff --git a/portkey_ai/_vendor/openai/types/realtime/session_updated_event.py b/portkey_ai/_vendor/openai/types/realtime/session_updated_event.py index eb7ee033..e68a08d6 100644 --- a/portkey_ai/_vendor/openai/types/realtime/session_updated_event.py +++ b/portkey_ai/_vendor/openai/types/realtime/session_updated_event.py @@ -13,6 +13,11 @@ class SessionUpdatedEvent(BaseModel): + """ + Returned when a session is updated with a `session.update` event, unless + there is an error. + """ + event_id: str """The unique ID of the server event.""" diff --git a/portkey_ai/_vendor/openai/types/responses/__init__.py b/portkey_ai/_vendor/openai/types/responses/__init__.py index fd70836e..a4d939d9 100644 --- a/portkey_ai/_vendor/openai/types/responses/__init__.py +++ b/portkey_ai/_vendor/openai/types/responses/__init__.py @@ -23,13 +23,17 @@ from .response_status import ResponseStatus as ResponseStatus from .tool_choice_mcp import ToolChoiceMcp as ToolChoiceMcp from .web_search_tool import WebSearchTool as WebSearchTool +from .apply_patch_tool import ApplyPatchTool as ApplyPatchTool from .file_search_tool import FileSearchTool as FileSearchTool from .custom_tool_param import CustomToolParam as CustomToolParam +from .tool_choice_shell import ToolChoiceShell as ToolChoiceShell from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes +from .compacted_response import CompactedResponse as CompactedResponse from .easy_input_message import EasyInputMessage as EasyInputMessage from .response_item_list import ResponseItemList as ResponseItemList from .tool_choice_custom import ToolChoiceCustom as ToolChoiceCustom from .computer_tool_param import ComputerToolParam as ComputerToolParam +from .function_shell_tool import FunctionShellTool as 
FunctionShellTool from .function_tool_param import FunctionToolParam as FunctionToolParam from .response_includable import ResponseIncludable as ResponseIncludable from .response_input_file import ResponseInputFile as ResponseInputFile @@ -51,22 +55,28 @@ from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent from .tool_choice_mcp_param import ToolChoiceMcpParam as ToolChoiceMcpParam from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam +from .apply_patch_tool_param import ApplyPatchToolParam as ApplyPatchToolParam from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam from .input_item_list_params import InputItemListParams as InputItemListParams from .response_create_params import ResponseCreateParams as ResponseCreateParams from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent from .response_input_content import ResponseInputContent as ResponseInputContent +from .response_compact_params import ResponseCompactParams as ResponseCompactParams from .response_output_message import ResponseOutputMessage as ResponseOutputMessage from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal from .response_reasoning_item import ResponseReasoningItem as ResponseReasoningItem +from .tool_choice_apply_patch import ToolChoiceApplyPatch as ToolChoiceApplyPatch +from .tool_choice_shell_param import ToolChoiceShellParam as ToolChoiceShellParam from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam from .web_search_preview_tool import WebSearchPreviewTool as WebSearchPreviewTool from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam from .input_token_count_params import InputTokenCountParams as InputTokenCountParams +from .response_compaction_item import ResponseCompactionItem as ResponseCompactionItem from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent from 
.response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent from .tool_choice_custom_param import ToolChoiceCustomParam as ToolChoiceCustomParam +from .function_shell_tool_param import FunctionShellToolParam as FunctionShellToolParam from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent from .response_custom_tool_call import ResponseCustomToolCall as ResponseCustomToolCall from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent @@ -98,7 +108,10 @@ from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam +from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam as ToolChoiceApplyPatchParam from .web_search_preview_tool_param import WebSearchPreviewToolParam as WebSearchPreviewToolParam +from .response_apply_patch_tool_call import ResponseApplyPatchToolCall as ResponseApplyPatchToolCall +from .response_compaction_item_param import ResponseCompactionItemParam as ResponseCompactionItemParam from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent from .response_custom_tool_call_param import ResponseCustomToolCallParam as ResponseCustomToolCallParam @@ -110,6 +123,7 @@ from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam +from 
.response_function_shell_tool_call import ResponseFunctionShellToolCall as ResponseFunctionShellToolCall from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam from .response_input_file_content_param import ResponseInputFileContentParam as ResponseInputFileContentParam from .response_input_text_content_param import ResponseInputTextContentParam as ResponseInputTextContentParam @@ -123,8 +137,10 @@ from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent as ResponseMcpCallInProgressEvent from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent as ResponseReasoningTextDeltaEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent +from .response_compaction_item_param_param import ResponseCompactionItemParamParam as ResponseCompactionItemParamParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent as ResponseMcpListToolsFailedEvent +from .response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput as ResponseApplyPatchToolCallOutput from .response_audio_transcript_delta_event import ( ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, ) @@ -158,6 +174,9 @@ from .response_function_call_output_item_param import ( ResponseFunctionCallOutputItemParam as ResponseFunctionCallOutputItemParam, ) +from .response_function_shell_tool_call_output import ( + ResponseFunctionShellToolCallOutput as ResponseFunctionShellToolCallOutput, +) from .response_image_gen_call_generating_event import ( ResponseImageGenCallGeneratingEvent as ResponseImageGenCallGeneratingEvent, ) @@ -206,6 +225,9 @@ from .response_function_call_arguments_done_event import ( ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, ) 
+from .response_function_shell_call_output_content import ( + ResponseFunctionShellCallOutputContent as ResponseFunctionShellCallOutputContent, +) from .response_image_gen_call_partial_image_event import ( ResponseImageGenCallPartialImageEvent as ResponseImageGenCallPartialImageEvent, ) @@ -245,6 +267,9 @@ from .response_code_interpreter_call_interpreting_event import ( ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent, ) +from .response_function_shell_call_output_content_param import ( + ResponseFunctionShellCallOutputContentParam as ResponseFunctionShellCallOutputContentParam, +) from .response_computer_tool_call_output_screenshot_param import ( ResponseComputerToolCallOutputScreenshotParam as ResponseComputerToolCallOutputScreenshotParam, ) diff --git a/portkey_ai/_vendor/openai/types/responses/apply_patch_tool.py b/portkey_ai/_vendor/openai/types/responses/apply_patch_tool.py new file mode 100644 index 00000000..f2ed245d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/apply_patch_tool.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ApplyPatchTool"] + + +class ApplyPatchTool(BaseModel): + """Allows the assistant to create, delete, or update files using unified diffs.""" + + type: Literal["apply_patch"] + """The type of the tool. Always `apply_patch`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/apply_patch_tool_param.py b/portkey_ai/_vendor/openai/types/responses/apply_patch_tool_param.py new file mode 100644 index 00000000..2e0a8090 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/apply_patch_tool_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ApplyPatchToolParam"] + + +class ApplyPatchToolParam(TypedDict, total=False): + """Allows the assistant to create, delete, or update files using unified diffs.""" + + type: Required[Literal["apply_patch"]] + """The type of the tool. Always `apply_patch`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/compacted_response.py b/portkey_ai/_vendor/openai/types/responses/compacted_response.py new file mode 100644 index 00000000..5b333b83 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/compacted_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_usage import ResponseUsage +from .response_output_item import ResponseOutputItem + +__all__ = ["CompactedResponse"] + + +class CompactedResponse(BaseModel): + id: str + """The unique identifier for the compacted response.""" + + created_at: int + """Unix timestamp (in seconds) when the compacted conversation was created.""" + + object: Literal["response.compaction"] + """The object type. Always `response.compaction`.""" + + output: List[ResponseOutputItem] + """The compacted list of output items. + + This is a list of all user messages, followed by a single compaction item. + """ + + usage: ResponseUsage + """ + Token accounting for the compaction pass, including cached, reasoning, and total + tokens. + """ diff --git a/portkey_ai/_vendor/openai/types/responses/computer_tool.py b/portkey_ai/_vendor/openai/types/responses/computer_tool.py index 5b844f5b..22871c84 100644 --- a/portkey_ai/_vendor/openai/types/responses/computer_tool.py +++ b/portkey_ai/_vendor/openai/types/responses/computer_tool.py @@ -8,6 +8,11 @@ class ComputerTool(BaseModel): + """A tool that controls a virtual computer. 
+ + Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + """ + display_height: int """The height of the computer display.""" diff --git a/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py b/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py index 06a5c132..cdf75a43 100644 --- a/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/computer_tool_param.py @@ -8,6 +8,11 @@ class ComputerToolParam(TypedDict, total=False): + """A tool that controls a virtual computer. + + Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + """ + display_height: Required[int] """The height of the computer display.""" diff --git a/portkey_ai/_vendor/openai/types/responses/custom_tool.py b/portkey_ai/_vendor/openai/types/responses/custom_tool.py index c16ae715..1ca401a4 100644 --- a/portkey_ai/_vendor/openai/types/responses/custom_tool.py +++ b/portkey_ai/_vendor/openai/types/responses/custom_tool.py @@ -10,6 +10,11 @@ class CustomTool(BaseModel): + """A custom tool that processes input using a specified format. + + Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + """ + name: str """The name of the custom tool, used to identify it in tool calls.""" diff --git a/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py b/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py index 2afc8b19..4ce43cdf 100644 --- a/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py @@ -10,6 +10,11 @@ class CustomToolParam(TypedDict, total=False): + """A custom tool that processes input using a specified format. 
+ + Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + """ + name: Required[str] """The name of the custom tool, used to identify it in tool calls.""" diff --git a/portkey_ai/_vendor/openai/types/responses/easy_input_message.py b/portkey_ai/_vendor/openai/types/responses/easy_input_message.py index 4ed0194f..9a36a6b0 100644 --- a/portkey_ai/_vendor/openai/types/responses/easy_input_message.py +++ b/portkey_ai/_vendor/openai/types/responses/easy_input_message.py @@ -10,6 +10,14 @@ class EasyInputMessage(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Union[str, ResponseInputMessageContentList] """ Text, image, or audio input to the model, used to generate a response. Can also diff --git a/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py b/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py index ef2f1c5f..0a382bdd 100644 --- a/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py +++ b/portkey_ai/_vendor/openai/types/responses/easy_input_message_param.py @@ -11,6 +11,14 @@ class EasyInputMessageParam(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[Union[str, ResponseInputMessageContentListParam]] """ Text, image, or audio input to the model, used to generate a response. 
Can also diff --git a/portkey_ai/_vendor/openai/types/responses/file_search_tool.py b/portkey_ai/_vendor/openai/types/responses/file_search_tool.py index d0d08a32..09c12876 100644 --- a/portkey_ai/_vendor/openai/types/responses/file_search_tool.py +++ b/portkey_ai/_vendor/openai/types/responses/file_search_tool.py @@ -13,6 +13,10 @@ class RankingOptionsHybridSearch(BaseModel): + """ + Weights that control how reciprocal rank fusion balances semantic embedding matches versus sparse keyword matches when hybrid search is enabled. + """ + embedding_weight: float """The weight of the embedding in the reciprocal ranking fusion.""" @@ -21,6 +25,8 @@ class RankingOptionsHybridSearch(BaseModel): class RankingOptions(BaseModel): + """Ranking options for search.""" + hybrid_search: Optional[RankingOptionsHybridSearch] = None """ Weights that control how reciprocal rank fusion balances semantic embedding @@ -39,6 +45,11 @@ class RankingOptions(BaseModel): class FileSearchTool(BaseModel): + """A tool that searches for relevant content from uploaded files. + + Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + """ + type: Literal["file_search"] """The type of the file search tool. Always `file_search`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py b/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py index b37a669e..82831d0d 100644 --- a/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py @@ -15,6 +15,10 @@ class RankingOptionsHybridSearch(TypedDict, total=False): + """ + Weights that control how reciprocal rank fusion balances semantic embedding matches versus sparse keyword matches when hybrid search is enabled. 
+ """ + embedding_weight: Required[float] """The weight of the embedding in the reciprocal ranking fusion.""" @@ -23,6 +27,8 @@ class RankingOptionsHybridSearch(TypedDict, total=False): class RankingOptions(TypedDict, total=False): + """Ranking options for search.""" + hybrid_search: RankingOptionsHybridSearch """ Weights that control how reciprocal rank fusion balances semantic embedding @@ -41,6 +47,11 @@ class RankingOptions(TypedDict, total=False): class FileSearchToolParam(TypedDict, total=False): + """A tool that searches for relevant content from uploaded files. + + Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + """ + type: Required[Literal["file_search"]] """The type of the file search tool. Always `file_search`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/function_shell_tool.py b/portkey_ai/_vendor/openai/types/responses/function_shell_tool.py new file mode 100644 index 00000000..5b237aa7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/function_shell_tool.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FunctionShellTool"] + + +class FunctionShellTool(BaseModel): + """A tool that allows the model to execute shell commands.""" + + type: Literal["shell"] + """The type of the shell tool. Always `shell`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py b/portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py new file mode 100644 index 00000000..c640ddab --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/function_shell_tool_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["FunctionShellToolParam"] + + +class FunctionShellToolParam(TypedDict, total=False): + """A tool that allows the model to execute shell commands.""" + + type: Required[Literal["shell"]] + """The type of the shell tool. Always `shell`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/function_tool.py b/portkey_ai/_vendor/openai/types/responses/function_tool.py index d8815653..b0827a9f 100644 --- a/portkey_ai/_vendor/openai/types/responses/function_tool.py +++ b/portkey_ai/_vendor/openai/types/responses/function_tool.py @@ -9,6 +9,11 @@ class FunctionTool(BaseModel): + """Defines a function in your own code the model can choose to call. + + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + name: str """The name of the function to call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/function_tool_param.py b/portkey_ai/_vendor/openai/types/responses/function_tool_param.py index 56bab36f..ba0a3168 100644 --- a/portkey_ai/_vendor/openai/types/responses/function_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/function_tool_param.py @@ -9,6 +9,11 @@ class FunctionToolParam(TypedDict, total=False): + """Defines a function in your own code the model can choose to call. + + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). 
+ """ + name: Required[str] """The name of the function to call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py b/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py index d442a2d1..97ee4bf6 100644 --- a/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py +++ b/portkey_ai/_vendor/openai/types/responses/input_token_count_params.py @@ -8,6 +8,7 @@ from .tool_param import ToolParam from .tool_choice_options import ToolChoiceOptions from .tool_choice_mcp_param import ToolChoiceMcpParam +from .tool_choice_shell_param import ToolChoiceShellParam from .tool_choice_types_param import ToolChoiceTypesParam from ..shared_params.reasoning import Reasoning from .tool_choice_custom_param import ToolChoiceCustomParam @@ -15,6 +16,7 @@ from .tool_choice_allowed_param import ToolChoiceAllowedParam from .tool_choice_function_param import ToolChoiceFunctionParam from .response_conversation_param import ResponseConversationParam +from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam from .response_format_text_config_param import ResponseFormatTextConfigParam __all__ = ["InputTokenCountParams", "Conversation", "Text", "ToolChoice"] @@ -76,11 +78,7 @@ class InputTokenCountParams(TypedDict, total=False): """ tool_choice: Optional[ToolChoice] - """ - How the model should select which tool (or tools) to use when generating a - response. See the `tools` parameter to see how to specify which tools the model - can call. - """ + """Controls which tool the model should use, if any.""" tools: Optional[Iterable[ToolParam]] """An array of tools the model may call while generating a response. @@ -103,6 +101,14 @@ class InputTokenCountParams(TypedDict, total=False): class Text(TypedDict, total=False): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. 
Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: ResponseFormatTextConfigParam """An object specifying the format that the model must output. @@ -135,4 +141,6 @@ class Text(TypedDict, total=False): ToolChoiceFunctionParam, ToolChoiceMcpParam, ToolChoiceCustomParam, + ToolChoiceApplyPatchParam, + ToolChoiceShellParam, ] diff --git a/portkey_ai/_vendor/openai/types/responses/parsed_response.py b/portkey_ai/_vendor/openai/types/responses/parsed_response.py index 1d9db361..a8597105 100644 --- a/portkey_ai/_vendor/openai/types/responses/parsed_response.py +++ b/portkey_ai/_vendor/openai/types/responses/parsed_response.py @@ -6,7 +6,6 @@ from ..._utils import PropertyInfo from .response import Response from ..._models import GenericModel -from ..._utils._transform import PropertyInfo from .response_output_item import ( McpCall, McpListTools, @@ -19,12 +18,17 @@ from .response_output_message import ResponseOutputMessage from .response_output_refusal import ResponseOutputRefusal from .response_reasoning_item import ResponseReasoningItem +from .response_compaction_item import ResponseCompactionItem from .response_custom_tool_call import ResponseCustomToolCall from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch +from .response_apply_patch_tool_call import ResponseApplyPatchToolCall from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_function_shell_tool_call import ResponseFunctionShellToolCall from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from .response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput +from .response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput __all__ = 
["ParsedResponse", "ParsedResponseOutputMessage", "ParsedResponseOutputText"] @@ -75,6 +79,11 @@ class ParsedResponseFunctionToolCall(ResponseFunctionToolCall): McpListTools, ResponseCodeInterpreterToolCall, ResponseCustomToolCall, + ResponseCompactionItem, + ResponseFunctionShellToolCall, + ResponseFunctionShellToolCallOutput, + ResponseApplyPatchToolCall, + ResponseApplyPatchToolCallOutput, ], PropertyInfo(discriminator="type"), ] diff --git a/portkey_ai/_vendor/openai/types/responses/response.py b/portkey_ai/_vendor/openai/types/responses/response.py index a1133a41..6bac7d65 100644 --- a/portkey_ai/_vendor/openai/types/responses/response.py +++ b/portkey_ai/_vendor/openai/types/responses/response.py @@ -12,6 +12,7 @@ from .tool_choice_mcp import ToolChoiceMcp from ..shared.metadata import Metadata from ..shared.reasoning import Reasoning +from .tool_choice_shell import ToolChoiceShell from .tool_choice_types import ToolChoiceTypes from .tool_choice_custom import ToolChoiceCustom from .response_input_item import ResponseInputItem @@ -21,23 +22,38 @@ from .response_text_config import ResponseTextConfig from .tool_choice_function import ToolChoiceFunction from ..shared.responses_model import ResponsesModel +from .tool_choice_apply_patch import ToolChoiceApplyPatch __all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"] class IncompleteDetails(BaseModel): + """Details about why the response is incomplete.""" + reason: Optional[Literal["max_output_tokens", "content_filter"]] = None """The reason why the response is incomplete.""" ToolChoice: TypeAlias = Union[ - ToolChoiceOptions, ToolChoiceAllowed, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom + ToolChoiceOptions, + ToolChoiceAllowed, + ToolChoiceTypes, + ToolChoiceFunction, + ToolChoiceMcp, + ToolChoiceCustom, + ToolChoiceApplyPatch, + ToolChoiceShell, ] class Conversation(BaseModel): + """The conversation that this response belonged to. 
+ + Input items and output items from this response were automatically added to this conversation. + """ + id: str - """The unique ID of the conversation.""" + """The unique ID of the conversation that this response was associated with.""" class Response(BaseModel): @@ -149,10 +165,16 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/background). """ + completed_at: Optional[float] = None + """ + Unix timestamp (in seconds) of when this Response was completed. Only present + when the status is `completed`. + """ + conversation: Optional[Conversation] = None - """The conversation that this response belongs to. + """The conversation that this response belonged to. - Input items and output items from this response are automatically added to this + Input items and output items from this response were automatically added to this conversation. """ @@ -192,6 +214,14 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None + """The retention policy for the prompt cache. + + Set to `24h` to enable extended prompt caching, which keeps cached prefixes + active for longer, up to a maximum of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + """ + reasoning: Optional[Reasoning] = None """**gpt-5 and o-series models only** diff --git a/portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call.py new file mode 100644 index 00000000..7af13002 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call.py @@ -0,0 +1,84 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = [ + "ResponseApplyPatchToolCall", + "Operation", + "OperationCreateFile", + "OperationDeleteFile", + "OperationUpdateFile", +] + + +class OperationCreateFile(BaseModel): + """Instruction describing how to create a file via the apply_patch tool.""" + + diff: str + """Diff to apply.""" + + path: str + """Path of the file to create.""" + + type: Literal["create_file"] + """Create a new file with the provided diff.""" + + +class OperationDeleteFile(BaseModel): + """Instruction describing how to delete a file via the apply_patch tool.""" + + path: str + """Path of the file to delete.""" + + type: Literal["delete_file"] + """Delete the specified file.""" + + +class OperationUpdateFile(BaseModel): + """Instruction describing how to update a file via the apply_patch tool.""" + + diff: str + """Diff to apply.""" + + path: str + """Path of the file to update.""" + + type: Literal["update_file"] + """Update an existing file with the provided diff.""" + + +Operation: TypeAlias = Annotated[ + Union[OperationCreateFile, OperationDeleteFile, OperationUpdateFile], PropertyInfo(discriminator="type") +] + + +class ResponseApplyPatchToolCall(BaseModel): + """A tool call that applies file diffs by creating, deleting, or updating files.""" + + id: str + """The unique ID of the apply patch tool call. + + Populated when this item is returned via API. + """ + + call_id: str + """The unique ID of the apply patch tool call generated by the model.""" + + operation: Operation + """ + One of the create_file, delete_file, or update_file operations applied via + apply_patch. + """ + + status: Literal["in_progress", "completed"] + """The status of the apply patch tool call. One of `in_progress` or `completed`.""" + + type: Literal["apply_patch_call"] + """The type of the item. 
Always `apply_patch_call`.""" + + created_by: Optional[str] = None + """The ID of the entity that created this tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call_output.py b/portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call_output.py new file mode 100644 index 00000000..de63c6e2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_apply_patch_tool_call_output.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseApplyPatchToolCallOutput"] + + +class ResponseApplyPatchToolCallOutput(BaseModel): + """The output emitted by an apply patch tool call.""" + + id: str + """The unique ID of the apply patch tool call output. + + Populated when this item is returned via API. + """ + + call_id: str + """The unique ID of the apply patch tool call generated by the model.""" + + status: Literal["completed", "failed"] + """The status of the apply patch tool call output. One of `completed` or `failed`.""" + + type: Literal["apply_patch_call_output"] + """The type of the item. 
Always `apply_patch_call_output`.""" + + created_by: Optional[str] = None + """The ID of the entity that created this tool call output.""" + + output: Optional[str] = None + """Optional textual output returned by the apply patch tool.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_audio_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_audio_delta_event.py index 6fb7887b..e577d65d 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_audio_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_audio_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioDeltaEvent(BaseModel): + """Emitted when there is a partial audio response.""" + delta: str """A chunk of Base64 encoded response audio bytes.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_audio_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_audio_done_event.py index 2592ae8d..f5f0401c 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_audio_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_audio_done_event.py @@ -8,6 +8,8 @@ class ResponseAudioDoneEvent(BaseModel): + """Emitted when the audio response is complete.""" + sequence_number: int """The sequence number of the delta.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_delta_event.py index 830c133d..03be59a2 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioTranscriptDeltaEvent(BaseModel): + """Emitted when there is a partial transcript of audio.""" + delta: str """The partial transcript of the audio response.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_done_event.py 
b/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_done_event.py index e39f501c..87219e48 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_audio_transcript_done_event.py @@ -8,6 +8,8 @@ class ResponseAudioTranscriptDoneEvent(BaseModel): + """Emitted when the full audio transcript is completed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py index c5fef939..c6bc8b73 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel): + """Emitted when a partial code snippet is streamed by the code interpreter.""" + delta: str """The partial code snippet being streamed by the code interpreter.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py index 5201a02d..186c0371 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel): + """Emitted when the code snippet is finalized by the code interpreter.""" + code: str """The final code snippet output by the code interpreter.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py 
b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py index bb9563a1..197e39e7 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallCompletedEvent(BaseModel): + """Emitted when the code interpreter call is completed.""" + item_id: str """The unique identifier of the code interpreter tool call item.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py index 9c6b2210..c775f1b8 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallInProgressEvent(BaseModel): + """Emitted when a code interpreter call is in progress.""" + item_id: str """The unique identifier of the code interpreter tool call item.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py index f6191e41..85e9c87f 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallInterpretingEvent(BaseModel): + """Emitted when the code interpreter is actively interpreting the code snippet.""" + item_id: str """The unique identifier of the code interpreter tool call item.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py 
b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py index b6515815..d7e30f49 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py @@ -10,6 +10,8 @@ class OutputLogs(BaseModel): + """The logs output from the code interpreter.""" + logs: str """The logs output from the code interpreter.""" @@ -18,6 +20,8 @@ class OutputLogs(BaseModel): class OutputImage(BaseModel): + """The image output from the code interpreter.""" + type: Literal["image"] """The type of the output. Always `image`.""" @@ -29,6 +33,8 @@ class OutputImage(BaseModel): class ResponseCodeInterpreterToolCall(BaseModel): + """A tool call to run code.""" + id: str """The unique ID of the code interpreter tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py index d402b872..fc03a3fe 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -9,6 +9,8 @@ class OutputLogs(TypedDict, total=False): + """The logs output from the code interpreter.""" + logs: Required[str] """The logs output from the code interpreter.""" @@ -17,6 +19,8 @@ class OutputLogs(TypedDict, total=False): class OutputImage(TypedDict, total=False): + """The image output from the code interpreter.""" + type: Required[Literal["image"]] """The type of the output. 
Always `image`.""" @@ -28,6 +32,8 @@ class OutputImage(TypedDict, total=False): class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): + """A tool call to run code.""" + id: Required[str] """The unique ID of the code interpreter tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_compact_params.py b/portkey_ai/_vendor/openai/types/responses/response_compact_params.py new file mode 100644 index 00000000..657c6a07 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_compact_params.py @@ -0,0 +1,133 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from .response_input_item_param import ResponseInputItemParam + +__all__ = ["ResponseCompactParams"] + + +class ResponseCompactParams(TypedDict, total=False): + model: Required[ + Union[ + Literal[ + "gpt-5.2", + "gpt-5.2-2025-12-11", + "gpt-5.2-chat-latest", + "gpt-5.2-pro", + "gpt-5.2-pro-2025-12-11", + "gpt-5.1", + "gpt-5.1-2025-11-13", + "gpt-5.1-codex", + "gpt-5.1-mini", + "gpt-5.1-chat-latest", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4.1-2025-04-14", + "gpt-4.1-mini-2025-04-14", + "gpt-4.1-nano-2025-04-14", + "o4-mini", + "o4-mini-2025-04-16", + "o3", + "o3-2025-04-16", + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-audio-preview-2025-06-03", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + 
"gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "codex-mini-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + "o1-pro", + "o1-pro-2025-03-19", + "o3-pro", + "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", + "computer-use-preview", + "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", + "gpt-5.1-codex-max", + ], + str, + None, + ] + ] + """Model ID used to generate the response, like `gpt-5` or `o3`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + """ + + input: Union[str, Iterable[ResponseInputItemParam], None] + """Text, image, or file inputs to the model, used to generate a response""" + + instructions: Optional[str] + """ + A system (or developer) message inserted into the model's context. When used + along with `previous_response_id`, the instructions from a previous response + will not be carried over to the next response. This makes it simple to swap out + system (or developer) messages in new responses. + """ + + previous_response_id: Optional[str] + """The unique ID of the previous response to the model. + + Use this to create multi-turn conversations. 
Learn more about + [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + """ diff --git a/portkey_ai/_vendor/openai/types/responses/response_compaction_item.py b/portkey_ai/_vendor/openai/types/responses/response_compaction_item.py new file mode 100644 index 00000000..36e953b1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_compaction_item.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCompactionItem"] + + +class ResponseCompactionItem(BaseModel): + """ + A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact). + """ + + id: str + """The unique ID of the compaction item.""" + + encrypted_content: str + """The encrypted content that was produced by compaction.""" + + type: Literal["compaction"] + """The type of the item. Always `compaction`.""" + + created_by: Optional[str] = None + """The identifier of the actor that created the item.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_compaction_item_param.py b/portkey_ai/_vendor/openai/types/responses/response_compaction_item_param.py new file mode 100644 index 00000000..5ef134b0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_compaction_item_param.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCompactionItemParam"] + + +class ResponseCompactionItemParam(BaseModel): + """ + A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact). 
+ """ + + encrypted_content: str + """The encrypted content of the compaction summary.""" + + type: Literal["compaction"] + """The type of the item. Always `compaction`.""" + + id: Optional[str] = None + """The ID of the compaction item.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_compaction_item_param_param.py b/portkey_ai/_vendor/openai/types/responses/response_compaction_item_param_param.py new file mode 100644 index 00000000..b4d72c2e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_compaction_item_param_param.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseCompactionItemParamParam"] + + +class ResponseCompactionItemParamParam(TypedDict, total=False): + """ + A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact). + """ + + encrypted_content: Required[str] + """The encrypted content of the compaction summary.""" + + type: Required[Literal["compaction"]] + """The type of the item. 
Always `compaction`.""" + + id: Optional[str] + """The ID of the compaction item.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_completed_event.py index 8a2bd51f..6dc95810 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_completed_event.py @@ -9,6 +9,8 @@ class ResponseCompletedEvent(BaseModel): + """Emitted when the model response is complete.""" + response: Response """Properties of the completed response.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py index f1476fa0..4e1b3cf7 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py +++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call.py @@ -24,6 +24,8 @@ class ActionClick(BaseModel): + """A click action.""" + button: Literal["left", "right", "wheel", "back", "forward"] """Indicates which mouse button was pressed during the click. @@ -41,6 +43,8 @@ class ActionClick(BaseModel): class ActionDoubleClick(BaseModel): + """A double click action.""" + type: Literal["double_click"] """Specifies the event type. @@ -55,6 +59,8 @@ class ActionDoubleClick(BaseModel): class ActionDragPath(BaseModel): + """An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`.""" + x: int """The x-coordinate.""" @@ -63,6 +69,8 @@ class ActionDragPath(BaseModel): class ActionDrag(BaseModel): + """A drag action.""" + path: List[ActionDragPath] """An array of coordinates representing the path of the drag action. @@ -84,6 +92,8 @@ class ActionDrag(BaseModel): class ActionKeypress(BaseModel): + """A collection of keypresses the model would like to perform.""" + keys: List[str] """The combination of keys the model is requesting to be pressed. 
@@ -98,6 +108,8 @@ class ActionKeypress(BaseModel): class ActionMove(BaseModel): + """A mouse move action.""" + type: Literal["move"] """Specifies the event type. @@ -112,6 +124,8 @@ class ActionMove(BaseModel): class ActionScreenshot(BaseModel): + """A screenshot action.""" + type: Literal["screenshot"] """Specifies the event type. @@ -120,6 +134,8 @@ class ActionScreenshot(BaseModel): class ActionScroll(BaseModel): + """A scroll action.""" + scroll_x: int """The horizontal scroll distance.""" @@ -140,6 +156,8 @@ class ActionScroll(BaseModel): class ActionType(BaseModel): + """An action to type in text.""" + text: str """The text to type.""" @@ -151,6 +169,8 @@ class ActionType(BaseModel): class ActionWait(BaseModel): + """A wait action.""" + type: Literal["wait"] """Specifies the event type. @@ -175,6 +195,8 @@ class ActionWait(BaseModel): class PendingSafetyCheck(BaseModel): + """A pending safety check for the computer call.""" + id: str """The ID of the pending safety check.""" @@ -186,6 +208,12 @@ class PendingSafetyCheck(BaseModel): class ResponseComputerToolCall(BaseModel): + """A tool call to a computer use tool. + + See the + [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. 
+ """ + id: str """The unique ID of the computer call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py index e1ac358c..90e935c3 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py +++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_item.py @@ -10,6 +10,8 @@ class AcknowledgedSafetyCheck(BaseModel): + """A pending safety check for the computer call.""" + id: str """The ID of the pending safety check.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot.py index a500da85..2c16f215 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot.py +++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot.py @@ -9,6 +9,8 @@ class ResponseComputerToolCallOutputScreenshot(BaseModel): + """A computer screenshot image used with the computer use tool.""" + type: Literal["computer_screenshot"] """Specifies the event type. diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot_param.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot_param.py index efc2028a..857ccf9f 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_output_screenshot_param.py @@ -8,6 +8,8 @@ class ResponseComputerToolCallOutputScreenshotParam(TypedDict, total=False): + """A computer screenshot image used with the computer use tool.""" + type: Required[Literal["computer_screenshot"]] """Specifies the event type. 
diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py index 228f76ba..550ba599 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py @@ -25,6 +25,8 @@ class ActionClick(TypedDict, total=False): + """A click action.""" + button: Required[Literal["left", "right", "wheel", "back", "forward"]] """Indicates which mouse button was pressed during the click. @@ -42,6 +44,8 @@ class ActionClick(TypedDict, total=False): class ActionDoubleClick(TypedDict, total=False): + """A double click action.""" + type: Required[Literal["double_click"]] """Specifies the event type. @@ -56,6 +60,8 @@ class ActionDoubleClick(TypedDict, total=False): class ActionDragPath(TypedDict, total=False): + """An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`.""" + x: Required[int] """The x-coordinate.""" @@ -64,6 +70,8 @@ class ActionDragPath(TypedDict, total=False): class ActionDrag(TypedDict, total=False): + """A drag action.""" + path: Required[Iterable[ActionDragPath]] """An array of coordinates representing the path of the drag action. @@ -85,6 +93,8 @@ class ActionDrag(TypedDict, total=False): class ActionKeypress(TypedDict, total=False): + """A collection of keypresses the model would like to perform.""" + keys: Required[SequenceNotStr[str]] """The combination of keys the model is requesting to be pressed. @@ -99,6 +109,8 @@ class ActionKeypress(TypedDict, total=False): class ActionMove(TypedDict, total=False): + """A mouse move action.""" + type: Required[Literal["move"]] """Specifies the event type. @@ -113,6 +125,8 @@ class ActionMove(TypedDict, total=False): class ActionScreenshot(TypedDict, total=False): + """A screenshot action.""" + type: Required[Literal["screenshot"]] """Specifies the event type. 
@@ -121,6 +135,8 @@ class ActionScreenshot(TypedDict, total=False): class ActionScroll(TypedDict, total=False): + """A scroll action.""" + scroll_x: Required[int] """The horizontal scroll distance.""" @@ -141,6 +157,8 @@ class ActionScroll(TypedDict, total=False): class ActionType(TypedDict, total=False): + """An action to type in text.""" + text: Required[str] """The text to type.""" @@ -152,6 +170,8 @@ class ActionType(TypedDict, total=False): class ActionWait(TypedDict, total=False): + """A wait action.""" + type: Required[Literal["wait"]] """Specifies the event type. @@ -173,6 +193,8 @@ class ActionWait(TypedDict, total=False): class PendingSafetyCheck(TypedDict, total=False): + """A pending safety check for the computer call.""" + id: Required[str] """The ID of the pending safety check.""" @@ -184,6 +206,12 @@ class PendingSafetyCheck(TypedDict, total=False): class ResponseComputerToolCallParam(TypedDict, total=False): + """A tool call to a computer use tool. + + See the + [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. 
+ """ + id: Required[str] """The unique ID of the computer call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_content_part_added_event.py b/portkey_ai/_vendor/openai/types/responses/response_content_part_added_event.py index c78e80d1..ec989315 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_content_part_added_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_content_part_added_event.py @@ -12,6 +12,8 @@ class PartReasoningText(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -25,6 +27,8 @@ class PartReasoningText(BaseModel): class ResponseContentPartAddedEvent(BaseModel): + """Emitted when a new content part is added.""" + content_index: int """The index of the content part that was added.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_content_part_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_content_part_done_event.py index 732f2303..f896ad87 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_content_part_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_content_part_done_event.py @@ -12,6 +12,8 @@ class PartReasoningText(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -25,6 +27,8 @@ class PartReasoningText(BaseModel): class ResponseContentPartDoneEvent(BaseModel): + """Emitted when a content part is done.""" + content_index: int """The index of the content part that is done.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py b/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py index 067bdc7a..d1587fe6 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py @@ -8,5 +8,7 @@ class ResponseConversationParam(TypedDict, total=False): + """The conversation that 
this response belongs to.""" + id: Required[str] """The unique ID of the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_create_params.py b/portkey_ai/_vendor/openai/types/responses/response_create_params.py index ba5c45ff..15844c65 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_create_params.py +++ b/portkey_ai/_vendor/openai/types/responses/response_create_params.py @@ -12,6 +12,7 @@ from .response_prompt_param import ResponsePromptParam from .tool_choice_mcp_param import ToolChoiceMcpParam from ..shared_params.metadata import Metadata +from .tool_choice_shell_param import ToolChoiceShellParam from .tool_choice_types_param import ToolChoiceTypesParam from ..shared_params.reasoning import Reasoning from .tool_choice_custom_param import ToolChoiceCustomParam @@ -19,6 +20,7 @@ from .response_text_config_param import ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam from .response_conversation_param import ResponseConversationParam +from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam from ..shared_params.responses_model import ResponsesModel __all__ = [ @@ -146,6 +148,14 @@ class ResponseCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ + prompt_cache_retention: Optional[Literal["in-memory", "24h"]] + """The retention policy for the prompt cache. + + Set to `24h` to enable extended prompt caching, which keeps cached prefixes + active for longer, up to a maximum of 24 hours. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention). + """ + reasoning: Optional[Reasoning] """**gpt-5 and o-series models only** @@ -273,6 +283,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): class StreamOptions(TypedDict, total=False): + """Options for streaming responses. 
Only set this when you set `stream: true`.""" + include_obfuscation: bool """When true, stream obfuscation will be enabled. @@ -292,6 +304,8 @@ class StreamOptions(TypedDict, total=False): ToolChoiceFunctionParam, ToolChoiceMcpParam, ToolChoiceCustomParam, + ToolChoiceApplyPatchParam, + ToolChoiceShellParam, ] diff --git a/portkey_ai/_vendor/openai/types/responses/response_created_event.py b/portkey_ai/_vendor/openai/types/responses/response_created_event.py index 73a9d700..308b2f49 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_created_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_created_event.py @@ -9,6 +9,8 @@ class ResponseCreatedEvent(BaseModel): + """An event that is emitted when a response is created.""" + response: Response """The response that was created.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py index 38c650e6..f0574396 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py +++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py @@ -9,6 +9,8 @@ class ResponseCustomToolCall(BaseModel): + """A call to a custom tool created by the model.""" + call_id: str """An identifier used to map this custom tool call to a tool call output.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_delta_event.py index 6c33102d..7473d33d 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_delta_event.py @@ -8,6 +8,8 @@ class ResponseCustomToolCallInputDeltaEvent(BaseModel): + """Event representing a delta (partial update) to the input of a custom tool call.""" + delta: str """The incremental input data (delta) for the 
custom tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_done_event.py index 35a2fee2..be47ae8e 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_done_event.py @@ -8,6 +8,8 @@ class ResponseCustomToolCallInputDoneEvent(BaseModel): + """Event indicating that input for a custom tool call is complete.""" + input: str """The complete input data for the custom tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output.py index 9db9e7e5..83395649 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output.py +++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output.py @@ -17,6 +17,8 @@ class ResponseCustomToolCallOutput(BaseModel): + """The output of a custom tool call from your code, being sent back to the model.""" + call_id: str """The call ID, used to map this custom tool call output to a custom tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_param.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_param.py index e967a37c..db003421 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_param.py @@ -15,6 +15,8 @@ class ResponseCustomToolCallOutputParam(TypedDict, total=False): + """The output of a custom tool call from your code, being sent back to the model.""" + call_id: Required[str] """The call ID, used to map this custom tool call output to a custom tool call.""" diff --git 
a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py index e15beac2..5d4ce337 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py @@ -8,6 +8,8 @@ class ResponseCustomToolCallParam(TypedDict, total=False): + """A call to a custom tool created by the model.""" + call_id: Required[str] """An identifier used to map this custom tool call to a tool call output.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_error.py b/portkey_ai/_vendor/openai/types/responses/response_error.py index 90f1fcf5..90958d1c 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_error.py +++ b/portkey_ai/_vendor/openai/types/responses/response_error.py @@ -8,6 +8,8 @@ class ResponseError(BaseModel): + """An error object returned when the model fails to generate a Response.""" + code: Literal[ "server_error", "rate_limit_exceeded", diff --git a/portkey_ai/_vendor/openai/types/responses/response_error_event.py b/portkey_ai/_vendor/openai/types/responses/response_error_event.py index 826c3951..1789f731 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_error_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_error_event.py @@ -9,6 +9,8 @@ class ResponseErrorEvent(BaseModel): + """Emitted when an error occurs.""" + code: Optional[str] = None """The error code.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_failed_event.py b/portkey_ai/_vendor/openai/types/responses/response_failed_event.py index cdd3d7d8..2232c967 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_failed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_failed_event.py @@ -9,6 +9,8 @@ class ResponseFailedEvent(BaseModel): + """An event that is emitted when a response fails.""" + response: Response """The 
response that failed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_file_search_call_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_file_search_call_completed_event.py index 08e51b2d..88ffa5ac 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_file_search_call_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_file_search_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseFileSearchCallCompletedEvent(BaseModel): + """Emitted when a file search call is completed (results found).""" + item_id: str """The ID of the output item that the file search call is initiated.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_file_search_call_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_file_search_call_in_progress_event.py index 63840a64..4f3504fd 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_file_search_call_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_file_search_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseFileSearchCallInProgressEvent(BaseModel): + """Emitted when a file search call is initiated.""" + item_id: str """The ID of the output item that the file search call is initiated.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_file_search_call_searching_event.py b/portkey_ai/_vendor/openai/types/responses/response_file_search_call_searching_event.py index 706c8c57..5bf1a076 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_file_search_call_searching_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_file_search_call_searching_event.py @@ -8,6 +8,8 @@ class ResponseFileSearchCallSearchingEvent(BaseModel): + """Emitted when a file search is currently searching.""" + item_id: str """The ID of the output item that the file search call is initiated.""" diff --git 
a/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call.py index ef1c6a56..fa456313 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call.py +++ b/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call.py @@ -32,6 +32,12 @@ class Result(BaseModel): class ResponseFileSearchToolCall(BaseModel): + """The results of a file search tool call. + + See the + [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. + """ + id: str """The unique ID of the file search tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py index 4903dca4..45a5bbb4 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py @@ -34,6 +34,12 @@ class Result(TypedDict, total=False): class ResponseFileSearchToolCallParam(TypedDict, total=False): + """The results of a file search tool call. + + See the + [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. + """ + id: Required[str] """The unique ID of the file search tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config.py b/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config.py index 001fcf5b..b9531126 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config.py +++ b/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config.py @@ -11,6 +11,12 @@ class ResponseFormatTextJSONSchemaConfig(BaseModel): + """JSON Schema response format. + + Used to generate structured JSON responses. 
+ Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + name: str """The name of the response format. diff --git a/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config_param.py b/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config_param.py index f293a80c..6f5c6331 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_format_text_json_schema_config_param.py @@ -9,6 +9,12 @@ class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False): + """JSON Schema response format. + + Used to generate structured JSON responses. + Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + name: Required[str] """The name of the response format. diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_delta_event.py index c6bc5dfa..0798c2e1 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_delta_event.py @@ -8,6 +8,8 @@ class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + """Emitted when there is a partial function-call arguments delta.""" + delta: str """The function-call arguments delta that is added.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_done_event.py index 4ee5ed7f..543cd073 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_function_call_arguments_done_event.py @@ -8,6 +8,8 @@ class 
ResponseFunctionCallArgumentsDoneEvent(BaseModel): + """Emitted when function-call arguments are finalized.""" + arguments: str """The function-call arguments.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content.py b/portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content.py new file mode 100644 index 00000000..dae48f14 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["ResponseFunctionShellCallOutputContent", "Outcome", "OutcomeTimeout", "OutcomeExit"] + + +class OutcomeTimeout(BaseModel): + """Indicates that the shell call exceeded its configured time limit.""" + + type: Literal["timeout"] + """The outcome type. Always `timeout`.""" + + +class OutcomeExit(BaseModel): + """Indicates that the shell commands finished and returned an exit code.""" + + exit_code: int + """The exit code returned by the shell process.""" + + type: Literal["exit"] + """The outcome type. 
Always `exit`.""" + + +Outcome: TypeAlias = Annotated[Union[OutcomeTimeout, OutcomeExit], PropertyInfo(discriminator="type")] + + +class ResponseFunctionShellCallOutputContent(BaseModel): + """Captured stdout and stderr for a portion of a shell tool call output.""" + + outcome: Outcome + """The exit or timeout outcome associated with this shell call.""" + + stderr: str + """Captured stderr output for the shell call.""" + + stdout: str + """Captured stdout output for the shell call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content_param.py b/portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content_param.py new file mode 100644 index 00000000..4d8ea70d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_function_shell_call_output_content_param.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ResponseFunctionShellCallOutputContentParam", "Outcome", "OutcomeTimeout", "OutcomeExit"] + + +class OutcomeTimeout(TypedDict, total=False): + """Indicates that the shell call exceeded its configured time limit.""" + + type: Required[Literal["timeout"]] + """The outcome type. Always `timeout`.""" + + +class OutcomeExit(TypedDict, total=False): + """Indicates that the shell commands finished and returned an exit code.""" + + exit_code: Required[int] + """The exit code returned by the shell process.""" + + type: Required[Literal["exit"]] + """The outcome type. 
Always `exit`.""" + + +Outcome: TypeAlias = Union[OutcomeTimeout, OutcomeExit] + + +class ResponseFunctionShellCallOutputContentParam(TypedDict, total=False): + """Captured stdout and stderr for a portion of a shell tool call output.""" + + outcome: Required[Outcome] + """The exit or timeout outcome associated with this shell call.""" + + stderr: Required[str] + """Captured stderr output for the shell call.""" + + stdout: Required[str] + """Captured stdout output for the shell call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py new file mode 100644 index 00000000..7c6a184e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFunctionShellToolCall", "Action"] + + +class Action(BaseModel): + """The shell commands and limits that describe how to run the tool call.""" + + commands: List[str] + + max_output_length: Optional[int] = None + """Optional maximum number of characters to return from each command.""" + + timeout_ms: Optional[int] = None + """Optional timeout in milliseconds for the commands.""" + + +class ResponseFunctionShellToolCall(BaseModel): + """A tool call that executes one or more shell commands in a managed environment.""" + + id: str + """The unique ID of the shell tool call. + + Populated when this item is returned via API. + """ + + action: Action + """The shell commands and limits that describe how to run the tool call.""" + + call_id: str + """The unique ID of the shell tool call generated by the model.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the shell call. 
+ + One of `in_progress`, `completed`, or `incomplete`. + """ + + type: Literal["shell_call"] + """The type of the item. Always `shell_call`.""" + + created_by: Optional[str] = None + """The ID of the entity that created this tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call_output.py b/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call_output.py new file mode 100644 index 00000000..7196ab47 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_function_shell_tool_call_output.py @@ -0,0 +1,88 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = [ + "ResponseFunctionShellToolCallOutput", + "Output", + "OutputOutcome", + "OutputOutcomeTimeout", + "OutputOutcomeExit", +] + + +class OutputOutcomeTimeout(BaseModel): + """Indicates that the shell call exceeded its configured time limit.""" + + type: Literal["timeout"] + """The outcome type. Always `timeout`.""" + + +class OutputOutcomeExit(BaseModel): + """Indicates that the shell commands finished and returned an exit code.""" + + exit_code: int + """Exit code from the shell process.""" + + type: Literal["exit"] + """The outcome type. Always `exit`.""" + + +OutputOutcome: TypeAlias = Annotated[Union[OutputOutcomeTimeout, OutputOutcomeExit], PropertyInfo(discriminator="type")] + + +class Output(BaseModel): + """The content of a shell tool call output that was emitted.""" + + outcome: OutputOutcome + """ + Represents either an exit outcome (with an exit code) or a timeout outcome for a + shell call output chunk. 
+ """ + + stderr: str + """The standard error output that was captured.""" + + stdout: str + """The standard output that was captured.""" + + created_by: Optional[str] = None + """The identifier of the actor that created the item.""" + + +class ResponseFunctionShellToolCallOutput(BaseModel): + """The output of a shell tool call that was emitted.""" + + id: str + """The unique ID of the shell call output. + + Populated when this item is returned via API. + """ + + call_id: str + """The unique ID of the shell tool call generated by the model.""" + + max_output_length: Optional[int] = None + """The maximum length of the shell command output. + + This is generated by the model and should be passed back with the raw output. + """ + + output: List[Output] + """An array of shell call output contents""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the shell call output. + + One of `in_progress`, `completed`, or `incomplete`. + """ + + type: Literal["shell_call_output"] + """The type of the shell call output. Always `shell_call_output`.""" + + created_by: Optional[str] = None + """The identifier of the actor that created the item.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py index 2a848220..194e3f7d 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py +++ b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call.py @@ -9,6 +9,12 @@ class ResponseFunctionToolCall(BaseModel): + """A tool call to run a function. + + See the + [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. 
+ """ + arguments: str """A JSON string of the arguments to pass to the function.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py index 762015a4..3df299e5 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py +++ b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_item.py @@ -6,5 +6,11 @@ class ResponseFunctionToolCallItem(ResponseFunctionToolCall): + """A tool call to run a function. + + See the + [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. + """ + id: str # type: ignore """The unique ID of the function tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py index eaa263cf..4e8dd3d6 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_function_tool_call_param.py @@ -8,6 +8,12 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): + """A tool call to run a function. + + See the + [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. + """ + arguments: Required[str] """A JSON string of the arguments to pass to the function.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py b/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py index f3e80e6a..0cb7e0b0 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py +++ b/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py @@ -10,6 +10,8 @@ class ActionSearchSource(BaseModel): + """A source used in the search.""" + type: Literal["url"] """The type of source. 
Always `url`.""" @@ -18,17 +20,24 @@ class ActionSearchSource(BaseModel): class ActionSearch(BaseModel): + """Action type "search" - Performs a web search query.""" + query: str - """The search query.""" + """[DEPRECATED] The search query.""" type: Literal["search"] """The action type.""" + queries: Optional[List[str]] = None + """The search queries.""" + sources: Optional[List[ActionSearchSource]] = None """The sources used in the search.""" class ActionOpenPage(BaseModel): + """Action type "open_page" - Opens a specific URL from search results.""" + type: Literal["open_page"] """The action type.""" @@ -37,6 +46,8 @@ class ActionOpenPage(BaseModel): class ActionFind(BaseModel): + """Action type "find": Searches for a pattern within a loaded page.""" + pattern: str """The pattern or text to search for within the page.""" @@ -51,6 +62,12 @@ class ActionFind(BaseModel): class ResponseFunctionWebSearch(BaseModel): + """The results of a web search tool call. + + See the + [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. + """ + id: str """The unique ID of the web search tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py b/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py index fc019d3e..7db3e3c8 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py @@ -5,6 +5,8 @@ from typing import Union, Iterable from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr + __all__ = [ "ResponseFunctionWebSearchParam", "Action", @@ -16,6 +18,8 @@ class ActionSearchSource(TypedDict, total=False): + """A source used in the search.""" + type: Required[Literal["url"]] """The type of source. 
Always `url`.""" @@ -24,17 +28,24 @@ class ActionSearchSource(TypedDict, total=False): class ActionSearch(TypedDict, total=False): + """Action type "search" - Performs a web search query.""" + query: Required[str] - """The search query.""" + """[DEPRECATED] The search query.""" type: Required[Literal["search"]] """The action type.""" + queries: SequenceNotStr[str] + """The search queries.""" + sources: Iterable[ActionSearchSource] """The sources used in the search.""" class ActionOpenPage(TypedDict, total=False): + """Action type "open_page" - Opens a specific URL from search results.""" + type: Required[Literal["open_page"]] """The action type.""" @@ -43,6 +54,8 @@ class ActionOpenPage(TypedDict, total=False): class ActionFind(TypedDict, total=False): + """Action type "find": Searches for a pattern within a loaded page.""" + pattern: Required[str] """The pattern or text to search for within the page.""" @@ -57,6 +70,12 @@ class ActionFind(TypedDict, total=False): class ResponseFunctionWebSearchParam(TypedDict, total=False): + """The results of a web search tool call. + + See the + [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. + """ + id: Required[str] """The unique ID of the web search tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_completed_event.py index a554273e..f6ce9d0f 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_completed_event.py @@ -8,6 +8,10 @@ class ResponseImageGenCallCompletedEvent(BaseModel): + """ + Emitted when an image generation tool call has completed and the final image is available. 
+ """ + item_id: str """The unique identifier of the image generation item being processed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_generating_event.py b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_generating_event.py index 74b4f573..8e3026d0 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_generating_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_generating_event.py @@ -8,6 +8,10 @@ class ResponseImageGenCallGeneratingEvent(BaseModel): + """ + Emitted when an image generation tool call is actively generating an image (intermediate state). + """ + item_id: str """The unique identifier of the image generation item being processed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_in_progress_event.py index b36ff5fa..60726a22 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseImageGenCallInProgressEvent(BaseModel): + """Emitted when an image generation tool call is in progress.""" + item_id: str """The unique identifier of the image generation item being processed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_partial_image_event.py b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_partial_image_event.py index e69c95fb..289d5d44 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_partial_image_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_image_gen_call_partial_image_event.py @@ -8,6 +8,8 @@ class ResponseImageGenCallPartialImageEvent(BaseModel): + """Emitted when a partial image is available during image generation streaming.""" + item_id: str """The 
unique identifier of the image generation item being processed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_in_progress_event.py index b82e10b3..9d9bbd94 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_in_progress_event.py @@ -9,6 +9,8 @@ class ResponseInProgressEvent(BaseModel): + """Emitted when the response is in progress.""" + response: Response """The response that is in progress.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_incomplete_event.py b/portkey_ai/_vendor/openai/types/responses/response_incomplete_event.py index 63c969a4..ef99c5f0 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_incomplete_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_incomplete_event.py @@ -9,6 +9,8 @@ class ResponseIncompleteEvent(BaseModel): + """An event that is emitted when a response finishes as incomplete.""" + response: Response """The response that was incomplete.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_audio.py b/portkey_ai/_vendor/openai/types/responses/response_input_audio.py index 9fef6de0..f362ba41 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_audio.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_audio.py @@ -16,6 +16,8 @@ class InputAudio(BaseModel): class ResponseInputAudio(BaseModel): + """An audio input to the model.""" + input_audio: InputAudio type: Literal["input_audio"] diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_audio_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_audio_param.py index f3fc913c..0be935c5 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_audio_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_audio_param.py @@ -16,6 +16,8 @@ class 
InputAudio(TypedDict, total=False): class ResponseInputAudioParam(TypedDict, total=False): + """An audio input to the model.""" + input_audio: Required[InputAudio] type: Required[Literal["input_audio"]] diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_file.py b/portkey_ai/_vendor/openai/types/responses/response_input_file.py index 1eecd6a2..3e5fb70c 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_file.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_file.py @@ -9,6 +9,8 @@ class ResponseInputFile(BaseModel): + """A file input to the model.""" + type: Literal["input_file"] """The type of the input item. Always `input_file`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_file_content.py b/portkey_ai/_vendor/openai/types/responses/response_input_file_content.py index d832bb0e..f0dfef55 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_file_content.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_file_content.py @@ -9,6 +9,8 @@ class ResponseInputFileContent(BaseModel): + """A file input to the model.""" + type: Literal["input_file"] """The type of the input item. Always `input_file`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_file_content_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_file_content_param.py index 71f7b3a2..376f6c7a 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_file_content_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_file_content_param.py @@ -9,6 +9,8 @@ class ResponseInputFileContentParam(TypedDict, total=False): + """A file input to the model.""" + type: Required[Literal["input_file"]] """The type of the input item. 
Always `input_file`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py index 0b5f513e..8b5da202 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py @@ -9,6 +9,8 @@ class ResponseInputFileParam(TypedDict, total=False): + """A file input to the model.""" + type: Required[Literal["input_file"]] """The type of the input item. Always `input_file`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image.py b/portkey_ai/_vendor/openai/types/responses/response_input_image.py index f2d760b2..500bc4b3 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_image.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_image.py @@ -9,6 +9,11 @@ class ResponseInputImage(BaseModel): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + """ + detail: Literal["low", "high", "auto"] """The detail level of the image to be sent to the model. diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py b/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py index fb90cb57..e38bc28d 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_image_content.py @@ -9,6 +9,11 @@ class ResponseInputImageContent(BaseModel): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision) + """ + type: Literal["input_image"] """The type of the input item. 
Always `input_image`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py index c51509a3..c21f46d7 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_image_content_param.py @@ -9,6 +9,11 @@ class ResponseInputImageContentParam(TypedDict, total=False): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision) + """ + type: Required[Literal["input_image"]] """The type of the input item. Always `input_image`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py index bc17e4f1..fd8c1bd0 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_image_param.py @@ -9,6 +9,11 @@ class ResponseInputImageParam(TypedDict, total=False): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + """ + detail: Required[Literal["low", "high", "auto"]] """The detail level of the image to be sent to the model. 
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_item.py b/portkey_ai/_vendor/openai/types/responses/response_input_item.py index 0a487b8b..23eb2c89 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_item.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_item.py @@ -12,11 +12,13 @@ from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch +from .response_compaction_item_param import ResponseCompactionItemParam from .response_file_search_tool_call import ResponseFileSearchToolCall from .response_custom_tool_call_output import ResponseCustomToolCallOutput from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList from .response_function_call_output_item_list import ResponseFunctionCallOutputItemList +from .response_function_shell_call_output_content import ResponseFunctionShellCallOutputContent from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot __all__ = [ @@ -29,6 +31,15 @@ "LocalShellCall", "LocalShellCallAction", "LocalShellCallOutput", + "ShellCall", + "ShellCallAction", + "ShellCallOutput", + "ApplyPatchCall", + "ApplyPatchCallOperation", + "ApplyPatchCallOperationCreateFile", + "ApplyPatchCallOperationDeleteFile", + "ApplyPatchCallOperationUpdateFile", + "ApplyPatchCallOutput", "McpListTools", "McpListToolsTool", "McpApprovalRequest", @@ -39,6 +50,12 @@ class Message(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. 
+ """ + content: ResponseInputMessageContentList """ A list of one or many input items to the model, containing different content @@ -60,6 +77,8 @@ class Message(BaseModel): class ComputerCallOutputAcknowledgedSafetyCheck(BaseModel): + """A pending safety check for the computer call.""" + id: str """The ID of the pending safety check.""" @@ -71,6 +90,8 @@ class ComputerCallOutputAcknowledgedSafetyCheck(BaseModel): class ComputerCallOutput(BaseModel): + """The output of a computer tool call.""" + call_id: str """The ID of the computer tool call that produced the output.""" @@ -98,6 +119,8 @@ class ComputerCallOutput(BaseModel): class FunctionCallOutput(BaseModel): + """The output of a function tool call.""" + call_id: str """The unique ID of the function tool call generated by the model.""" @@ -122,6 +145,8 @@ class FunctionCallOutput(BaseModel): class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" + id: str """The unique ID of the image generation call.""" @@ -136,6 +161,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -156,6 +183,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -173,6 +202,8 @@ class LocalShellCall(BaseModel): class LocalShellCallOutput(BaseModel): + """The output of a local shell tool call.""" + id: str """The unique ID of the local shell tool call generated by the model.""" @@ -186,7 +217,172 @@ class LocalShellCallOutput(BaseModel): """The status of the item. 
One of `in_progress`, `completed`, or `incomplete`.""" +class ShellCallAction(BaseModel): + """The shell commands and limits that describe how to run the tool call.""" + + commands: List[str] + """Ordered shell commands for the execution environment to run.""" + + max_output_length: Optional[int] = None + """ + Maximum number of UTF-8 characters to capture from combined stdout and stderr + output. + """ + + timeout_ms: Optional[int] = None + """Maximum wall-clock time in milliseconds to allow the shell commands to run.""" + + +class ShellCall(BaseModel): + """A tool representing a request to execute one or more shell commands.""" + + action: ShellCallAction + """The shell commands and limits that describe how to run the tool call.""" + + call_id: str + """The unique ID of the shell tool call generated by the model.""" + + type: Literal["shell_call"] + """The type of the item. Always `shell_call`.""" + + id: Optional[str] = None + """The unique ID of the shell tool call. + + Populated when this item is returned via API. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the shell call. + + One of `in_progress`, `completed`, or `incomplete`. + """ + + +class ShellCallOutput(BaseModel): + """The streamed output items emitted by a shell tool call.""" + + call_id: str + """The unique ID of the shell tool call generated by the model.""" + + output: List[ResponseFunctionShellCallOutputContent] + """ + Captured chunks of stdout and stderr output, along with their associated + outcomes. + """ + + type: Literal["shell_call_output"] + """The type of the item. Always `shell_call_output`.""" + + id: Optional[str] = None + """The unique ID of the shell tool call output. + + Populated when this item is returned via API. + """ + + max_output_length: Optional[int] = None + """ + The maximum number of UTF-8 characters captured for this shell call's combined + output. 
+ """ + + +class ApplyPatchCallOperationCreateFile(BaseModel): + """Instruction for creating a new file via the apply_patch tool.""" + + diff: str + """Unified diff content to apply when creating the file.""" + + path: str + """Path of the file to create relative to the workspace root.""" + + type: Literal["create_file"] + """The operation type. Always `create_file`.""" + + +class ApplyPatchCallOperationDeleteFile(BaseModel): + """Instruction for deleting an existing file via the apply_patch tool.""" + + path: str + """Path of the file to delete relative to the workspace root.""" + + type: Literal["delete_file"] + """The operation type. Always `delete_file`.""" + + +class ApplyPatchCallOperationUpdateFile(BaseModel): + """Instruction for updating an existing file via the apply_patch tool.""" + + diff: str + """Unified diff content to apply to the existing file.""" + + path: str + """Path of the file to update relative to the workspace root.""" + + type: Literal["update_file"] + """The operation type. Always `update_file`.""" + + +ApplyPatchCallOperation: TypeAlias = Annotated[ + Union[ApplyPatchCallOperationCreateFile, ApplyPatchCallOperationDeleteFile, ApplyPatchCallOperationUpdateFile], + PropertyInfo(discriminator="type"), +] + + +class ApplyPatchCall(BaseModel): + """ + A tool call representing a request to create, delete, or update files using diff patches. + """ + + call_id: str + """The unique ID of the apply patch tool call generated by the model.""" + + operation: ApplyPatchCallOperation + """ + The specific create, delete, or update instruction for the apply_patch tool + call. + """ + + status: Literal["in_progress", "completed"] + """The status of the apply patch tool call. One of `in_progress` or `completed`.""" + + type: Literal["apply_patch_call"] + """The type of the item. Always `apply_patch_call`.""" + + id: Optional[str] = None + """The unique ID of the apply patch tool call. + + Populated when this item is returned via API. 
+ """ + + +class ApplyPatchCallOutput(BaseModel): + """The streamed output emitted by an apply patch tool call.""" + + call_id: str + """The unique ID of the apply patch tool call generated by the model.""" + + status: Literal["completed", "failed"] + """The status of the apply patch tool call output. One of `completed` or `failed`.""" + + type: Literal["apply_patch_call_output"] + """The type of the item. Always `apply_patch_call_output`.""" + + id: Optional[str] = None + """The unique ID of the apply patch tool call output. + + Populated when this item is returned via API. + """ + + output: Optional[str] = None + """ + Optional human-readable log text from the apply patch tool (e.g., patch results + or errors). + """ + + class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -201,6 +397,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -218,6 +416,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" @@ -235,6 +435,8 @@ class McpApprovalRequest(BaseModel): class McpApprovalResponse(BaseModel): + """A response to an MCP approval request.""" + approval_request_id: str """The ID of the approval request being answered.""" @@ -252,6 +454,8 @@ class McpApprovalResponse(BaseModel): class McpCall(BaseModel): + """An invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" @@ -288,6 +492,8 @@ class McpCall(BaseModel): class ItemReference(BaseModel): + """An internal identifier for an item to reference.""" + id: str """The ID of the item to reference.""" @@ -307,10 +513,15 @@ class ItemReference(BaseModel): ResponseFunctionToolCall, FunctionCallOutput, ResponseReasoningItem, + 
ResponseCompactionItemParam, ImageGenerationCall, ResponseCodeInterpreterToolCall, LocalShellCall, LocalShellCallOutput, + ShellCall, + ShellCallOutput, + ApplyPatchCall, + ApplyPatchCallOutput, McpListTools, McpApprovalRequest, McpApprovalResponse, diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py index 115147dc..2c42b930 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py @@ -13,11 +13,13 @@ from .response_computer_tool_call_param import ResponseComputerToolCallParam from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam +from .response_compaction_item_param_param import ResponseCompactionItemParamParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam from .response_function_call_output_item_list_param import ResponseFunctionCallOutputItemListParam +from .response_function_shell_call_output_content_param import ResponseFunctionShellCallOutputContentParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam __all__ = [ @@ -30,6 +32,15 @@ "LocalShellCall", "LocalShellCallAction", "LocalShellCallOutput", + "ShellCall", + "ShellCallAction", + "ShellCallOutput", + "ApplyPatchCall", + "ApplyPatchCallOperation", + "ApplyPatchCallOperationCreateFile", + "ApplyPatchCallOperationDeleteFile", + "ApplyPatchCallOperationUpdateFile", + "ApplyPatchCallOutput", "McpListTools", "McpListToolsTool", 
"McpApprovalRequest", @@ -40,6 +51,12 @@ class Message(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. + """ + content: Required[ResponseInputMessageContentListParam] """ A list of one or many input items to the model, containing different content @@ -61,6 +78,8 @@ class Message(TypedDict, total=False): class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): + """A pending safety check for the computer call.""" + id: Required[str] """The ID of the pending safety check.""" @@ -72,6 +91,8 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): class ComputerCallOutput(TypedDict, total=False): + """The output of a computer tool call.""" + call_id: Required[str] """The ID of the computer tool call that produced the output.""" @@ -99,6 +120,8 @@ class ComputerCallOutput(TypedDict, total=False): class FunctionCallOutput(TypedDict, total=False): + """The output of a function tool call.""" + call_id: Required[str] """The unique ID of the function tool call generated by the model.""" @@ -123,6 +146,8 @@ class FunctionCallOutput(TypedDict, total=False): class ImageGenerationCall(TypedDict, total=False): + """An image generation request made by the model.""" + id: Required[str] """The unique ID of the image generation call.""" @@ -137,6 +162,8 @@ class ImageGenerationCall(TypedDict, total=False): class LocalShellCallAction(TypedDict, total=False): + """Execute a shell command on the server.""" + command: Required[SequenceNotStr[str]] """The command to run.""" @@ -157,6 +184,8 @@ class LocalShellCallAction(TypedDict, total=False): class LocalShellCall(TypedDict, total=False): + """A tool call to run a command on the local shell.""" + id: Required[str] """The unique ID of the local shell call.""" @@ -174,6 +203,8 @@ class LocalShellCall(TypedDict, 
total=False): class LocalShellCallOutput(TypedDict, total=False): + """The output of a local shell tool call.""" + id: Required[str] """The unique ID of the local shell tool call generated by the model.""" @@ -187,7 +218,171 @@ class LocalShellCallOutput(TypedDict, total=False): """The status of the item. One of `in_progress`, `completed`, or `incomplete`.""" +class ShellCallAction(TypedDict, total=False): + """The shell commands and limits that describe how to run the tool call.""" + + commands: Required[SequenceNotStr[str]] + """Ordered shell commands for the execution environment to run.""" + + max_output_length: Optional[int] + """ + Maximum number of UTF-8 characters to capture from combined stdout and stderr + output. + """ + + timeout_ms: Optional[int] + """Maximum wall-clock time in milliseconds to allow the shell commands to run.""" + + +class ShellCall(TypedDict, total=False): + """A tool representing a request to execute one or more shell commands.""" + + action: Required[ShellCallAction] + """The shell commands and limits that describe how to run the tool call.""" + + call_id: Required[str] + """The unique ID of the shell tool call generated by the model.""" + + type: Required[Literal["shell_call"]] + """The type of the item. Always `shell_call`.""" + + id: Optional[str] + """The unique ID of the shell tool call. + + Populated when this item is returned via API. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] + """The status of the shell call. + + One of `in_progress`, `completed`, or `incomplete`. + """ + + +class ShellCallOutput(TypedDict, total=False): + """The streamed output items emitted by a shell tool call.""" + + call_id: Required[str] + """The unique ID of the shell tool call generated by the model.""" + + output: Required[Iterable[ResponseFunctionShellCallOutputContentParam]] + """ + Captured chunks of stdout and stderr output, along with their associated + outcomes. 
+ """ + + type: Required[Literal["shell_call_output"]] + """The type of the item. Always `shell_call_output`.""" + + id: Optional[str] + """The unique ID of the shell tool call output. + + Populated when this item is returned via API. + """ + + max_output_length: Optional[int] + """ + The maximum number of UTF-8 characters captured for this shell call's combined + output. + """ + + +class ApplyPatchCallOperationCreateFile(TypedDict, total=False): + """Instruction for creating a new file via the apply_patch tool.""" + + diff: Required[str] + """Unified diff content to apply when creating the file.""" + + path: Required[str] + """Path of the file to create relative to the workspace root.""" + + type: Required[Literal["create_file"]] + """The operation type. Always `create_file`.""" + + +class ApplyPatchCallOperationDeleteFile(TypedDict, total=False): + """Instruction for deleting an existing file via the apply_patch tool.""" + + path: Required[str] + """Path of the file to delete relative to the workspace root.""" + + type: Required[Literal["delete_file"]] + """The operation type. Always `delete_file`.""" + + +class ApplyPatchCallOperationUpdateFile(TypedDict, total=False): + """Instruction for updating an existing file via the apply_patch tool.""" + + diff: Required[str] + """Unified diff content to apply to the existing file.""" + + path: Required[str] + """Path of the file to update relative to the workspace root.""" + + type: Required[Literal["update_file"]] + """The operation type. Always `update_file`.""" + + +ApplyPatchCallOperation: TypeAlias = Union[ + ApplyPatchCallOperationCreateFile, ApplyPatchCallOperationDeleteFile, ApplyPatchCallOperationUpdateFile +] + + +class ApplyPatchCall(TypedDict, total=False): + """ + A tool call representing a request to create, delete, or update files using diff patches. 
+ """ + + call_id: Required[str] + """The unique ID of the apply patch tool call generated by the model.""" + + operation: Required[ApplyPatchCallOperation] + """ + The specific create, delete, or update instruction for the apply_patch tool + call. + """ + + status: Required[Literal["in_progress", "completed"]] + """The status of the apply patch tool call. One of `in_progress` or `completed`.""" + + type: Required[Literal["apply_patch_call"]] + """The type of the item. Always `apply_patch_call`.""" + + id: Optional[str] + """The unique ID of the apply patch tool call. + + Populated when this item is returned via API. + """ + + +class ApplyPatchCallOutput(TypedDict, total=False): + """The streamed output emitted by an apply patch tool call.""" + + call_id: Required[str] + """The unique ID of the apply patch tool call generated by the model.""" + + status: Required[Literal["completed", "failed"]] + """The status of the apply patch tool call output. One of `completed` or `failed`.""" + + type: Required[Literal["apply_patch_call_output"]] + """The type of the item. Always `apply_patch_call_output`.""" + + id: Optional[str] + """The unique ID of the apply patch tool call output. + + Populated when this item is returned via API. + """ + + output: Optional[str] + """ + Optional human-readable log text from the apply patch tool (e.g., patch results + or errors). 
+ """ + + class McpListToolsTool(TypedDict, total=False): + """A tool available on an MCP server.""" + input_schema: Required[object] """The JSON schema describing the tool's input.""" @@ -202,6 +397,8 @@ class McpListToolsTool(TypedDict, total=False): class McpListTools(TypedDict, total=False): + """A list of tools available on an MCP server.""" + id: Required[str] """The unique ID of the list.""" @@ -219,6 +416,8 @@ class McpListTools(TypedDict, total=False): class McpApprovalRequest(TypedDict, total=False): + """A request for human approval of a tool invocation.""" + id: Required[str] """The unique ID of the approval request.""" @@ -236,6 +435,8 @@ class McpApprovalRequest(TypedDict, total=False): class McpApprovalResponse(TypedDict, total=False): + """A response to an MCP approval request.""" + approval_request_id: Required[str] """The ID of the approval request being answered.""" @@ -253,6 +454,8 @@ class McpApprovalResponse(TypedDict, total=False): class McpCall(TypedDict, total=False): + """An invocation of a tool on an MCP server.""" + id: Required[str] """The unique ID of the tool call.""" @@ -289,6 +492,8 @@ class McpCall(TypedDict, total=False): class ItemReference(TypedDict, total=False): + """An internal identifier for an item to reference.""" + id: Required[str] """The ID of the item to reference.""" @@ -307,10 +512,15 @@ class ItemReference(TypedDict, total=False): ResponseFunctionToolCallParam, FunctionCallOutput, ResponseReasoningItemParam, + ResponseCompactionItemParamParam, ImageGenerationCall, ResponseCodeInterpreterToolCallParam, LocalShellCall, LocalShellCallOutput, + ShellCall, + ShellCallOutput, + ApplyPatchCall, + ApplyPatchCallOutput, McpListTools, McpApprovalRequest, McpApprovalResponse, diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_param.py index 9a999c72..c2d12c0a 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_param.py +++ 
b/portkey_ai/_vendor/openai/types/responses/response_input_param.py @@ -13,11 +13,13 @@ from .response_computer_tool_call_param import ResponseComputerToolCallParam from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam +from .response_compaction_item_param_param import ResponseCompactionItemParamParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam from .response_function_call_output_item_list_param import ResponseFunctionCallOutputItemListParam +from .response_function_shell_call_output_content_param import ResponseFunctionShellCallOutputContentParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam __all__ = [ @@ -31,6 +33,15 @@ "LocalShellCall", "LocalShellCallAction", "LocalShellCallOutput", + "ShellCall", + "ShellCallAction", + "ShellCallOutput", + "ApplyPatchCall", + "ApplyPatchCallOperation", + "ApplyPatchCallOperationCreateFile", + "ApplyPatchCallOperationDeleteFile", + "ApplyPatchCallOperationUpdateFile", + "ApplyPatchCallOutput", "McpListTools", "McpListToolsTool", "McpApprovalRequest", @@ -41,6 +52,12 @@ class Message(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. 
+ """ + content: Required[ResponseInputMessageContentListParam] """ A list of one or many input items to the model, containing different content @@ -62,6 +79,8 @@ class Message(TypedDict, total=False): class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): + """A pending safety check for the computer call.""" + id: Required[str] """The ID of the pending safety check.""" @@ -73,6 +92,8 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): class ComputerCallOutput(TypedDict, total=False): + """The output of a computer tool call.""" + call_id: Required[str] """The ID of the computer tool call that produced the output.""" @@ -100,6 +121,8 @@ class ComputerCallOutput(TypedDict, total=False): class FunctionCallOutput(TypedDict, total=False): + """The output of a function tool call.""" + call_id: Required[str] """The unique ID of the function tool call generated by the model.""" @@ -124,6 +147,8 @@ class FunctionCallOutput(TypedDict, total=False): class ImageGenerationCall(TypedDict, total=False): + """An image generation request made by the model.""" + id: Required[str] """The unique ID of the image generation call.""" @@ -138,6 +163,8 @@ class ImageGenerationCall(TypedDict, total=False): class LocalShellCallAction(TypedDict, total=False): + """Execute a shell command on the server.""" + command: Required[SequenceNotStr[str]] """The command to run.""" @@ -158,6 +185,8 @@ class LocalShellCallAction(TypedDict, total=False): class LocalShellCall(TypedDict, total=False): + """A tool call to run a command on the local shell.""" + id: Required[str] """The unique ID of the local shell call.""" @@ -175,6 +204,8 @@ class LocalShellCall(TypedDict, total=False): class LocalShellCallOutput(TypedDict, total=False): + """The output of a local shell tool call.""" + id: Required[str] """The unique ID of the local shell tool call generated by the model.""" @@ -188,7 +219,171 @@ class LocalShellCallOutput(TypedDict, total=False): """The status of 
the item. One of `in_progress`, `completed`, or `incomplete`.""" +class ShellCallAction(TypedDict, total=False): + """The shell commands and limits that describe how to run the tool call.""" + + commands: Required[SequenceNotStr[str]] + """Ordered shell commands for the execution environment to run.""" + + max_output_length: Optional[int] + """ + Maximum number of UTF-8 characters to capture from combined stdout and stderr + output. + """ + + timeout_ms: Optional[int] + """Maximum wall-clock time in milliseconds to allow the shell commands to run.""" + + +class ShellCall(TypedDict, total=False): + """A tool representing a request to execute one or more shell commands.""" + + action: Required[ShellCallAction] + """The shell commands and limits that describe how to run the tool call.""" + + call_id: Required[str] + """The unique ID of the shell tool call generated by the model.""" + + type: Required[Literal["shell_call"]] + """The type of the item. Always `shell_call`.""" + + id: Optional[str] + """The unique ID of the shell tool call. + + Populated when this item is returned via API. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] + """The status of the shell call. + + One of `in_progress`, `completed`, or `incomplete`. + """ + + +class ShellCallOutput(TypedDict, total=False): + """The streamed output items emitted by a shell tool call.""" + + call_id: Required[str] + """The unique ID of the shell tool call generated by the model.""" + + output: Required[Iterable[ResponseFunctionShellCallOutputContentParam]] + """ + Captured chunks of stdout and stderr output, along with their associated + outcomes. + """ + + type: Required[Literal["shell_call_output"]] + """The type of the item. Always `shell_call_output`.""" + + id: Optional[str] + """The unique ID of the shell tool call output. + + Populated when this item is returned via API. 
+ """ + + max_output_length: Optional[int] + """ + The maximum number of UTF-8 characters captured for this shell call's combined + output. + """ + + +class ApplyPatchCallOperationCreateFile(TypedDict, total=False): + """Instruction for creating a new file via the apply_patch tool.""" + + diff: Required[str] + """Unified diff content to apply when creating the file.""" + + path: Required[str] + """Path of the file to create relative to the workspace root.""" + + type: Required[Literal["create_file"]] + """The operation type. Always `create_file`.""" + + +class ApplyPatchCallOperationDeleteFile(TypedDict, total=False): + """Instruction for deleting an existing file via the apply_patch tool.""" + + path: Required[str] + """Path of the file to delete relative to the workspace root.""" + + type: Required[Literal["delete_file"]] + """The operation type. Always `delete_file`.""" + + +class ApplyPatchCallOperationUpdateFile(TypedDict, total=False): + """Instruction for updating an existing file via the apply_patch tool.""" + + diff: Required[str] + """Unified diff content to apply to the existing file.""" + + path: Required[str] + """Path of the file to update relative to the workspace root.""" + + type: Required[Literal["update_file"]] + """The operation type. Always `update_file`.""" + + +ApplyPatchCallOperation: TypeAlias = Union[ + ApplyPatchCallOperationCreateFile, ApplyPatchCallOperationDeleteFile, ApplyPatchCallOperationUpdateFile +] + + +class ApplyPatchCall(TypedDict, total=False): + """ + A tool call representing a request to create, delete, or update files using diff patches. + """ + + call_id: Required[str] + """The unique ID of the apply patch tool call generated by the model.""" + + operation: Required[ApplyPatchCallOperation] + """ + The specific create, delete, or update instruction for the apply_patch tool + call. + """ + + status: Required[Literal["in_progress", "completed"]] + """The status of the apply patch tool call. 
One of `in_progress` or `completed`.""" + + type: Required[Literal["apply_patch_call"]] + """The type of the item. Always `apply_patch_call`.""" + + id: Optional[str] + """The unique ID of the apply patch tool call. + + Populated when this item is returned via API. + """ + + +class ApplyPatchCallOutput(TypedDict, total=False): + """The streamed output emitted by an apply patch tool call.""" + + call_id: Required[str] + """The unique ID of the apply patch tool call generated by the model.""" + + status: Required[Literal["completed", "failed"]] + """The status of the apply patch tool call output. One of `completed` or `failed`.""" + + type: Required[Literal["apply_patch_call_output"]] + """The type of the item. Always `apply_patch_call_output`.""" + + id: Optional[str] + """The unique ID of the apply patch tool call output. + + Populated when this item is returned via API. + """ + + output: Optional[str] + """ + Optional human-readable log text from the apply patch tool (e.g., patch results + or errors). 
+ """ + + class McpListToolsTool(TypedDict, total=False): + """A tool available on an MCP server.""" + input_schema: Required[object] """The JSON schema describing the tool's input.""" @@ -203,6 +398,8 @@ class McpListToolsTool(TypedDict, total=False): class McpListTools(TypedDict, total=False): + """A list of tools available on an MCP server.""" + id: Required[str] """The unique ID of the list.""" @@ -220,6 +417,8 @@ class McpListTools(TypedDict, total=False): class McpApprovalRequest(TypedDict, total=False): + """A request for human approval of a tool invocation.""" + id: Required[str] """The unique ID of the approval request.""" @@ -237,6 +436,8 @@ class McpApprovalRequest(TypedDict, total=False): class McpApprovalResponse(TypedDict, total=False): + """A response to an MCP approval request.""" + approval_request_id: Required[str] """The ID of the approval request being answered.""" @@ -254,6 +455,8 @@ class McpApprovalResponse(TypedDict, total=False): class McpCall(TypedDict, total=False): + """An invocation of a tool on an MCP server.""" + id: Required[str] """The unique ID of the tool call.""" @@ -290,6 +493,8 @@ class McpCall(TypedDict, total=False): class ItemReference(TypedDict, total=False): + """An internal identifier for an item to reference.""" + id: Required[str] """The ID of the item to reference.""" @@ -308,10 +513,15 @@ class ItemReference(TypedDict, total=False): ResponseFunctionToolCallParam, FunctionCallOutput, ResponseReasoningItemParam, + ResponseCompactionItemParamParam, ImageGenerationCall, ResponseCodeInterpreterToolCallParam, LocalShellCall, LocalShellCallOutput, + ShellCall, + ShellCallOutput, + ApplyPatchCall, + ApplyPatchCallOutput, McpListTools, McpApprovalRequest, McpApprovalResponse, diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_text.py b/portkey_ai/_vendor/openai/types/responses/response_input_text.py index ba8d1ea1..1e06ba71 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_text.py +++ 
b/portkey_ai/_vendor/openai/types/responses/response_input_text.py @@ -8,6 +8,8 @@ class ResponseInputText(BaseModel): + """A text input to the model.""" + text: str """The text input to the model.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_text_content.py b/portkey_ai/_vendor/openai/types/responses/response_input_text_content.py index 2cce8498..66dbb8b0 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_text_content.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_text_content.py @@ -8,6 +8,8 @@ class ResponseInputTextContent(BaseModel): + """A text input to the model.""" + text: str """The text input to the model.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_text_content_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_text_content_param.py index 85b57df2..013f22d0 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_text_content_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_text_content_param.py @@ -8,6 +8,8 @@ class ResponseInputTextContentParam(TypedDict, total=False): + """A text input to the model.""" + text: Required[str] """The text input to the model.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_text_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_text_param.py index f2ba8340..e1a2976e 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_input_text_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_input_text_param.py @@ -8,6 +8,8 @@ class ResponseInputTextParam(TypedDict, total=False): + """A text input to the model.""" + text: Required[str] """The text input to the model.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_item.py b/portkey_ai/_vendor/openai/types/responses/response_item.py index bdd2523b..3dba681d 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_item.py +++ 
b/portkey_ai/_vendor/openai/types/responses/response_item.py @@ -9,11 +9,15 @@ from .response_computer_tool_call import ResponseComputerToolCall from .response_input_message_item import ResponseInputMessageItem from .response_function_web_search import ResponseFunctionWebSearch +from .response_apply_patch_tool_call import ResponseApplyPatchToolCall from .response_file_search_tool_call import ResponseFileSearchToolCall from .response_function_tool_call_item import ResponseFunctionToolCallItem +from .response_function_shell_tool_call import ResponseFunctionShellToolCall from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from .response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem +from .response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput __all__ = [ "ResponseItem", @@ -30,6 +34,8 @@ class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" + id: str """The unique ID of the image generation call.""" @@ -44,6 +50,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -64,6 +72,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -81,6 +91,8 @@ class LocalShellCall(BaseModel): class LocalShellCallOutput(BaseModel): + """The output of a local shell tool call.""" + id: str """The unique ID of the local shell tool call generated by the model.""" @@ -95,6 +107,8 @@ class LocalShellCallOutput(BaseModel): class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema 
describing the tool's input.""" @@ -109,6 +123,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -126,6 +142,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" @@ -143,6 +161,8 @@ class McpApprovalRequest(BaseModel): class McpApprovalResponse(BaseModel): + """A response to an MCP approval request.""" + id: str """The unique ID of the approval response""" @@ -160,6 +180,8 @@ class McpApprovalResponse(BaseModel): class McpCall(BaseModel): + """An invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" @@ -209,6 +231,10 @@ class McpCall(BaseModel): ResponseCodeInterpreterToolCall, LocalShellCall, LocalShellCallOutput, + ResponseFunctionShellToolCall, + ResponseFunctionShellToolCallOutput, + ResponseApplyPatchToolCall, + ResponseApplyPatchToolCallOutput, McpListTools, McpApprovalRequest, McpApprovalResponse, diff --git a/portkey_ai/_vendor/openai/types/responses/response_item_list.py b/portkey_ai/_vendor/openai/types/responses/response_item_list.py index b43eacdb..e2b5a1a9 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_item_list.py +++ b/portkey_ai/_vendor/openai/types/responses/response_item_list.py @@ -10,6 +10,8 @@ class ResponseItemList(BaseModel): + """A list of Response items.""" + data: List[ResponseItem] """A list of items used to generate this response.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py index 54eff383..303ef494 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py @@ -8,6 
+8,10 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel): + """ + Emitted when there is a delta (partial update) to the arguments of an MCP tool call. + """ + delta: str """ A JSON string containing the partial update to the arguments for the MCP tool diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py index 59ce9bc9..59e71be7 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel): + """Emitted when the arguments for an MCP tool call are finalized.""" + arguments: str """A JSON string containing the finalized arguments for the MCP tool call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py index 2fee5dff..bee54d40 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallCompletedEvent(BaseModel): + """Emitted when an MCP tool call has completed successfully.""" + item_id: str """The ID of the MCP tool call item that completed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py index ca41ab71..cb3130b1 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallFailedEvent(BaseModel): + """Emitted when an MCP tool call has failed.""" + item_id: str """The ID of the MCP tool call item that 
failed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_in_progress_event.py index 401c3168..7cf6a1de 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallInProgressEvent(BaseModel): + """Emitted when an MCP tool call is in progress.""" + item_id: str """The unique identifier of the MCP tool call item being processed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py index c60ad88e..685ba59c 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py @@ -8,6 +8,8 @@ class ResponseMcpListToolsCompletedEvent(BaseModel): + """Emitted when the list of available MCP tools has been successfully retrieved.""" + item_id: str """The ID of the MCP tool call item that produced this output.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py index 0c966c44..c5fa54d2 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py @@ -8,6 +8,8 @@ class ResponseMcpListToolsFailedEvent(BaseModel): + """Emitted when the attempt to list available MCP tools has failed.""" + item_id: str """The ID of the MCP tool call item that failed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py 
b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py index f451db1e..403fdbde 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py @@ -8,6 +8,10 @@ class ResponseMcpListToolsInProgressEvent(BaseModel): + """ + Emitted when the system is in the process of retrieving the list of available MCP tools. + """ + item_id: str """The ID of the MCP tool call item that is being processed.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_output_item.py index e33d59ce..990f947b 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_item.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_item.py @@ -7,12 +7,17 @@ from ..._models import BaseModel from .response_output_message import ResponseOutputMessage from .response_reasoning_item import ResponseReasoningItem +from .response_compaction_item import ResponseCompactionItem from .response_custom_tool_call import ResponseCustomToolCall from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch +from .response_apply_patch_tool_call import ResponseApplyPatchToolCall from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_function_shell_tool_call import ResponseFunctionShellToolCall from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from .response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput +from .response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput __all__ = [ "ResponseOutputItem", @@ -27,6 +32,8 @@ class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" 
+ id: str """The unique ID of the image generation call.""" @@ -41,6 +48,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -61,6 +70,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -78,6 +89,8 @@ class LocalShellCall(BaseModel): class McpCall(BaseModel): + """An invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" @@ -114,6 +127,8 @@ class McpCall(BaseModel): class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -128,6 +143,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -145,6 +162,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" @@ -169,9 +188,14 @@ class McpApprovalRequest(BaseModel): ResponseFunctionWebSearch, ResponseComputerToolCall, ResponseReasoningItem, + ResponseCompactionItem, ImageGenerationCall, ResponseCodeInterpreterToolCall, LocalShellCall, + ResponseFunctionShellToolCall, + ResponseFunctionShellToolCallOutput, + ResponseApplyPatchToolCall, + ResponseApplyPatchToolCallOutput, McpCall, McpListTools, McpApprovalRequest, diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_item_added_event.py b/portkey_ai/_vendor/openai/types/responses/response_output_item_added_event.py index 7cd2a394..a42f6281 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_item_added_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_item_added_event.py @@ -9,6 
+9,8 @@ class ResponseOutputItemAddedEvent(BaseModel): + """Emitted when a new output item is added.""" + item: ResponseOutputItem """The output item that was added.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_item_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_output_item_done_event.py index 37d3694c..50b99da5 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_item_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_item_done_event.py @@ -9,6 +9,8 @@ class ResponseOutputItemDoneEvent(BaseModel): + """Emitted when an output item is marked done.""" + item: ResponseOutputItem """The output item that was marked done.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_message.py b/portkey_ai/_vendor/openai/types/responses/response_output_message.py index 3864aa21..9c1d1f97 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_message.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_message.py @@ -14,6 +14,8 @@ class ResponseOutputMessage(BaseModel): + """An output message from the model.""" + id: str """The unique ID of the output message.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py b/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py index 46cbbd20..9c2f5246 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_message_param.py @@ -14,6 +14,8 @@ class ResponseOutputMessageParam(TypedDict, total=False): + """An output message from the model.""" + id: Required[str] """The unique ID of the output message.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py b/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py index 685c8722..6bce26af 100644 --- 
a/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py @@ -8,6 +8,8 @@ class ResponseOutputRefusal(BaseModel): + """A refusal from the model.""" + refusal: str """The refusal explanation from the model.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py b/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py index 54cfaf07..02bdfdcf 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py @@ -8,6 +8,8 @@ class ResponseOutputRefusalParam(TypedDict, total=False): + """A refusal from the model.""" + refusal: Required[str] """The refusal explanation from the model.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_text.py b/portkey_ai/_vendor/openai/types/responses/response_output_text.py index aa97b629..2386fcb3 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_text.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_text.py @@ -19,6 +19,8 @@ class AnnotationFileCitation(BaseModel): + """A citation to a file.""" + file_id: str """The ID of the file.""" @@ -33,6 +35,8 @@ class AnnotationFileCitation(BaseModel): class AnnotationURLCitation(BaseModel): + """A citation for a web resource used to generate a model response.""" + end_index: int """The index of the last character of the URL citation in the message.""" @@ -50,6 +54,8 @@ class AnnotationURLCitation(BaseModel): class AnnotationContainerFileCitation(BaseModel): + """A citation for a container file used to generate a model response.""" + container_id: str """The ID of the container file.""" @@ -70,6 +76,8 @@ class AnnotationContainerFileCitation(BaseModel): class AnnotationFilePath(BaseModel): + """A path to a file.""" + file_id: str """The ID of the file.""" @@ -87,6 +95,8 @@ class 
AnnotationFilePath(BaseModel): class LogprobTopLogprob(BaseModel): + """The top log probability of a token.""" + token: str bytes: List[int] @@ -95,6 +105,8 @@ class LogprobTopLogprob(BaseModel): class Logprob(BaseModel): + """The log probability of a token.""" + token: str bytes: List[int] @@ -105,6 +117,8 @@ class Logprob(BaseModel): class ResponseOutputText(BaseModel): + """A text output from the model.""" + annotations: List[Annotation] """The annotations of the text output.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py b/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py index 62d8f728..b9dc2621 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py @@ -8,6 +8,8 @@ class ResponseOutputTextAnnotationAddedEvent(BaseModel): + """Emitted when an annotation is added to output text content.""" + annotation: object """The annotation object being added. 
(See annotation schema for details.)""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py b/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py index 63d2d394..bc30fbcd 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py @@ -18,6 +18,8 @@ class AnnotationFileCitation(TypedDict, total=False): + """A citation to a file.""" + file_id: Required[str] """The ID of the file.""" @@ -32,6 +34,8 @@ class AnnotationFileCitation(TypedDict, total=False): class AnnotationURLCitation(TypedDict, total=False): + """A citation for a web resource used to generate a model response.""" + end_index: Required[int] """The index of the last character of the URL citation in the message.""" @@ -49,6 +53,8 @@ class AnnotationURLCitation(TypedDict, total=False): class AnnotationContainerFileCitation(TypedDict, total=False): + """A citation for a container file used to generate a model response.""" + container_id: Required[str] """The ID of the container file.""" @@ -69,6 +75,8 @@ class AnnotationContainerFileCitation(TypedDict, total=False): class AnnotationFilePath(TypedDict, total=False): + """A path to a file.""" + file_id: Required[str] """The ID of the file.""" @@ -85,6 +93,8 @@ class AnnotationFilePath(TypedDict, total=False): class LogprobTopLogprob(TypedDict, total=False): + """The top log probability of a token.""" + token: Required[str] bytes: Required[Iterable[int]] @@ -93,6 +103,8 @@ class LogprobTopLogprob(TypedDict, total=False): class Logprob(TypedDict, total=False): + """The log probability of a token.""" + token: Required[str] bytes: Required[Iterable[int]] @@ -103,6 +115,8 @@ class Logprob(TypedDict, total=False): class ResponseOutputTextParam(TypedDict, total=False): + """A text output from the model.""" + annotations: Required[Iterable[Annotation]] """The annotations of the text output.""" diff --git 
a/portkey_ai/_vendor/openai/types/responses/response_prompt.py b/portkey_ai/_vendor/openai/types/responses/response_prompt.py index 537c2f8f..e3acacf6 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_prompt.py +++ b/portkey_ai/_vendor/openai/types/responses/response_prompt.py @@ -14,6 +14,11 @@ class ResponsePrompt(BaseModel): + """ + Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + id: str """The unique identifier of the prompt template to use.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_prompt_param.py b/portkey_ai/_vendor/openai/types/responses/response_prompt_param.py index d935fa51..f9a28b62 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_prompt_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_prompt_param.py @@ -15,6 +15,11 @@ class ResponsePromptParam(TypedDict, total=False): + """ + Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
+ """ + id: Required[str] """The unique identifier of the prompt template to use.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_queued_event.py b/portkey_ai/_vendor/openai/types/responses/response_queued_event.py index 40257408..a5542152 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_queued_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_queued_event.py @@ -9,6 +9,8 @@ class ResponseQueuedEvent(BaseModel): + """Emitted when a response is queued and waiting to be processed.""" + response: Response """The full response object that is queued.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py index fc582cf7..1a22eb60 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py @@ -9,6 +9,8 @@ class Summary(BaseModel): + """A summary text from the model.""" + text: str """A summary of the reasoning output from the model so far.""" @@ -17,6 +19,8 @@ class Summary(BaseModel): class Content(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -25,6 +29,13 @@ class Content(BaseModel): class ResponseReasoningItem(BaseModel): + """ + A description of the chain of thought used by a reasoning model while generating + a response. Be sure to include these items in your `input` to the Responses API + for subsequent turns of a conversation if you are manually + [managing context](https://platform.openai.com/docs/guides/conversation-state). 
+ """ + id: str """The unique identifier of the reasoning content.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py index 56e88ba2..40320b72 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py @@ -9,6 +9,8 @@ class Summary(TypedDict, total=False): + """A summary text from the model.""" + text: Required[str] """A summary of the reasoning output from the model so far.""" @@ -17,6 +19,8 @@ class Summary(TypedDict, total=False): class Content(TypedDict, total=False): + """Reasoning text from the model.""" + text: Required[str] """The reasoning text from the model.""" @@ -25,6 +29,13 @@ class Content(TypedDict, total=False): class ResponseReasoningItemParam(TypedDict, total=False): + """ + A description of the chain of thought used by a reasoning model while generating + a response. Be sure to include these items in your `input` to the Responses API + for subsequent turns of a conversation if you are manually + [managing context](https://platform.openai.com/docs/guides/conversation-state). 
+ """ + id: Required[str] """The unique identifier of the reasoning content.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_added_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_added_event.py index dc755b25..e4b0f342 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_added_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_added_event.py @@ -8,6 +8,8 @@ class Part(BaseModel): + """The summary part that was added.""" + text: str """The text of the summary part.""" @@ -16,6 +18,8 @@ class Part(BaseModel): class ResponseReasoningSummaryPartAddedEvent(BaseModel): + """Emitted when a new reasoning summary part is added.""" + item_id: str """The ID of the item this summary part is associated with.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_done_event.py index 7cc0b56d..48f3f684 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_part_done_event.py @@ -8,6 +8,8 @@ class Part(BaseModel): + """The completed summary part.""" + text: str """The text of the summary part.""" @@ -16,6 +18,8 @@ class Part(BaseModel): class ResponseReasoningSummaryPartDoneEvent(BaseModel): + """Emitted when a reasoning summary part is completed.""" + item_id: str """The ID of the item this summary part is associated with.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_delta_event.py index 96652991..84bcf039 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_delta_event.py +++ 
b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_delta_event.py @@ -8,6 +8,8 @@ class ResponseReasoningSummaryTextDeltaEvent(BaseModel): + """Emitted when a delta is added to a reasoning summary text.""" + delta: str """The text delta that was added to the summary.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_done_event.py index b35b8231..244d001b 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_text_done_event.py @@ -8,6 +8,8 @@ class ResponseReasoningSummaryTextDoneEvent(BaseModel): + """Emitted when a reasoning summary text is completed.""" + item_id: str """The ID of the item this summary text is associated with.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_delta_event.py index e1df893b..0e05226c 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_delta_event.py @@ -8,6 +8,8 @@ class ResponseReasoningTextDeltaEvent(BaseModel): + """Emitted when a delta is added to a reasoning text.""" + content_index: int """The index of the reasoning content part this delta is associated with.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_done_event.py index d22d984e..40e3f470 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_done_event.py @@ -8,6 +8,8 @@ class ResponseReasoningTextDoneEvent(BaseModel): + """Emitted when a reasoning 
text is completed.""" + content_index: int """The index of the reasoning content part.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_refusal_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_refusal_delta_event.py index 03c903ed..e3933b7d 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_refusal_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_refusal_delta_event.py @@ -8,6 +8,8 @@ class ResponseRefusalDeltaEvent(BaseModel): + """Emitted when there is a partial refusal text.""" + content_index: int """The index of the content part that the refusal text is added to.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_refusal_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_refusal_done_event.py index 61fd51aa..91adeb63 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_refusal_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_refusal_done_event.py @@ -8,6 +8,8 @@ class ResponseRefusalDoneEvent(BaseModel): + """Emitted when refusal text is finalized.""" + content_index: int """The index of the content part that the refusal text is finalized.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_config.py b/portkey_ai/_vendor/openai/types/responses/response_text_config.py index c53546da..fbf4da0b 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_config.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_config.py @@ -10,6 +10,14 @@ class ResponseTextConfig(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. 
diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py b/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py index 1229fce3..9cd54765 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py @@ -11,6 +11,14 @@ class ResponseTextConfigParam(TypedDict, total=False): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: ResponseFormatTextConfigParam """An object specifying the format that the model must output. diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py index b5379b7a..4f802abf 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py @@ -17,6 +17,12 @@ class LogprobTopLogprob(BaseModel): class Logprob(BaseModel): + """ + A logprob is the logarithmic probability that the model assigns to producing + a particular token at a given position in the sequence. Less-negative (higher) + logprob values indicate greater model confidence in that token choice. 
+ """ + token: str """A possible text token.""" @@ -28,6 +34,8 @@ class Logprob(BaseModel): class ResponseTextDeltaEvent(BaseModel): + """Emitted when there is an additional text delta.""" + content_index: int """The index of the content part that the text delta was added to.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py index d9776a18..75bd4798 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py @@ -17,6 +17,12 @@ class LogprobTopLogprob(BaseModel): class Logprob(BaseModel): + """ + A logprob is the logarithmic probability that the model assigns to producing + a particular token at a given position in the sequence. Less-negative (higher) + logprob values indicate greater model confidence in that token choice. + """ + token: str """A possible text token.""" @@ -28,6 +34,8 @@ class Logprob(BaseModel): class ResponseTextDoneEvent(BaseModel): + """Emitted when text content is finalized.""" + content_index: int """The index of the content part that the text content is finalized.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_usage.py b/portkey_ai/_vendor/openai/types/responses/response_usage.py index 52b93ac5..d4b739c5 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_usage.py +++ b/portkey_ai/_vendor/openai/types/responses/response_usage.py @@ -6,6 +6,8 @@ class InputTokensDetails(BaseModel): + """A detailed breakdown of the input tokens.""" + cached_tokens: int """The number of tokens that were retrieved from the cache. 
@@ -14,11 +16,18 @@ class InputTokensDetails(BaseModel): class OutputTokensDetails(BaseModel): + """A detailed breakdown of the output tokens.""" + reasoning_tokens: int """The number of reasoning tokens.""" class ResponseUsage(BaseModel): + """ + Represents token usage details including input tokens, output tokens, + a breakdown of output tokens, and the total tokens used. + """ + input_tokens: int """The number of input tokens.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_web_search_call_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_web_search_call_completed_event.py index 497f7bfe..5aa7afe6 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_web_search_call_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_web_search_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseWebSearchCallCompletedEvent(BaseModel): + """Emitted when a web search call is completed.""" + item_id: str """Unique ID for the output item associated with the web search call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_web_search_call_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_web_search_call_in_progress_event.py index da8b3fe4..73b30ff5 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_web_search_call_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_web_search_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseWebSearchCallInProgressEvent(BaseModel): + """Emitted when a web search call is initiated.""" + item_id: str """Unique ID for the output item associated with the web search call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_web_search_call_searching_event.py b/portkey_ai/_vendor/openai/types/responses/response_web_search_call_searching_event.py index 42df9cb2..959c0951 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_web_search_call_searching_event.py +++ 
b/portkey_ai/_vendor/openai/types/responses/response_web_search_call_searching_event.py @@ -8,6 +8,8 @@ class ResponseWebSearchCallSearchingEvent(BaseModel): + """Emitted when a web search call is executing.""" + item_id: str """Unique ID for the output item associated with the web search call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool.py b/portkey_ai/_vendor/openai/types/responses/tool.py index b29fede0..019962a0 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool.py +++ b/portkey_ai/_vendor/openai/types/responses/tool.py @@ -10,7 +10,9 @@ from .computer_tool import ComputerTool from .function_tool import FunctionTool from .web_search_tool import WebSearchTool +from .apply_patch_tool import ApplyPatchTool from .file_search_tool import FileSearchTool +from .function_shell_tool import FunctionShellTool from .web_search_preview_tool import WebSearchPreviewTool __all__ = [ @@ -36,6 +38,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -52,6 +56,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -65,6 +71,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -78,6 +86,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): class McpRequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. 
+ + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -89,6 +104,11 @@ class McpRequireApprovalMcpToolApprovalFilter(BaseModel): class Mcp(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" @@ -155,6 +175,11 @@ class Mcp(BaseModel): class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): + """Configuration for a code interpreter container. + + Optionally specify the IDs of the files to run the code on. + """ + type: Literal["auto"] """Always `auto`.""" @@ -162,17 +187,20 @@ class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): """An optional list of uploaded files to make available to your code.""" memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None + """The memory limit for the code interpreter container.""" CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto] class CodeInterpreter(BaseModel): + """A tool that runs Python code to help generate a response to a prompt.""" + container: CodeInterpreterContainer """The code interpreter container. Can be a container ID or an object that specifies uploaded file IDs to make - available to your code. + available to your code, along with an optional `memory_limit` setting. """ type: Literal["code_interpreter"] @@ -180,6 +208,12 @@ class CodeInterpreter(BaseModel): class ImageGenerationInputImageMask(BaseModel): + """Optional mask for inpainting. + + Contains `image_url` + (string, optional) and `file_id` (string, optional). 
+ """ + file_id: Optional[str] = None """File ID for the mask image.""" @@ -188,6 +222,8 @@ class ImageGenerationInputImageMask(BaseModel): class ImageGeneration(BaseModel): + """A tool that generates images using the GPT image models.""" + type: Literal["image_generation"] """The type of the image generation tool. Always `image_generation`.""" @@ -211,7 +247,7 @@ class ImageGeneration(BaseModel): Contains `image_url` (string, optional) and `file_id` (string, optional). """ - model: Optional[Literal["gpt-image-1", "gpt-image-1-mini"]] = None + model: Union[str, Literal["gpt-image-1", "gpt-image-1-mini"], None] = None """The image generation model to use. Default: `gpt-image-1`.""" moderation: Optional[Literal["auto", "low"]] = None @@ -246,6 +282,8 @@ class ImageGeneration(BaseModel): class LocalShell(BaseModel): + """A tool that allows the model to execute shell commands in a local environment.""" + type: Literal["local_shell"] """The type of the local shell tool. Always `local_shell`.""" @@ -260,8 +298,10 @@ class LocalShell(BaseModel): CodeInterpreter, ImageGeneration, LocalShell, + FunctionShellTool, CustomTool, WebSearchPreviewTool, + ApplyPatchTool, ], PropertyInfo(discriminator="type"), ] diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed.py index d7921dcb..400e170a 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed.py @@ -9,6 +9,8 @@ class ToolChoiceAllowed(BaseModel): + """Constrains the tools available to the model to a pre-defined set.""" + mode: Literal["auto", "required"] """Constrains the tools available to the model to a pre-defined set. 
diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed_param.py index 0712cab4..cb316c15 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed_param.py @@ -9,6 +9,8 @@ class ToolChoiceAllowedParam(TypedDict, total=False): + """Constrains the tools available to the model to a pre-defined set.""" + mode: Required[Literal["auto", "required"]] """Constrains the tools available to the model to a pre-defined set. diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch.py new file mode 100644 index 00000000..ef5a5e8b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceApplyPatch"] + + +class ToolChoiceApplyPatch(BaseModel): + """Forces the model to call the apply_patch tool when executing a tool call.""" + + type: Literal["apply_patch"] + """The tool to call. Always `apply_patch`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch_param.py new file mode 100644 index 00000000..193c9932 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_apply_patch_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceApplyPatchParam"] + + +class ToolChoiceApplyPatchParam(TypedDict, total=False): + """Forces the model to call the apply_patch tool when executing a tool call.""" + + type: Required[Literal["apply_patch"]] + """The tool to call. Always `apply_patch`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_custom.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom.py index d600e536..dec85ef7 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_custom.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom.py @@ -8,6 +8,8 @@ class ToolChoiceCustom(BaseModel): + """Use this option to force the model to call a specific custom tool.""" + name: str """The name of the custom tool to call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_custom_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom_param.py index 55bc53b7..ccdbab56 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_custom_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom_param.py @@ -8,6 +8,8 @@ class ToolChoiceCustomParam(TypedDict, total=False): + """Use this option to force the model to call a specific custom tool.""" + name: Required[str] """The name of the custom tool to call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_function.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_function.py index 8d2a4f28..b2aab24a 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_function.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_function.py @@ -8,6 +8,8 @@ class ToolChoiceFunction(BaseModel): + """Use this option to force the model to call a specific function.""" + name: str """The name of the function to call.""" diff --git 
a/portkey_ai/_vendor/openai/types/responses/tool_choice_function_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_function_param.py index 910537fd..837465eb 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_function_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_function_param.py @@ -8,6 +8,8 @@ class ToolChoiceFunctionParam(TypedDict, total=False): + """Use this option to force the model to call a specific function.""" + name: Required[str] """The name of the function to call.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp.py index 8763d816..a2c8049c 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp.py @@ -9,6 +9,10 @@ class ToolChoiceMcp(BaseModel): + """ + Use this option to force the model to call a specific tool on a remote MCP server. + """ + server_label: str """The label of the MCP server to use.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py index afcceb8c..9726e47a 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py @@ -9,6 +9,10 @@ class ToolChoiceMcpParam(TypedDict, total=False): + """ + Use this option to force the model to call a specific tool on a remote MCP server. + """ + server_label: Required[str] """The label of the MCP server to use.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_shell.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_shell.py new file mode 100644 index 00000000..a78eccc3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_shell.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceShell"] + + +class ToolChoiceShell(BaseModel): + """Forces the model to call the shell tool when a tool call is required.""" + + type: Literal["shell"] + """The tool to call. Always `shell`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_shell_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_shell_param.py new file mode 100644 index 00000000..0dbcc90f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_shell_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceShellParam"] + + +class ToolChoiceShellParam(TypedDict, total=False): + """Forces the model to call the shell tool when a tool call is required.""" + + type: Required[Literal["shell"]] + """The tool to call. Always `shell`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py index b31a8260..044c014b 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py @@ -8,6 +8,11 @@ class ToolChoiceTypes(BaseModel): + """ + Indicates that the model should use a built-in tool to generate a response. + [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). 
+ """ + type: Literal[ "file_search", "web_search_preview", diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py index 15e03574..9bf02dbf 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py @@ -8,6 +8,11 @@ class ToolChoiceTypesParam(TypedDict, total=False): + """ + Indicates that the model should use a built-in tool to generate a response. + [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + """ + type: Required[ Literal[ "file_search", diff --git a/portkey_ai/_vendor/openai/types/responses/tool_param.py b/portkey_ai/_vendor/openai/types/responses/tool_param.py index dd1ea0bd..37d3dde0 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_param.py @@ -12,7 +12,9 @@ from .computer_tool_param import ComputerToolParam from .function_tool_param import FunctionToolParam from .web_search_tool_param import WebSearchToolParam +from .apply_patch_tool_param import ApplyPatchToolParam from .file_search_tool_param import FileSearchToolParam +from .function_shell_tool_param import FunctionShellToolParam from .web_search_preview_tool_param import WebSearchPreviewToolParam __all__ = [ @@ -38,6 +40,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -54,6 +58,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. 
@@ -67,6 +73,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -80,6 +88,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: McpRequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -91,6 +106,11 @@ class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class Mcp(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" @@ -155,6 +175,11 @@ class Mcp(TypedDict, total=False): class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): + """Configuration for a code interpreter container. + + Optionally specify the IDs of the files to run the code on. 
+ """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -162,17 +187,20 @@ class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): """An optional list of uploaded files to make available to your code.""" memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] + """The memory limit for the code interpreter container.""" CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto] class CodeInterpreter(TypedDict, total=False): + """A tool that runs Python code to help generate a response to a prompt.""" + container: Required[CodeInterpreterContainer] """The code interpreter container. Can be a container ID or an object that specifies uploaded file IDs to make - available to your code. + available to your code, along with an optional `memory_limit` setting. """ type: Required[Literal["code_interpreter"]] @@ -180,6 +208,12 @@ class CodeInterpreter(TypedDict, total=False): class ImageGenerationInputImageMask(TypedDict, total=False): + """Optional mask for inpainting. + + Contains `image_url` + (string, optional) and `file_id` (string, optional). + """ + file_id: str """File ID for the mask image.""" @@ -188,6 +222,8 @@ class ImageGenerationInputImageMask(TypedDict, total=False): class ImageGeneration(TypedDict, total=False): + """A tool that generates images using the GPT image models.""" + type: Required[Literal["image_generation"]] """The type of the image generation tool. Always `image_generation`.""" @@ -211,7 +247,7 @@ class ImageGeneration(TypedDict, total=False): Contains `image_url` (string, optional) and `file_id` (string, optional). """ - model: Literal["gpt-image-1", "gpt-image-1-mini"] + model: Union[str, Literal["gpt-image-1", "gpt-image-1-mini"]] """The image generation model to use. 
Default: `gpt-image-1`.""" moderation: Literal["auto", "low"] @@ -246,6 +282,8 @@ class ImageGeneration(TypedDict, total=False): class LocalShell(TypedDict, total=False): + """A tool that allows the model to execute shell commands in a local environment.""" + type: Required[Literal["local_shell"]] """The type of the local shell tool. Always `local_shell`.""" @@ -259,8 +297,10 @@ class LocalShell(TypedDict, total=False): CodeInterpreter, ImageGeneration, LocalShell, + FunctionShellToolParam, CustomToolParam, WebSearchPreviewToolParam, + ApplyPatchToolParam, ] diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py index 66d6a246..12478e89 100644 --- a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py +++ b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py @@ -9,6 +9,8 @@ class UserLocation(BaseModel): + """The user's location.""" + type: Literal["approximate"] """The type of location approximation. Always `approximate`.""" @@ -32,6 +34,11 @@ class UserLocation(BaseModel): class WebSearchPreviewTool(BaseModel): + """This tool searches the web for relevant results to use in a response. + + Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Literal["web_search_preview", "web_search_preview_2025_03_11"] """The type of the web search tool. diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py index ec2173f8..09619a33 100644 --- a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py @@ -9,6 +9,8 @@ class UserLocation(TypedDict, total=False): + """The user's location.""" + type: Required[Literal["approximate"]] """The type of location approximation. 
Always `approximate`.""" @@ -32,6 +34,11 @@ class UserLocation(TypedDict, total=False): class WebSearchPreviewToolParam(TypedDict, total=False): + """This tool searches the web for relevant results to use in a response. + + Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] """The type of the web search tool. diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_tool.py b/portkey_ai/_vendor/openai/types/responses/web_search_tool.py index bde9600c..769f5c93 100644 --- a/portkey_ai/_vendor/openai/types/responses/web_search_tool.py +++ b/portkey_ai/_vendor/openai/types/responses/web_search_tool.py @@ -9,6 +9,8 @@ class Filters(BaseModel): + """Filters for the search.""" + allowed_domains: Optional[List[str]] = None """Allowed domains for the search. @@ -20,6 +22,8 @@ class Filters(BaseModel): class UserLocation(BaseModel): + """The approximate location of the user.""" + city: Optional[str] = None """Free text input for the city of the user, e.g. `San Francisco`.""" @@ -43,6 +47,12 @@ class UserLocation(BaseModel): class WebSearchTool(BaseModel): + """Search the Internet for sources related to the prompt. + + Learn more about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Literal["web_search", "web_search_2025_08_26"] """The type of the web search tool. diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py b/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py index 7fa19e9c..a4531a93 100644 --- a/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py @@ -11,6 +11,8 @@ class Filters(TypedDict, total=False): + """Filters for the search.""" + allowed_domains: Optional[SequenceNotStr[str]] """Allowed domains for the search. 
@@ -22,6 +24,8 @@ class Filters(TypedDict, total=False): class UserLocation(TypedDict, total=False): + """The approximate location of the user.""" + city: Optional[str] """Free text input for the city of the user, e.g. `San Francisco`.""" @@ -45,6 +49,12 @@ class UserLocation(TypedDict, total=False): class WebSearchToolParam(TypedDict, total=False): + """Search the Internet for sources related to the prompt. + + Learn more about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Required[Literal["web_search", "web_search_2025_08_26"]] """The type of the web search tool. diff --git a/portkey_ai/_vendor/openai/types/shared/all_models.py b/portkey_ai/_vendor/openai/types/shared/all_models.py index 3e0b09e2..ba8e1d82 100644 --- a/portkey_ai/_vendor/openai/types/shared/all_models.py +++ b/portkey_ai/_vendor/openai/types/shared/all_models.py @@ -24,5 +24,6 @@ "gpt-5-codex", "gpt-5-pro", "gpt-5-pro-2025-10-06", + "gpt-5.1-codex-max", ], ] diff --git a/portkey_ai/_vendor/openai/types/shared/chat_model.py b/portkey_ai/_vendor/openai/types/shared/chat_model.py index 727c60c1..8223b81b 100644 --- a/portkey_ai/_vendor/openai/types/shared/chat_model.py +++ b/portkey_ai/_vendor/openai/types/shared/chat_model.py @@ -5,6 +5,16 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "gpt-5.2", + "gpt-5.2-2025-12-11", + "gpt-5.2-chat-latest", + "gpt-5.2-pro", + "gpt-5.2-pro-2025-12-11", + "gpt-5.1", + "gpt-5.1-2025-11-13", + "gpt-5.1-codex", + "gpt-5.1-mini", + "gpt-5.1-chat-latest", "gpt-5", "gpt-5-mini", "gpt-5-nano", diff --git a/portkey_ai/_vendor/openai/types/shared/comparison_filter.py b/portkey_ai/_vendor/openai/types/shared/comparison_filter.py index 33415ca4..852cac17 100644 --- a/portkey_ai/_vendor/openai/types/shared/comparison_filter.py +++ b/portkey_ai/_vendor/openai/types/shared/comparison_filter.py @@ -9,6 +9,10 @@ class ComparisonFilter(BaseModel): + """ + A filter used to compare a specified attribute key to a given 
value using a defined comparison operation. + """ + key: str """The key to compare against the value.""" diff --git a/portkey_ai/_vendor/openai/types/shared/compound_filter.py b/portkey_ai/_vendor/openai/types/shared/compound_filter.py index 3aefa436..4801aaac 100644 --- a/portkey_ai/_vendor/openai/types/shared/compound_filter.py +++ b/portkey_ai/_vendor/openai/types/shared/compound_filter.py @@ -12,6 +12,8 @@ class CompoundFilter(BaseModel): + """Combine multiple filters using `and` or `or`.""" + filters: List[Filter] """Array of filters to combine. diff --git a/portkey_ai/_vendor/openai/types/shared/custom_tool_input_format.py b/portkey_ai/_vendor/openai/types/shared/custom_tool_input_format.py index 53c8323e..9391692b 100644 --- a/portkey_ai/_vendor/openai/types/shared/custom_tool_input_format.py +++ b/portkey_ai/_vendor/openai/types/shared/custom_tool_input_format.py @@ -10,11 +10,15 @@ class Text(BaseModel): + """Unconstrained free-form text.""" + type: Literal["text"] """Unconstrained text format. Always `text`.""" class Grammar(BaseModel): + """A grammar defined by the user.""" + definition: str """The grammar definition.""" diff --git a/portkey_ai/_vendor/openai/types/shared/reasoning.py b/portkey_ai/_vendor/openai/types/shared/reasoning.py index 6ea2fe82..14f56a04 100644 --- a/portkey_ai/_vendor/openai/types/shared/reasoning.py +++ b/portkey_ai/_vendor/openai/types/shared/reasoning.py @@ -10,16 +10,27 @@ class Reasoning(BaseModel): + """**gpt-5 and o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + effort: Optional[ReasoningEffort] = None """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. 
- - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None @@ -36,5 +47,6 @@ class Reasoning(BaseModel): This can be useful for debugging and understanding the model's reasoning process. One of `auto`, `concise`, or `detailed`. - `concise` is only supported for `computer-use-preview` models. + `concise` is supported for `computer-use-preview` models and all reasoning + models after `gpt-5`. 
""" diff --git a/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py b/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py index 4b960cd7..24d85164 100644 --- a/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py +++ b/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py @@ -5,4 +5,4 @@ __all__ = ["ReasoningEffort"] -ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] +ReasoningEffort: TypeAlias = Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] diff --git a/portkey_ai/_vendor/openai/types/shared/response_format_json_object.py b/portkey_ai/_vendor/openai/types/shared/response_format_json_object.py index 2aaa5dbd..98e0da6a 100644 --- a/portkey_ai/_vendor/openai/types/shared/response_format_json_object.py +++ b/portkey_ai/_vendor/openai/types/shared/response_format_json_object.py @@ -8,5 +8,13 @@ class ResponseFormatJSONObject(BaseModel): + """JSON object response format. + + An older method of generating JSON responses. + Using `json_schema` is recommended for models that support it. Note that the + model will not generate JSON without a system or user message instructing it + to do so. + """ + type: Literal["json_object"] """The type of response format being defined. Always `json_object`.""" diff --git a/portkey_ai/_vendor/openai/types/shared/response_format_json_schema.py b/portkey_ai/_vendor/openai/types/shared/response_format_json_schema.py index c7924446..9b2adb66 100644 --- a/portkey_ai/_vendor/openai/types/shared/response_format_json_schema.py +++ b/portkey_ai/_vendor/openai/types/shared/response_format_json_schema.py @@ -11,6 +11,8 @@ class JSONSchema(BaseModel): + """Structured Outputs configuration options, including a JSON Schema.""" + name: str """The name of the response format. @@ -41,6 +43,12 @@ class JSONSchema(BaseModel): class ResponseFormatJSONSchema(BaseModel): + """JSON Schema response format. + + Used to generate structured JSON responses. 
+ Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + json_schema: JSONSchema """Structured Outputs configuration options, including a JSON Schema.""" diff --git a/portkey_ai/_vendor/openai/types/shared/response_format_text.py b/portkey_ai/_vendor/openai/types/shared/response_format_text.py index f0c8cfb7..9f4bc0d1 100644 --- a/portkey_ai/_vendor/openai/types/shared/response_format_text.py +++ b/portkey_ai/_vendor/openai/types/shared/response_format_text.py @@ -8,5 +8,7 @@ class ResponseFormatText(BaseModel): + """Default response format. Used to generate text responses.""" + type: Literal["text"] """The type of response format being defined. Always `text`.""" diff --git a/portkey_ai/_vendor/openai/types/shared/response_format_text_grammar.py b/portkey_ai/_vendor/openai/types/shared/response_format_text_grammar.py index b02f99c1..84cd1412 100644 --- a/portkey_ai/_vendor/openai/types/shared/response_format_text_grammar.py +++ b/portkey_ai/_vendor/openai/types/shared/response_format_text_grammar.py @@ -8,6 +8,11 @@ class ResponseFormatTextGrammar(BaseModel): + """ + A custom grammar for the model to follow when generating text. + Learn more in the [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars). + """ + grammar: str """The custom grammar for the model to follow.""" diff --git a/portkey_ai/_vendor/openai/types/shared/response_format_text_python.py b/portkey_ai/_vendor/openai/types/shared/response_format_text_python.py index 4cd18d46..1b04cb62 100644 --- a/portkey_ai/_vendor/openai/types/shared/response_format_text_python.py +++ b/portkey_ai/_vendor/openai/types/shared/response_format_text_python.py @@ -8,5 +8,11 @@ class ResponseFormatTextPython(BaseModel): + """Configure the model to generate valid Python code. + + See the + [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars) for more details. 
+ """ + type: Literal["python"] """The type of response format being defined. Always `python`.""" diff --git a/portkey_ai/_vendor/openai/types/shared/responses_model.py b/portkey_ai/_vendor/openai/types/shared/responses_model.py index 432cb82a..38cdea9a 100644 --- a/portkey_ai/_vendor/openai/types/shared/responses_model.py +++ b/portkey_ai/_vendor/openai/types/shared/responses_model.py @@ -24,5 +24,6 @@ "gpt-5-codex", "gpt-5-pro", "gpt-5-pro-2025-10-06", + "gpt-5.1-codex-max", ], ] diff --git a/portkey_ai/_vendor/openai/types/shared_params/chat_model.py b/portkey_ai/_vendor/openai/types/shared_params/chat_model.py index a1e5ab9f..c1937a83 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/chat_model.py +++ b/portkey_ai/_vendor/openai/types/shared_params/chat_model.py @@ -7,6 +7,16 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "gpt-5.2", + "gpt-5.2-2025-12-11", + "gpt-5.2-chat-latest", + "gpt-5.2-pro", + "gpt-5.2-pro-2025-12-11", + "gpt-5.1", + "gpt-5.1-2025-11-13", + "gpt-5.1-codex", + "gpt-5.1-mini", + "gpt-5.1-chat-latest", "gpt-5", "gpt-5-mini", "gpt-5-nano", diff --git a/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py b/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py index 1c40729c..363688e4 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py +++ b/portkey_ai/_vendor/openai/types/shared_params/comparison_filter.py @@ -11,6 +11,10 @@ class ComparisonFilter(TypedDict, total=False): + """ + A filter used to compare a specified attribute key to a given value using a defined comparison operation. 
+ """ + key: Required[str] """The key to compare against the value.""" diff --git a/portkey_ai/_vendor/openai/types/shared_params/compound_filter.py b/portkey_ai/_vendor/openai/types/shared_params/compound_filter.py index d12e9b1b..9358e460 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/compound_filter.py +++ b/portkey_ai/_vendor/openai/types/shared_params/compound_filter.py @@ -13,6 +13,8 @@ class CompoundFilter(TypedDict, total=False): + """Combine multiple filters using `and` or `or`.""" + filters: Required[Iterable[Filter]] """Array of filters to combine. diff --git a/portkey_ai/_vendor/openai/types/shared_params/custom_tool_input_format.py b/portkey_ai/_vendor/openai/types/shared_params/custom_tool_input_format.py index 37df393e..ddc71cac 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/custom_tool_input_format.py +++ b/portkey_ai/_vendor/openai/types/shared_params/custom_tool_input_format.py @@ -9,11 +9,15 @@ class Text(TypedDict, total=False): + """Unconstrained free-form text.""" + type: Required[Literal["text"]] """Unconstrained text format. Always `text`.""" class Grammar(TypedDict, total=False): + """A grammar defined by the user.""" + definition: Required[str] """The grammar definition.""" diff --git a/portkey_ai/_vendor/openai/types/shared_params/reasoning.py b/portkey_ai/_vendor/openai/types/shared_params/reasoning.py index 5c1eff68..2bd7ce72 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/reasoning.py +++ b/portkey_ai/_vendor/openai/types/shared_params/reasoning.py @@ -11,16 +11,27 @@ class Reasoning(TypedDict, total=False): + """**gpt-5 and o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + effort: Optional[ReasoningEffort] """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning - effort can result in faster responses and fewer tokens used on reasoning in a - response. - - Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning - effort. + supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. + Reducing reasoning effort can result in faster responses and fewer tokens used + on reasoning in a response. + + - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported + reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool + calls are supported for all reasoning values in gpt-5.1. + - All models before `gpt-5.1` default to `medium` reasoning effort, and do not + support `none`. + - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. + - `xhigh` is supported for all models after `gpt-5.1-codex-max`. """ generate_summary: Optional[Literal["auto", "concise", "detailed"]] @@ -37,5 +48,6 @@ class Reasoning(TypedDict, total=False): This can be useful for debugging and understanding the model's reasoning process. One of `auto`, `concise`, or `detailed`. - `concise` is only supported for `computer-use-preview` models. + `concise` is supported for `computer-use-preview` models and all reasoning + models after `gpt-5`. 
""" diff --git a/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py b/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py index 4c095a28..8518c2b1 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py +++ b/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py @@ -7,4 +7,4 @@ __all__ = ["ReasoningEffort"] -ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] +ReasoningEffort: TypeAlias = Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] diff --git a/portkey_ai/_vendor/openai/types/shared_params/response_format_json_object.py b/portkey_ai/_vendor/openai/types/shared_params/response_format_json_object.py index d4d1deaa..ef5d43be 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/response_format_json_object.py +++ b/portkey_ai/_vendor/openai/types/shared_params/response_format_json_object.py @@ -8,5 +8,13 @@ class ResponseFormatJSONObject(TypedDict, total=False): + """JSON object response format. + + An older method of generating JSON responses. + Using `json_schema` is recommended for models that support it. Note that the + model will not generate JSON without a system or user message instructing it + to do so. + """ + type: Required[Literal["json_object"]] """The type of response format being defined. Always `json_object`.""" diff --git a/portkey_ai/_vendor/openai/types/shared_params/response_format_json_schema.py b/portkey_ai/_vendor/openai/types/shared_params/response_format_json_schema.py index 5b0a13ee..0a0e8468 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/response_format_json_schema.py +++ b/portkey_ai/_vendor/openai/types/shared_params/response_format_json_schema.py @@ -9,6 +9,8 @@ class JSONSchema(TypedDict, total=False): + """Structured Outputs configuration options, including a JSON Schema.""" + name: Required[str] """The name of the response format. 
@@ -39,6 +41,12 @@ class JSONSchema(TypedDict, total=False): class ResponseFormatJSONSchema(TypedDict, total=False): + """JSON Schema response format. + + Used to generate structured JSON responses. + Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + json_schema: Required[JSONSchema] """Structured Outputs configuration options, including a JSON Schema.""" diff --git a/portkey_ai/_vendor/openai/types/shared_params/response_format_text.py b/portkey_ai/_vendor/openai/types/shared_params/response_format_text.py index c3ef2b08..c195036f 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/response_format_text.py +++ b/portkey_ai/_vendor/openai/types/shared_params/response_format_text.py @@ -8,5 +8,7 @@ class ResponseFormatText(TypedDict, total=False): + """Default response format. Used to generate text responses.""" + type: Required[Literal["text"]] """The type of response format being defined. Always `text`.""" diff --git a/portkey_ai/_vendor/openai/types/shared_params/responses_model.py b/portkey_ai/_vendor/openai/types/shared_params/responses_model.py index fe34eb0f..ad44dd6b 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/responses_model.py +++ b/portkey_ai/_vendor/openai/types/shared_params/responses_model.py @@ -26,5 +26,6 @@ "gpt-5-codex", "gpt-5-pro", "gpt-5-pro-2025-10-06", + "gpt-5.1-codex-max", ], ] diff --git a/portkey_ai/_vendor/openai/types/static_file_chunking_strategy_object_param.py b/portkey_ai/_vendor/openai/types/static_file_chunking_strategy_object_param.py index 0cdf35c0..40188a41 100644 --- a/portkey_ai/_vendor/openai/types/static_file_chunking_strategy_object_param.py +++ b/portkey_ai/_vendor/openai/types/static_file_chunking_strategy_object_param.py @@ -10,6 +10,8 @@ class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): + """Customize your own chunking strategy by setting chunk size and chunk overlap.""" + static: Required[StaticFileChunkingStrategyParam] 
type: Required[Literal["static"]] diff --git a/portkey_ai/_vendor/openai/types/upload.py b/portkey_ai/_vendor/openai/types/upload.py index 914b69a8..d248da6e 100644 --- a/portkey_ai/_vendor/openai/types/upload.py +++ b/portkey_ai/_vendor/openai/types/upload.py @@ -10,6 +10,8 @@ class Upload(BaseModel): + """The Upload object can accept byte chunks in the form of Parts.""" + id: str """The Upload unique identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/upload_create_params.py b/portkey_ai/_vendor/openai/types/upload_create_params.py index ab4cded8..c25d65be 100644 --- a/portkey_ai/_vendor/openai/types/upload_create_params.py +++ b/portkey_ai/_vendor/openai/types/upload_create_params.py @@ -39,6 +39,11 @@ class UploadCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. + """ + anchor: Required[Literal["created_at"]] """Anchor timestamp after which the expiration policy applies. 
diff --git a/portkey_ai/_vendor/openai/types/uploads/upload_part.py b/portkey_ai/_vendor/openai/types/uploads/upload_part.py index e09621d8..e585b1a2 100644 --- a/portkey_ai/_vendor/openai/types/uploads/upload_part.py +++ b/portkey_ai/_vendor/openai/types/uploads/upload_part.py @@ -8,6 +8,8 @@ class UploadPart(BaseModel): + """The upload Part represents a chunk of bytes we can add to an Upload object.""" + id: str """The upload Part unique identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/vector_store.py b/portkey_ai/_vendor/openai/types/vector_store.py index 2473a442..82899ecd 100644 --- a/portkey_ai/_vendor/openai/types/vector_store.py +++ b/portkey_ai/_vendor/openai/types/vector_store.py @@ -27,6 +27,8 @@ class FileCounts(BaseModel): class ExpiresAfter(BaseModel): + """The expiration policy for a vector store.""" + anchor: Literal["last_active_at"] """Anchor timestamp after which the expiration policy applies. @@ -38,6 +40,10 @@ class ExpiresAfter(BaseModel): class VectorStore(BaseModel): + """ + A vector store is a collection of processed files can be used by the `file_search` tool. + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/vector_store_create_params.py b/portkey_ai/_vendor/openai/types/vector_store_create_params.py index f373a6ed..2b725629 100644 --- a/portkey_ai/_vendor/openai/types/vector_store_create_params.py +++ b/portkey_ai/_vendor/openai/types/vector_store_create_params.py @@ -51,6 +51,8 @@ class VectorStoreCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a vector store.""" + anchor: Required[Literal["last_active_at"]] """Anchor timestamp after which the expiration policy applies. 
diff --git a/portkey_ai/_vendor/openai/types/vector_store_search_params.py b/portkey_ai/_vendor/openai/types/vector_store_search_params.py index 8b7b13c4..851d63c5 100644 --- a/portkey_ai/_vendor/openai/types/vector_store_search_params.py +++ b/portkey_ai/_vendor/openai/types/vector_store_search_params.py @@ -36,6 +36,8 @@ class VectorStoreSearchParams(TypedDict, total=False): class RankingOptions(TypedDict, total=False): + """Ranking options for search.""" + ranker: Literal["none", "auto", "default-2024-11-15"] """Enable re-ranking; set to `none` to disable, which can help reduce latency.""" diff --git a/portkey_ai/_vendor/openai/types/vector_store_update_params.py b/portkey_ai/_vendor/openai/types/vector_store_update_params.py index 4f6ac639..7c6f8911 100644 --- a/portkey_ai/_vendor/openai/types/vector_store_update_params.py +++ b/portkey_ai/_vendor/openai/types/vector_store_update_params.py @@ -29,6 +29,8 @@ class VectorStoreUpdateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a vector store.""" + anchor: Required[Literal["last_active_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file.py b/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file.py index 001584df..c1ea0222 100644 --- a/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file.py +++ b/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file.py @@ -10,6 +10,11 @@ class LastError(BaseModel): + """The last error associated with this vector store file. + + Will be `null` if there are no errors. 
+ """ + code: Literal["server_error", "unsupported_file", "invalid_file"] """One of `server_error`, `unsupported_file`, or `invalid_file`.""" @@ -18,6 +23,8 @@ class LastError(BaseModel): class VectorStoreFile(BaseModel): + """A list of files attached to a vector store.""" + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file_batch.py b/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file_batch.py index 57dbfbd8..b07eb25d 100644 --- a/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file_batch.py +++ b/portkey_ai/_vendor/openai/types/vector_stores/vector_store_file_batch.py @@ -25,6 +25,8 @@ class FileCounts(BaseModel): class VectorStoreFileBatch(BaseModel): + """A batch of files attached to a vector store.""" + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/portkey_ai/_vendor/openai/types/video.py b/portkey_ai/_vendor/openai/types/video.py index 22ee3a11..e732ea54 100644 --- a/portkey_ai/_vendor/openai/types/video.py +++ b/portkey_ai/_vendor/openai/types/video.py @@ -13,6 +13,8 @@ class Video(BaseModel): + """Structured information describing a generated video job.""" + id: str """Unique identifier for the video job.""" diff --git a/portkey_ai/_vendor/openai/types/video_create_error.py b/portkey_ai/_vendor/openai/types/video_create_error.py index ae328b78..7f520220 100644 --- a/portkey_ai/_vendor/openai/types/video_create_error.py +++ b/portkey_ai/_vendor/openai/types/video_create_error.py @@ -6,6 +6,10 @@ class VideoCreateError(BaseModel): + """An error that occurred while generating the response.""" + code: str + """A machine-readable error code that was returned.""" message: str + """A human-readable description of the error that was returned.""" diff --git a/portkey_ai/_vendor/openai/types/video_create_params.py b/portkey_ai/_vendor/openai/types/video_create_params.py index 527d62d1..d787aaed 100644 --- 
a/portkey_ai/_vendor/openai/types/video_create_params.py +++ b/portkey_ai/_vendor/openai/types/video_create_params.py @@ -6,8 +6,8 @@ from .._types import FileTypes from .video_size import VideoSize -from .video_model import VideoModel from .video_seconds import VideoSeconds +from .video_model_param import VideoModelParam __all__ = ["VideoCreateParams"] @@ -19,11 +19,17 @@ class VideoCreateParams(TypedDict, total=False): input_reference: FileTypes """Optional image reference that guides generation.""" - model: VideoModel - """The video generation model to use. Defaults to `sora-2`.""" + model: VideoModelParam + """The video generation model to use (allowed values: sora-2, sora-2-pro). + + Defaults to `sora-2`. + """ seconds: VideoSeconds - """Clip duration in seconds. Defaults to 4 seconds.""" + """Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.""" size: VideoSize - """Output resolution formatted as width x height. Defaults to 720x1280.""" + """ + Output resolution formatted as width x height (allowed values: 720x1280, + 1280x720, 1024x1792, 1792x1024). Defaults to 720x1280. + """ diff --git a/portkey_ai/_vendor/openai/types/video_delete_response.py b/portkey_ai/_vendor/openai/types/video_delete_response.py index e2673ffe..1ed543ae 100644 --- a/portkey_ai/_vendor/openai/types/video_delete_response.py +++ b/portkey_ai/_vendor/openai/types/video_delete_response.py @@ -8,6 +8,8 @@ class VideoDeleteResponse(BaseModel): + """Confirmation payload returned after deleting a video.""" + id: str """Identifier of the deleted video.""" diff --git a/portkey_ai/_vendor/openai/types/video_model.py b/portkey_ai/_vendor/openai/types/video_model.py index 0b0835fc..29d8cb16 100644 --- a/portkey_ai/_vendor/openai/types/video_model.py +++ b/portkey_ai/_vendor/openai/types/video_model.py @@ -1,7 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from typing import Union from typing_extensions import Literal, TypeAlias __all__ = ["VideoModel"] -VideoModel: TypeAlias = Literal["sora-2", "sora-2-pro"] +VideoModel: TypeAlias = Union[ + str, Literal["sora-2", "sora-2-pro", "sora-2-2025-10-06", "sora-2-pro-2025-10-06", "sora-2-2025-12-08"] +] diff --git a/portkey_ai/_vendor/openai/types/video_model_param.py b/portkey_ai/_vendor/openai/types/video_model_param.py new file mode 100644 index 00000000..4310b8d0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/video_model_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias + +__all__ = ["VideoModelParam"] + +VideoModelParam: TypeAlias = Union[ + str, Literal["sora-2", "sora-2-pro", "sora-2-2025-10-06", "sora-2-pro-2025-10-06", "sora-2-2025-12-08"] +] diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_cancelled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_cancelled_webhook_event.py index 4bbd7307..9d1c485f 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/batch_cancelled_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_cancelled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the batch API request.""" class BatchCancelledWebhookEvent(BaseModel): + """Sent when a batch API request has been cancelled.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_completed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_completed_webhook_event.py index a47ca156..5ae81917 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/batch_completed_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_completed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str 
"""The unique ID of the batch API request.""" class BatchCompletedWebhookEvent(BaseModel): + """Sent when a batch API request has been completed.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_expired_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_expired_webhook_event.py index e91001e8..2f08a7f5 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/batch_expired_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_expired_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the batch API request.""" class BatchExpiredWebhookEvent(BaseModel): + """Sent when a batch API request has expired.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_failed_webhook_event.py index ef80863e..71666165 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/batch_failed_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the batch API request.""" class BatchFailedWebhookEvent(BaseModel): + """Sent when a batch API request has failed.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/eval_run_canceled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/eval_run_canceled_webhook_event.py index 855359f7..1948f893 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/eval_run_canceled_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/eval_run_canceled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the eval run.""" class EvalRunCanceledWebhookEvent(BaseModel): + """Sent when an eval run has been canceled.""" + id: str """The 
unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/eval_run_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/eval_run_failed_webhook_event.py index 76716807..4e4c860a 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/eval_run_failed_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/eval_run_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the eval run.""" class EvalRunFailedWebhookEvent(BaseModel): + """Sent when an eval run has failed.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/eval_run_succeeded_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/eval_run_succeeded_webhook_event.py index d0d1fc2b..c20f22ee 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/eval_run_succeeded_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/eval_run_succeeded_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the eval run.""" class EvalRunSucceededWebhookEvent(BaseModel): + """Sent when an eval run has succeeded.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py index 1fe3c060..0cfff85d 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the fine-tuning job.""" class FineTuningJobCancelledWebhookEvent(BaseModel): + """Sent when a fine-tuning job has been cancelled.""" + id: str """The unique ID of the event.""" diff --git 
a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py index 71d899c8..0eb6bf95 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the fine-tuning job.""" class FineTuningJobFailedWebhookEvent(BaseModel): + """Sent when a fine-tuning job has failed.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py index 470f1fcf..26b5ea89 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the fine-tuning job.""" class FineTuningJobSucceededWebhookEvent(BaseModel): + """Sent when a fine-tuning job has succeeded.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/realtime_call_incoming_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/realtime_call_incoming_webhook_event.py index a166a347..4647a2e2 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/realtime_call_incoming_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/realtime_call_incoming_webhook_event.py @@ -9,6 +9,8 @@ class DataSipHeader(BaseModel): + """A header from the SIP Invite.""" + name: str """Name of the SIP Header.""" @@ -17,6 +19,8 @@ class DataSipHeader(BaseModel): class Data(BaseModel): + """Event data payload.""" + call_id: str """The unique ID of this call.""" @@ -25,6 +29,8 @@ class 
Data(BaseModel): class RealtimeCallIncomingWebhookEvent(BaseModel): + """Sent when Realtime API Receives a incoming SIP call.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_cancelled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_cancelled_webhook_event.py index 443e360e..cd791b33 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/response_cancelled_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/response_cancelled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseCancelledWebhookEvent(BaseModel): + """Sent when a background response has been cancelled.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_completed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_completed_webhook_event.py index ac1feff3..cf07f0c2 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/response_completed_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/response_completed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseCompletedWebhookEvent(BaseModel): + """Sent when a background response has been completed.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_failed_webhook_event.py index 5b4ba65e..aecb1b8f 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/response_failed_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/response_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseFailedWebhookEvent(BaseModel): + """Sent when a 
background response has failed.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_incomplete_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_incomplete_webhook_event.py index 01609314..2367731e 100644 --- a/portkey_ai/_vendor/openai/types/webhooks/response_incomplete_webhook_event.py +++ b/portkey_ai/_vendor/openai/types/webhooks/response_incomplete_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseIncompleteWebhookEvent(BaseModel): + """Sent when a background response has been interrupted.""" + id: str """The unique ID of the event.""" diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 15a25d91..6517713f 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -318,6 +318,9 @@ def parse( prediction: Union[Any, Omit] = omit, presence_penalty: Union[Optional[float], Omit] = omit, prompt_cache_key: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning_effort: Union[Any, Omit] = omit, safety_identifier: Union[str, Omit] = omit, seed: Union[Optional[int], Omit] = omit, @@ -358,6 +361,7 @@ def parse( prediction=prediction, presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, reasoning_effort=reasoning_effort, safety_identifier=safety_identifier, seed=seed, @@ -400,6 +404,9 @@ def stream( prediction: Union[Any, Omit] = omit, presence_penalty: Union[Optional[float], Omit] = omit, prompt_cache_key: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning_effort: Union[Any, Omit] = omit, safety_identifier: Union[str, Omit] = omit, seed: Union[Optional[int], Omit] = 
omit, @@ -440,6 +447,7 @@ def stream( prediction=prediction, presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, reasoning_effort=reasoning_effort, safety_identifier=safety_identifier, seed=seed, @@ -723,6 +731,9 @@ async def parse( prediction: Union[Any, Omit] = omit, presence_penalty: Union[Optional[float], Omit] = omit, prompt_cache_key: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning_effort: Union[Any, Omit] = omit, safety_identifier: Union[str, Omit] = omit, seed: Union[Optional[int], Omit] = omit, @@ -763,6 +774,7 @@ async def parse( prediction=prediction, presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, reasoning_effort=reasoning_effort, safety_identifier=safety_identifier, seed=seed, @@ -805,6 +817,9 @@ def stream( prediction: Union[Any, Omit] = omit, presence_penalty: Union[Optional[float], Omit] = omit, prompt_cache_key: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning_effort: Union[Any, Omit] = omit, safety_identifier: Union[str, Omit] = omit, seed: Union[Optional[int], Omit] = omit, @@ -845,6 +860,7 @@ def stream( prediction=prediction, presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, + prompt_cache_retention=prompt_cache_retention, reasoning_effort=reasoning_effort, safety_identifier=safety_identifier, seed=seed, diff --git a/tests/models.json b/tests/models.json index 73033dc7..0499881a 100644 --- a/tests/models.json +++ b/tests/models.json @@ -23,92 +23,5 @@ "tts-1", "whisper-1" ] - }, - "anyscale": { - "env_variable": "ANYSCALE_API_KEY", - "chat": [ - "meta-llama/Llama-2-7b-chat-hf", - "meta-llama/Llama-2-13b-chat-hf", - "meta-llama/Llama-2-70b-chat-hf", - "mistralai/Mistral-7B-Instruct-v0.1", - "mistralai/Mixtral-8x7B-Instruct-v0.1" - ], - 
"text": [ - "meta-llama/Llama-2-7b-chat-hf", - "meta-llama/Llama-2-13b-chat-hf", - "meta-llama/Llama-2-70b-chat-hf", - "mistralai/Mistral-7B-Instruct-v0.1", - "mistralai/Mixtral-8x7B-Instruct-v0.1" - ], - "image":[], - "audio":[] - }, - "anthropic": { - "env_variable": "ANTHROPIC_API_KEY", - "chat": [ - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "claude-2.1", - "claude-2.0", - "claude-instant-1.2" - ], - "text": [ - "claude-2.1", - "claude-2.0", - "claude-instant-1.2" - ], - "image":[], - "audio":[] - }, - "cohere": { - "env_variable": "COHERE_API_KEY", - "chat": [ - "command-light", - "command-light-nightly", - "command", - "command-nightly" - ], - "text": [ - "command-light", - "command-light-nightly", - "command", - "command-nightly" - ], - "image":[], - "audio":[] - }, - "langchain": { - "env_variable": "OPENAI_API_KEY", - "chat": [ - "gpt-4o", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-1106" - ], - "text":[], - "image":[], - "audio":[] - }, - "llamaindex":{ - "env_variable": "LLAMA_INDEX_API_KEY", - "HuggingFaceEmbedding": [ - "sentence-transformers/all-MiniLM-L6-v2", - "sentence-transformers/LaBSE" - ], - "OpenAIEmbedding": [ - "text-embedding-ada-002", - "text-embedding-3-large" - ], - "chat":[], - "text":[], - "image":[], - "audio":[] } } \ No newline at end of file diff --git a/vendorize.toml b/vendorize.toml index 35b6af8f..356956c6 100644 --- a/vendorize.toml +++ b/vendorize.toml @@ -1,4 +1,4 @@ target = "portkey_ai/_vendor" packages = [ - "openai==2.7.1" + "openai==2.16.0" ] \ No newline at end of file From d05765e66700f8fe7825ce92e27fce6b7df798a9 Mon Sep 17 00:00:00 2001 From: Narendranath Gogineni Date: Thu, 5 Feb 2026 02:09:37 +0530 Subject: [PATCH 2/4] feat: update Portkey wrappers for OpenAI SDK v2.16.0 compatibility - Add compact() method to Responses and 
AsyncResponses - Add CompactedResponse type - Add memory_limit parameter to containers.create - Add prompt_cache_retention parameter to chat completions - Add input_tokens resource to AsyncResponses --- portkey_ai/api_resources/apis/containers.py | 4 ++ portkey_ai/api_resources/apis/responses.py | 64 ++++++++++++++++++- .../api_resources/types/response_type.py | 22 +++++++ 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/apis/containers.py b/portkey_ai/api_resources/apis/containers.py index aef72756..1a0909e1 100644 --- a/portkey_ai/api_resources/apis/containers.py +++ b/portkey_ai/api_resources/apis/containers.py @@ -28,6 +28,7 @@ def create( name: str, expires_after: Union[container_create_params.ExpiresAfter, Omit] = omit, file_ids: Union[List[str], Omit] = omit, + memory_limit: Union[Literal["1g", "4g", "16g", "64g"], Omit] = omit, **kwargs, ) -> ContainerCreateResponse: extra_headers = kwargs.pop("extra_headers", None) @@ -38,6 +39,7 @@ def create( name=name, expires_after=expires_after, file_ids=file_ids, + memory_limit=memory_limit, extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, @@ -286,6 +288,7 @@ async def create( name: str, expires_after: Union[container_create_params.ExpiresAfter, Omit] = omit, file_ids: Union[List[str], Omit] = omit, + memory_limit: Union[Literal["1g", "4g", "16g", "64g"], Omit] = omit, **kwargs, ) -> ContainerCreateResponse: extra_headers = kwargs.pop("extra_headers", None) @@ -296,6 +299,7 @@ async def create( name=name, expires_after=expires_after, file_ids=file_ids, + memory_limit=memory_limit, extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, diff --git a/portkey_ai/api_resources/apis/responses.py b/portkey_ai/api_resources/apis/responses.py index 7ccfe186..d994cf2f 100644 --- a/portkey_ai/api_resources/apis/responses.py +++ b/portkey_ai/api_resources/apis/responses.py @@ -36,7 +36,10 @@ from 
portkey_ai._vendor.openai.types.shared_params.reasoning import Reasoning from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.types.response_type import Response as ResponseType +from portkey_ai.api_resources.types.response_type import ( + Response as ResponseType, + CompactedResponse, +) from portkey_ai.api_resources.types.responses_input_items_type import InputItemList from portkey_ai.api_resources.types.responses_input_tokens_type import ( InputTokenCountResponse, @@ -354,6 +357,35 @@ def cancel( timeout=timeout, ) + def compact( + self, + *, + model: Union[str, Omit] = omit, + input: Union[str, Iterable[ResponseInputItemParam], None, Omit] = omit, + instructions: Union[Optional[str], Omit] = omit, + previous_response_id: Union[Optional[str], Omit] = omit, + **kwargs, + ) -> CompactedResponse: + import json + + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + response = self.openai_client.with_raw_response.responses.compact( + model=model, # type: ignore[arg-type] + input=input, + instructions=instructions, + previous_response_id=previous_response_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, + ) + data = CompactedResponse(**json.loads(response.text)) + data._headers = response.headers + return data + class InputItems(APIResource): def __init__(self, client: Portkey) -> None: @@ -444,6 +476,7 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client self.input_items = AsyncInputItems(client) + self.input_tokens = AsyncInputTokens(client) @overload async def create( @@ -746,6 +779,35 @@ async def cancel( timeout=timeout, ) + async def compact( + self, + *, + 
model: Union[str, Omit] = omit, + input: Union[str, Iterable[ResponseInputItemParam], None, Omit] = omit, + instructions: Union[Optional[str], Omit] = omit, + previous_response_id: Union[Optional[str], Omit] = omit, + **kwargs, + ) -> CompactedResponse: + import json + + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + response = await self.openai_client.with_raw_response.responses.compact( + model=model, # type: ignore[arg-type] + input=input, + instructions=instructions, + previous_response_id=previous_response_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, + ) + data = CompactedResponse(**json.loads(response.text)) + data._headers = response.headers + return data + class AsyncInputItems(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: diff --git a/portkey_ai/api_resources/types/response_type.py b/portkey_ai/api_resources/types/response_type.py index 6544821a..d6374fde 100644 --- a/portkey_ai/api_resources/types/response_type.py +++ b/portkey_ai/api_resources/types/response_type.py @@ -18,6 +18,28 @@ from portkey_ai.api_resources.types.utils import parse_headers +class CompactedResponse(BaseModel, extra="allow"): + id: str + """The unique identifier for the compacted response.""" + + created_at: int + """Unix timestamp (in seconds) when the compacted conversation was created.""" + + object: Literal["response.compaction"] + """The object type. 
Always `response.compaction`.""" + + output: List[ResponseOutputItem] + """The compacted list of output items.""" + + usage: ResponseUsage + """Token accounting for the compaction pass.""" + + _headers: Optional[httpx.Headers] = PrivateAttr() + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + class ResponseError(BaseModel, extra="allow"): code: Literal[ "server_error", From fe9c4309b4043f02da0be2995bb97727e51c306b Mon Sep 17 00:00:00 2001 From: Narendranath Gogineni Date: Thu, 5 Feb 2026 03:04:45 +0530 Subject: [PATCH 3/4] docs: update claude.md with detailed vendoring guide - Add step-by-step vendoring process - Document Portkey customizations needed after vendoring - Add wrapper update patterns for new params/methods - Add checklist for vendoring - Update version references to v2.16.0 --- claude.md | 199 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 188 insertions(+), 11 deletions(-) diff --git a/claude.md b/claude.md index b06b367e..02654ceb 100644 --- a/claude.md +++ b/claude.md @@ -12,7 +12,7 @@ portkey-python-sdk/ │ ├── __init__.py # Public API - exports all classes │ ├── version.py # SDK version │ ├── _vendor/ # Vendored dependencies -│ │ └── openai/ # Vendored OpenAI SDK (e.g., v2.7.1) +│ │ └── openai/ # Vendored OpenAI SDK (currently v2.16.0) │ ├── api_resources/ # Core SDK implementation │ │ ├── __init__.py # Internal exports │ │ ├── client.py # Portkey & AsyncPortkey clients @@ -41,7 +41,7 @@ The SDK vendors the OpenAI Python SDK to avoid version conflicts: ```toml # vendorize.toml target = "portkey_ai/_vendor" -packages = ["openai==2.7.1"] +packages = ["openai==2.16.0"] # Update this version as needed ``` ### Key Vendoring Quirks @@ -407,19 +407,196 @@ portkey_ai/__init__.py # Final public export + add to __all__ ## Updating Vendored OpenAI SDK -1. 
Update version in `vendorize.toml`: - ```toml - packages = ["openai==X.Y.Z"] +### Step-by-Step Process + +#### Step 1: Compare SDK Versions +Before vendoring, compare changes between versions: +``` +https://github.com/openai/openai-python/compare/vOLD...vNEW +``` +This helps identify: +- New methods/resources added +- Removed/deprecated methods +- Signature changes in existing methods + +#### Step 2: Clean and Re-vendor + +```bash +# Delete the old vendor folder contents +rm -rf portkey_ai/_vendor/* + +# Update version in vendorize.toml +packages = ["openai==X.Y.Z"] + +# Run vendorize (install with: pip install vendorize) +python-vendorize +``` + +The tool rewrites imports from `openai.*` to `portkey_ai._vendor.openai.*` + +#### Step 3: Apply Portkey Customizations + +**Two files MUST be modified after every vendoring:** + +1. **`portkey_ai/_vendor/openai/_constants.py`** + ```python + # Change DEFAULT_MAX_RETRIES from 2 to 1 + DEFAULT_MAX_RETRIES = 1 + ``` + +2. **`portkey_ai/_vendor/openai/_base_client.py`** + + Replace the `_should_retry` method with Portkey's custom logic: + ```python + def _should_retry(self, response: httpx.Response) -> bool: + # Custom Retry Conditions + retry_status_code = response.status_code + retry_trace_id = response.headers.get("x-portkey-trace-id") + retry_request_id = response.headers.get("x-portkey-request-id") + retry_gateway_exception = response.headers.get("x-portkey-gateway-exception") + + if ( + retry_status_code < 500 + or retry_trace_id + or retry_request_id + or retry_gateway_exception + ): + return False + + return True ``` -2. 
Run vendorize tool (typically `vendorize` or `python -m vendorize`) +#### Step 4: Run Lint to Find Issues + +```bash +make lint +``` + +This will surface: +- Type incompatibilities +- Missing imports +- Signature mismatches + +#### Step 5: Update Portkey Wrappers + +For each wrapper that needs updating: + +**Adding new parameters:** +```python +# If wrapper explicitly lists parameters (not just **kwargs) +# Add new params with Union[Type, Omit] = omit pattern +def create( + self, + *, + existing_param: Union[str, Omit] = omit, + new_param: Union[str, Omit] = omit, # Add new param + **kwargs, +): + response = self.openai_client.with_raw_response.resource.create( + existing_param=existing_param, + new_param=new_param, # Pass to underlying client + ... + ) +``` + +**Adding new methods:** +```python +def new_method( + self, + *, + param: Union[str, Omit] = omit, + **kwargs, +) -> NewResponseType: + import json + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + response = self.openai_client.with_raw_response.resource.new_method( + param=param, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, + ) + data = NewResponseType(**json.loads(response.text)) + data._headers = response.headers + return data +``` + +**Adding new response types:** +```python +# In api_resources/types/ +class NewResponseType(BaseModel, extra="allow"): + id: str + created_at: int + # ... 
other fields from OpenAI type + _headers: Optional[httpx.Headers] = PrivateAttr() + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) +``` + +#### Step 6: Handle Type Ignore Comments + +Sometimes Portkey uses more flexible types than OpenAI's strict literals: +```python +# Portkey allows any string, OpenAI expects specific literals +response = self.openai_client.with_raw_response.resource.method( + model=model, # type: ignore[arg-type] + ... +) +``` + +#### Step 7: Verify and Test + +```bash +# Format code +make format + +# Run all checks +make lint + +# Test imports +python -c "from portkey_ai import Portkey; print('OK')" -3. The tool rewrites imports from `openai.*` to `portkey_ai._vendor.openai.*` +# Run tests (requires valid virtual keys) +pytest . -n 10 +``` + +### Wrapper Resilience Pattern + +Most Portkey wrappers use `**kwargs` pattern which automatically forwards new parameters: + +```python +def create(self, *, name: str, **kwargs): + extra_headers = kwargs.pop("extra_headers", None) + # ... other pops + response = self.openai_client.with_raw_response.resource.create( + name=name, + extra_headers=extra_headers, + extra_body={**(extra_body or {}), **kwargs}, # Unknown params go here + ) +``` -4. After vendoring: - - Check for new OpenAI resources that need Portkey wrappers - - Update any Portkey types that mirror OpenAI types - - Test thoroughly - OpenAI SDK changes can break wrappers +This means many new OpenAI parameters work automatically without code changes. Only add explicit parameters when: +1. The wrapper doesn't pass `**kwargs` to `extra_body` +2. You want IDE autocomplete/type hints for important params +3. 
The parameter needs special handling + +### Checklist for Vendoring + +- [ ] Delete `portkey_ai/_vendor/*` +- [ ] Update `vendorize.toml` with new version +- [ ] Run `python-vendorize` +- [ ] Set `DEFAULT_MAX_RETRIES = 1` in `_constants.py` +- [ ] Replace `_should_retry` in `_base_client.py` +- [ ] Run `make lint` and fix errors +- [ ] Add new methods/types if needed +- [ ] Run `make format` +- [ ] Test imports work +- [ ] Commit vendored code first, then wrapper changes ## Common Gotchas From c0705146b39a1fae654c43a4d1fe4b9ca298a2a1 Mon Sep 17 00:00:00 2001 From: Narendranath Gogineni Date: Thu, 5 Feb 2026 03:48:03 +0530 Subject: [PATCH 4/4] fix: add missing parameters from OpenAI SDK v2.16.0 - Add prompt_cache_retention to responses create/stream/parse methods - Add known_speaker_names and known_speaker_references to audio transcriptions - Add description parameter to vector_stores.create - Make file_ids optional in vector_stores.file_batches.create - Update videos model type to VideoModelParam --- portkey_ai/api_resources/apis/audio.py | 12 ++++++ portkey_ai/api_resources/apis/responses.py | 42 +++++++++++++++++++ .../api_resources/apis/vector_stores.py | 8 +++- portkey_ai/api_resources/apis/videos.py | 10 ++--- 4 files changed, 65 insertions(+), 7 deletions(-) diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py index 75f53ee3..052614fb 100644 --- a/portkey_ai/api_resources/apis/audio.py +++ b/portkey_ai/api_resources/apis/audio.py @@ -41,6 +41,8 @@ def create( temperature: Union[float, Omit] = omit, timestamp_granularities: Union[List[str], Omit] = omit, stream: Union[bool, Omit] = omit, + known_speaker_names: Union[List[str], Omit] = omit, + known_speaker_references: Union[List[str], Omit] = omit, **kwargs, ) -> Union[Transcription, TranscriptionVerbose, str]: extra_headers = kwargs.pop("extra_headers", {}) @@ -58,6 +60,8 @@ def create( temperature=temperature, timestamp_granularities=timestamp_granularities, 
stream=stream, + known_speaker_names=known_speaker_names, + known_speaker_references=known_speaker_references, extra_headers=extra_headers, extra_body=kwargs, ) @@ -70,6 +74,8 @@ def create( response_format=response_format, temperature=temperature, timestamp_granularities=timestamp_granularities, + known_speaker_names=known_speaker_names, + known_speaker_references=known_speaker_references, extra_headers=extra_headers, extra_body=kwargs, ) @@ -179,6 +185,8 @@ async def create( temperature: Union[float, Omit] = omit, timestamp_granularities: Union[List[str], Omit] = omit, stream: Union[bool, Omit] = omit, + known_speaker_names: Union[List[str], Omit] = omit, + known_speaker_references: Union[List[str], Omit] = omit, **kwargs, ) -> Union[Transcription, TranscriptionVerbose, str]: extra_headers = kwargs.pop("extra_headers", {}) @@ -196,6 +204,8 @@ async def create( temperature=temperature, timestamp_granularities=timestamp_granularities, stream=stream, + known_speaker_names=known_speaker_names, + known_speaker_references=known_speaker_references, extra_headers=extra_headers, extra_body=kwargs, ) @@ -209,6 +219,8 @@ async def create( response_format=response_format, temperature=temperature, timestamp_granularities=timestamp_granularities, + known_speaker_names=known_speaker_names, + known_speaker_references=known_speaker_references, extra_headers=extra_headers, extra_body=kwargs, ) diff --git a/portkey_ai/api_resources/apis/responses.py b/portkey_ai/api_resources/apis/responses.py index d994cf2f..547536c5 100644 --- a/portkey_ai/api_resources/apis/responses.py +++ b/portkey_ai/api_resources/apis/responses.py @@ -68,6 +68,9 @@ def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], 
Omit] = omit, stream: Union[Literal[False], Omit] = omit, @@ -95,6 +98,9 @@ def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], Omit] = omit, temperature: Union[Optional[float], Omit] = omit, @@ -121,6 +127,9 @@ def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], Omit] = omit, temperature: Union[Optional[float], Omit] = omit, @@ -145,6 +154,9 @@ def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], Omit] = omit, stream: Union[Optional[Literal[False]], Literal[True], Omit] = omit, @@ -171,6 +183,7 @@ def create( metadata=metadata, parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, + prompt_cache_retention=prompt_cache_retention, reasoning=reasoning, store=store, stream=stream, # type: ignore[arg-type] @@ -244,6 +257,9 @@ def stream( metadata: Union[Metadata, Omit] = omit, parallel_tool_calls: Union[bool, Omit] = omit, previous_response_id: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Reasoning, Omit] = omit, store: Union[bool, Omit] = omit, stream_options: 
Union[response_create_params.StreamOptions, Omit] = omit, @@ -270,6 +286,7 @@ def stream( metadata=metadata, parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, + prompt_cache_retention=prompt_cache_retention, reasoning=reasoning, store=store, stream_options=stream_options, @@ -298,6 +315,9 @@ def parse( metadata: Union[Metadata, Omit] = omit, parallel_tool_calls: Union[bool, Omit] = omit, previous_response_id: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Reasoning, Omit] = omit, store: Union[bool, Omit] = omit, stream: Union[Literal[False], Literal[True], Omit] = omit, @@ -325,6 +345,7 @@ def parse( metadata=metadata, parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, + prompt_cache_retention=prompt_cache_retention, reasoning=reasoning, store=store, stream=stream, @@ -490,6 +511,9 @@ async def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], Omit] = omit, stream: Union[Literal[False], Omit] = omit, @@ -517,6 +541,9 @@ async def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], Omit] = omit, temperature: Union[Optional[float], Omit] = omit, @@ -543,6 +570,9 @@ async def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = 
omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], Omit] = omit, temperature: Union[Optional[float], Omit] = omit, @@ -567,6 +597,9 @@ async def create( metadata: Union[Optional[Metadata], Omit] = omit, parallel_tool_calls: Union[Optional[bool], Omit] = omit, previous_response_id: Union[Optional[str], Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Optional[Reasoning], Omit] = omit, store: Union[Optional[bool], Omit] = omit, stream: Union[Optional[Literal[False]], Literal[True], Omit] = omit, @@ -593,6 +626,7 @@ async def create( metadata=metadata, parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, + prompt_cache_retention=prompt_cache_retention, reasoning=reasoning, store=store, stream=stream, # type: ignore[arg-type] @@ -666,6 +700,9 @@ def stream( metadata: Union[Metadata, Omit] = omit, parallel_tool_calls: Union[bool, Omit] = omit, previous_response_id: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Reasoning, Omit] = omit, store: Union[bool, Omit] = omit, stream_options: Union[response_create_params.StreamOptions, Omit] = omit, @@ -692,6 +729,7 @@ def stream( metadata=metadata, parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, + prompt_cache_retention=prompt_cache_retention, reasoning=reasoning, store=store, stream_options=stream_options, @@ -720,6 +758,9 @@ async def parse( metadata: Union[Metadata, Omit] = omit, parallel_tool_calls: Union[bool, Omit] = omit, previous_response_id: Union[str, Omit] = omit, + prompt_cache_retention: Union[ + Optional[Literal["in-memory", "24h"]], Omit + ] = omit, reasoning: Union[Reasoning, Omit] = omit, store: Union[bool, Omit] = omit, stream: Union[Literal[False], Literal[True], 
Omit] = omit, @@ -747,6 +788,7 @@ async def parse( metadata=metadata, parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, + prompt_cache_retention=prompt_cache_retention, reasoning=reasoning, store=store, stream=stream, diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py index 73386b60..b9e31127 100644 --- a/portkey_ai/api_resources/apis/vector_stores.py +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -32,6 +32,7 @@ def create( self, *, chunking_strategy: Union[Any, Omit] = omit, + description: Union[str, Omit] = omit, expires_after: Union[vector_store_create_params.ExpiresAfter, Omit] = omit, file_ids: Union[List[str], Omit] = omit, metadata: Union[Optional[Metadata], Omit] = omit, @@ -39,6 +40,7 @@ def create( ) -> VectorStore: response = self.openai_client.with_raw_response.vector_stores.create( chunking_strategy=chunking_strategy, + description=description, expires_after=expires_after, file_ids=file_ids, metadata=metadata, @@ -308,7 +310,7 @@ def create( self, vector_store_id: str, *, - file_ids: List[str], + file_ids: Union[List[str], Omit] = omit, chunking_strategy: Union[Any, Omit] = omit, **kwargs, ) -> VectorStoreFileBatch: @@ -460,6 +462,7 @@ async def create( self, *, chunking_strategy: Union[Any, Omit] = omit, + description: Union[str, Omit] = omit, expires_after: Union[vector_store_create_params.ExpiresAfter, Omit] = omit, file_ids: Union[List[str], Omit] = omit, metadata: Union[Optional[Metadata], Omit] = omit, @@ -467,6 +470,7 @@ async def create( ) -> VectorStore: response = await self.openai_client.with_raw_response.vector_stores.create( chunking_strategy=chunking_strategy, + description=description, expires_after=expires_after, file_ids=file_ids, metadata=metadata, @@ -736,7 +740,7 @@ async def create( self, vector_store_id: str, *, - file_ids: List[str], + file_ids: Union[List[str], Omit] = omit, chunking_strategy: Union[Any, Omit] = omit, **kwargs, ) 
-> VectorStoreFileBatch: diff --git a/portkey_ai/api_resources/apis/videos.py b/portkey_ai/api_resources/apis/videos.py index 6cd658e3..ef810cc4 100644 --- a/portkey_ai/api_resources/apis/videos.py +++ b/portkey_ai/api_resources/apis/videos.py @@ -2,7 +2,7 @@ from typing import Any, Literal, Optional, Union import httpx -from portkey_ai._vendor.openai.types.video_model import VideoModel +from portkey_ai._vendor.openai.types.video_model_param import VideoModelParam from portkey_ai._vendor.openai.types.video_seconds import VideoSeconds from portkey_ai._vendor.openai.types.video_size import VideoSize from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource @@ -26,7 +26,7 @@ def create( *, prompt: str, input_reference: Union[FileTypes, Omit] = omit, - model: Union[VideoModel, Omit] = omit, + model: Union[VideoModelParam, Omit] = omit, seconds: Union[VideoSeconds, Omit] = omit, size: Union[VideoSize, Omit] = omit, extra_headers: Optional[Headers] = None, @@ -55,7 +55,7 @@ def create_and_poll( *, prompt: str, input_reference: Union[FileTypes, Omit] = omit, - model: Union[VideoModel, Omit] = omit, + model: Union[VideoModelParam, Omit] = omit, seconds: Union[VideoSeconds, Omit] = omit, size: Union[VideoSize, Omit] = omit, poll_interval_ms: Union[int, Omit] = omit, @@ -208,7 +208,7 @@ async def create( *, prompt: str, input_reference: Union[FileTypes, Omit] = omit, - model: Union[VideoModel, Omit] = omit, + model: Union[VideoModelParam, Omit] = omit, seconds: Union[VideoSeconds, Omit] = omit, size: Union[VideoSize, Omit] = omit, extra_headers: Optional[Headers] = None, @@ -237,7 +237,7 @@ async def create_and_poll( *, prompt: str, input_reference: Union[FileTypes, Omit] = omit, - model: Union[VideoModel, Omit] = omit, + model: Union[VideoModelParam, Omit] = omit, seconds: Union[VideoSeconds, Omit] = omit, size: Union[VideoSize, Omit] = omit, poll_interval_ms: Union[int, Omit] = omit,