
    
from __future__ import annotations

import inspect
from typing import Dict, List, Union, Iterable, Optional
from typing_extensions import Literal, overload

import httpx
import pydantic

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ..._base_client import make_request_options
from ...types.chat import ChatCompletionAudioParam, completion_create_params
from ...types.chat_model import ChatModel
from ...types.chat.chat_completion import ChatCompletion
from ...types.chat.chat_completion_chunk import ChatCompletionChunk
from ...types.chat.chat_completion_modality import ChatCompletionModality
from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ...types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return the
        raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        )r,   selfs    U/var/www/piapp/venv/lib/python3.11/site-packages/openai/resources/chat/completions.pywith_raw_responsezCompletions.with_raw_response+   s     *$///     CompletionsWithStreamingResponsec                     t          |           S z
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        )r4   r/   s    r1   with_streaming_responsez#Completions.with_streaming_response5   s     0555r3   Naudiofrequency_penaltyfunction_call	functions
logit_biaslogprobsmax_completion_tokens
max_tokensmetadata
modalitiesnparallel_tool_calls
predictionpresence_penaltyresponse_formatseedservice_tierstopstorestreamstream_optionstemperaturetool_choicetoolstop_logprobstop_puserextra_headersextra_query
extra_bodytimeoutmessages$Iterable[ChatCompletionMessageParam]modelUnion[str, ChatModel]r9   -Optional[ChatCompletionAudioParam] | NotGivenr:   Optional[float] | NotGivenr;   0completion_create_params.FunctionCall | NotGivenr<   6Iterable[completion_create_params.Function] | NotGivenr=   #Optional[Dict[str, int]] | NotGivenr>   Optional[bool] | NotGivenr?   Optional[int] | NotGivenr@   rA   #Optional[Dict[str, str]] | NotGivenrB   1Optional[List[ChatCompletionModality]] | NotGivenrC   rD   bool | NotGivenrE   9Optional[ChatCompletionPredictionContentParam] | NotGivenrF   rG   2completion_create_params.ResponseFormat | NotGivenrH   rI   /Optional[Literal['auto', 'default']] | NotGivenrJ   *Union[Optional[str], List[str]] | NotGivenrK   rL   #Optional[Literal[False]] | NotGivenrM   5Optional[ChatCompletionStreamOptionsParam] | NotGivenrN   rO   .ChatCompletionToolChoiceOptionParam | NotGivenrP   ,Iterable[ChatCompletionToolParam] | NotGivenrQ   rR   rS   str | NotGivenrT   Headers | NonerU   Query | NonerV   Body | NonerW   'float | httpx.Timeout | None | NotGivenr    c       !            dS a-  Creates a model response for the given chat conversation.

        Learn more in the
        [text generation](https://platform.openai.com/docs/guides/text-generation),
        [vision](https://platform.openai.com/docs/guides/vision), and
        [audio](https://platform.openai.com/docs/guides/audio) guides.

        Args:
          messages: A list of messages comprising the conversation so far. Depending on the
              [model](https://platform.openai.com/docs/models) you use, different message
              types (modalities) are supported, like
              [text](https://platform.openai.com/docs/guides/text-generation),
              [images](https://platform.openai.com/docs/guides/vision), and
              [audio](https://platform.openai.com/docs/guides/audio).
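
              For example, a short two-message conversation might be passed as the
              list below (an illustrative sketch; the message contents are
              placeholders):

                  messages = [
                      {"role": "system", "content": "You are a helpful assistant."},
                      {"role": "user", "content": "What is the capital of France?"},
                  ]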

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          audio: Parameters for audio output. Required when audio output is requested with
              `modalities: ["audio"]`.
              [Learn more](https://platform.openai.com/docs/guides/audio).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.
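
              For example, an illustrative bias map (the token ID below is an
              arbitrary placeholder, not looked up from a real tokenizer):

                  logit_bias = {"1234": -100}  # effectively bans token 1234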

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion. This value can be used to control
              [costs](https://openai.com/api/pricing/) for text generated via API.

              This value is now deprecated in favor of `max_completion_tokens`, and is not
              compatible with
              [o1 series models](https://platform.openai.com/docs/guides/reasoning).

          metadata: Developer-defined tags and values used for filtering completions in the
              [dashboard](https://platform.openai.com/chat-completions).

          modalities: Output types that you would like the model to generate for this request. Most
              models are capable of generating text, which is the default:

              `["text"]`

              The `gpt-4o-audio-preview` model can also be used to
              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
              this model generate both text and audio responses, you can use:

              `["text", "audio"]`

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          prediction: Static predicted output content, such as the content of a text file that is
              being regenerated.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
              [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.
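
              For example, a minimal JSON-mode request might look like the sketch
              below (the `client` instance and model name are placeholders):

                  from openai import OpenAI

                  client = OpenAI()
                  completion = client.chat.completions.create(
                      model="gpt-4o-mini",
                      response_format={"type": "json_object"},
                      messages=[
                          # JSON mode requires instructing the model to produce JSON
                          {"role": "system", "content": "Reply with a JSON object."},
                          {"role": "user", "content": "List three primary colors."},
                      ],
                  )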

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          service_tier: Specifies the latency tier to use for processing the request. This parameter is
              relevant for customers subscribed to the scale tier service:

              - If set to 'auto', and the Project is Scale tier enabled, the system will
                utilize scale tier credits until they are exhausted.
              - If set to 'auto', and the Project is not Scale tier enabled, the request will
                be processed using the default service tier with a lower uptime SLA and no
                latency guarantee.
              - If set to 'default', the request will be processed using the default service
                tier with a lower uptime SLA and no latency guarantee.
              - When not set, the default behavior is 'auto'.

              When this parameter is set, the response body will include the `service_tier`
              utilized.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          store: Whether or not to store the output of this chat completion request for use in
              our [model distillation](https://platform.openai.com/docs/guides/distillation)
              or [evals](https://platform.openai.com/docs/guides/evals) products.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.
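
              For example, a single function tool could be declared as follows
              (the function name and schema are illustrative placeholders):

                  tools = [
                      {
                          "type": "function",
                          "function": {
                              "name": "get_weather",
                              "description": "Get the current weather for a city.",
                              "parameters": {
                                  "type": "object",
                                  "properties": {"city": {"type": "string"}},
                                  "required": ["city"],
                              },
                          },
                      }
                  ]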

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        N "r0   rX   rZ   r9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   s"                                     r1   createzCompletions.create>   
    ` 	r3   r9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   Literal[True]Stream[ChatCompletionChunk]c       !            dS a-  Creates a model response for the given chat conversation.

        Learn more in the
        [text generation](https://platform.openai.com/docs/guides/text-generation),
        [vision](https://platform.openai.com/docs/guides/vision), and
        [audio](https://platform.openai.com/docs/guides/audio) guides.

        Args:
          messages: A list of messages comprising the conversation so far. Depending on the
              [model](https://platform.openai.com/docs/models) you use, different message
              types (modalities) are supported, like
              [text](https://platform.openai.com/docs/guides/text-generation),
              [images](https://platform.openai.com/docs/guides/vision), and
              [audio](https://platform.openai.com/docs/guides/audio).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
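
              For example, an illustrative streaming loop (the `client` instance and
              model name are placeholders):

                  from openai import OpenAI

                  client = OpenAI()
                  stream = client.chat.completions.create(
                      model="gpt-4o-mini",
                      messages=[{"role": "user", "content": "Say hello."}],
                      stream=True,
                  )
                  for chunk in stream:
                      # each chunk carries an incremental delta; content may be None
                      if chunk.choices and chunk.choices[0].delta.content:
                          print(chunk.choices[0].delta.content, end="")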

          audio: Parameters for audio output. Required when audio output is requested with
              `modalities: ["audio"]`.
              [Learn more](https://platform.openai.com/docs/guides/audio).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion. This value can be used to control
              [costs](https://openai.com/api/pricing/) for text generated via API.

              This value is now deprecated in favor of `max_completion_tokens`, and is not
              compatible with
              [o1 series models](https://platform.openai.com/docs/guides/reasoning).

          metadata: Developer-defined tags and values used for filtering completions in the
              [dashboard](https://platform.openai.com/chat-completions).

          modalities: Output types that you would like the model to generate for this request. Most
              models are capable of generating text, which is the default:

              `["text"]`

              The `gpt-4o-audio-preview` model can also be used to
              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
              this model generate both text and audio responses, you can use:

              `["text", "audio"]`

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          prediction: Static predicted output content, such as the content of a text file that is
              being regenerated.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
              [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          service_tier: Specifies the latency tier to use for processing the request. This parameter is
              relevant for customers subscribed to the scale tier service:

              - If set to 'auto', and the Project is Scale tier enabled, the system will
                utilize scale tier credits until they are exhausted.
              - If set to 'auto', and the Project is not Scale tier enabled, the request will
                be processed using the default service tier with a lower uptime SLA and no
                latency guarantee.
              - If set to 'default', the request will be processed using the default service
                tier with a lower uptime SLA and no latency guarantee.
              - When not set, the default behavior is 'auto'.

              When this parameter is set, the response body will include the `service_tier`
              utilized.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          store: Whether or not to store the output of this chat completion request for use in
              our [model distillation](https://platform.openai.com/docs/guides/distillation)
              or [evals](https://platform.openai.com/docs/guides/evals) products.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        Nru   "r0   rX   rZ   rL   r9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   s"                                     r1   rw   zCompletions.create0  rx   r3   bool,ChatCompletion | Stream[ChatCompletionChunk]c       !            dS r}   ru   r~   s"                                     r1   rw   zCompletions.create"  rx   r3   rX   rZ   rL   3Optional[Literal[False]] | Literal[True] | NotGivenc       !        Z   t          |           |                     dt          i d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d|||||||||||||dt          j                  t          ||| |!          t          |pdt          t                             S Nz/chat/completionsrX   rZ   r9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   rG   )rH   rI   rJ   rK   rL   rM   rN   rO   rP   rQ   rR   rS   )rT   rU   rV   rW   F)bodyoptionscast_torL   
stream_cls)	validate_response_format_postr   r   CompletionCreateParamsr   r    r   r!   rv   s"                                     r1   rw   zCompletions.create  s   P 	!111zz U U ():	
 $]   !*  ,-B !*  !*  *+> !*  '(8!" &#$ !$0 "$&4#.#."$0" ;  > )?A! !D )+Q[el   #?U12S  *
 *
 *	
r3   )r+   r,   )r+   r4   DrX   rY   rZ   r[   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rb   rA   rc   rB   rd   rC   rb   rD   re   rE   rf   rF   r]   rG   rg   rH   rb   rI   rh   rJ   ri   rK   ra   rL   rj   rM   rk   rN   r]   rO   rl   rP   rm   rQ   rb   rR   r]   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r+   r    )DrX   rY   rZ   r[   rL   rz   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rb   rA   rc   rB   rd   rC   rb   rD   re   rE   rf   rF   r]   rG   rg   rH   rb   rI   rh   rJ   ri   rK   ra   rM   rk   rN   r]   rO   rl   rP   rm   rQ   rb   rR   r]   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r+   r{   )DrX   rY   rZ   r[   rL   r   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rb   rA   rc   rB   rd   rC   rb   rD   re   rE   rf   rF   r]   rG   rg   rH   rb   rI   rh   rJ   ri   rK   ra   rM   rk   rN   r]   rO   rl   rP   rm   rQ   rb   rR   r]   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r+   r   )DrX   rY   rZ   r[   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rb   rA   rc   rB   rd   rC   rb   rD   re   rE   rf   rF   r]   rG   rg   rH   rb   rI   rh   rJ   ri   rK   ra   rL   r   rM   rk   rN   r]   rO   rl   rP   rm   rQ   rb   rR   r]   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r+   r   
__name__
__module____qualname__r   r2   r7   r
   r   rw   r   ru   r3   r1   r(   r(   *   s       0 0 0 _0 6 6 6 _6  @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+46?PY2;FO>G1:,5( )-$("&;DKo o o o o Xob  @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+4PY2;FO>G1:,5( )-$("&;DKo o o o o Xob  @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+4PY2;FO>G1:,5( )-$("&;DKo o o o o Xob ]J(*I*I*IJJ @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+4FOPY2;FO>G1:,5( )-$("&;DKR
 R
 R
 R
 R
 KJR
 R
 R
r3   c            !         e Zd ZedNd            ZedOd            ZeeeeeeeeeeeeeeeeeeeeeeeeeeeedddeddPdC            ZeeeeeeeeeeeeeeeeeeeeeeeeeeedddedDdQdG            ZeeeeeeeeeeeeeeeeeeeeeeeeeeedddedDdRdJ            Z e	dd
gg dK          eeeeeeeeeeeeeeeeeeeeeeeeeeedddeddSdM            ZdS )Tr)   r+   AsyncCompletionsWithRawResponsec                     t          |           S r.   )r   r/   s    r1   r2   z"AsyncCompletions.with_raw_responsek  s     /t444r3   %AsyncCompletionsWithStreamingResponsec                     t          |           S r6   )r   r/   s    r1   r7   z(AsyncCompletions.with_streaming_responseu  s     5T:::r3   Nr8   rX   rY   rZ   r[   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rA   rc   rB   rd   rC   rD   re   rE   rf   rF   rG   rg   rH   rI   rh   rJ   ri   rK   rL   rj   rM   rk   rN   rO   rl   rP   rm   rQ   rR   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r    c       !        
   K   dS rt   ru   rv   s"                                     r1   rw   zAsyncCompletions.create~        ` 	r3   ry   rz    AsyncStream[ChatCompletionChunk]c       !        
   K   dS r}   ru   r~   s"                                     r1   rw   zAsyncCompletions.createp  r   r3   r   1ChatCompletion | AsyncStream[ChatCompletionChunk]c       !        
   K   dS r}   ru   r~   s"                                     r1   rw   zAsyncCompletions.createb  r   r3   r   r   c       !        v  K   t          |           |                     dt          i d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d|||||||||||||dt          j                   d {V t          ||| |!          t          |pdt          t                              d {V S r   )	r   r   r   r   r   r   r    r   r!   rv   s"                                     r1   rw   zAsyncCompletions.createT  s     P 	!111ZZ,U U ():	
 $]   !*  ,-B !*  !*  *+> !*  '(8!" &#$ !$0 "$&4#.#."$0" ;  > )?A! ! ! ! ! ! ! !D )+Q[el   #?U"#67S   *
 *
 *
 *
 *
 *
 *
 *
 *	
r3   )r+   r   )r+   r   r   )DrX   rY   rZ   r[   rL   rz   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rb   rA   rc   rB   rd   rC   rb   rD   re   rE   rf   rF   r]   rG   rg   rH   rb   rI   rh   rJ   ri   rK   ra   rM   rk   rN   r]   rO   rl   rP   rm   rQ   rb   rR   r]   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r+   r   )DrX   rY   rZ   r[   rL   r   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rb   rA   rc   rB   rd   rC   rb   rD   re   rE   rf   rF   r]   rG   rg   rH   rb   rI   rh   rJ   ri   rK   ra   rM   rk   rN   r]   rO   rl   rP   rm   rQ   rb   rR   r]   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r+   r   )DrX   rY   rZ   r[   r9   r\   r:   r]   r;   r^   r<   r_   r=   r`   r>   ra   r?   rb   r@   rb   rA   rc   rB   rd   rC   rb   rD   re   rE   rf   rF   r]   rG   rg   rH   rb   rI   rh   rJ   ri   rK   ra   rL   r   rM   rk   rN   r]   rO   rl   rP   rm   rQ   rb   rR   r]   rS   rn   rT   ro   rU   rp   rV   rq   rW   rr   r+   r   r   ru   r3   r1   r)   r)   j  s       5 5 5 _5 ; ; ; _;  @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+46?PY2;FO>G1:,5( )-$("&;DKo o o o o Xob  @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+4PY2;FO>G1:,5( )-$("&;DKo o o o o Xob  @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+4PY2;FO>G1:,5( )-$("&;DKo o o o o Xob ]J(*I*I*IJJ @I8AJSLU:C.7:C/88AHQ&//8PY7@NW)2HQ;D+4FOPY2;FO>G1:,5( )-$("&;DKR
 R
 R
 R
 R
 KJR
 R
 R
r3   c                      e Zd ZddZdS )r,   completionsr(   r+   Nonec                P    || _         t          j        |j                  | _        d S N)_completionsr   to_raw_response_wrapperrw   r0   r   s     r1   __init__z#CompletionsWithRawResponse.__init__  s(    '&>
 
r3   Nr   r(   r+   r   r   r   r   r   ru   r3   r1   r,   r,     (        
 
 
 
 
 
r3   r,   c                      e Zd ZddZdS )r   r   r)   r+   r   c                P    || _         t          j        |j                  | _        d S r   )r   r   async_to_raw_response_wrapperrw   r   s     r1   r   z(AsyncCompletionsWithRawResponse.__init__  s(    '&D
 
r3   Nr   r)   r+   r   r   ru   r3   r1   r   r     r   r3   r   c                      e Zd ZddZdS )r4   r   r(   r+   r   c                F    || _         t          |j                  | _        d S r   )r   r   rw   r   s     r1   r   z)CompletionsWithStreamingResponse.__init__  s%    '2
 
r3   Nr   r   ru   r3   r1   r4   r4     r   r3   r4   c                      e Zd ZddZdS )r   r   r)   r+   r   c                F    || _         t          |j                  | _        d S r   )r   r   rw   r   s     r1   r   z.AsyncCompletionsWithStreamingResponse.__init__  s%    '8
 
r3   Nr   r   ru   r3   r1   r   r     r   r3   r   rG   objectr+   r   c                    t          j        |           r)t          | t          j                  rt          d          d S d S )NzzYou tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `beta.chat.completions.parse()` instead)inspectisclass
issubclasspydantic	BaseModel	TypeError)rG   s    r1   r   r     sT    '' 
JHZ,[,[ 
 I
 
 	

 
 
 
r3   )rG   r   r+   r   )E
__future__r   r   typingr   r   r   r   r   typing_extensionsr	   r
   httpxr    r   _typesr   r   r   r   r   _utilsr   r   r   _compatr   	_resourcer   r   	_responser   r   
_streamingr   r   
types.chatr   r   _base_clientr   types.chat_modelr   types.chat.chat_completionr     types.chat.chat_completion_chunkr!   #types.chat.chat_completion_modalityr"   %types.chat.chat_completion_tool_paramr#   &types.chat.chat_completion_audio_param(types.chat.chat_completion_message_paramr$   /types.chat.chat_completion_stream_options_paramr%   3types.chat.chat_completion_prediction_content_paramr&   3types.chat.chat_completion_tool_choice_option_paramr'   __all__r(   r)   r,   r   r4   r   r   ru   r3   r1   <module>r      s`   # " " " " "  8 8 8 8 8 8 8 8 8 8 8 8 8 8 / / / / / / / /               ? ? ? ? ? ? ? ? ? ? ? ? ? ?         
 ' & & & & & : : : : : : : : Y Y Y Y Y Y Y Y - - - - - - - -        1 0 0 0 0 0 ) ) ) ) ) ) 8 8 8 8 8 8 C C C C C C I I I I I I L L L L L L N N N N N N R R R R R R _ _ _ _ _ _ g g g g g g f f f f f f,
-}
 }
 }
 }
 }
/ }
 }
 }
@}
 }
 }
 }
 }
' }
 }
 }
@
 
 
 
 
 
 
 

 
 
 
 
 
 
 

 
 
 
 
 
 
 

 
 
 
 
 
 
 

 
 
 
 
 
r3   