from __future__ import annotations

from typing import Union, Iterable, Optional
from typing_extensions import Literal

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncCursorPage, AsyncCursorPage
from ...types.beta import assistant_list_params, assistant_create_params, assistant_update_params
from ..._base_client import AsyncPaginator, make_request_options
from ...types.chat_model import ChatModel
from ...types.beta.assistant import Assistant
from ...types.beta.assistant_deleted import AssistantDeleted
from ...types.beta.assistant_tool_param import AssistantToolParam
from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam

__all__ = ["Assistants", "AsyncAssistants"]


class Assistants(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AssistantsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AssistantsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AssistantsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AssistantsWithStreamingResponse(self)

    def create(
        self,
        *,
        model: Union[str, ChatModel],
        description: Optional[str] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        name: Optional[str] | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Assistant:
        """
        Create an assistant with a model and instructions.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          description: The description of the assistant. The maximum length is 512 characters.

          instructions: The system instructions that the assistant uses. The maximum length is 256,000
              characters.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format. Keys
              can be a maximum of 64 characters long and values can be a maximum of 512
              characters long.

          name: The name of the assistant. The maximum length is 256 characters.

          response_format: Specifies the format that the model must output. Compatible with
              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

          tool_resources: A set of resources that are used by the assistant's tools. The resources are
              specific to the type of tool. For example, the `code_interpreter` tool requires
              a list of file IDs, while the `file_search` tool requires a list of vector store
              IDs.

          tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
              assistant. Tools can be of types `code_interpreter`, `file_search`, or
              `function`.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or temperature but not both.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
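          Example (illustrative sketch only): assuming a configured client exposes an
              instance of this class as `client.beta.assistants` (that wiring lives
              outside this module), a minimal call looks like:

                  assistant = client.beta.assistants.create(
                      model="gpt-4o",
                      name="Math Tutor",
                      instructions="You answer math questions concisely.",
                  )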
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            "/assistants",
            body=maybe_transform(
                {
                    "model": model,
                    "description": description,
                    "instructions": instructions,
                    "metadata": metadata,
                    "name": name,
                    "response_format": response_format,
                    "temperature": temperature,
                    "tool_resources": tool_resources,
                    "tools": tools,
                    "top_p": top_p,
                },
                assistant_create_params.AssistantCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Assistant,
        )

    def retrieve(
        self,
        assistant_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Assistant:
        """
        Retrieves an assistant.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
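          Example (illustrative sketch only; `asst_abc123` is a placeholder ID and the
              resource is assumed to be mounted at `client.beta.assistants`):

                  assistant = client.beta.assistants.retrieve("asst_abc123")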
        """
        if not assistant_id:
            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get(
            f"/assistants/{assistant_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Assistant,
        )

    def update(
        self,
        assistant_id: str,
        *,
        description: Optional[str] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: str | NotGiven = NOT_GIVEN,
        name: Optional[str] | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Assistant:
        """Modifies an assistant.

        Args:
          description: The description of the assistant. The maximum length is 512 characters.

          instructions: The system instructions that the assistant uses. The maximum length is 256,000
              characters.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format. Keys
              can be a maximum of 64 characters long and values can be a maximum of 512
              characters long.

          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          name: The name of the assistant. The maximum length is 256 characters.

          response_format: Specifies the format that the model must output. Compatible with
              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

          tool_resources: A set of resources that are used by the assistant's tools. The resources are
              specific to the type of tool. For example, the `code_interpreter` tool requires
              a list of file IDs, while the `file_search` tool requires a list of vector store
              IDs.

          tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per
              assistant. Tools can be of types `code_interpreter`, `file_search`, or
              `function`.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or temperature but not both.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
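          Example (illustrative sketch only; assumes the resource is mounted at
              `client.beta.assistants` and that `asst_abc123` is a placeholder ID):

                  assistant = client.beta.assistants.update(
                      "asst_abc123",
                      instructions="Answer in one short paragraph.",
                  )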
        """
        if not assistant_id:
            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            f"/assistants/{assistant_id}",
            body=maybe_transform(
                {
                    "description": description,
                    "instructions": instructions,
                    "metadata": metadata,
                    "model": model,
                    "name": name,
                    "response_format": response_format,
                    "temperature": temperature,
                    "tool_resources": tool_resources,
                    "tools": tools,
                    "top_p": top_p,
                },
                assistant_update_params.AssistantUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Assistant,
        )

    def list(
        self,
        *,
        after: str | NotGiven = NOT_GIVEN,
        before: str | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SyncCursorPage[Assistant]:
        """Returns a list of assistants.

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          before: A cursor for use in pagination. `before` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              starting with obj_foo, your subsequent call can include before=obj_foo in order
              to fetch the previous page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
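          Example (illustrative sketch only; assumes the resource is mounted at
              `client.beta.assistants` and that the returned cursor page is iterable):

                  for assistant in client.beta.assistants.list(limit=20, order="desc"):
                      print(assistant.id)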
        """
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            "/assistants",
            page=SyncCursorPage[Assistant],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    assistant_list_params.AssistantListParams,
                ),
            ),
            model=Assistant,
        )

    def delete(
        self,
        assistant_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AssistantDeleted:
        """
        Delete an assistant.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
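          Example (illustrative sketch only; assumes the resource is mounted at
              `client.beta.assistants` and that `asst_abc123` is a placeholder ID):

                  deleted = client.beta.assistants.delete("asst_abc123")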
        """
        if not assistant_id:
            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._delete(
            f"/assistants/{assistant_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AssistantDeleted,
        )


class AsyncAssistants(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncAssistantsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncAssistantsWithStreamingResponse(self)

    async def create(
        self,
        *,
        model: Union[str, ChatModel],
        description: Optional[str] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        name: Optional[str] | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Assistant:
        """Create an assistant with a model and instructions; parameters match `Assistants.create` above."""
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            "/assistants",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "description": description,
                    "instructions": instructions,
                    "metadata": metadata,
                    "name": name,
                    "response_format": response_format,
                    "temperature": temperature,
                    "tool_resources": tool_resources,
                    "tools": tools,
                    "top_p": top_p,
                },
                assistant_create_params.AssistantCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Assistant,
        )

    async def retrieve(
        self,
        assistant_id: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Assistant:
        """Retrieves an assistant; parameters match `Assistants.retrieve` above."""
        if not assistant_id:
            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._get(
            f"/assistants/{assistant_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Assistant,
        )

    async def update(
        self,
        assistant_id: str,
        *,
        description: Optional[str] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN,
        model: str | NotGiven = NOT_GIVEN,
        name: Optional[str] | NotGiven = NOT_GIVEN,
        response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
        tools: Iterable[AssistantToolParam] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Assistant:
        """Modifies an assistant; parameters match `Assistants.update` above."""
        if not assistant_id:
            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            f"/assistants/{assistant_id}",
            body=await async_maybe_transform(
                {
                    "description": description,
                    "instructions": instructions,
                    "metadata": metadata,
                    "model": model,
                    "name": name,
                    "response_format": response_format,
                    "temperature": temperature,
                    "tool_resources": tool_resources,
                    "tools": tools,
                    "top_p": top_p,
                },
                assistant_update_params.AssistantUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Assistant,
        )

    def list(
        self,
        *,
        after: str | NotGiven = NOT_GIVEN,
        before: str | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncPaginator[Assistant, AsyncCursorPage[Assistant]]:
        """Returns a list of assistants; parameters match `Assistants.list` above."""
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._get_api_list(
            "/assistants",
            page=AsyncCursorPage[Assistant],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    assistant_list_params.AssistantListParams,
                ),
            ),
            model=Assistant,
        )

    async def delete(
        self,
        assistant_id: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AssistantDeleted:
        """Delete an assistant; parameters match `Assistants.delete` above."""
        if not assistant_id:
            raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._delete(
            f"/assistants/{assistant_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AssistantDeleted,
        )


class AssistantsWithRawResponse:
    def __init__(self, assistants: Assistants) -> None:
        self._assistants = assistants

        self.create = _legacy_response.to_raw_response_wrapper(assistants.create)
        self.retrieve = _legacy_response.to_raw_response_wrapper(assistants.retrieve)
        self.update = _legacy_response.to_raw_response_wrapper(assistants.update)
        self.list = _legacy_response.to_raw_response_wrapper(assistants.list)
        self.delete = _legacy_response.to_raw_response_wrapper(assistants.delete)


class AsyncAssistantsWithRawResponse:
    def __init__(self, assistants: AsyncAssistants) -> None:
        self._assistants = assistants

        self.create = _legacy_response.async_to_raw_response_wrapper(assistants.create)
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(assistants.retrieve)
        self.update = _legacy_response.async_to_raw_response_wrapper(assistants.update)
        self.list = _legacy_response.async_to_raw_response_wrapper(assistants.list)
        self.delete = _legacy_response.async_to_raw_response_wrapper(assistants.delete)


class AssistantsWithStreamingResponse:
    def __init__(self, assistants: Assistants) -> None:
        self._assistants = assistants

        self.create = to_streamed_response_wrapper(assistants.create)
        self.retrieve = to_streamed_response_wrapper(assistants.retrieve)
        self.update = to_streamed_response_wrapper(assistants.update)
        self.list = to_streamed_response_wrapper(assistants.list)
        self.delete = to_streamed_response_wrapper(assistants.delete)


class AsyncAssistantsWithStreamingResponse:
    def __init__(self, assistants: AsyncAssistants) -> None:
        self._assistants = assistants

        self.create = async_to_streamed_response_wrapper(assistants.create)
        self.retrieve = async_to_streamed_response_wrapper(assistants.retrieve)
        self.update = async_to_streamed_response_wrapper(assistants.update)
        self.list = async_to_streamed_response_wrapper(assistants.list)
        self.delete = async_to_streamed_response_wrapper(assistants.delete)
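# Illustrative usage sketch for the async resource (an assumption, not part of this
# module): it presumes the published `openai` package exposes an instance of
# `AsyncAssistants` as `client.beta.assistants` on an `AsyncOpenAI` client.
#
#     import asyncio
#     from openai import AsyncOpenAI
#
#     async def main() -> None:
#         client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
#         assistant = await client.beta.assistants.create(
#             model="gpt-4o",
#             instructions="You are a terse coding assistant.",
#         )
#         async for item in client.beta.assistants.list(limit=5):
#             print(item.id)
#         await client.beta.assistants.delete(assistant.id)
#
#     asyncio.run(main())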