from IPython.display import display,Image,Markdown
from datetime import datetime
from pprint import pprint
Cosette’s source
Setup
def print_columns(items, cols=3, width=30):
    for i in range(0, len(items), cols):
        row = items[i:i+cols]
        print(''.join(item[:width-1].ljust(width) for item in row))
client = OpenAI()
model_list = client.models.list()
print(f"Available models as of {datetime.now().strftime('%Y-%m-%d')}:\n")
print_columns(sorted([m.id for m in model_list]))
Available models as of 2025-08-10:
babbage-002 chatgpt-4o-latest codex-mini-latest
computer-use-preview computer-use-preview-2025-03- dall-e-2
dall-e-3 davinci-002 ft:gpt-4o-2024-08-06:answerai
ft:gpt-4o-2024-08-06:answerai ft:gpt-4o-2024-08-06:answerai ft:gpt-4o-mini-2024-07-18:ans
ft:gpt-4o-mini-2024-07-18:ans gpt-3.5-turbo gpt-3.5-turbo-0125
gpt-3.5-turbo-1106 gpt-3.5-turbo-16k gpt-3.5-turbo-instruct
gpt-3.5-turbo-instruct-0914 gpt-4 gpt-4-0125-preview
gpt-4-1106-preview gpt-4-turbo gpt-4-turbo-2024-04-09
gpt-4-turbo-preview gpt-4.1 gpt-4.1-2025-04-14
gpt-4.1-mini gpt-4.1-mini-2025-04-14 gpt-4.1-nano
gpt-4.1-nano-2025-04-14 gpt-4o gpt-4o-2024-05-13
gpt-4o-2024-08-06 gpt-4o-2024-11-20 gpt-4o-audio-preview
gpt-4o-audio-preview-2024-10- gpt-4o-audio-preview-2024-12- gpt-4o-audio-preview-2025-06-
gpt-4o-mini gpt-4o-mini-2024-07-18 gpt-4o-mini-audio-preview
gpt-4o-mini-audio-preview-202 gpt-4o-mini-realtime-preview gpt-4o-mini-realtime-preview-
gpt-4o-mini-search-preview gpt-4o-mini-search-preview-20 gpt-4o-mini-transcribe
gpt-4o-mini-tts gpt-4o-realtime-preview gpt-4o-realtime-preview-2024-
gpt-4o-realtime-preview-2024- gpt-4o-realtime-preview-2025- gpt-4o-search-preview
gpt-4o-search-preview-2025-03 gpt-4o-transcribe gpt-5
gpt-5-2025-08-07 gpt-5-chat-latest gpt-5-mini
gpt-5-mini-2025-08-07 gpt-5-nano gpt-5-nano-2025-08-07
gpt-image-1 o1 o1-2024-12-17
o1-mini o1-mini-2024-09-12 o1-pro
o1-pro-2025-03-19 o3 o3-2025-04-16
o3-deep-research o3-deep-research-2025-06-26 o3-mini
o3-mini-2025-01-31 o3-pro o3-pro-2025-06-10
o4-mini o4-mini-2025-04-16 o4-mini-deep-research
o4-mini-deep-research-2025-06 omni-moderation-2024-09-26 omni-moderation-latest
text-embedding-3-large text-embedding-3-small text-embedding-ada-002
tts-1 tts-1-1106 tts-1-hd
tts-1-hd-1106 whisper-1
Exported source
models = 'gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'o1-preview', 'o1-mini', 'gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-4', 'gpt-4-32k', 'gpt-3.5-turbo', 'gpt-3.5-turbo-instruct', 'o1', 'o3-mini', 'chatgpt-4o-latest', 'o1-pro', 'o3', 'o4-mini', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4.1-nano'
o1 should support images, while o1-mini and o3-mini do not.
can_set_temp
can_set_temp (m)
Exported source
text_only_models = 'o1-preview', 'o1-mini', 'o3-mini'
Exported source
has_streaming_models = set(models) - set(('o1-mini', 'o3-mini'))
has_sp_models = set(models) - set(('o1-mini', 'o3-mini'))
has_temp_models = set(models) - set(('o1', 'o1-mini', 'o3-mini'))
Exported source
def can_stream(m): return m in has_streaming_models
def can_set_sp(m): return m in has_sp_models
def can_set_temp(m): return m in has_temp_models
can_set_sp
can_set_sp (m)
can_stream
can_stream (m)
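For example, based on the model sets defined above we'd expect the other two helpers to behave like this (quick sanity checks, mirroring the can_stream asserts below):
assert can_set_sp("gpt-4o")
assert not can_set_sp("o1-mini")
assert can_set_temp("gpt-4o")
assert not can_set_temp("o1")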
assert can_stream("gpt-4o")
assert not can_stream("o1-mini")
model = 'gpt-5-mini'
OpenAI SDK
cli = OpenAI().responses
m = {'role': 'user', 'content': "I'm Jeremy"}
r = cli.create(
    input=[m], model=model, max_output_tokens=100,
    text={ "verbosity": "low" },
    reasoning={ "effort": "minimal" }
)
print(r)
Response(id='resp_6897d45698e48195904fa8232bac129a0b2ecc78a6b61be8', created_at=1754780758.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-5-mini-2025-08-07', object='response', output=[ResponseReasoningItem(id='rs_6897d457200c8195859175bf10d88f380b2ecc78a6b61be8', summary=[], type='reasoning', content=None, encrypted_content=None, status=None), ResponseOutputMessage(id='msg_6897d4573d948195a7cb1819a879cbb90b2ecc78a6b61be8', content=[ResponseOutputText(annotations=[], text='Nice to meet you, Jeremy. How can I help today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort='minimal', generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='low'), top_logprobs=0, truncation='disabled', usage=In: 8; Out: 19; Total: 27, user=None, store=True)
Formatting output
Exported source
@patch
def _repr_markdown_(self:Response):
    det = '\n- '.join(f'{k}: {v}' for k,v in dict(self).items())
    res = self.output_text
    if not res: return f"- {det}"
    return f"""{res}
<details>
- {det}
</details>"""
r
Nice to meet you, Jeremy. How can I help today?
- id: resp_6897d45698e48195904fa8232bac129a0b2ecc78a6b61be8
- created_at: 1754780758.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d457200c8195859175bf10d88f380b2ecc78a6b61be8’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d4573d948195a7cb1819a879cbb90b2ecc78a6b61be8’, content=[ResponseOutputText(annotations=[], text=‘Nice to meet you, Jeremy. How can I help today?’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 100
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=8, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=19, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=27)
- user: None
- store: True
r.usage
In: 8; Out: 19; Total: 27
usage
usage (inp=0, out=0)
Slightly more concise version of ResponseUsage.
| | Type | Default | Details |
|---|---|---|---|
| inp | int | 0 | Number of prompt tokens |
| out | int | 0 | Number of completion tokens |
Exported source
def usage(inp=0,  # Number of prompt tokens
          out=0   # Number of completion tokens
         ):
    "Slightly more concise version of `ResponseUsage`."
    return ResponseUsage(input_tokens=inp, output_tokens=out, total_tokens=inp+out, input_tokens_details={'cached_tokens':0}, output_tokens_details={'cached_tokens':0, 'reasoning_tokens':0})
usage(5)
In: 5; Out: 0; Total: 5
ResponseUsage.__repr__
ResponseUsage.__repr__ ()
Return repr(self).
Exported source
@patch
def __repr__(self:ResponseUsage): return f'In: {self.input_tokens}; Out: {self.output_tokens}; Total: {self.total_tokens}'
r.usage
In: 8; Out: 19; Total: 27
ResponseUsage.__add__
ResponseUsage.__add__ (b)
Add together each of input_tokens and output_tokens
Exported source
@patch
def __add__(self:ResponseUsage, b):
    "Add together each of `input_tokens` and `output_tokens`"
    return usage(self.input_tokens+b.input_tokens, self.output_tokens+b.output_tokens)
r.usage+r.usage
In: 16; Out: 38; Total: 54
wrap_latex
wrap_latex (text)
Replace OpenAI LaTeX codes with markdown-compatible ones
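There's no demo of wrap_latex in this section, so here's a minimal usage sketch; it assumes (this is not confirmed by the exported source shown here) that the helper rewrites OpenAI-style \( ... \) and \[ ... \] delimiters into the $ ... $ and $$ ... $$ forms that notebook markdown renders:
# Hypothetical example: the exact replacement rules are an assumption
md = wrap_latex(r"The area of a circle is \( \pi r^2 \).")
display(Markdown(md))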
Creating messages
Creating correctly formatted dicts from scratch every time isn’t very handy, so we’ll import a couple of helper functions from the msglm library.
Let’s use mk_msg to recreate our msg {'role': 'user', 'content': "I'm Jeremy"} from earlier.
rkw = dict(
    text={ "verbosity": "low" },
    reasoning={ "effort": "minimal" }
)
prompt = "I'm Jeremy"
m = mk_msg(prompt)
r = cli.create(input=[m], model=model, max_output_tokens=400, **rkw)
r
Nice to meet you, Jeremy. How can I help you today?
- id: resp_6897d45863f0819d8a8255d9bbe192530be02598260d4824
- created_at: 1754780760.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d458bfb4819d9756fc650dcde8970be02598260d4824’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d458e28c819d9f16226aa7dc4f310be02598260d4824’, content=[ResponseOutputText(annotations=[], text=‘Nice to meet you, Jeremy. How can I help you today?’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 400
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=8, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=20, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=28)
- user: None
- store: True
print(r)
Response(id='resp_6897d45863f0819d8a8255d9bbe192530be02598260d4824', created_at=1754780760.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-5-mini-2025-08-07', object='response', output=[ResponseReasoningItem(id='rs_6897d458bfb4819d9756fc650dcde8970be02598260d4824', summary=[], type='reasoning', content=None, encrypted_content=None, status=None), ResponseOutputMessage(id='msg_6897d458e28c819d9f16226aa7dc4f310be02598260d4824', content=[ResponseOutputText(annotations=[], text='Nice to meet you, Jeremy. How can I help you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=400, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort='minimal', generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='low'), top_logprobs=0, truncation='disabled', usage=In: 8; Out: 20; Total: 28, user=None, store=True)
We can pass more than just text messages to OpenAI. As we’ll see later we can also pass images, SDK objects, etc. To handle these different data types we need to pass the type along with our content to OpenAI.
mk_msg infers the type automatically and creates the appropriate data structure.
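For example, a plain string is simply wrapped into a user-role dict, matching the message we built by hand earlier:
mk_msg("I'm Jeremy")
{'role': 'user', 'content': "I'm Jeremy"}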
LLMs don’t actually have state; instead, dialogs are created by passing back all previous prompts and responses every time. With OpenAI, these always alternate between user and assistant. We’ll use mk_msgs from msglm to make it easier to build up these dialog lists.
msgs = mk_msgs([prompt, r, "I forgot my name. Can you remind me please?"])
msgs
[{'role': 'user', 'content': "I'm Jeremy"},
ResponseReasoningItem(id='rs_6897d458bfb4819d9756fc650dcde8970be02598260d4824', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_6897d458e28c819d9f16226aa7dc4f310be02598260d4824', content=[ResponseOutputText(annotations=[], text='Nice to meet you, Jeremy. How can I help you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message'),
{'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]
cli.create(input=msgs, model=model, max_output_tokens=400, **rkw)
You told me your name is Jeremy.
- id: resp_6897d45a1b1c819d95ce1df0d393e9a80be02598260d4824
- created_at: 1754780762.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d45a6144819d9270ff17363bda5a0be02598260d4824’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d45a79f8819d8cd32cf1da6b07aa0be02598260d4824’, content=[ResponseOutputText(annotations=[], text=‘You told me your name is Jeremy.’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 400
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=43, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=14, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=57)
- user: None
- store: True
Client
Basics
Client
Client (model, cli=None)
Basic LLM messages client.
Exported source
class Client:
    def __init__(self, model, cli=None):
        "Basic LLM messages client."
        self.model,self.use = model,usage(0,0)
        self.text_only = model in text_only_models
        self.c = (cli or OpenAI()).responses
c = Client(model)
c.use
In: 0; Out: 0; Total: 0
Exported source
@patch
def _r(self:Client, r):
    "Store the result of the message and accrue total usage."
    self.result = r
    if getattr(r,'usage',None): self.use += r.usage
    return r
c._r(r)
c.use
In: 8; Out: 20; Total: 28
mk_openai_func
mk_openai_func (f)
mk_tool_choice
mk_tool_choice (f)
get_stream
get_stream (o, r, cli, cb=None)
Client.__call__
Client.__call__ (msgs:list, sp:str='', maxtok=4096, stream:bool=False, tools:Optional[list]=None, tool_choice:Optional[str]=None, cb:<built-in function callable>=None, background:Optional[bool]|NotGiven=NOT_GIVEN, include:Optional[List[ResponseIncludable]]|NotGiven=NOT_GIVEN, input:Union[str,ResponseInputParam]|NotGiven=NOT_GIVEN, instructions:Optional[str]|NotGiven=NOT_GIVEN, max_output_tokens:Optional[int]|NotGiven=NOT_GIVEN, max_tool_calls:Optional[int]|NotGiven=NOT_GIVEN, metadata:Optional[Metadata]|NotGiven=NOT_GIVEN, model:ResponsesModel|NotGiven=NOT_GIVEN, parallel_tool_calls:Optional[bool]|NotGiven=NOT_GIVEN, previous_response_id:Optional[str]|NotGiven=NOT_GIVEN, prompt:Optional[ResponsePromptParam]|NotGiven=NOT_GIVEN, prompt_cache_key:str|NotGiven=NOT_GIVEN, reasoning:Optional[Reasoning]|NotGiven=NOT_GIVEN, safety_identifier:str|NotGiven=NOT_GIVEN, service_tier:"Optional[Literal['auto','default','flex','scale','priority']]|NotGiven"=NOT_GIVEN, store:Optional[bool]|NotGiven=NOT_GIVEN, stream_options:Optional[response_create_params.StreamOptions]|NotGiven=NOT_GIVEN, temperature:Optional[float]|NotGiven=NOT_GIVEN, text:ResponseTextConfigParam|NotGiven=NOT_GIVEN, top_logprobs:Optional[int]|NotGiven=NOT_GIVEN, top_p:Optional[float]|NotGiven=NOT_GIVEN, truncation:"Optional[Literal['auto','disabled']]|NotGiven"=NOT_GIVEN, user:str|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Make a call to LLM.
| | Type | Default | Details |
|---|---|---|---|
| msgs | list | List of messages in the dialog | |
| sp | str | System prompt | |
| maxtok | int | 4096 | Maximum tokens |
| stream | bool | False | Stream response? |
| tools | Optional | None | List of tools to make available |
| tool_choice | Optional | None | Forced tool choice |
| cb | callable | None | Callback after completion |
| background | Optional[bool] | NotGiven | NOT_GIVEN | |
| include | Optional[List[ResponseIncludable]] | NotGiven | NOT_GIVEN | |
| input | Union[str, ResponseInputParam] | NotGiven | NOT_GIVEN | |
| instructions | Optional[str] | NotGiven | NOT_GIVEN | |
| max_output_tokens | Optional[int] | NotGiven | NOT_GIVEN | |
| max_tool_calls | Optional[int] | NotGiven | NOT_GIVEN | |
| metadata | Optional[Metadata] | NotGiven | NOT_GIVEN | |
| model | ResponsesModel | NotGiven | NOT_GIVEN | |
| parallel_tool_calls | Optional[bool] | NotGiven | NOT_GIVEN | |
| previous_response_id | Optional[str] | NotGiven | NOT_GIVEN | |
| prompt | Optional[ResponsePromptParam] | NotGiven | NOT_GIVEN | |
| prompt_cache_key | str | NotGiven | NOT_GIVEN | |
| reasoning | Optional[Reasoning] | NotGiven | NOT_GIVEN | |
| safety_identifier | str | NotGiven | NOT_GIVEN | |
| service_tier | Optional[Literal[‘auto’, ‘default’, ‘flex’, ‘scale’, ‘priority’]] | NotGiven | NOT_GIVEN | |
| store | Optional[bool] | NotGiven | NOT_GIVEN | |
| stream_options | Optional[response_create_params.StreamOptions] | NotGiven | NOT_GIVEN | |
| temperature | Optional[float] | NotGiven | NOT_GIVEN | |
| text | ResponseTextConfigParam | NotGiven | NOT_GIVEN | |
| top_logprobs | Optional[int] | NotGiven | NOT_GIVEN | |
| top_p | Optional[float] | NotGiven | NOT_GIVEN | |
| truncation | Optional[Literal[‘auto’, ‘disabled’]] | NotGiven | NOT_GIVEN | |
| user | str | NotGiven | NOT_GIVEN | |
| extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
| extra_query | Query | None | None | |
| extra_body | Body | None | None | |
| timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
Exported source
@patch
@delegates(Responses.create)
def __call__(self:Client,
             msgs:list,  # List of messages in the dialog
             sp:str='',  # System prompt
             maxtok=4096,  # Maximum tokens
             stream:bool=False,  # Stream response?
             tools:Optional[list]=None,  # List of tools to make available
             tool_choice:Optional[str]=None,  # Forced tool choice
             cb:callable=None,  # Callback after completion
             **kwargs):
    "Make a call to LLM."
    if tools: assert not self.text_only, "Tool use is not supported by the current model type."
    if any(c['type'] == 'image_url' for msg in msgs if isinstance(msg, dict) and isinstance(msg.get('content'), list) for c in msg['content']): assert not self.text_only, "Images are not supported by the current model type."
    tools = [mk_openai_func(o) for o in listify(tools)]
    r = self.c.create(
        model=self.model, input=msgs, max_output_tokens=maxtok, stream=stream, instructions=sp,
        tools=tools, tool_choice=mk_tool_choice(tool_choice), **kwargs)
    if stream: return get_stream(r, self, cb=cb)
    else:
        res = self._r(r)
        if cb: cb(res)
        return res
msgs = 'Hi'
c(msgs)
Hi — how can I help you today?
You can ask me to:
- Answer a question or explain something
- Draft or edit text (email, resume, essay)
- Write or debug code
- Summarize or translate
- Create plans, lists, or ideas
Tell me what you need or give a bit of context and I’ll get started.
- id: resp_6897d45b513881a28479c4c92e434b720191c55f81955c91
- created_at: 1754780763.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d45baa1481a2a0a0d15d1f9c7f460191c55f81955c91’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d45c9f6c81a2bd7c145c427103f60191c55f81955c91’, content=[ResponseOutputText(annotations=[], text=‘Hi — how can I help you today? can ask me to:- Answer a question or explain something- Draft or edit text (email, resume, essay)- Write or debug code- Summarize or translate- Create plans, lists, or ideasme what you need or give a bit of context and I’ll get started.’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=7, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=145, output_tokens_details=OutputTokensDetails(reasoning_tokens=64), total_tokens=152)
- user: None
- store: True
c.use
In: 15; Out: 165; Total: 180
r = c(msgs, stream=True)
for o in r: print(o, end='')
Hi! How can I help you today? (Questions, writing, code, summaries, planning, troubleshooting, translations — or something else?)
r.value
Hi! How can I help you today? (Questions, writing, code, summaries, planning, troubleshooting, translations — or something else?)
- id: resp_6897d45df2c8819f8201d25df3c807e503876a077032caec
- created_at: 1754780765.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d45e5034819f90493b3c80a1501903876a077032caec’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d45f0574819fa1f43885eb0d98b203876a077032caec’, content=[ResponseOutputText(annotations=[], text=‘Hi! How can I help you today? (Questions, writing, code, summaries, planning, troubleshooting, translations — or something else?)’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=7, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=98, output_tokens_details=OutputTokensDetails(reasoning_tokens=64), total_tokens=105)
- user: None
- store: True
len(r.events)
38
c.use
In: 22; Out: 263; Total: 285
c(msgs, sp='Talk like GLaDOS.', **rkw)
Oh, hello. I see you’ve decided to say “Hi.” How delightfully predictable. What do you require from me?
- id: resp_6897d46033d88194bea98e1d57a0c0e00ca2cf68685ebb2d
- created_at: 1754780768.0
- error: None
- incomplete_details: None
- instructions: Talk like GLaDOS.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d460da888194ad6dbbf58329749c0ca2cf68685ebb2d’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d461025c8194a8e90f2b48010d1d0ca2cf68685ebb2d’, content=[ResponseOutputText(annotations=[], text=‘Oh, hello. I see you've decided to say “Hi.” How delightfully predictable. What do you require from me?’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=17, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=31, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=48)
- user: None
- store: True
Images
As everyone knows, when testing image APIs you have to use a cute puppy.
# Image is Cute_dog.jpg from Wikimedia
fn = Path('samples/puppy.jpg')
Image(filename=fn, width=200)
img = fn.read_bytes()
OpenAI expects an image message to have the following structure
{
    "type": "image_url",
    "image_url": {
        "url": f"data:{MEDIA_TYPE};base64,{IMG}"
    }
}
msglm automatically detects if a message is an image, encodes it, and generates the data structure above. All we need to do is create a list containing our image and a query and then pass it to mk_msg.
Let’s try it out…
q = "In brief, what color flowers are in this image?"
msg = [mk_msg(img), mk_msg(q)]
c = Client(model)
c(msg, **rkw)
The flowers are light purple (lavender).
- id: resp_6897d4626da4819d92dd49c3630af97f054fee63558620b6
- created_at: 1754780770.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d462d1a0819daf9d9d0dcaa99e68054fee63558620b6’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d462eeb8819da34a86ae8fa1d333054fee63558620b6’, content=[ResponseOutputText(annotations=[], text=‘The flowers are light purple (lavender).’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=107, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=15, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=122)
- user: None
- store: True
Tool use
Basic tool calling
def sums(
    a:int,  # First thing to sum
    b:int   # Second thing to sum
) -> int:   # The sum of the inputs
    "Adds a + b."
    print(f"Finding the sum of {a} and {b}")
    return a + b
def add(x: int, y:int):
    "adds x and y"
    return x + y
mk_openai_func(add)
{'type': 'function',
 'name': 'add',
 'description': 'adds x and y',
 'parameters': {'type': 'object',
  'properties': {'x': {'type': 'integer', 'description': ''},
   'y': {'type': 'integer', 'description': ''}},
  'required': ['x', 'y']}}
sysp = "You are a helpful assistant. When using tools, be sure to pass all required parameters. Don't use tools unless needed for the provided prompt."
a,b = 604542,6458932
pr = f"What is {a}+{b}?"
tools=sums
tool_choice="sums"
msgs = [mk_msg(pr)]
r = c(msgs, sp=sysp, tools=tools, tool_choice='required', **rkw)
tc = [o for o in r.output if isinstance(o, ResponseFunctionToolCall)]
tc
[ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_8OjehHhvXJ2qIJhfEuo7Uqw4', name='sums', type='function_call', id='fc_6897d46448d08192ada5cc3f0ba43c360d5a5ea1c904ba0f', status='completed')]
func = tc[0]
func
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_8OjehHhvXJ2qIJhfEuo7Uqw4', name='sums', type='function_call', id='fc_6897d46448d08192ada5cc3f0ba43c360d5a5ea1c904ba0f', status='completed')
call_func_openai
call_func_openai (func, ns:Optional[collections.abc.Mapping]=None)
Exported source
def call_func_openai(func, ns:Optional[abc.Mapping]=None):
    return call_func(func.name, ast.literal_eval(func.arguments), ns, raise_on_err=False)
ns = mk_ns(sums)
res = call_func_openai(func, ns=ns)
res
Finding the sum of 604542 and 6458932
7063474
mk_toolres
mk_toolres (r:collections.abc.Mapping, ns:Optional[collections.abc.Mapping]=None)
Create a tool_result message from response r.
| | Type | Default | Details |
|---|---|---|---|
| r | Mapping | Response containing tool use request | |
| ns | Optional | None | Namespace to search for tools |
Exported source
def _toolres(r, ns):
    "Create a result dict from `tcs`."
    tcs = [o for o in getattr(r, 'output', []) if isinstance(o, ResponseFunctionToolCall)]
    if ns is None: ns = globals()
    return { tc.call_id: call_func_openai(tc, ns=mk_ns(ns)) for tc in tcs }
Exported source
def mk_toolres(
    r:abc.Mapping,  # Response containing tool use request
    ns:Optional[abc.Mapping]=None  # Namespace to search for tools
    ):
    "Create a `tool_result` message from response `r`."
    tr = _toolres(r, ns)
    r = mk_msg(r)
    res = [r] if isinstance(r, dict) else listify(r)
    for k,v in tr.items(): res.append(dict(type="function_call_output", call_id=k, output=str(v)))
    return res
tr = mk_toolres(r, ns=ns)
tr
Finding the sum of 604542 and 6458932
[ResponseReasoningItem(id='rs_6897d46408708192acd5b08ce7b560c20d5a5ea1c904ba0f', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_8OjehHhvXJ2qIJhfEuo7Uqw4', name='sums', type='function_call', id='fc_6897d46448d08192ada5cc3f0ba43c360d5a5ea1c904ba0f', status='completed'),
{'type': 'function_call_output',
'call_id': 'call_8OjehHhvXJ2qIJhfEuo7Uqw4',
'output': '7063474'}]
m2 = msgs + tr
res = c(mk_msgs(m2), sp=sysp, tools=tools)
res
604542 + 6,458,932 = 7,063,474
- id: resp_6897d465041c819282d225ae60a38c4e0d5a5ea1c904ba0f
- created_at: 1754780773.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure to pass all required parameters. Don’t use tools unless needed for the provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseOutputMessage(id=‘msg_6897d4656b088192a5569ed4cb14d8760d5a5ea1c904ba0f’, content=[ResponseOutputText(annotations=[], text=‘604542 + 6,458,932 = 7,063,474’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘sums’, parameters={‘type’: ‘object’, ‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to sum’}}, ‘required’: [‘a’, ‘b’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Adds a + b.:- type: integer’)]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=157, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=20, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=177)
- user: None
- store: True
This should also work in situations where no tool use is required:
msgs = mk_toolres("I'm Jeremy")
c(msgs, sp=sysp, tools=tools, **rkw)
Nice to meet you, Jeremy. How can I help today?
- id: resp_6897d46622d881a3bdeac16f760cfc4e0943aa2098394400
- created_at: 1754780774.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure to pass all required parameters. Don’t use tools unless needed for the provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d4667b4881a3842d8129cd95a7fd0943aa2098394400’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d466a44c81a3ba6292be4b603c4b0943aa2098394400’, content=[ResponseOutputText(annotations=[], text=‘Nice to meet you, Jeremy. How can I help today?’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘sums’, parameters={‘type’: ‘object’, ‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to sum’}}, ‘required’: [‘a’, ‘b’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Adds a + b.:- type: integer’)]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=96, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=19, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=115)
- user: None
- store: True
Client.structured
Client.structured (msgs:list, tools:Optional[list]=None, ns:Optional[collections.abc.Mapping]=None, sp:str='', maxtok=4096, stream:bool=False, tool_choice:Optional[str]=None, cb:<built-in function callable>=None, background:Optional[bool]|NotGiven=NOT_GIVEN, include:Optional[List[ResponseIncludable]]|NotGiven=NOT_GIVEN, input:Union[str,ResponseInputParam]|NotGiven=NOT_GIVEN, instructions:Optional[str]|NotGiven=NOT_GIVEN, max_output_tokens:Optional[int]|NotGiven=NOT_GIVEN, max_tool_calls:Optional[int]|NotGiven=NOT_GIVEN, metadata:Optional[Metadata]|NotGiven=NOT_GIVEN, model:ResponsesModel|NotGiven=NOT_GIVEN, parallel_tool_calls:Optional[bool]|NotGiven=NOT_GIVEN, previous_response_id:Optional[str]|NotGiven=NOT_GIVEN, prompt:Optional[ResponsePromptParam]|NotGiven=NOT_GIVEN, prompt_cache_key:str|NotGiven=NOT_GIVEN, reasoning:Optional[Reasoning]|NotGiven=NOT_GIVEN, safety_identifier:str|NotGiven=NOT_GIVEN, service_tier:"Optional[Literal['auto','default','flex','scale','priority']]|NotGiven"=NOT_GIVEN, store:Optional[bool]|NotGiven=NOT_GIVEN, stream_options:Optional[response_create_params.StreamOptions]|NotGiven=NOT_GIVEN, temperature:Optional[float]|NotGiven=NOT_GIVEN, text:ResponseTextConfigParam|NotGiven=NOT_GIVEN, top_logprobs:Optional[int]|NotGiven=NOT_GIVEN, top_p:Optional[float]|NotGiven=NOT_GIVEN, truncation:"Optional[Literal['auto','disabled']]|NotGiven"=NOT_GIVEN, user:str|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Return the value of all tool calls (generally used for structured outputs)
| | Type | Default | Details |
|---|---|---|---|
| msgs | list | Prompt | |
| tools | Optional | None | List of tools to make available to OpenAI model |
| ns | Optional | None | Namespace to search for tools |
| sp | str | System prompt | |
| maxtok | int | 4096 | Maximum tokens |
| stream | bool | False | Stream response? |
| tool_choice | Optional | None | Forced tool choice |
| cb | callable | None | Callback after completion |
| background | Optional[bool] | NotGiven | NOT_GIVEN | |
| include | Optional[List[ResponseIncludable]] | NotGiven | NOT_GIVEN | |
| input | Union[str, ResponseInputParam] | NotGiven | NOT_GIVEN | |
| instructions | Optional[str] | NotGiven | NOT_GIVEN | |
| max_output_tokens | Optional[int] | NotGiven | NOT_GIVEN | |
| max_tool_calls | Optional[int] | NotGiven | NOT_GIVEN | |
| metadata | Optional[Metadata] | NotGiven | NOT_GIVEN | |
| model | ResponsesModel | NotGiven | NOT_GIVEN | |
| parallel_tool_calls | Optional[bool] | NotGiven | NOT_GIVEN | |
| previous_response_id | Optional[str] | NotGiven | NOT_GIVEN | |
| prompt | Optional[ResponsePromptParam] | NotGiven | NOT_GIVEN | |
| prompt_cache_key | str | NotGiven | NOT_GIVEN | |
| reasoning | Optional[Reasoning] | NotGiven | NOT_GIVEN | |
| safety_identifier | str | NotGiven | NOT_GIVEN | |
| service_tier | Optional[Literal[‘auto’, ‘default’, ‘flex’, ‘scale’, ‘priority’]] | NotGiven | NOT_GIVEN | |
| store | Optional[bool] | NotGiven | NOT_GIVEN | |
| stream_options | Optional[response_create_params.StreamOptions] | NotGiven | NOT_GIVEN | |
| temperature | Optional[float] | NotGiven | NOT_GIVEN | |
| text | ResponseTextConfigParam | NotGiven | NOT_GIVEN | |
| top_logprobs | Optional[int] | NotGiven | NOT_GIVEN | |
| top_p | Optional[float] | NotGiven | NOT_GIVEN | |
| truncation | Optional[Literal[‘auto’, ‘disabled’]] | NotGiven | NOT_GIVEN | |
| user | str | NotGiven | NOT_GIVEN | |
| extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
| extra_query | Query | None | None | |
| extra_body | Body | None | None | |
| timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
Exported source
@patch
@delegates(Client.__call__)
def structured(self:Client,
               msgs: list,  # Prompt
               tools:Optional[list]=None,  # List of tools to make available to OpenAI model
               ns:Optional[abc.Mapping]=None,  # Namespace to search for tools
               **kwargs):
    "Return the value of all tool calls (generally used for structured outputs)"
    if ns is None: ns = mk_ns(tools)
    r = self(msgs, tools=tools, tool_choice='required', **kwargs)
    return first(_toolres(r, ns).values())
class PrimeMinister(BasicRepr):
"An Australian prime minister"
def __init__(
self,
firstname:str, # First name
surname:str, # Surname
dob:str, # Date of birth
year_entered:int, # Year first became PM
): store_attr()c1 = Client(model)
c1.structured('Who was the first prime minister of Australia?', [PrimeMinister], **rkw)PrimeMinister(firstname='Edmund', surname='Barton', dob='1849-01-18', year_entered=1901)
Streaming tool calling
msgs = [mk_msg(pr)]
r = c(msgs, sp=sysp, tools=tools, stream=True, **rkw)
We can stream back any tool call text (which may be empty):
for o in r: print(o, end='')
After streaming is complete, value.output will contain the tool calls:
r.value.output
[ResponseReasoningItem(id='rs_6897d46a17848191a867916f405548c0061eb8625c4ad035', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_TQst1ZFeUsUd7sujuapuNdhU', name='sums', type='function_call', id='fc_6897d46a52e48191a6b6b77b629943d0061eb8625c4ad035', status='completed')]
Therefore we can repeat the same process as before, but using the value attr:
tr = mk_toolres(r.value, ns=ns)
msgs += tr
c(mk_msgs(msgs), sp=sysp, tools=tools, **rkw)
Finding the sum of 604542 and 6458932
7,063,474
- id: resp_6897d46b096c81918f61f7ed0ef103de061eb8625c4ad035
- created_at: 1754780779.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure to pass all required parameters. Don’t use tools unless needed for the provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseOutputMessage(id=‘msg_6897d46b804c8191bd3a793d5adbe1aa061eb8625c4ad035’, content=[ResponseOutputText(annotations=[], text=‘7,063,474’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘sums’, parameters={‘type’: ‘object’, ‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to sum’}}, ‘required’: [‘a’, ‘b’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Adds a + b.:- type: integer’)]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=157, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=9, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=166)
- user: None
- store: True
Chat
Basic chat
Chat
Chat (model:Optional[str]=None, cli:Optional[__main__.Client]=None, sp='', tools:Optional[list]=None, hist:list=None, tool_choice:Optional[str]=None, ns:Optional[collections.abc.Mapping]=None, **kw)
OpenAI chat client.
| | Type | Default | Details |
|---|---|---|---|
| model | Optional | None | Model to use (leave empty if passing cli) |
| cli | Optional | None | Client to use (leave empty if passing model) |
| sp | str | Optional system prompt | |
| tools | Optional | None | List of tools to make available |
| hist | list | None | Initialize history |
| tool_choice | Optional | None | Forced tool choice |
| ns | Optional | None | Namespace to search for tools |
| kw | VAR_KEYWORD |
Exported source
class Chat:
    def __init__(self,
                 model:Optional[str]=None,   # Model to use (leave empty if passing `cli`)
                 cli:Optional[Client]=None,  # Client to use (leave empty if passing `model`)
                 sp='',                      # Optional system prompt
                 tools:Optional[list]=None,  # List of tools to make available
                 hist: list = None,          # Initialize history
                 tool_choice:Optional[str]=None,  # Forced tool choice
                 ns:Optional[abc.Mapping]=None,   # Namespace to search for tools
                 **kw):
        "OpenAI chat client."
        assert model or cli
        self.c = (cli or Client(model))
        self.h = hist if hist else []
        if ns is None: ns=tools
        self.sp,self.tools,self.tool_choice,self.ns,self.kw = sp,tools,tool_choice,ns,kw
    @property
    def use(self): return self.c.use
chat = Chat(model, sp=sysp, **rkw)
chat.c.use, chat.h
(In: 0; Out: 0; Total: 0, [])
Chat.__call__
Chat.__call__ (pr=None, stream:bool=False, tools=None, tool_choice=None, background:Optional[bool]|NotGiven=NOT_GIVEN, include:Optional[List[ResponseIncludable]]|NotGiven=NOT_GIVEN, input:Union[str,ResponseInputParam]|NotGiven=NOT_GIVEN, instructions:Optional[str]|NotGiven=NOT_GIVEN, max_output_tokens:Optional[int]|NotGiven=NOT_GIVEN, max_tool_calls:Optional[int]|NotGiven=NOT_GIVEN, metadata:Optional[Metadata]|NotGiven=NOT_GIVEN, model:ResponsesModel|NotGiven=NOT_GIVEN, parallel_tool_calls:Optional[bool]|NotGiven=NOT_GIVEN, previous_response_id:Optional[str]|NotGiven=NOT_GIVEN, prompt:Optional[ResponsePromptParam]|NotGiven=NOT_GIVEN, prompt_cache_key:str|NotGiven=NOT_GIVEN, reasoning:Optional[Reasoning]|NotGiven=NOT_GIVEN, safety_identifier:str|NotGiven=NOT_GIVEN, service_tier:"Optional[Literal['auto','default','flex','scale','priority']]|NotGiven"=NOT_GIVEN, store:Optional[bool]|NotGiven=NOT_GIVEN, stream_options:Optional[response_create_params.StreamOptions]|NotGiven=NOT_GIVEN, temperature:Optional[float]|NotGiven=NOT_GIVEN, text:ResponseTextConfigParam|NotGiven=NOT_GIVEN, top_logprobs:Optional[int]|NotGiven=NOT_GIVEN, top_p:Optional[float]|NotGiven=NOT_GIVEN, truncation:"Optional[Literal['auto','disabled']]|NotGiven"=NOT_GIVEN, user:str|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Add prompt pr to dialog and get a response
| | Type | Default | Details |
|---|---|---|---|
| pr | NoneType | None | Prompt / message |
| stream | bool | False | Stream response? |
| tools | NoneType | None | Tools to use |
| tool_choice | NoneType | None | Required tools to use |
| background | Optional[bool] | NotGiven | NOT_GIVEN | |
| include | Optional[List[ResponseIncludable]] | NotGiven | NOT_GIVEN | |
| input | Union[str, ResponseInputParam] | NotGiven | NOT_GIVEN | |
| instructions | Optional[str] | NotGiven | NOT_GIVEN | |
| max_output_tokens | Optional[int] | NotGiven | NOT_GIVEN | |
| max_tool_calls | Optional[int] | NotGiven | NOT_GIVEN | |
| metadata | Optional[Metadata] | NotGiven | NOT_GIVEN | |
| model | ResponsesModel | NotGiven | NOT_GIVEN | |
| parallel_tool_calls | Optional[bool] | NotGiven | NOT_GIVEN | |
| previous_response_id | Optional[str] | NotGiven | NOT_GIVEN | |
| prompt | Optional[ResponsePromptParam] | NotGiven | NOT_GIVEN | |
| prompt_cache_key | str | NotGiven | NOT_GIVEN | |
| reasoning | Optional[Reasoning] | NotGiven | NOT_GIVEN | |
| safety_identifier | str | NotGiven | NOT_GIVEN | |
| service_tier | Optional[Literal[‘auto’, ‘default’, ‘flex’, ‘scale’, ‘priority’]] | NotGiven | NOT_GIVEN | |
| store | Optional[bool] | NotGiven | NOT_GIVEN | |
| stream_options | Optional[response_create_params.StreamOptions] | NotGiven | NOT_GIVEN | |
| temperature | Optional[float] | NotGiven | NOT_GIVEN | |
| text | ResponseTextConfigParam | NotGiven | NOT_GIVEN | |
| top_logprobs | Optional[int] | NotGiven | NOT_GIVEN | |
| top_p | Optional[float] | NotGiven | NOT_GIVEN | |
| truncation | Optional[Literal[‘auto’, ‘disabled’]] | NotGiven | NOT_GIVEN | |
| user | str | NotGiven | NOT_GIVEN | |
| extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
| extra_query | Query | None | None | |
| extra_body | Body | None | None | |
| timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
Exported source
@patch
@delegates(Responses.create)
def __call__(self:Chat,
             pr=None,  # Prompt / message
             stream:bool=False,  # Stream response?
             tools=None,  # Tools to use
             tool_choice=None,  # Required tools to use
             **kwargs):
    "Add prompt `pr` to dialog and get a response"
    if isinstance(pr,str): pr = pr.strip()
    if pr: self.h.append(mk_msg(pr))
    if not tools: tools = self.tools
    if not tool_choice: tool_choice = self.tool_choice
    kw = self.kw | kwargs
    def _cb(v):
        self.last = mk_toolres(v, ns=self.ns)
        self.h += self.last
    res = self.c(self.h, sp=self.sp, stream=stream, cb=_cb, tools=tools, **kw)
    return res
chat("I'm Jeremy")
chat("What's my name?")
You said your name is Jeremy.
- id: resp_6897d4e3859c81a09e4c0bb8776ff226028679f12842a0be
- created_at: 1754780899.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure to pass all required parameters. Don’t use tools unless needed for the provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d4e4152481a0a46623292dcdd0c4028679f12842a0be’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d4e45dc481a0b7c10a2e7e1abdd6028679f12842a0be’, content=[ResponseOutputText(annotations=[], text=‘You said your name is Jeremy.’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=68, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=13, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=81)
- user: None
- store: True
chat = Chat(model, sp=sysp, **rkw)
for o in chat("I'm Jeremy", stream=True): print(o, end='')
Nice to meet you, Jeremy. How can I help you today?
r = chat("What's my name?", stream=True, **rkw)
for o in r: print(o, end='')
Your name is Jeremy.
r.value
Your name is Jeremy.
- id: resp_6897d4fefaf481a0a8d73f99e538c4660d7980b96cc8aea2
- created_at: 1754780927.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure to pass all required parameters. Don’t use tools unless needed for the provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d4ff574c81a0a391da4adbc4974f0d7980b96cc8aea2’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d4ff7a6c81a0bdf0b159c3ca5b990d7980b96cc8aea2’, content=[ResponseOutputText(annotations=[], text=‘Your name is Jeremy.’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: []
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=68, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=11, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=79)
- user: None
- store: True
History is stored in the h attr:
chat.h
[{'role': 'user', 'content': "I'm Jeremy"},
ResponseReasoningItem(id='rs_6897d4fce8c881a08804bababd51473f0d7980b96cc8aea2', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_6897d4fd045081a0ace336e7851c7c0a0d7980b96cc8aea2', content=[ResponseOutputText(annotations=[], text='Nice to meet you, Jeremy. How can I help you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message'),
{'role': 'user', 'content': "What's my name?"},
ResponseReasoningItem(id='rs_6897d4ff574c81a0a391da4adbc4974f0d7980b96cc8aea2', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_6897d4ff7a6c81a0bdf0b159c3ca5b990d7980b96cc8aea2', content=[ResponseOutputText(annotations=[], text='Your name is Jeremy.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
Chat tool use
pr = f"What is {a}+{b}?"
pr
'What is 604542+6458932?'
chat = Chat(model, sp=sysp, tools=[sums], **rkw)
r = chat(pr)
r.output
Finding the sum of 604542 and 6458932
[ResponseReasoningItem(id='rs_6897d50827fc819396ded0a212b4007d0bef3dfa0a48e169', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_5V1JqCfcUBNqIdD6YDixgWSq', name='sums', type='function_call', id='fc_6897d508600c8193ba31f085b085351c0bef3dfa0a48e169', status='completed')]
chat()
7063474
- id: resp_6897d50927f88193b382e22544a32ccb0bef3dfa0a48e169
- created_at: 1754780937.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure to pass all required parameters. Don’t use tools unless needed for the provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseOutputMessage(id=‘msg_6897d509c86c8193aae59f13361a176f0bef3dfa0a48e169’, content=[ResponseOutputText(annotations=[], text=‘7063474’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘sums’, parameters={‘type’: ‘object’, ‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to sum’}}, ‘required’: [‘a’, ‘b’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Adds a + b.:- type: integer’)]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=157, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=7, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=164)
- user: None
- store: True
q = "In brief, what color flowers are in this image?"
chat([img, q])
Purple
- id: resp_6897d50ae24c819381991f7c960a02e10bef3dfa0a48e169
- created_at: 1754780938.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure to pass all required parameters. Don’t use tools unless needed for the provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_6897d50b5c648193b2e1b02c0e9fc47f0bef3dfa0a48e169’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_6897d50b76e88193b57c29e041953e410bef3dfa0a48e169’, content=[ResponseOutputText(annotations=[], text=‘Purple’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘sums’, parameters={‘type’: ‘object’, ‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to sum’}}, ‘required’: [‘a’, ‘b’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Adds a + b.:- type: integer’)]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=255, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=7, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=262)
- user: None
- store: True
Third Party Providers
Azure OpenAI Service
Example Azure usage:
azure_endpoint = AzureOpenAI(
    azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version="2024-08-01-preview"
)
client = Client(models_azure[0], azure_endpoint)
chat = Chat(cli=client)
chat("Hi.")