from IPython.display import display, Markdown, clear_output
from pprint import pprint

# Tool loop

' '.join(models)  # notebook echo: the list of available model names
# Pick the first model whose name contains 'mini' (cheap + fast for the demo).
model = first(m for m in models if 'mini' in m)
model  # -> 'gpt-5-mini'

# Sample Data
def _get_orders_customers():
    "Build the sample `orders` and `customers` dicts used by the tool-calling demos."
    orders = {
        "O1": dict(id="O1", product="Widget A", quantity=2, price=19.99, status="Shipped"),
        "O2": dict(id="O2", product="Gadget B", quantity=1, price=49.99, status="Processing"),
        "O3": dict(id="O3", product="Gadget B", quantity=2, price=49.99, status="Shipped")}
    customers = {
        # NB: customers hold references to the same order dicts, so cancelling an
        # order is visible through the customer record too.
        "C1": dict(name="John Doe", email="john@example.com", phone="123-456-7890",
                   orders=[orders['O1'], orders['O2']]),
        "C2": dict(name="Jane Smith", email="jane@example.com", phone="987-654-3210",
                   orders=[orders['O3']])
    }
    return orders, customers

orders, customers = _get_orders_customers()

def get_customer_info(
    customer_id:str # ID of the customer
    ): # Customer's name, email, phone number, and list of orders
    "Retrieves a customer's information and their orders based on the customer ID"
    print(f'- Retrieving customer {customer_id}')
    return customers.get(customer_id, "Customer not found")
def get_order_details(
    order_id:str # ID of the order
    ): # Order's ID, product name, quantity, price, and order status
    "Retrieves the details of a specific order based on the order ID"
    print(f'- Retrieving order {order_id}')
    # Unknown IDs return a sentinel string rather than raising, so the model
    # gets a usable tool result either way.
    return orders.get(order_id, "Order not found")
def cancel_order(
    order_id:str # ID of the order to cancel
    )->bool: # True if the cancellation is successful
    "Cancels an order based on the provided order ID"
    print(f'- Cancelling order {order_id}')
    if order_id not in orders: return False
    orders[order_id]['status'] = 'Cancelled'
    return True

# Keep demo responses short and cheap.
chatkw = dict(
    text={ "verbosity": "low" },
    reasoning={ "effort": "minimal" }
)

tools = [get_customer_info, get_order_details, cancel_order]
chat = Chat(model, tools=tools, **chatkw)
r = chat('Hi.')
rHello! How can I help you today?
- id: resp_0610e51711a17c8b006943fce162a0819396584fc43c687fba
- created_at: 1766063329.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_0610e51711a17c8b006943fce1b7e08193a742dd433b62cbcb’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_0610e51711a17c8b006943fce1dfc081939b2f726e067bd88c’, content=[ResponseOutputText(annotations=[], text=‘Hello! How can I help you today?’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘get_customer_info’, parameters={‘type’: ‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’, ‘description’: ‘ID of the customer’}}, ‘required’: [‘customer_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=“Retrieves a customer’s information and their orders based on the customer ID”), FunctionTool(name=‘get_order_details’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Retrieves the details of a specific order based on the order ID’), FunctionTool(name=‘cancel_order’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Cancels an order based on the provided order ID:- type: boolean’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=136, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=15, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=151)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
r = chat('Can you tell me the email address for customer C2?')
r.output- Retrieving customer C2
[ResponseReasoningItem(id='rs_0610e51711a17c8b006943fce2fb408193a0298d1762a4b19e', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"customer_id":"C2"}', call_id='call_1iQyH2m7zBT6AxtxpVfgOARS', name='get_customer_info', type='function_call', id='fc_0610e51711a17c8b006943fce34518819385d6cbeb29b1f63f', status='completed')]
r = chat()
r.output[ResponseOutputMessage(id='msg_0610e51711a17c8b006943fce4a59c8193b7914069038e07b0', content=[ResponseOutputText(annotations=[], text='The email address for customer C2 is jane@example.com.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
chat = Chat(model, tools=tools)
r = chat('Please cancel all orders for customer C1 for me.')
r.output- Retrieving customer C1
[ResponseReasoningItem(id='rs_067a83d17b75c4ea006943fce594008194904f7f4b1710ab11', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_5MXLQEl4LdzFyRHIR3DynD9I', name='get_customer_info', type='function_call', id='fc_067a83d17b75c4ea006943fce6c9848194b442cb73ffe02035', status='completed')]
r = chat()
r.output- Cancelling order O1
- Cancelling order O2
[ResponseReasoningItem(id='rs_067a83d17b75c4ea006943fce7b6648194aa8afa477dc81c42', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_y3LWRVEn8X80nCQf50EOVOu5', name='cancel_order', type='function_call', id='fc_067a83d17b75c4ea006943fce992f88194b8de77d411f78e44', status='completed'),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_oPiTCOxwXjo8uac0mzXK3rEv', name='cancel_order', type='function_call', id='fc_067a83d17b75c4ea006943fce9f498819495f077de035b7454', status='completed')]
toolloop implementation
Chat.toolloop
Chat.toolloop (pr, max_steps=10, cont_func:<built- infunctioncallable>=<function noop>, final_prompt='You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.', stream:bool=False, tools=None, tool_choice=None, background:Optional[bool]|Omit=<openai.Omit object at 0x7f67d1e4c850>, conversation:Optional[response_create_par ams.Conversation]|Omit=<openai.Omit object at 0x7f67d1e4c850>, include:Optional[List[ResponseIncludable] ]|Omit=<openai.Omit object at 0x7f67d1e4c850>, input:Union[str,ResponseInputParam]|Omit=<openai.Omit object at 0x7f67d1e4c850>, instructions:Optional[str]|Omit=<openai.Omit object at 0x7f67d1e4c850>, max_output_tokens:Optional[int]|Omit=<openai.Omit object at 0x7f67d1e4c850>, max_tool_calls:Optional[int]|Omit=<openai.Omit object at 0x7f67d1e4c850>, metadata:Optional[Metadata]|Omit=<openai.Omit object at 0x7f67d1e4c850>, model:ResponsesModel|Omit=<openai.Omit object at 0x7f67d1e4c850>, parallel_tool_calls:Optional[bool]|Omit=<openai.Omit object at 0x7f67d1e4c850>, previous_response_id:Optional[str]|Omit=<openai.Omit object at 0x7f67d1e4c850>, prompt:Optional[ResponsePromptParam]|Omit=<openai.Omit object at 0x7f67d1e4c850>, prompt_cache_key:str|Omit=<openai.Omit object at 0x7f67d1e4c850>, prompt_cache_retention:"Optional[Literal['in- memory','24h']]|Omit"=<openai.Omit object at 0x7f67d1e4c850>, reasoning:Optional[Reasoning]|Omit=<openai.Omit object at 0x7f67d1e4c850>, safety_identifier:str|Omit=<openai.Omit object at 0x7f67d1e4c850>, service_tier:"Optional[Literal[ 'auto','default','flex','scale','priority']]|Omit"=<openai .Omit object at 0x7f67d1e4c850>, store:Optional[bool]|Omit=<openai.Omit object at 0x7f67d1e4c850>, stream_options:Optional[response_create_p arams.StreamOptions]|Omit=<openai.Omit object at 0x7f67d1e4c850>, temperature:Optional[float]|Omit=<openai.Omit object at 0x7f67d1e4c850>, 
text:ResponseTextConfigParam|Omit=<openai.Omit object at 0x7f67d1e4c850>, top_logprobs:Optional[int]|Omit=<openai.Omit object at 0x7f67d1e4c850>, top_p:Optional[float]|Omit=<openai.Omit object at 0x7f67d1e4c850>, truncation:"Optional[Literal['a uto','disabled']]|Omit"=<openai.Omit object at 0x7f67d1e4c850>, user:str|Omit=<openai.Omit object at 0x7f67d1e4c850>, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Add prompt pr to dialog and get a response from the model, automatically following up with tool-call messages
| Type | Default | Details | |
|---|---|---|---|
| pr | Prompt to pass to the model | ||
| max_steps | int | 10 | Maximum number of tool requests to loop through |
| cont_func | callable | noop | Function that stops loop if returns False |
| final_prompt | str | You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed. | Prompt to add if last message is a tool call |
| stream | bool | False | Stream response? |
| tools | NoneType | None | Tools to use |
| tool_choice | NoneType | None | Required tools to use |
| background | Optional[bool] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| conversation | Optional[response_create_params.Conversation] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| include | Optional[List[ResponseIncludable]] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| input | Union[str, ResponseInputParam] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| instructions | Optional[str] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| max_output_tokens | Optional[int] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| max_tool_calls | Optional[int] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| metadata | Optional[Metadata] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| model | ResponsesModel | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| parallel_tool_calls | Optional[bool] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| previous_response_id | Optional[str] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| prompt | Optional[ResponsePromptParam] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| prompt_cache_key | str | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| prompt_cache_retention | Optional[Literal[‘in-memory’, ‘24h’]] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| reasoning | Optional[Reasoning] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| safety_identifier | str | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| service_tier | Optional[Literal[‘auto’, ‘default’, ‘flex’, ‘scale’, ‘priority’]] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| store | Optional[bool] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| stream_options | Optional[response_create_params.StreamOptions] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| temperature | Optional[float] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| text | ResponseTextConfigParam | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| top_logprobs | Optional[int] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| top_p | Optional[float] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| truncation | Optional[Literal[‘auto’, ‘disabled’]] | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| user | str | Omit | <openai.Omit object at 0x7f67d1e4c850> | |
| extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
| extra_query | Query | None | None | |
| extra_body | Body | None | None | |
| timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
Exported source
_final_prompt = "You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed."

@patch
@delegates(Chat.__call__)
def toolloop(self:Chat,
             pr, # Prompt to pass to the model
             max_steps=10, # Maximum number of tool requests to loop through
             cont_func:callable=noop, # Function that stops loop if returns False
             final_prompt=_final_prompt, # Prompt to add if last message is a tool call
             **kwargs):
    "Add prompt `pr` to dialog and get a response from the model, automatically following up with tool-call messages"
    @save_iter
    def _f(o):
        init_n = len(self.h)  # history length before this call; used to expose only the new items
        r = self(pr, **kwargs)
        yield r
        if len(self.last)>1: yield from self.last[1:]
        for i in range(max_steps-1):
            x = self.h[-1]
            # Keep looping only while the last history item is a tool result awaiting follow-up.
            if not (isinstance(x, dict) and x['type']=='function_call_output'): break
            # On the last allowed step, send `final_prompt` so the model wraps up instead of calling more tools.
            r = self(final_prompt if i==max_steps-2 else None, **kwargs)
            yield r
            if len(self.last)>1: yield from self.last[1:]
            if not cont_func(*self.h[-3:]): break
        # All history items generated by this loop, available as `.value` on the returned iterator.
        o.value = self.h[init_n+1:]
    return _f()
def show(x):
    "Display `x` itself if it has `output_text`, otherwise its `output` attr (or `x` unchanged)."
    if getattr(x, 'output_text', None): r = x
    else: r = getattr(x,'output',x)
    display(r)

chat = Chat(model, tools=tools)
pr = 'Can you tell me the email address for customer C1?'
r = chat.toolloop(pr)
res = list(r)
for o in r: show(o)- Retrieving customer C1
The email address for customer C1 (John Doe) is john@example.com.
- id: resp_0787bac936d9204f006943fcee2ba48195ad60e3c1ec52d1d7
- created_at: 1766063342.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_0787bac936d9204f006943fcee7cd881959b82edf88db5e70d’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_0787bac936d9204f006943fcef3ee88195832b056ec436b34f’, content=[ResponseOutputText(annotations=[], text=‘The email address for customer C1 (John Doe) is john@example.com.’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘get_customer_info’, parameters={‘type’: ‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’, ‘description’: ‘ID of the customer’}}, ‘required’: [‘customer_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=“Retrieves a customer’s information and their orders based on the customer ID”), FunctionTool(name=‘get_order_details’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Retrieves the details of a specific order based on the order ID’), FunctionTool(name=‘cancel_order’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Cancels an order based on the provided order ID:- type: boolean’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=316, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=86, output_tokens_details=OutputTokensDetails(reasoning_tokens=64), total_tokens=402)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
ResponseOutputMessage(id='msg_0787bac936d9204f006943fcef3ee88195832b056ec436b34f', content=[ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')
loop_outputs
loop_outputs (res)
Exported source
def loop_outputs(res):
    "Flatten a `toolloop` result list into plain dicts: dict items (tool outputs) pass through; response objects contribute each item of their `output` list."
    return [dict(p) for o in res for p in ([o] if isinstance(o,dict) else getattr(o,'output',[]))]

cl = loop_outputs(res)
cl[{'id': 'rs_0787bac936d9204f006943fceb6f088195a44ade2155966909',
'summary': [],
'type': 'reasoning',
'content': None,
'encrypted_content': None,
'status': None},
{'arguments': '{"customer_id":"C1"}',
'call_id': 'call_wqbYqGHnvgMg8lSY9JMUrwzU',
'name': 'get_customer_info',
'type': 'function_call',
'id': 'fc_0787bac936d9204f006943fcebe6d08195b9cd3ca97a01b7d2',
'status': 'completed'},
{'type': 'function_call_output',
'call_id': 'call_wqbYqGHnvgMg8lSY9JMUrwzU',
'output': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Cancelled'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Cancelled'}]}"},
{'id': 'rs_0787bac936d9204f006943fcec9c108195b41ad9735c482bc4',
'summary': [],
'type': 'reasoning',
'content': None,
'encrypted_content': None,
'status': None},
{'id': 'msg_0787bac936d9204f006943fced6e3c8195b5b4c9710ecb6485',
'content': [ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=[])],
'role': 'assistant',
'status': 'completed',
'type': 'message'}]
def disp_tc(x):
    "Render one transcript item as markdown: tool calls as bullets, tool outputs as sub-bullets, messages as their concatenated text."
    if x['type']=='function_call': return f"- `{x['name']}({x['arguments']})`\n"
    elif x['type']=='function_call_output': return f" - `{x['output']}`\n\n"
    else: return ''.join(o.text for o in x['content'])

# Markdown(''.join(map(disp_tc, cl)))
pprint(r.value)
ResponseOutputMessage(id='msg_0787bac936d9204f006943fcef3ee88195832b056ec436b34f', content=[ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
orders, customers = _get_orders_customers()chat = Chat(model, tools=tools)
r = chat.toolloop('What is the status of order O2?')
for o in r: display(getattr(o,'output',o))- Retrieving order O2
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcf092688194adde78fdb74e90d3', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_ARnBd6xSrlcAfn3wBQGdLtiu', name='get_order_details', type='function_call', id='fc_080cc194cfa17c94006943fcf1ed8c8194adc4f0474afe98cc', status='completed')]
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_ARnBd6xSrlcAfn3wBQGdLtiu', name='get_order_details', type='function_call', id='fc_080cc194cfa17c94006943fcf1ed8c8194adc4f0474afe98cc', status='completed')
{'type': 'function_call_output',
'call_id': 'call_ARnBd6xSrlcAfn3wBQGdLtiu',
'output': "{'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Processing'}"}
[ResponseOutputMessage(id='msg_080cc194cfa17c94006943fcf3101081948ac3faafe2dcac65', content=[ResponseOutputText(annotations=[], text='Order O2 is currently: Processing.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
r = chat.toolloop('Please cancel all orders for customer C1 for me.')
res = list(r)
for o in res: display(getattr(o,'output',o))- Retrieving customer C1
- Cancelling order O1
- Cancelling order O2
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcf412208194a4c7b87ce81c4736', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_Btl5F6UrkBoJJaDQzke1VvVN', name='get_customer_info', type='function_call', id='fc_080cc194cfa17c94006943fcf82ba08194a65b676e302e9aa6', status='completed')]
ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_Btl5F6UrkBoJJaDQzke1VvVN', name='get_customer_info', type='function_call', id='fc_080cc194cfa17c94006943fcf82ba08194a65b676e302e9aa6', status='completed')
{'type': 'function_call_output',
'call_id': 'call_Btl5F6UrkBoJJaDQzke1VvVN',
'output': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Shipped'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Processing'}]}"}
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcf913188194ab017b95ba08a13e', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_4yaVagsUWe86YMk25kyCU9x2', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfa8bd081949731f7da8c5adc86', status='completed'),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_Vy0mCP8ocOYR31qk4sbVgRw5', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfab9688194a97ce8bb7ff46706', status='completed')]
ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_4yaVagsUWe86YMk25kyCU9x2', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfa8bd081949731f7da8c5adc86', status='completed')
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_Vy0mCP8ocOYR31qk4sbVgRw5', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfab9688194a97ce8bb7ff46706', status='completed')
{'type': 'function_call_output',
'call_id': 'call_4yaVagsUWe86YMk25kyCU9x2',
'output': 'True'}
{'type': 'function_call_output',
'call_id': 'call_Vy0mCP8ocOYR31qk4sbVgRw5',
'output': 'True'}
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcfbd120819490c3ad1d75350865', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_080cc194cfa17c94006943fd043f948194ad1bb58d51d2de71', content=[ResponseOutputText(annotations=[], text='Done — I canceled all orders for customer C1 (John Doe).\n\nSummary:\n- O1 — Widget A — previous status: Shipped — cancellation: Success\n- O2 — Gadget B — previous status: Processing — cancellation: Success\n\nWould you like me to check refund status, send a confirmation to john@example.com, or do anything else?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
ResponseOutputMessage(id='msg_080cc194cfa17c94006943fd043f948194ad1bb58d51d2de71', content=[ResponseOutputText(annotations=[], text='Done — I canceled all orders for customer C1 (John Doe).\n\nSummary:\n- O1 — Widget A — previous status: Shipped — cancellation: Success\n- O2 — Gadget B — previous status: Processing — cancellation: Success\n\nWould you like me to check refund status, send a confirmation to john@example.com, or do anything else?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')
# cl = loop_outputs(res)
# Markdown('\n'.join(map(disp_tc, cl)))for o in chat.toolloop('What is the status of order O2?'): display(o)- Retrieving order O2
- id: resp_080cc194cfa17c94006943fd06606c8194acbf56e9291c70ca
- created_at: 1766063366.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_080cc194cfa17c94006943fd06bbd4819499041c0e2a896d43’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseFunctionToolCall(arguments=‘{“order_id”:“O2”}’, call_id=‘call_47iqPnLAFKxbJ4JJ9EQwWw71’, name=‘get_order_details’, type=‘function_call’, id=‘fc_080cc194cfa17c94006943fd07c3308194a643843cb1720a9e’, status=‘completed’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘get_customer_info’, parameters={‘type’: ‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’, ‘description’: ‘ID of the customer’}}, ‘required’: [‘customer_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=“Retrieves a customer’s information and their orders based on the customer ID”), FunctionTool(name=‘get_order_details’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Retrieves the details of a specific order based on the order ID’), FunctionTool(name=‘cancel_order’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Cancels an order based on the provided order ID:- type: boolean’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=521, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=87, output_tokens_details=OutputTokensDetails(reasoning_tokens=64), total_tokens=608)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_47iqPnLAFKxbJ4JJ9EQwWw71', name='get_order_details', type='function_call', id='fc_080cc194cfa17c94006943fd07c3308194a643843cb1720a9e', status='completed')
{'type': 'function_call_output',
'call_id': 'call_47iqPnLAFKxbJ4JJ9EQwWw71',
'output': "{'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Cancelled'}"}
Order O2 is now: Cancelled.
- id: resp_080cc194cfa17c94006943fd0841e88194b12aa8c8f30b4e64
- created_at: 1766063368.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseOutputMessage(id=‘msg_080cc194cfa17c94006943fd08ad488194a093653ee36021db’, content=[ResponseOutputText(annotations=[], text=‘Order O2 is now: Cancelled.’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘get_customer_info’, parameters={‘type’: ‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’, ‘description’: ‘ID of the customer’}}, ‘required’: [‘customer_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=“Retrieves a customer’s information and their orders based on the customer ID”), FunctionTool(name=‘get_order_details’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Retrieves the details of a specific order based on the order ID’), FunctionTool(name=‘cancel_order’, parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’: ‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’: [‘order_id’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Cancels an order based on the provided order ID:- type: boolean’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=676, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=13, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=689)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
Test Math Example
def add(x: int, y: int) -> int:
    "adds x and y."
    return x + y
def mul(x: int, y: int) -> int:
    "multiplies x and y."
    # Companion tool to `add` so the model can chain arithmetic steps.
    product = x * y
    return product

# Fresh chat session exposing both math tools.
# NOTE(review): `Chat`, `model` and `chatkw` are defined in earlier cells.
chat = Chat(model, tools=[add, mul], **chatkw)
# Prompt that requires three sequential tool calls: add, then mul, then add.
pr = 'Can you add 1258585825128 to 34959234595, multiply by 93, and then add (-12439149)?'
r = chat.toolloop(pr)
for o in r: show(o)[ResponseReasoningItem(id='rs_02621606c3b513dc006943fd09ad40819781fb7b32d5e5996a', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_Fg3v2kfgWIogSl1IEB0w1Y0K', name='add', type='function_call', id='fc_02621606c3b513dc006943fd09fdf881979ba71de45af8d588', status='completed')]
ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_Fg3v2kfgWIogSl1IEB0w1Y0K', name='add', type='function_call', id='fc_02621606c3b513dc006943fd09fdf881979ba71de45af8d588', status='completed')
{'type': 'function_call_output',
'call_id': 'call_Fg3v2kfgWIogSl1IEB0w1Y0K',
'output': '1293545059723'}
[ResponseFunctionToolCall(arguments='{"x":1293545059723,"y":93}', call_id='call_9rimpAnaVInF4ssbHosAfTi8', name='mul', type='function_call', id='fc_02621606c3b513dc006943fd0bae9c81978026c7bfaae4a759', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_9rimpAnaVInF4ssbHosAfTi8',
'output': '120299690554239'}
[ResponseFunctionToolCall(arguments='{"x":120299690554239,"y":-12439149}', call_id='call_OS43dlWKEYtYAocZpHJ94RXM', name='add', type='function_call', id='fc_02621606c3b513dc006943fd0c9f5c8197a11bc188b0174d6d', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_OS43dlWKEYtYAocZpHJ94RXM',
'output': '120299678115090'}
120299678115090
- id: resp_02621606c3b513dc006943fd0d30f48197a911787dca9e5d00
- created_at: 1766063373.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseOutputMessage(id=‘msg_02621606c3b513dc006943fd0d80f88197b58e845951e295e1’, content=[ResponseOutputText(annotations=[], text=‘120299678115090’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘add’, parameters={‘type’: ‘object’, ‘properties’: {‘x’: {‘type’: ‘integer’, ‘description’: ’‘}, ’y’: {‘type’: ‘integer’, ‘description’: ’‘}}, ’required’: [‘x’, ‘y’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘adds x and y.:- type: integer’), FunctionTool(name=‘mul’, parameters={‘type’: ‘object’, ‘properties’: {‘x’: {‘type’: ‘integer’, ‘description’: ’‘}, ’y’: {‘type’: ‘integer’, ‘description’: ’‘}}, ’required’: [‘x’, ‘y’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘multiplies x and y.:- type: integer’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=250, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=9, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=259)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
(1258585825128 + 34959234595) * 93 - 12439149
120299678115090
# Same multi-step prompt, now with stream=True: the loop yields dicts for
# tool results and iterable chunks for streamed model output.
chat = Chat(model, tools=[add, mul], **chatkw)
r = chat.toolloop(pr, stream=True)
for o in r:
    # Tool-result dicts are printed whole; streamed items piece by piece.
    if isinstance(o, dict): print('- ', o)
    else:
        for p in o: print(p, end='')
if hasattr(o, 'value'): show(o.value)[ResponseReasoningItem(id='rs_0944024f48105103006943fd116be8819483df3a9f25e3d328', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_B6xGJDpQJm5GrS2hnngybWTe', name='add', type='function_call', id='fc_0944024f48105103006943fd11abcc81949200a4eabf12e97b', status='completed')]
('arguments', '{"x":1258585825128,"y":34959234595}')('call_id', 'call_B6xGJDpQJm5GrS2hnngybWTe')('name', 'add')('type', 'function_call')('id', 'fc_0944024f48105103006943fd11abcc81949200a4eabf12e97b')('status', 'completed')- {'type': 'function_call_output', 'call_id': 'call_B6xGJDpQJm5GrS2hnngybWTe', 'output': '1293545059723'}
[ResponseFunctionToolCall(arguments='{"x":1293545059723,"y":93}', call_id='call_Vb3v1awZxvoZuUjbq9RRZrfM', name='mul', type='function_call', id='fc_0944024f48105103006943fd1402188194a2adccf222c76591', status='completed')]
- {'type': 'function_call_output', 'call_id': 'call_Vb3v1awZxvoZuUjbq9RRZrfM', 'output': '120299690554239'}
[ResponseFunctionToolCall(arguments='{"x":120299690554239,"y":-12439149}', call_id='call_Dd1Uzt8rMfWJCH0irbMa5OVV', name='add', type='function_call', id='fc_0944024f48105103006943fd14e54481948906bd955b405418', status='completed')]
- {'type': 'function_call_output', 'call_id': 'call_Dd1Uzt8rMfWJCH0irbMa5OVV', 'output': '120299678115090'}
120299678115090
120299678115090
- id: resp_0944024f48105103006943fd1583ec8194863012f06a95bb9a
- created_at: 1766063381.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseOutputMessage(id=‘msg_0944024f48105103006943fd16463c81949df34dd0158af4c1’, content=[ResponseOutputText(annotations=[], text=‘120299678115090’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘add’, parameters={‘type’: ‘object’, ‘properties’: {‘x’: {‘type’: ‘integer’, ‘description’: ’‘}, ’y’: {‘type’: ‘integer’, ‘description’: ’‘}}, ’required’: [‘x’, ‘y’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘adds x and y.:- type: integer’), FunctionTool(name=‘mul’, parameters={‘type’: ‘object’, ‘properties’: {‘x’: {‘type’: ‘integer’, ‘description’: ’‘}, ’y’: {‘type’: ‘integer’, ‘description’: ’‘}}, ’required’: [‘x’, ‘y’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘multiplies x and y.:- type: integer’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=250, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=9, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=259)
- user: None
- store: True
Error Conditions: Out of Iterations, Exception During Tool Invocation
def mydiv(a: float, b: float):
    "Divide two numbers"
    # Deliberately unguarded: b == 0 raises ZeroDivisionError, which the
    # toolloop returns to the model as a traceback string (see cells below).
    quotient = a / b
    return quotient

# Chat session exposing only the division tool for the error-handling demos.
chat = Chat(model, tools=[mydiv], **chatkw)
# max_steps=2 limits tool-call rounds; in the captured run the model issued
# all four divisions in one parallel batch, using 0 as a placeholder for
# "previous result", so the later calls fail with ZeroDivisionError.
r = chat.toolloop('Please calculate this sequence using your tools: 43/23454; 652/previous result; 6843/previous result; 321/previous result', max_steps=2)
for o in r: show(o)[ResponseReasoningItem(id='rs_02f9e0725e9fb6a5006943fd16f3dc81979a3aa89dfcb98aca', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":43,"b":23454}', call_id='call_Fnsc5iNP8rMuj0rqeBLzXVsO', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd177c6c8197a5ad34f31755933a', status='completed'),
ResponseFunctionToolCall(arguments='{"a":652,"b":0}', call_id='call_k7APDmfK4dy57LeiHYhBCNq6', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17b4d08197ada7efed9792c679', status='completed'),
ResponseFunctionToolCall(arguments='{"a":6843,"b":0}', call_id='call_ZpzLBH2zM3Wcy9aoFCbnZ0mN', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17e5bc8197b3e6f2903db6ce9d', status='completed'),
ResponseFunctionToolCall(arguments='{"a":321,"b":0}', call_id='call_JGS2KeOU7ku3n56kqbSrvEGy', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd181dd88197bd6492c3465327ea', status='completed')]
ResponseFunctionToolCall(arguments='{"a":43,"b":23454}', call_id='call_Fnsc5iNP8rMuj0rqeBLzXVsO', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd177c6c8197a5ad34f31755933a', status='completed')
ResponseFunctionToolCall(arguments='{"a":652,"b":0}', call_id='call_k7APDmfK4dy57LeiHYhBCNq6', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17b4d08197ada7efed9792c679', status='completed')
ResponseFunctionToolCall(arguments='{"a":6843,"b":0}', call_id='call_ZpzLBH2zM3Wcy9aoFCbnZ0mN', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17e5bc8197b3e6f2903db6ce9d', status='completed')
ResponseFunctionToolCall(arguments='{"a":321,"b":0}', call_id='call_JGS2KeOU7ku3n56kqbSrvEGy', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd181dd88197bd6492c3465327ea', status='completed')
{'type': 'function_call_output',
'call_id': 'call_Fnsc5iNP8rMuj0rqeBLzXVsO',
'output': '0.001833375969983798'}
{'type': 'function_call_output',
'call_id': 'call_k7APDmfK4dy57LeiHYhBCNq6',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
{'type': 'function_call_output',
'call_id': 'call_ZpzLBH2zM3Wcy9aoFCbnZ0mN',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
{'type': 'function_call_output',
'call_id': 'call_JGS2KeOU7ku3n56kqbSrvEGy',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
I computed the first division successfully: - 43 / 23454 = 0.001833375969983798
I attempted the next steps but they failed because I tried to divide by zero (I passed 0 as the “previous result” for subsequent operations), causing errors. To complete the sequence you want, I need to perform these successive calculations using the preceding result each time: 1. 652 / (43/23454) 2. 6843 / (result of step 2) 3. 321 / (result of step 3)
If you want, I can now: - Recompute the chain without tool limits and give all four results, or - Compute them step-by-step here directly (no tools needed). Which do you prefer?
- id: resp_02f9e0725e9fb6a5006943fd18bff081979e13bb5930ee32b7
- created_at: 1766063384.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseReasoningItem(id=‘rs_02f9e0725e9fb6a5006943fd19334c8197b4b20beb1ae09493’, summary=[], type=‘reasoning’, content=None, encrypted_content=None, status=None), ResponseOutputMessage(id=‘msg_02f9e0725e9fb6a5006943fd195a9c81978fb9bcca07f80c87’, content=[ResponseOutputText(annotations=[], text=‘I computed the first division successfully:- 43 / 23454 = 0.001833375969983798attempted the next steps but they failed because I tried to divide by zero (I passed 0 as the “previous result” for subsequent operations), causing errors. To complete the sequence you want, I need to perform these successive calculations using the preceding result each time:. 652 / (43/23454). 6843 / (result of step 2). 321 / (result of step 3)you want, I can now:- Recompute the chain without tool limits and give all four results, or- Compute them step-by-step here directly (no tools needed). Which do you prefer?’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘mydiv’, parameters={‘type’: ‘object’, ‘properties’: {‘a’: {‘type’: ‘number’, ‘description’: ’‘}, ’b’: {‘type’: ‘number’, ‘description’: ’‘}}, ’required’: [‘a’, ‘b’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Divide two numbers’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=537, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=163, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=700)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
ResponseOutputMessage(id='msg_02f9e0725e9fb6a5006943fd195a9c81978fb9bcca07f80c87', content=[ResponseOutputText(annotations=[], text='I computed the first division successfully:\n- 43 / 23454 = 0.001833375969983798\n\nI attempted the next steps but they failed because I tried to divide by zero (I passed 0 as the “previous result” for subsequent operations), causing errors. To complete the sequence you want, I need to perform these successive calculations using the preceding result each time:\n1. 652 / (43/23454)\n2. 6843 / (result of step 2)\n3. 321 / (result of step 3)\n\nIf you want, I can now:\n- Recompute the chain without tool limits and give all four results, or\n- Compute them step-by-step here directly (no tools needed). Which do you prefer?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')
This tests the raise_on_err=False change to the toolslm.call_func invocation. We should see this return the error as a string instead of crashing:
# Regression check: the ZeroDivisionError raised inside the tool comes back
# to the model as a traceback string rather than crashing the loop.
chat = Chat(model, tools=[mydiv], **chatkw)
r = chat.toolloop('Try dividing 1 by 0 and see what the error result is')
for o in r: show(o)[ResponseReasoningItem(id='rs_0af57bb05fb6f746006943fd1bd07c81958a319ad4cb70eae8', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":1,"b":0}', call_id='call_0FGtqnXi4rLuPC0ivZcaRw24', name='mydiv', type='function_call', id='fc_0af57bb05fb6f746006943fd1c29a08195989bf0c4f670baca', status='completed')]
ResponseFunctionToolCall(arguments='{"a":1,"b":0}', call_id='call_0FGtqnXi4rLuPC0ivZcaRw24', name='mydiv', type='function_call', id='fc_0af57bb05fb6f746006943fd1c29a08195989bf0c4f670baca', status='completed')
{'type': 'function_call_output',
'call_id': 'call_0FGtqnXi4rLuPC0ivZcaRw24',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
ZeroDivisionError: division by zero
- id: resp_0af57bb05fb6f746006943fd1cbdac8195b5d480c66c146b06
- created_at: 1766063388.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output: [ResponseOutputMessage(id=‘msg_0af57bb05fb6f746006943fd1d335881958929318eb6ad6c40’, content=[ResponseOutputText(annotations=[], text=‘ZeroDivisionError: division by zero’, type=‘output_text’, logprobs=[])], role=‘assistant’, status=‘completed’, type=‘message’)]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name=‘mydiv’, parameters={‘type’: ‘object’, ‘properties’: {‘a’: {‘type’: ‘number’, ‘description’: ’‘}, ’b’: {‘type’: ‘number’, ‘description’: ’‘}}, ’required’: [‘a’, ‘b’], ‘additionalProperties’: False}, strict=True, type=‘function’, description=‘Divide two numbers’)]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None, summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’), verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=198, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=11, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=209)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True