from IPython.display import display, Markdown, clear_output
from pprint import pprint
Tool loop
' '.join(models)
'o1-preview o1-mini gpt-4o gpt-4o-mini gpt-4-turbo gpt-4 gpt-4-32k gpt-3.5-turbo gpt-3.5-turbo-instruct o1 o3-mini chatgpt-4o-latest o1-pro o3 o4-mini gpt-4.1 gpt-4.1-mini gpt-4.1-nano'
model = 'gpt-4.1'
Sample Data
def _get_orders_customers():
    orders = {
        "O1": dict(id="O1", product="Widget A", quantity=2, price=19.99, status="Shipped"),
        "O2": dict(id="O2", product="Gadget B", quantity=1, price=49.99, status="Processing"),
        "O3": dict(id="O3", product="Gadget B", quantity=2, price=49.99, status="Shipped")}
    customers = {
        "C1": dict(name="John Doe", email="john@example.com", phone="123-456-7890",
                   orders=[orders['O1'], orders['O2']]),
        "C2": dict(name="Jane Smith", email="jane@example.com", phone="987-654-3210",
                   orders=[orders['O3']])
    }
    return orders, customers
orders, customers = _get_orders_customers()
def get_customer_info(
    customer_id:str # ID of the customer
): # Customer's name, email, phone number, and list of orders
    "Retrieves a customer's information and their orders based on the customer ID"
    print(f'- Retrieving customer {customer_id}')
    return customers.get(customer_id, "Customer not found")
def get_order_details(
    order_id:str # ID of the order
): # Order's ID, product name, quantity, price, and order status
    "Retrieves the details of a specific order based on the order ID"
    print(f'- Retrieving order {order_id}')
    return orders.get(order_id, "Order not found")
def cancel_order(
    order_id:str # ID of the order to cancel
)->bool: # True if the cancellation is successful
    "Cancels an order based on the provided order ID"
    print(f'- Cancelling order {order_id}')
    if order_id not in orders: return False
    orders[order_id]['status'] = 'Cancelled'
    return True
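Before handing these to a model it can help to sanity-check the tools directly. A quick illustrative cell ('O9' is just an arbitrary unknown ID):

# Direct calls, bypassing the model (illustrative only):
print(get_order_details('O1'))   # existing order -> its dict is returned
print(get_order_details('O9'))   # unknown ID -> "Order not found"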
model
'gpt-4.1'
tools = [get_customer_info, get_order_details, cancel_order]
chat = Chat(model, tools=tools)
r = chat('Hi.')
r
Hello! How can I assist you today?
- id: resp_6858d51dabd48191a071f3cf8357a1e101962923ea927335
- created_at: 1750652189.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-4.1-2025-04-14
- object: response
- output: [ResponseOutputMessage(id='msg_6858d51e034c81919daf1363ea38a31f01962923ea927335', content=[ResponseOutputText(annotations=[], text='Hello! How can I assist you today?', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name='get_customer_info', parameters={'type': 'object', 'properties': {'customer_id': {'type': 'string', 'description': 'ID of the customer'}}, 'required': ['customer_id']}, strict=True, type='function', description="Retrieves a customer's information and their orders based on the customer ID"), FunctionTool(name='get_order_details', parameters={'type': 'object', 'properties': {'order_id': {'type': 'string', 'description': 'ID of the order'}}, 'required': ['order_id']}, strict=True, type='function', description='Retrieves the details of a specific order based on the order ID'), FunctionTool(name='cancel_order', parameters={'type': 'object', 'properties': {'order_id': {'type': 'string', 'description': 'ID of the order to cancel'}}, 'required': ['order_id']}, strict=True, type='function', description='Cancels an order based on the provided order ID:- type: boolean')]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- previous_response_id: None
- prompt: None
- reasoning: Reasoning(effort=None, generate_summary=None, summary=None)
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’))
- truncation: disabled
- usage: ResponseUsage(input_tokens=131, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=11, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=142)
- user: None
- store: True
r = chat('Can you tell me the email address for customer C2?')
r.output
- Retrieving customer C2
[ResponseFunctionToolCall(arguments='{"customer_id":"C2"}', call_id='call_3ds7FtEeugLeq4S94QhU3Doo', name='get_customer_info', type='function_call', id='fc_6858d51f39708191942eb5a90908bc0401962923ea927335', status='completed')]
r = chat()
r.output
[ResponseOutputMessage(id='msg_6858d520101081918ffbe85e9c5e64ff01962923ea927335', content=[ResponseOutputText(annotations=[], text='The email address for customer C2 (Jane Smith) is jane@example.com. If you need more information or help with anything else, just let me know!', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
chat = Chat(model, tools=tools)
r = chat('Please cancel all orders for customer C1 for me.')
r.output
- Retrieving customer C1
[ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_PiY4SODDLSsnuy3kQsuESsZd', name='get_customer_info', type='function_call', id='fc_6858d5219e8c81a29c90b0bd1508a11b04599ca30d650df4', status='completed')]
r = chat()
r.output
- Cancelling order O1
- Cancelling order O2
[ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_rV6krAiMjGR5OVkphVyBqonM', name='cancel_order', type='function_call', id='fc_6858d522a17481a2a6b44ad7ba70696904599ca30d650df4', status='completed'),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_tmPuED6b3bZrPnNTtj0MqmQ2', name='cancel_order', type='function_call', id='fc_6858d522c07c81a2a5dae90d4a444a4a04599ca30d650df4', status='completed')]
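Note the pattern above: every tool call needs a manual follow-up `chat()` round-trip to hand the result back to the model. The `toolloop` method automates those follow-ups.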
Toolloop implementation
Chat.toolloop
Chat.toolloop (pr, max_steps=10, cont_func:<built-in function callable>=<function noop>, final_prompt='You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.', stream:bool=False, tools=None, tool_choice=None, background:Optional[bool]|NotGiven=NOT_GIVEN, include:Optional[List[ResponseIncludable]]|NotGiven=NOT_GIVEN, input:Union[str,ResponseInputParam]|NotGiven=NOT_GIVEN, instructions:Optional[str]|NotGiven=NOT_GIVEN, max_output_tokens:Optional[int]|NotGiven=NOT_GIVEN, max_tool_calls:Optional[int]|NotGiven=NOT_GIVEN, metadata:Optional[Metadata]|NotGiven=NOT_GIVEN, model:ResponsesModel|NotGiven=NOT_GIVEN, parallel_tool_calls:Optional[bool]|NotGiven=NOT_GIVEN, previous_response_id:Optional[str]|NotGiven=NOT_GIVEN, prompt:Optional[ResponsePromptParam]|NotGiven=NOT_GIVEN, reasoning:Optional[Reasoning]|NotGiven=NOT_GIVEN, service_tier:"Optional[Literal['auto','default','flex','scale','priority']]|NotGiven"=NOT_GIVEN, store:Optional[bool]|NotGiven=NOT_GIVEN, temperature:Optional[float]|NotGiven=NOT_GIVEN, text:ResponseTextConfigParam|NotGiven=NOT_GIVEN, top_logprobs:Optional[int]|NotGiven=NOT_GIVEN, top_p:Optional[float]|NotGiven=NOT_GIVEN, truncation:"Optional[Literal['auto','disabled']]|NotGiven"=NOT_GIVEN, user:str|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Add prompt `pr` to dialog and get a response from the model, automatically following up with `tool_use` messages.
| | Type | Default | Details |
|---|---|---|---|
| pr | | | Prompt to pass to the model |
| max_steps | int | 10 | Maximum number of tool requests to loop through |
| cont_func | callable | noop | Function that stops loop if returns False |
| final_prompt | str | You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed. | Prompt to add if last message is a tool call |
| stream | bool | False | Stream response? |
| tools | NoneType | None | Tools to use |
| tool_choice | NoneType | None | Required tools to use |
| background | Optional[bool] \| NotGiven | NOT_GIVEN | |
| include | Optional[List[ResponseIncludable]] \| NotGiven | NOT_GIVEN | |
| input | Union[str, ResponseInputParam] \| NotGiven | NOT_GIVEN | |
| instructions | Optional[str] \| NotGiven | NOT_GIVEN | |
| max_output_tokens | Optional[int] \| NotGiven | NOT_GIVEN | |
| max_tool_calls | Optional[int] \| NotGiven | NOT_GIVEN | |
| metadata | Optional[Metadata] \| NotGiven | NOT_GIVEN | |
| model | ResponsesModel \| NotGiven | NOT_GIVEN | |
| parallel_tool_calls | Optional[bool] \| NotGiven | NOT_GIVEN | |
| previous_response_id | Optional[str] \| NotGiven | NOT_GIVEN | |
| prompt | Optional[ResponsePromptParam] \| NotGiven | NOT_GIVEN | |
| reasoning | Optional[Reasoning] \| NotGiven | NOT_GIVEN | |
| service_tier | Optional[Literal['auto', 'default', 'flex', 'scale', 'priority']] \| NotGiven | NOT_GIVEN | |
| store | Optional[bool] \| NotGiven | NOT_GIVEN | |
| temperature | Optional[float] \| NotGiven | NOT_GIVEN | |
| text | ResponseTextConfigParam \| NotGiven | NOT_GIVEN | |
| top_logprobs | Optional[int] \| NotGiven | NOT_GIVEN | |
| top_p | Optional[float] \| NotGiven | NOT_GIVEN | |
| truncation | Optional[Literal['auto', 'disabled']] \| NotGiven | NOT_GIVEN | |
| user | str \| NotGiven | NOT_GIVEN | |
| extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
| extra_query | Query \| None | None | |
| extra_body | Body \| None | None | |
| timeout | float \| httpx.Timeout \| None \| NotGiven | NOT_GIVEN | |
Exported source
= "You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed." _final_prompt
Exported source
@patch
@delegates(Chat.__call__)
def toolloop(self:Chat,
             pr, # Prompt to pass to the model
             max_steps=10, # Maximum number of tool requests to loop through
             cont_func:callable=noop, # Function that stops loop if returns False
             final_prompt=_final_prompt, # Prompt to add if last message is a tool call
             **kwargs):
    "Add prompt `pr` to dialog and get a response from the model, automatically following up with `tool_use` messages"
    @save_iter
    def _f(o):
        init_n = len(self.h)
        r = self(pr, **kwargs)
        yield r
        if len(self.last)>1: yield from self.last[1:]
        for i in range(max_steps-1):
            x = self.h[-1]
            # Keep looping only while the last history item is a pending tool result
            if not (isinstance(x, dict) and x['type']=='function_call_output'): break
            r = self(final_prompt if i==max_steps-2 else None, **kwargs)
            yield r
            if len(self.last)>1: yield from self.last[1:]
            if not cont_func(*self.h[-3:]): break
        o.value = self.h[init_n+1:]
    return _f()
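The `cont_func` hook isn't exercised in the examples below. Since the loop calls `cont_func(*self.h[-3:])` after each round, it can be used to stop early based on recent history. A minimal sketch, assuming (as in the traces below) that tool-call history items are `ResponseFunctionToolCall` objects with a `name` attribute:

def stop_after_cancel(*recent):
    "Return False (ending the loop) once a cancel_order call appears in recent history."
    # Dict items (tool outputs) have no `name` attribute, so getattr returns None for them.
    return not any(getattr(h, 'name', None)=='cancel_order' for h in recent)

# Hypothetical usage: chat.toolloop(pr, cont_func=stop_after_cancel)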
Test Customer Dataset
def show(x):
    "Display the full response when it has text output, else just its output items"
    if getattr(x, 'output_text', None): r = x
    else: r = getattr(x,'output',x)
    display(r)
chat = Chat(model, tools=tools)
pr = 'Can you tell me the email address for customer C1?'
r = chat.toolloop(pr)
res = list(r)
for o in res: show(o)
- Retrieving customer C1
The email address for customer C1 (John Doe) is john@example.com.
- id: resp_6858d82deb6081a19fad1393c8e05dbe02f348a5ed2fba48
- created_at: 1750652973.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-4.1-2025-04-14
- object: response
- output: [ResponseOutputMessage(id='msg_6858d82e32f081a197f2714466e024fb02f348a5ed2fba48', content=[ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name='get_customer_info', parameters={'type': 'object', 'properties': {'customer_id': {'type': 'string', 'description': 'ID of the customer'}}, 'required': ['customer_id']}, strict=True, type='function', description="Retrieves a customer's information and their orders based on the customer ID"), FunctionTool(name='get_order_details', parameters={'type': 'object', 'properties': {'order_id': {'type': 'string', 'description': 'ID of the order'}}, 'required': ['order_id']}, strict=True, type='function', description='Retrieves the details of a specific order based on the order ID'), FunctionTool(name='cancel_order', parameters={'type': 'object', 'properties': {'order_id': {'type': 'string', 'description': 'ID of the order to cancel'}}, 'required': ['order_id']}, strict=True, type='function', description='Cancels an order based on the provided order ID:- type: boolean')]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- previous_response_id: None
- prompt: None
- reasoning: Reasoning(effort=None, generate_summary=None, summary=None)
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’))
- truncation: disabled
- usage: ResponseUsage(input_tokens=303, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=18, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=321)
- user: None
- store: True
loop_outputs
loop_outputs (res)
Exported source
def loop_outputs(res):
    "Flatten loop results into dicts: keep raw dict items, expand responses into their output items"
    return [dict(p) for o in res for p in ([o] if isinstance(o,dict) else getattr(o,'output',[]))]
cl = loop_outputs(res)
cl
[{'arguments': '{"customer_id":"C1"}',
'call_id': 'call_LGkDfgCIYoi150YgpsR1Mgch',
'name': 'get_customer_info',
'type': 'function_call',
'id': 'fc_6858d82c5a2881a180da48f9ee11cec502f348a5ed2fba48',
'status': 'completed'},
{'type': 'function_call_output',
'call_id': 'call_LGkDfgCIYoi150YgpsR1Mgch',
'output': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Cancelled'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Cancelled'}]}"},
{'id': 'msg_6858d82d55f081a190f06d2678cc53a702f348a5ed2fba48',
'content': [ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=None)],
'role': 'assistant',
'status': 'completed',
'type': 'message'}]
def disp_tc(x):
    if x['type']=='function_call': return f"- `{x['name']}({x['arguments']})`\n"
    elif x['type']=='function_call_output': return f" - `{x['output']}`\n\n"
    else: return ''.join(o.text for o in x['content'])
Markdown(''.join(map(disp_tc, cl)))
get_customer_info({"customer_id":"C1"})
{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Cancelled'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Cancelled'}]}
The email address for customer C1 (John Doe) is john@example.com.
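Because `toolloop` wraps its generator with `save_iter`, the history items recorded during the loop remain available afterwards on the generator's `value` attribute: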
pprint(r.value)
[ResponseOutputMessage(id='msg_6858d82e32f081a197f2714466e024fb02f348a5ed2fba48', content=[ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
orders, customers = _get_orders_customers()
chat = Chat(model, tools=tools)
r = chat.toolloop('What is the status of order O2?')
for o in r: display(getattr(o,'output',o))
- Retrieving order O2
[ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_SuCjlwbfWOdNNA4itfOQHfi9', name='get_order_details', type='function_call', id='fc_6858d839164c819faf3de0ee3738b1ff0f74f151fa878a00', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_SuCjlwbfWOdNNA4itfOQHfi9',
'output': "{'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Processing'}"}
[ResponseOutputMessage(id='msg_6858d839dac8819fb4e823b41008f35e0f74f151fa878a00', content=[ResponseOutputText(annotations=[], text='Order O2 is currently in "Processing" status. If you need more details or want to take any action on this order, please let me know!', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
r = chat.toolloop('Please cancel all orders for customer C1 for me.')
res = list(r)
for o in res: display(getattr(o,'output',o))
- Retrieving customer C1
- Cancelling order O1
- Cancelling order O2
[ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_1dYvWLOrZLClL6Z0mPp3zV4d', name='get_customer_info', type='function_call', id='fc_6858d843b52c819f899a075e393e2f240f74f151fa878a00', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_1dYvWLOrZLClL6Z0mPp3zV4d',
'output': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Shipped'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Processing'}]}"}
[ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_ixbGdJ1YEjYj4FbqQ8ShdCRn', name='cancel_order', type='function_call', id='fc_6858d844cc8c819f99199c37f4917ae10f74f151fa878a00', status='completed'),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_yXKD72U8RwlymgZ2CE2fYYLI', name='cancel_order', type='function_call', id='fc_6858d844dd7c819f9784a1de8a9b3d110f74f151fa878a00', status='completed')]
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_yXKD72U8RwlymgZ2CE2fYYLI', name='cancel_order', type='function_call', id='fc_6858d844dd7c819f9784a1de8a9b3d110f74f151fa878a00', status='completed')
{'type': 'function_call_output',
'call_id': 'call_ixbGdJ1YEjYj4FbqQ8ShdCRn',
'output': 'True'}
{'type': 'function_call_output',
'call_id': 'call_yXKD72U8RwlymgZ2CE2fYYLI',
'output': 'True'}
[ResponseOutputMessage(id='msg_6858d8459a34819f8e9aba068ebc695b0f74f151fa878a00', content=[ResponseOutputText(annotations=[], text='All orders for customer C1 have been cancelled successfully. If you need a confirmation or further assistance, please let me know!', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
cl = loop_outputs(res)
Markdown('\n'.join(map(disp_tc, cl)))
get_customer_info({"customer_id":"C1"})
{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Shipped'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Processing'}]}
cancel_order({"order_id":"O1"})
cancel_order({"order_id":"O2"})
True
True
All orders for customer C1 have been cancelled successfully. If you need a confirmation or further assistance, please let me know!
for o in chat.toolloop('What is the status of order O2?'): display(o)
Order O2 has been cancelled. If you have any further questions or need additional assistance, please let me know!
- id: resp_6858d8536bc8819fa0654fda1f4a99ec0f74f151fa878a00
- created_at: 1750653011.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-4.1-2025-04-14
- object: response
- output: [ResponseOutputMessage(id='msg_6858d853c930819f9e656a5a9653642b0f74f151fa878a00', content=[ResponseOutputText(annotations=[], text='Order O2 has been cancelled. If you have any further questions or need additional assistance, please let me know!', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name='get_customer_info', parameters={'type': 'object', 'properties': {'customer_id': {'type': 'string', 'description': 'ID of the customer'}}, 'required': ['customer_id']}, strict=True, type='function', description="Retrieves a customer's information and their orders based on the customer ID"), FunctionTool(name='get_order_details', parameters={'type': 'object', 'properties': {'order_id': {'type': 'string', 'description': 'ID of the order'}}, 'required': ['order_id']}, strict=True, type='function', description='Retrieves the details of a specific order based on the order ID'), FunctionTool(name='cancel_order', parameters={'type': 'object', 'properties': {'order_id': {'type': 'string', 'description': 'ID of the order to cancel'}}, 'required': ['order_id']}, strict=True, type='function', description='Cancels an order based on the provided order ID:- type: boolean')]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- previous_response_id: None
- prompt: None
- reasoning: Reasoning(effort=None, generate_summary=None, summary=None)
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’))
- truncation: disabled
- usage: ResponseUsage(input_tokens=465, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=25, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=490)
- user: None
- store: True
Test Math Example
def add(x: int, y: int) -> int:
    "adds x and y."
    return x + y

def mul(x: int, y: int) -> int:
    "multiplies x and y."
    return x * y
chat = Chat(model, tools=[add, mul])
pr = 'Can you add 1258585825128 to 34959234595, multiply by 93, and then add (-12439149)?'
r = chat.toolloop(pr)
for o in r: show(o)
[ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_5cuCy9TkdCbdWq0JDzKixtVw', name='add', type='function_call', id='fc_6858d857f9a8819dbee7b721dfa15e220deec0bc13b52a48', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_5cuCy9TkdCbdWq0JDzKixtVw',
'output': '1293545059723'}
[ResponseFunctionToolCall(arguments='{"x":1293545059723,"y":93}', call_id='call_aL3m3fGOEarJc30W3SqMXBsp', name='mul', type='function_call', id='fc_6858d858c41c819da77b609c39a222770deec0bc13b52a48', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_aL3m3fGOEarJc30W3SqMXBsp',
'output': '120299690554239'}
[ResponseFunctionToolCall(arguments='{"x":120299690554239,"y":-12439149}', call_id='call_ZEIjJerD9ML37a0aC7dWrVIy', name='add', type='function_call', id='fc_6858d8599e10819d92e8cfb2616109f80deec0bc13b52a48', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_ZEIjJerD9ML37a0aC7dWrVIy',
'output': '120299678115090'}
- First, adding 1,258,585,825,128 to 34,959,234,595 gives 1,293,545,059,723.
- Multiplying that result by 93 gives 120,299,690,554,239.
- Finally, adding (-12,439,149) gives the final result: 120,299,678,115,090.
- id: resp_6858d85a09b8819d8a3d6fddefa57ea90deec0bc13b52a48
- created_at: 1750653018.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-4.1-2025-04-14
- object: response
- output: [ResponseOutputMessage(id='msg_6858d85a64e8819d8ca28162dcea93e20deec0bc13b52a48', content=[ResponseOutputText(annotations=[], text='1. First, adding 1,258,585,825,128 to 34,959,234,595 gives 1,293,545,059,723.\n2. Multiplying that result by 93 gives 120,299,690,554,239.\n3. Finally, adding (-12,439,149) gives the final result: 120,299,678,115,090.', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name='add', parameters={'type': 'object', 'properties': {'x': {'type': 'integer', 'description': ''}, 'y': {'type': 'integer', 'description': ''}}, 'required': ['x', 'y']}, strict=True, type='function', description='adds x and y.:- type: integer'), FunctionTool(name='mul', parameters={'type': 'object', 'properties': {'x': {'type': 'integer', 'description': ''}, 'y': {'type': 'integer', 'description': ''}}, 'required': ['x', 'y']}, strict=True, type='function', description='multiplies x and y.:- type: integer')]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- previous_response_id: None
- prompt: None
- reasoning: Reasoning(effort=None, generate_summary=None, summary=None)
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’))
- truncation: disabled
- usage: ResponseUsage(input_tokens=213, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=87, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=300)
- user: None
- store: True
(1258585825128 + 34959234595) * 93 - 12439149
120299678115090
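The same loop can be streamed. In that case `toolloop` still yields tool-result dicts directly, while model responses arrive as iterables of text chunks, with the completed response available on their `value` attribute (as the loop below demonstrates):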
chat = Chat(model, tools=[add, mul])
r = chat.toolloop(pr, stream=True)
for o in r:
    if isinstance(o, dict): print('- ', o)
    else:
        for p in o: print(p, end='')
        show(o.value)
[ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_vAQduQuiQNaDRJE74PVV9bFm', name='add', type='function_call', id='fc_6858da0562e081a29af19180f4cd6529029fe7fc17fd78d0', status='completed')]
- {'type': 'function_call_output', 'call_id': 'call_vAQduQuiQNaDRJE74PVV9bFm', 'output': '1293545059723'}
[ResponseFunctionToolCall(arguments='{"x":1293545059723,"y":93}', call_id='call_HdFWjuMQ7AxX4avd20EqnGfF', name='mul', type='function_call', id='fc_6858da067f0481a282e552c8ef0b7c21029fe7fc17fd78d0', status='completed')]
- {'type': 'function_call_output', 'call_id': 'call_HdFWjuMQ7AxX4avd20EqnGfF', 'output': '120299690554239'}
[ResponseFunctionToolCall(arguments='{"x":120299690554239,"y":-12439149}', call_id='call_xqoEfHtk62hK2XgEUvR7oz7T', name='add', type='function_call', id='fc_6858da0747cc81a28607925fdaa3b66a029fe7fc17fd78d0', status='completed')]
- {'type': 'function_call_output', 'call_id': 'call_xqoEfHtk62hK2XgEUvR7oz7T', 'output': '120299678115090'}
First, I added 1,258,585,825,128 to 34,959,234,595 to get 1,293,545,059,723.
Next, I multiplied that result by 93, getting 120,299,690,554,239.
Finally, I added -12,439,149, which gives the final answer:
120,299,678,115,090
First, I added 1,258,585,825,128 to 34,959,234,595 to get 1,293,545,059,723. Next, I multiplied that result by 93, getting 120,299,690,554,239. Finally, I added -12,439,149, which gives the final answer:
120,299,678,115,090
- id: resp_6858da07b69081a2930ce75948396dac029fe7fc17fd78d0
- created_at: 1750653447.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-4.1-2025-04-14
- object: response
- output: [ResponseOutputMessage(id='msg_6858da07fce881a297dc7606f752bbdd029fe7fc17fd78d0', content=[ResponseOutputText(annotations=[], text='First, I added 1,258,585,825,128 to 34,959,234,595 to get 1,293,545,059,723.\nNext, I multiplied that result by 93, getting 120,299,690,554,239.\nFinally, I added -12,439,149, which gives the final answer:\n120,299,678,115,090', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name='add', parameters={'type': 'object', 'properties': {'x': {'type': 'integer', 'description': ''}, 'y': {'type': 'integer', 'description': ''}}, 'required': ['x', 'y']}, strict=True, type='function', description='adds x and y.:- type: integer'), FunctionTool(name='mul', parameters={'type': 'object', 'properties': {'x': {'type': 'integer', 'description': ''}, 'y': {'type': 'integer', 'description': ''}}, 'required': ['x', 'y']}, strict=True, type='function', description='multiplies x and y.:- type: integer')]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- previous_response_id: None
- prompt: None
- reasoning: Reasoning(effort=None, generate_summary=None, summary=None)
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’))
- truncation: disabled
- usage: ResponseUsage(input_tokens=213, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=86, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=299)
- user: None
- store: True
Error Conditions: Out of Iterations, Exception During Tool Invocation
def mydiv(a:float, b:float):
    "Divide two numbers"
    return a / b
chat = Chat(model, tools=[mydiv])
r = chat.toolloop('Please calculate this sequence using your tools: 43/23454; 652/previous result; 6843/previous result; 321/previous result', max_steps=2)
for o in r: show(o)
[ResponseFunctionToolCall(arguments='{"a":43,"b":23454}', call_id='call_nECnFfd0fya1CD2gX1K7amL6', name='mydiv', type='function_call', id='fc_6858da2a8cf8819ea08fccfdbc0a904501e130a6ba22408a', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_nECnFfd0fya1CD2gX1K7amL6',
'output': '0.001833375969983798'}
Here is the progress so far:
- The result of 43 divided by 23,454 is approximately 0.00183338.
Next steps (NOT YET COMPLETED):
- To continue, you would divide 652 by the previous result (0.00183338).
- Take that result and divide 6,843 by it.
- Finally, divide 321 by the last result.
If you wish, you can provide another tool use session, or I can help you calculate the remaining steps manually or explain how to do them with a calculator. Let me know how you’d like to proceed!
- id: resp_6858da2b1024819eb16934c3ea9a1a1801e130a6ba22408a
- created_at: 1750653483.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-4.1-2025-04-14
- object: response
- output: [ResponseOutputMessage(id='msg_6858da2b6340819e9b7e018c6f63430a01e130a6ba22408a', content=[ResponseOutputText(annotations=[], text='Here is the progress so far:\n- The result of 43 divided by 23,454 is approximately 0.00183338.\n\nNext steps (NOT YET COMPLETED):\n- To continue, you would divide 652 by the previous result (0.00183338).\n- Take that result and divide 6,843 by it.\n- Finally, divide 321 by the last result.\n\nIf you wish, you can provide another tool use session, or I can help you calculate the remaining steps manually or explain how to do them with a calculator. Let me know how you\'d like to proceed!', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name='mydiv', parameters={'type': 'object', 'properties': {'a': {'type': 'number', 'description': ''}, 'b': {'type': 'number', 'description': ''}}, 'required': ['a', 'b']}, strict=True, type='function', description='Divide two numbers')]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- previous_response_id: None
- prompt: None
- reasoning: Reasoning(effort=None, generate_summary=None, summary=None)
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’))
- truncation: disabled
- usage: ResponseUsage(input_tokens=150, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=125, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=275)
- user: None
- store: True
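With max_steps=2 the loop stopped after the single division, as the summary above notes. For reference, the full sequence is easy to check in plain Python (the first value matches the tool output above):

# Reference computation, independent of the model:
r1 = 43/23454; r2 = 652/r1; r3 = 6843/r2; r4 = 321/r3
r1, r2, r3, r4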
This tests the `raise_on_err=False` change to the `toolslm.call_func` invocation. We should see the error returned as a string instead of a crash:
chat = Chat(model, tools=[mydiv])
r = chat.toolloop('Try dividing 1 by 0 and see what the error result is')
for o in r: show(o)
[ResponseFunctionToolCall(arguments='{"a":1,"b":0}', call_id='call_hQG5F4wRH6iMkeql8b93QAJK', name='mydiv', type='function_call', id='fc_6858da2e381081a3afeb279770217ff4007702c7bd215b0b', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_hQG5F4wRH6iMkeql8b93QAJK',
'output': 'Traceback (most recent call last):\n File "/Users/jhoward/aai-ws/toolslm/toolslm/funccall.py", line 203, in call_func\n try: return func(**fc_inputs)\n ^^^^^^^^^^^^^^^^^\n File "/var/folders/51/b2_szf2945n072c0vj2cyty40000gn/T/ipykernel_90905/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
When trying to divide 1 by 0, a ZeroDivisionError occurs with the message: division by zero. This error is raised because division by zero is undefined in mathematics and programming.
- id: resp_6858da2ea8cc81a3ba912ee9b4c026be007702c7bd215b0b
- created_at: 1750653486.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-4.1-2025-04-14
- object: response
- output: [ResponseOutputMessage(id='msg_6858da2ef23c81a3b54d3b81a28a94a5007702c7bd215b0b', content=[ResponseOutputText(annotations=[], text='When trying to divide 1 by 0, a ZeroDivisionError occurs with the message: division by zero. This error is raised because division by zero is undefined in mathematics and programming.', type='output_text', logprobs=None)], role='assistant', status='completed', type='message')]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: [FunctionTool(name='mydiv', parameters={'type': 'object', 'properties': {'a': {'type': 'number', 'description': ''}, 'b': {'type': 'number', 'description': ''}}, 'required': ['a', 'b']}, strict=True, type='function', description='Divide two numbers')]
- top_p: 1.0
- background: False
- max_output_tokens: 4096
- previous_response_id: None
- prompt: None
- reasoning: Reasoning(effort=None, generate_summary=None, summary=None)
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’))
- truncation: disabled
- usage: ResponseUsage(input_tokens=198, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=40, output_tokens_details=OutputTokensDetails(reasoning_tokens=0), total_tokens=238)
- user: None
- store: True