from IPython.display import display, Markdown, clear_output
from pprint import pprint
Tool loop
' '.join(models)
'o1-preview o1-mini gpt-4o gpt-4o-mini gpt-4-turbo gpt-4 gpt-4-32k gpt-3.5-turbo gpt-3.5-turbo-instruct o1 o3-mini chatgpt-4o-latest o1-pro o3 o4-mini'
# Pick the model used throughout this notebook (index 14 is 'o4-mini' in the list above).
model = models[14]
model
'o4-mini'
Sample Data
def _get_orders_customers():
= {
orders "O1": dict(id="O1", product="Widget A", quantity=2, price=19.99, status="Shipped"),
"O2": dict(id="O2", product="Gadget B", quantity=1, price=49.99, status="Processing"),
"O3": dict(id="O3", product="Gadget B", quantity=2, price=49.99, status="Shipped")}
= {
customers "C1": dict(name="John Doe", email="john@example.com", phone="123-456-7890",
=[orders['O1'], orders['O2']]),
orders"C2": dict(name="Jane Smith", email="jane@example.com", phone="987-654-3210",
=[orders['O3']])
orders
}return orders, customers
= _get_orders_customers() orders, customers
def get_customer_info(
str # ID of the customer
customer_id:# Customer's name, email, phone number, and list of orders
): "Retrieves a customer's information and their orders based on the customer ID"
print(f'- Retrieving customer {customer_id}')
return customers.get(customer_id, "Customer not found")
def get_order_details(
str # ID of the order
order_id:# Order's ID, product name, quantity, price, and order status
): "Retrieves the details of a specific order based on the order ID"
print(f'- Retrieving order {order_id}')
return orders.get(order_id, "Order not found")
def cancel_order(
str # ID of the order to cancel
order_id:->bool: # True if the cancellation is successful
)"Cancels an order based on the provided order ID"
print(f'- Cancelling order {order_id}')
if order_id not in orders: return False
'status'] = 'Cancelled'
orders[order_id][return True
# Register the three customer-service tools and ask a question that needs one tool call.
tools = [get_customer_info, get_order_details, cancel_order]
chat = Chat(model, tools=tools)
r = chat('Can you tell me the email address for customer C2?')
- Retrieving customer C2
# The model stopped to request a tool, so finish_reason is 'tool_calls', not 'stop'.
choice = r.choices[0]
print(choice.finish_reason)
choice
tool_calls
Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_rIx4jlrIc9YOYt04BuJQUuB3', function=Function(arguments='{"customer_id":"C2"}', name='get_customer_info'), type='function')]))
# Calling chat() with no prompt sends the pending tool result back to the model.
r = chat()
r
The email address for customer C2 (Jane Smith) is: jane@example.com.
- id: chatcmpl-BjA0oSZlb6qWEN6nhC9itpiEsd0F5
- choices: [Choice(finish_reason=‘stop’, index=0, logprobs=None, message=ChatCompletionMessage(content=‘The email address for customer C2 (Jane Smith) is: jane@example.com.’, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750102946
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=35, prompt_tokens=246, total_tokens=281, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
# A request that needs several sequential tool calls — plain Chat only does the first one.
chat = Chat(model, tools=tools)
r = chat('Please cancel all orders for customer C1 for me.')
print(r.choices[0].finish_reason)
find_block(r)
- Retrieving customer C1
tool_calls
ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_5b64GsUTmKMCqb2mtqtsUXp6', function=Function(arguments='{"customer_id":"C1"}', name='get_customer_info'), type='function')])
toolloop
implementation
Chat.toolloop
Chat.toolloop (pr, max_steps=10, cont_func:<built- infunctioncallable>=<function noop>, final_prompt='You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.', stream:bool=False, audio:Optional[ChatComple tionAudioParam]|NotGiven=NOT_GIVEN, frequency_penalty:Optional[float]|NotGiven=NOT_GIVEN, func tion_call:completion_create_params.FunctionCall|NotGiven=N OT_GIVEN, functions:Iterable[completion_create_params.Func tion]|NotGiven=NOT_GIVEN, logit_bias:Optional[Dict[str,int]]|NotGiven=NOT_GIVEN, logprobs:Optional[bool]|NotGiven=NOT_GIVEN, max_completion_tokens:Optional[int]|NotGiven=NOT_GIVEN, max_tokens:Optional[int]|NotGiven=NOT_GIVEN, metadata:Optional[Metadata]|NotGiven=NOT_GIVEN, modalities :"Optional[List[Literal['text','audio']]]|NotGiven"=NOT_GI VEN, n:Optional[int]|NotGiven=NOT_GIVEN, parallel_tool_calls:bool|NotGiven=NOT_GIVEN, prediction:Op tional[ChatCompletionPredictionContentParam]|NotGiven=NOT_ GIVEN, presence_penalty:Optional[float]|NotGiven=NOT_GIVEN, reaso ning_effort:Optional[ReasoningEffort]|NotGiven=NOT_GIVEN, response_format:completion_create_params.ResponseFormat|No tGiven=NOT_GIVEN, seed:Optional[int]|NotGiven=NOT_GIVEN, s ervice_tier:"Optional[Literal['auto','default','flex','sca le']]|NotGiven"=NOT_GIVEN, stop:Union[Optional[str],List[s tr],None]|NotGiven=NOT_GIVEN, store:Optional[bool]|NotGiven=NOT_GIVEN, stream_options:Op tional[ChatCompletionStreamOptionsParam]|NotGiven=NOT_GIVE N, temperature:Optional[float]|NotGiven=NOT_GIVEN, tool_ch oice:ChatCompletionToolChoiceOptionParam|NotGiven=NOT_GIVE N, tools:Iterable[ChatCompletionToolParam]|NotGiven=NOT_GI VEN, top_logprobs:Optional[int]|NotGiven=NOT_GIVEN, top_p:Optional[float]|NotGiven=NOT_GIVEN, user:str|NotGiven=NOT_GIVEN, web_search_options:completion _create_params.WebSearchOptions|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, 
extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Add prompt `pr` to dialog and get a response from Claude, automatically following up with `tool_use` messages.
Type | Default | Details | |
---|---|---|---|
pr | Prompt to pass to Claude | ||
max_steps | int | 10 | Maximum number of tool requests to loop through |
cont_func | callable | noop | Function that stops loop if returns False |
final_prompt | str | You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed. | Prompt to add if last message is a tool call |
stream | bool | False | Stream response? |
audio | Optional[ChatCompletionAudioParam] | NotGiven | NOT_GIVEN | |
frequency_penalty | Optional[float] | NotGiven | NOT_GIVEN | |
function_call | completion_create_params.FunctionCall | NotGiven | NOT_GIVEN | |
functions | Iterable[completion_create_params.Function] | NotGiven | NOT_GIVEN | |
logit_bias | Optional[Dict[str, int]] | NotGiven | NOT_GIVEN | |
logprobs | Optional[bool] | NotGiven | NOT_GIVEN | |
max_completion_tokens | Optional[int] | NotGiven | NOT_GIVEN | |
max_tokens | Optional[int] | NotGiven | NOT_GIVEN | |
metadata | Optional[Metadata] | NotGiven | NOT_GIVEN | |
modalities | Optional[List[Literal[‘text’, ‘audio’]]] | NotGiven | NOT_GIVEN | |
n | Optional[int] | NotGiven | NOT_GIVEN | |
parallel_tool_calls | bool | NotGiven | NOT_GIVEN | |
prediction | Optional[ChatCompletionPredictionContentParam] | NotGiven | NOT_GIVEN | |
presence_penalty | Optional[float] | NotGiven | NOT_GIVEN | |
reasoning_effort | Optional[ReasoningEffort] | NotGiven | NOT_GIVEN | |
response_format | completion_create_params.ResponseFormat | NotGiven | NOT_GIVEN | |
seed | Optional[int] | NotGiven | NOT_GIVEN | |
service_tier | Optional[Literal[‘auto’, ‘default’, ‘flex’, ‘scale’]] | NotGiven | NOT_GIVEN | |
stop | Union[Optional[str], List[str], None] | NotGiven | NOT_GIVEN | |
store | Optional[bool] | NotGiven | NOT_GIVEN | |
stream_options | Optional[ChatCompletionStreamOptionsParam] | NotGiven | NOT_GIVEN | |
temperature | Optional[float] | NotGiven | NOT_GIVEN | |
tool_choice | ChatCompletionToolChoiceOptionParam | NotGiven | NOT_GIVEN | |
tools | Iterable[ChatCompletionToolParam] | NotGiven | NOT_GIVEN | |
top_logprobs | Optional[int] | NotGiven | NOT_GIVEN | |
top_p | Optional[float] | NotGiven | NOT_GIVEN | |
user | str | NotGiven | NOT_GIVEN | |
web_search_options | completion_create_params.WebSearchOptions | NotGiven | NOT_GIVEN | |
extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
extra_query | Query | None | None | |
extra_body | Body | None | None | |
timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
Exported source
= "You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed." _final_prompt
Exported source
@patch
@delegates(Chat.__call__)
def toolloop(self:Chat,
             pr, # Prompt to pass to Claude
             max_steps=10, # Maximum number of tool requests to loop through
             cont_func:callable=noop, # Function that stops loop if returns False
             final_prompt=_final_prompt, # Prompt to add if last message is a tool call
             **kwargs):
    "Add prompt `pr` to dialog and get a response from Claude, automatically following up with `tool_use` messages"
    class _Loop:
        def __iter__(a):
            init_n = len(self.h)  # history length before the loop, so `value` can slice out what was added
            r = self(pr, **kwargs)
            yield r
            # When a tool ran, the tool-result message is also in `last`; surface it to the caller.
            if len(self.last)>1: yield self.last[1]
            for i in range(max_steps-1):
                if r.choices[0].finish_reason != 'tool_calls': break
                # On the last permitted step, ask the model to wrap up rather than keep calling tools.
                r = self(final_prompt if i==max_steps-2 else None, **kwargs)
                yield r
                if len(self.last)>1: yield self.last[1]
                # Let the caller halt the loop based on the most recent history entries.
                if not cont_func(*self.h[-3:]): break
            a.value = self.h[init_n+1:]  # everything the loop added after the initial user prompt
    return _Loop()
Test Customer Dataset
# Same single-tool question as before, but driven through toolloop's iterator.
chat = Chat(model, tools=tools)
pr = 'Can you tell me the email address for customer C1?'
r = chat.toolloop(pr)
for o in r: display(o)
- Retrieving customer C1
- id: chatcmpl-BjA16YUSVRETf3MKfZhvOSToAGL3u
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_H0kNomg5F0wVqiR8EDSu2aWn’, function=Function(arguments=‘{“customer_id”:“C1”}’, name=‘get_customer_info’), type=‘function’)]))]
- created: 1750102964
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=26, prompt_tokens=147, total_tokens=173, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': "
"'123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', "
"'quantity': 2, 'price': 19.99, 'status': 'Cancelled'}, {'id': "
"'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, "
"'status': 'Cancelled'}]}",
'name': 'get_customer_info',
'role': 'tool',
'tool_call_id': 'call_H0kNomg5F0wVqiR8EDSu2aWn'}
The email address for customer C1 is john@example.com.
- id: chatcmpl-BjA18DXRyQQq88cKBavOGEn4SCkJC
- choices: [Choice(finish_reason=‘stop’, index=0, logprobs=None, message=ChatCompletionMessage(content=‘The email address for customer C1 is john@example.com.’, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750102966
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=30, prompt_tokens=278, total_tokens=308, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
pprint(r.value)
[ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_H0kNomg5F0wVqiR8EDSu2aWn', function=Function(arguments='{"customer_id":"C1"}', name='get_customer_info'), type='function')]),
{'content': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': "
"'123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', "
"'quantity': 2, 'price': 19.99, 'status': 'Cancelled'}, {'id': "
"'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, "
"'status': 'Cancelled'}]}",
'name': 'get_customer_info',
'role': 'tool',
'tool_call_id': 'call_H0kNomg5F0wVqiR8EDSu2aWn'},
ChatCompletionMessage(content='The email address for customer C1 is john@example.com.', refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=None)]
# Reset the sample data, then check a single order's status via the loop.
orders, customers = _get_orders_customers()
chat = Chat(model, tools=tools)
r = chat.toolloop('What is the status of order O2?')
for o in r: display(o)
- Retrieving order O2
- id: chatcmpl-BjA1AQNdi4SF9WAxeT66NDCTXhQwE
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_7I3ugfxBZbhApKLgkhpH3pQf’, function=Function(arguments=‘{“order_id”:“O2”}’, name=‘get_order_details’), type=‘function’)]))]
- created: 1750102968
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=26, prompt_tokens=144, total_tokens=170, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': "{'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': "
"49.99, 'status': 'Processing'}",
'name': 'get_order_details',
'role': 'tool',
'tool_call_id': 'call_7I3ugfxBZbhApKLgkhpH3pQf'}
The status of order O2 is “Processing.”
- id: chatcmpl-BjA1C5TznRspip7U7eAl7Re7xUIey
- choices: [Choice(finish_reason=‘stop’, index=0, logprobs=None, message=ChatCompletionMessage(content=‘The status of order O2 is “Processing.”’, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750102970
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=22, prompt_tokens=211, total_tokens=233, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
# Multi-step request: toolloop keeps following up until all tool calls are done.
r = chat.toolloop('Please cancel all orders for customer C1 for me.')
for o in r: display(o)
- Retrieving customer C1
- id: chatcmpl-BjA1DkRjKa0BOV0xQKmgeaHISzcbT
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_rGJnn2TY03odl0yot4Lcbk6R’, function=Function(arguments=‘{“customer_id”:“C1”}’, name=‘get_customer_info’), type=‘function’)]))]
- created: 1750102971
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=218, prompt_tokens=242, total_tokens=460, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=192, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': "
"'123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', "
"'quantity': 2, 'price': 19.99, 'status': 'Shipped'}, {'id': "
"'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, "
"'status': 'Processing'}]}",
'name': 'get_customer_info',
'role': 'tool',
'tool_call_id': 'call_rGJnn2TY03odl0yot4Lcbk6R'}
- Cancelling order O1
- id: chatcmpl-BjA1HTq3pyutXIeCIP47aDSyPWqs9
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_JWB8rjsYpoWBdmyz6MPPPVZU’, function=Function(arguments=‘{“order_id”:“O1”}’, name=‘cancel_order’), type=‘function’)]))]
- created: 1750102975
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=217, prompt_tokens=374, total_tokens=591, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=192, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': 'True',
'name': 'cancel_order',
'role': 'tool',
'tool_call_id': 'call_JWB8rjsYpoWBdmyz6MPPPVZU'}
- Cancelling order O2
- id: chatcmpl-BjA1Khf8gjWTUV82pzgxDs4zJ0pFp
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_Eq2CHHP881RPwaoOvuIjlfwL’, function=Function(arguments=‘{“order_id”:“O2”}’, name=‘cancel_order’), type=‘function’)]))]
- created: 1750102978
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=217, prompt_tokens=405, total_tokens=622, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=192, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': 'True',
'name': 'cancel_order',
'role': 'tool',
'tool_call_id': 'call_Eq2CHHP881RPwaoOvuIjlfwL'}
Both orders for customer C1 have been successfully canceled:
- Order O1: Canceled
- Order O2: Canceled
Let me know if there’s anything else I can help you with!
- id: chatcmpl-BjA1PFmlPuLUomaKvYxaJOlgCtABE
- choices: [Choice(finish_reason=‘stop’, index=0, logprobs=None, message=ChatCompletionMessage(content=‘Both orders for customer C1 have been successfully canceled:- Order O1: Canceled - Order O2: Canceledme know if there’s anything else I can help you with!’, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750102983
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=53, prompt_tokens=436, total_tokens=489, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
for o in chat.toolloop('What is the status of order O2?'): display(o)
- Retrieving order O2
- id: chatcmpl-BjA1QvyErrb3Rg9yra6vC8KxlPg65
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_75RFqe2Nm8sxT5SxsDhn5Fyz’, function=Function(arguments=‘{“order_id”:“O2”}’, name=‘get_order_details’), type=‘function’)]))]
- created: 1750102984
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=218, prompt_tokens=496, total_tokens=714, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=192, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': "{'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': "
"49.99, 'status': 'Cancelled'}",
'name': 'get_order_details',
'role': 'tool',
'tool_call_id': 'call_75RFqe2Nm8sxT5SxsDhn5Fyz'}
The status of order O2 is now “Cancelled.”
- id: chatcmpl-BjA1S4PoZEyLOKEU4KDl24hTYUTMx
- choices: [Choice(finish_reason=‘stop’, index=0, logprobs=None, message=ChatCompletionMessage(content=‘The status of order O2 is now “Cancelled.”’, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750102986
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=23, prompt_tokens=563, total_tokens=586, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
Test Math Example
def add(x: int, y: int) -> int:
    "adds x and y."
    total = x + y
    return total
def mul(x: int, y: int) -> int:
    "multiplies x and y."
    product = x * y
    return product
# Chained arithmetic the model must do via the add/mul tools.
chat = Chat(model, tools=[add, mul])
pr = 'Can you add 1258585825128 to 34959234595, multiply by 93, and then add -12439149?'
r = chat.toolloop(pr)
for o in r: display(o)
- id: chatcmpl-BjA1U9JzEL7oTfQfq3CNyksPDzt83
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_zlFxReIvYCeqMETgqcpMbqqu’, function=Function(arguments=‘{“x”:1258585825128,“y”:34959234595}’, name=‘add’), type=‘function’)]))]
- created: 1750102988
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=1121, prompt_tokens=112, total_tokens=1233, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=1088, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': '1293545059723',
'name': 'add',
'role': 'tool',
'tool_call_id': 'call_zlFxReIvYCeqMETgqcpMbqqu'}
- id: chatcmpl-BjA1efe2c7CKKX8F0EcIa19GTSvCG
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_WZfBhr0j9G3JWhue9lFQSK86’, function=Function(arguments=‘{“x”:1293545059723,“y”:93}’, name=‘mul’), type=‘function’)]))]
- created: 1750102998
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=24, prompt_tokens=154, total_tokens=178, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': '120299690554239',
'name': 'mul',
'role': 'tool',
'tool_call_id': 'call_WZfBhr0j9G3JWhue9lFQSK86'}
First, we computed:
- 1258585825128 + 34 959 234 595 = 1 293 545 059 723
- 1 293 545 059 723 × 93 = 120 299 690 554 239
- 120 299 690 554 239 + (–12 439 149) = 120 299 678 115 090
So the final result is 120299678115090.
- id: chatcmpl-BjA1f2IVdmLXuktSK6et0oJoibPWn
- choices: [Choice(finish_reason=‘stop’, index=0, logprobs=None, message=ChatCompletionMessage(content=‘First, we computed:. 1258585825128 + 34 959 234 595 = 1 293 545 059 723 . 1 293 545 059 723 × 93 = 120 299 690 554 239 . 120 299 690 554 239 + (–12 439 149) = 120 299 678 115 090 the final result is 120299678115090.’, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750102999
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=316, prompt_tokens=193, total_tokens=509, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=192, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
1258585825128 + 34959234595) * 93 - 12439149 (
120299678115090
Test Error Conditions: Out of Iterations, Exception During Tool Invocation
def mydiv(a:float, b:float):
    "Divide two numbers"
    quotient = a / b
    return quotient
# Force the loop to run out of iterations: four divisions but only max_steps=2.
chat = Chat(model, tools=[mydiv])
r = chat.toolloop('Please calculate this sequence using your tools: 43/23454; 652/previous result; 6843/previous result; 321/previous result', max_steps=2)
for o in r: display(o)
- id: chatcmpl-BjA1krNKBzPL83FEgM6jLlREsezt3
- choices: [Choice(finish_reason=‘length’, index=0, logprobs=None, message=ChatCompletionMessage(content=’‘, refusal=None, role=’assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750103004
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=4096, prompt_tokens=77, total_tokens=4173, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=4096, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
This tests the change to pass `raise_on_err=False` to the `toolslm.call_func` invocation. We should see this return an error as a string instead of crash:
# Trigger a ZeroDivisionError inside the tool; the traceback should come back as a string.
chat = Chat(model, tools=[mydiv])
r = chat.toolloop('Try dividing 1 by 0 and see what the error result is')
for o in r: display(o)
- id: chatcmpl-BjA2bgViq8n8CPimn1niDb8gVfYuX
- choices: [Choice(finish_reason=‘tool_calls’, index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id=‘call_dNL7NWhb44HN7HrvanBNqxfm’, function=Function(arguments=‘{“a”:1,“b”:0}’, name=‘mydiv’), type=‘function’)]))]
- created: 1750103057
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=155, prompt_tokens=59, total_tokens=214, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=128, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))
{ 'content': 'Traceback (most recent call last):\n'
' File '
'"/home/austin/projects/aai-ws/toolslm/toolslm/funccall.py", line '
'198, in call_func\n'
' try: return func(**fc_inputs)\n'
' ^^^^^^^^^^^^^^^^^\n'
' File "/tmp/ipykernel_199809/246724137.py", line 3, in mydiv\n'
' return a / b\n'
' ~~^~~\n'
'ZeroDivisionError: division by zero\n',
'name': 'mydiv',
'role': 'tool',
'tool_call_id': 'call_dNL7NWhb44HN7HrvanBNqxfm'}
When attempting to divide 1 by 0, a ZeroDivisionError is raised with the message: “division by zero.”
- id: chatcmpl-BjA2eXvC8ciyiOF3ez7jqCXshr1Rd
- choices: [Choice(finish_reason=‘stop’, index=0, logprobs=None, message=ChatCompletionMessage(content=‘When attempting to divide 1 by 0, a ZeroDivisionError is raised with the message: “division by zero.”’, refusal=None, role=‘assistant’, annotations=[], audio=None, function_call=None, tool_calls=None))]
- created: 1750103060
- model: o4-mini-2025-04-16
- object: chat.completion
- service_tier: default
- system_fingerprint: None
- usage: CompletionUsage(completion_tokens=37, prompt_tokens=187, total_tokens=224, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))