import os

Tool loop
model = models[-1]

# Mock order "database": order ID -> order record.
orders = {
    "O1": {"id": "O1", "product": "Widget A", "quantity": 2, "price": 19.99, "status": "Shipped"},
    "O2": {"id": "O2", "product": "Gadget B", "quantity": 1, "price": 49.99, "status": "Processing"},
    "O3": {"id": "O3", "product": "Gadget B", "quantity": 2, "price": 49.99, "status": "Shipped"},
}
# Mock customer "database": customer ID -> profile, including that customer's orders.
customers = {
    "C1": {
        "name": "John Doe", "email": "john@example.com",
        "phone": "123-456-7890", "orders": [orders['O1'], orders['O2']],
    },
    "C2": {
        "name": "Jane Smith", "email": "jane@example.com",
        "phone": "987-654-3210", "orders": [orders['O3']],
    },
}

def get_customer_info(
    customer_id:str # ID of the customer
): # Customer's name, email, phone number, and list of orders
    "Retrieves a customer's information and their orders based on the customer ID"
    print(f'- Retrieving customer {customer_id}')
    if customer_id in customers: return customers[customer_id]
    return "Customer not found"
def get_order_details(
    order_id:str # ID of the order
): # Order's ID, product name, quantity, price, and order status
    "Retrieves the details of a specific order based on the order ID"
    print(f'- Retrieving order {order_id}')
    # Fall back to the same sentinel string the tool caller expects on a miss
    if order_id in orders: return orders[order_id]
    return "Order not found"
def cancel_order(
    order_id:str # ID of the order to cancel
)->bool: # True if the cancellation is successful
    "Cancels an order based on the provided order ID"
    print(f'- Cancelling order {order_id}')
    # Unknown order: report failure rather than creating a phantom record
    if order_id not in orders: return False
    orders[order_id]['status'] = 'Cancelled'
    return True

# Hand all three functions to the model as callable tools
tools = [get_customer_info, get_order_details, cancel_order]
chat = Chat(model, tools=tools)
r = chat('Can you tell me the email address for customer C2?')
# Output: - Retrieving customer C2
# A second call with no prompt sends the pending tool result back to the model
r = chat()
rThe email address for customer C2 is jane@example.com.
- content: {'parts': [{'text': 'The email address for customer C2 is jane@example.com. '}], 'role': 'model'}
- finish_reason: 1
- index: 0
- safety_ratings: [{'category': 9, 'probability': 1, 'blocked': False}, {'category': 8, 'probability': 1, 'blocked': False}, {'category': 7, 'probability': 1, 'blocked': False}, {'category': 10, 'probability': 1, 'blocked': False}]
- token_count: 0
- grounding_attributions: []
- prompt_token_count: 292
- candidates_token_count: 14
- total_token_count: 306
- cached_content_token_count: 0
# Fresh chat: a plain call follows at most one tool step, so a multi-step
# request like "cancel all orders" needs the toolloop shown below
chat = Chat(model, tools=tools)
r = chat('Please cancel all orders for customer C1 for me.')- Retrieving customer C1
Chat.toolloop
Chat.toolloop (pr, max_steps=10, trace_func:Optional[callable]=None, cont_func:Optional[callable]=<function noop>, generation_config:generation_types.GenerationConfigType|None=None, safety_settings:safety_types.SafetySettingOptions|None=None, stream:bool=False, tools:content_types.FunctionLibraryType|None=None, tool_config:content_types.ToolConfigType|None=None, request_options:helper_types.RequestOptionsType|None=None)
Add prompt pr to dialog and get a response from the model, automatically following up with tool_use messages
| Type | Default | Details | |
|---|---|---|---|
| pr | Prompt to pass to model | ||
| max_steps | int | 10 | Maximum number of tool requests to loop through |
| trace_func | Optional | None | Function to trace tool use steps (e.g print) |
| cont_func | Optional | noop | Function that stops loop if returns False |
| generation_config | generation_types.GenerationConfigType | None | None | |
| safety_settings | safety_types.SafetySettingOptions | None | None | |
| stream | bool | False | |
| tools | content_types.FunctionLibraryType | None | None | |
| tool_config | content_types.ToolConfigType | None | None | |
| request_options | helper_types.RequestOptionsType | None | None |
Exported source
@patch
@delegates(genai.GenerativeModel.generate_content)
def toolloop(self:Chat,
             pr, # Prompt to pass to model
             max_steps=10, # Maximum number of tool requests to loop through
             trace_func:Optional[callable]=None, # Function to trace tool use steps (e.g `print`)
             cont_func:Optional[callable]=noop, # Function that stops loop if returns False
             **kwargs):
    "Add prompt `pr` to dialog and get a response from the model, automatically following up with `tool_use` messages"
    r = self(pr, **kwargs)
    for i in range(max_steps):
        pt = r.parts[0]
        # No function call in the reply: the model is done using tools
        if not pt.function_call: break
        if trace_func: trace_func(r)
        # Calling with no prompt sends the pending tool result back to the model
        r = self(**kwargs)
        # self.h[-2] is presumably the just-executed tool-use message — cont_func
        # returning False on it aborts the loop (guard against explicit None)
        if not (cont_func or noop)(self.h[-2]): break
    if trace_func: trace_func(r)
    return r

chat = Chat(model, tools=tools)
# toolloop follows up after each tool call automatically; trace_func=print
# logs every intermediate model response
r = chat.toolloop('Please cancel all orders for customer C1 for me.', trace_func=print)
r- Retrieving customer C1
response:
GenerateContentResponse(
done=True,
iterator=None,
result=protos.GenerateContentResponse({
"candidates": [
{
"content": {
"parts": [
{
"function_call": {
"name": "get_customer_info",
"args": {
"customer_id": "C1"
}
}
}
],
"role": "model"
},
"finish_reason": "STOP",
"index": 0,
"safety_ratings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usage_metadata": {
"prompt_token_count": 162,
"candidates_token_count": 20,
"total_token_count": 182
}
}),
)
- Cancelling order O2
response:
GenerateContentResponse(
done=True,
iterator=None,
result=protos.GenerateContentResponse({
"candidates": [
{
"content": {
"parts": [
{
"function_call": {
"name": "cancel_order",
"args": {
"order_id": "O2"
}
}
}
],
"role": "model"
},
"finish_reason": "STOP",
"index": 0,
"safety_ratings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usage_metadata": {
"prompt_token_count": 336,
"candidates_token_count": 18,
"total_token_count": 354
}
}),
)
response:
GenerateContentResponse(
done=True,
iterator=None,
result=protos.GenerateContentResponse({
"candidates": [
{
"content": {
"parts": [
{
"text": "OK. I have cancelled order O2 for customer C1. Is there anything else? \n"
}
],
"role": "model"
},
"finish_reason": "STOP",
"index": 0,
"safety_ratings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usage_metadata": {
"prompt_token_count": 371,
"candidates_token_count": 18,
"total_token_count": 389
}
}),
)
OK. I have cancelled order O2 for customer C1. Is there anything else?
- content: {'parts': [{'text': 'OK. I have cancelled order O2 for customer C1. Is there anything else? '}], 'role': 'model'}
- finish_reason: 1
- index: 0
- safety_ratings: [{'category': 9, 'probability': 1, 'blocked': False}, {'category': 8, 'probability': 1, 'blocked': False}, {'category': 7, 'probability': 1, 'blocked': False}, {'category': 10, 'probability': 1, 'blocked': False}]
- token_count: 0
- grounding_attributions: []
- prompt_token_count: 371
- candidates_token_count: 18
- total_token_count: 389
- cached_content_token_count: 0
chat.toolloop('What is the status of order O2?')
- Retrieving order O2
Order O2 is now cancelled.
- content: {'parts': [{'text': 'Order O2 is now cancelled. '}], 'role': 'model'}
- finish_reason: 1
- index: 0
- safety_ratings: [{'category': 9, 'probability': 1, 'blocked': False}, {'category': 8, 'probability': 1, 'blocked': False}, {'category': 7, 'probability': 1, 'blocked': False}, {'category': 10, 'probability': 1, 'blocked': False}]
- token_count: 0
- grounding_attributions: []
- prompt_token_count: 485
- candidates_token_count: 7
- total_token_count: 492
- cached_content_token_count: 0