import os
## Tool loop
= models[-1] model
= {
orders "O1": dict(id="O1", product="Widget A", quantity=2, price=19.99, status="Shipped"),
"O2": dict(id="O2", product="Gadget B", quantity=1, price=49.99, status="Processing"),
"O3": dict(id="O3", product="Gadget B", quantity=2, price=49.99, status="Shipped")}
= {
customers "C1": dict(name="John Doe", email="john@example.com", phone="123-456-7890",
=[orders['O1'], orders['O2']]),
orders"C2": dict(name="Jane Smith", email="jane@example.com", phone="987-654-3210",
=[orders['O3']])
orders }
def get_customer_info(
str # ID of the customer
customer_id:# Customer's name, email, phone number, and list of orders
): "Retrieves a customer's information and their orders based on the customer ID"
print(f'- Retrieving customer {customer_id}')
return customers.get(customer_id, "Customer not found")
def get_order_details(
str # ID of the order
order_id:# Order's ID, product name, quantity, price, and order status
): "Retrieves the details of a specific order based on the order ID"
print(f'- Retrieving order {order_id}')
return orders.get(order_id, "Order not found")
def cancel_order(
str # ID of the order to cancel
order_id:->bool: # True if the cancellation is successful
)"Cancels an order based on the provided order ID"
print(f'- Cancelling order {order_id}')
if order_id not in orders: return False
'status'] = 'Cancelled'
orders[order_id][return True
= [get_customer_info, get_order_details, cancel_order] tools
= Chat(model, tools=tools) chat
= chat('Can you tell me the email address for customer C2?') r
- Retrieving customer C2
= chat()
r r
The email address for customer C2 is jane@example.com.
- content: {‘parts’: [{‘text’: ‘The email address for customer C2 is jane@example.com. ’}], ‘role’: ‘model’}
- finish_reason: 1
- index: 0
- safety_ratings: [{‘category’: 9, ‘probability’: 1, ‘blocked’: False}, {‘category’: 8, ‘probability’: 1, ‘blocked’: False}, {‘category’: 7, ‘probability’: 1, ‘blocked’: False}, {‘category’: 10, ‘probability’: 1, ‘blocked’: False}]
- token_count: 0
- grounding_attributions: []
- prompt_token_count: 292
- candidates_token_count: 14
- total_token_count: 306
- cached_content_token_count: 0
= Chat(model, tools=tools)
chat = chat('Please cancel all orders for customer C1 for me.') r
- Retrieving customer C1
Chat.toolloop
Chat.toolloop (pr, max_steps=10, trace_func:Optional[callable]=None, cont_func:Optional[callable]=<function noop>, generation_config:generation_types.GenerationConfigType|None=None, safety_settings:safety_types.SafetySettingOptions|None=None, stream:bool=False, tools:content_types.FunctionLibraryType|None=None, tool_config:content_types.ToolConfigType|None=None, request_options:helper_types.RequestOptionsType|None=None)
Add prompt `pr` to dialog and get a response from the model, automatically following up with `tool_use` messages.
| | Type | Default | Details |
|---|---|---|---|
| pr | | | Prompt to pass to model |
| max_steps | int | 10 | Maximum number of tool requests to loop through |
| trace_func | Optional | None | Function to trace tool use steps (e.g. `print`) |
| cont_func | Optional | noop | Function that stops loop if returns False |
| generation_config | generation_types.GenerationConfigType \| None | None | |
| safety_settings | safety_types.SafetySettingOptions \| None | None | |
| stream | bool | False | |
| tools | content_types.FunctionLibraryType \| None | None | |
| tool_config | content_types.ToolConfigType \| None | None | |
| request_options | helper_types.RequestOptionsType \| None | None | |
Exported source
@patch
@delegates(genai.GenerativeModel.generate_content)
def toolloop(self:Chat,
# Prompt to pass to model
pr, =10, # Maximum number of tool requests to loop through
max_stepscallable]=None, # Function to trace tool use steps (e.g `print`)
trace_func:Optional[callable]=noop, # Function that stops loop if returns False
cont_func:Optional[**kwargs):
"Add prompt `pr` to dialog and get a response from the model, automatically following up with `tool_use` messages"
= self(pr, **kwargs)
r for i in range(max_steps):
= r.parts[0]
pt if not pt.function_call: break
if trace_func: trace_func(r)
= self(**kwargs)
r if not (cont_func or noop)(self.h[-2]): break
if trace_func: trace_func(r)
return r
= Chat(model, tools=tools)
chat = chat.toolloop('Please cancel all orders for customer C1 for me.', trace_func=print)
r r
- Retrieving customer C1
response:
GenerateContentResponse(
done=True,
iterator=None,
result=protos.GenerateContentResponse({
"candidates": [
{
"content": {
"parts": [
{
"function_call": {
"name": "get_customer_info",
"args": {
"customer_id": "C1"
}
}
}
],
"role": "model"
},
"finish_reason": "STOP",
"index": 0,
"safety_ratings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usage_metadata": {
"prompt_token_count": 162,
"candidates_token_count": 20,
"total_token_count": 182
}
}),
)
- Cancelling order O2
response:
GenerateContentResponse(
done=True,
iterator=None,
result=protos.GenerateContentResponse({
"candidates": [
{
"content": {
"parts": [
{
"function_call": {
"name": "cancel_order",
"args": {
"order_id": "O2"
}
}
}
],
"role": "model"
},
"finish_reason": "STOP",
"index": 0,
"safety_ratings": [
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usage_metadata": {
"prompt_token_count": 336,
"candidates_token_count": 18,
"total_token_count": 354
}
}),
)
response:
GenerateContentResponse(
done=True,
iterator=None,
result=protos.GenerateContentResponse({
"candidates": [
{
"content": {
"parts": [
{
"text": "OK. I have cancelled order O2 for customer C1. Is there anything else? \n"
}
],
"role": "model"
},
"finish_reason": "STOP",
"index": 0,
"safety_ratings": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_HARASSMENT",
"probability": "NEGLIGIBLE"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"probability": "NEGLIGIBLE"
}
]
}
],
"usage_metadata": {
"prompt_token_count": 371,
"candidates_token_count": 18,
"total_token_count": 389
}
}),
)
OK. I have cancelled order O2 for customer C1. Is there anything else?
- content: {‘parts’: [{‘text’: ‘OK. I have cancelled order O2 for customer C1. Is there anything else? ’}], ‘role’: ‘model’}
- finish_reason: 1
- index: 0
- safety_ratings: [{‘category’: 9, ‘probability’: 1, ‘blocked’: False}, {‘category’: 8, ‘probability’: 1, ‘blocked’: False}, {‘category’: 7, ‘probability’: 1, ‘blocked’: False}, {‘category’: 10, ‘probability’: 1, ‘blocked’: False}]
- token_count: 0
- grounding_attributions: []
- prompt_token_count: 371
- candidates_token_count: 18
- total_token_count: 389
- cached_content_token_count: 0
'What is the status of order O2?') chat.toolloop(
- Retrieving order O2
Order O2 is now cancelled.
- content: {‘parts’: [{‘text’: ‘Order O2 is now cancelled. ’}], ‘role’: ‘model’}
- finish_reason: 1
- index: 0
- safety_ratings: [{‘category’: 9, ‘probability’: 1, ‘blocked’: False}, {‘category’: 8, ‘probability’: 1, ‘blocked’: False}, {‘category’: 7, ‘probability’: 1, ‘blocked’: False}, {‘category’: 10, ‘probability’: 1, ‘blocked’: False}]
- token_count: 0
- grounding_attributions: []
- prompt_token_count: 485
- candidates_token_count: 7
- total_token_count: 492
- cached_content_token_count: 0