def print_columns(items, cols=3, width=30):
    for i in range(0, len(items), cols):
        row = items[i:i+cols]
        print(''.join(item[:width-1].ljust(width) for item in row))

client = OpenAI()
models = client.models.list()
print(f"Available models as of {datetime.now().strftime('%Y-%m-%d')}:\n")
print_columns(sorted([m.id for m in models]))
NB: Since the index into `models` is often hardcoded in consuming code, always append newer entries to the end of the list to avoid breaking code that consumes this library.
def can_stream(m): return m in has_streaming_models
def can_set_system_prompt(m): return m in has_system_prompt_models
def can_set_temperature(m): return m in has_temperature_models
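These are simple membership tests against the capability lists defined earlier. For example (model names here are illustrative; whether they appear in each list depends on those definitions):

# Illustrative check -- membership depends on the capability lists defined above
can_stream('gpt-4o'), can_set_system_prompt('gpt-4o'), can_set_temperature('gpt-4o')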
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Hello, Jeremy! How can I assist you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
def find_block(r:abc.Mapping, # The message to look in
              ):
    "Find the message in `r`."
    m = nested_idx(r, 'choices', 0)
    if not m: return m
    if hasattr(m, 'message'): return m.message
    return m.delta
def contents(r):"Helper to get the contents from response `r`." blk = find_block(r)ifnot blk: return rifhasattr(blk, 'content'): returngetattr(blk,'content')return blk
contents(r)
'Hello, Jeremy! How can I assist you today?'
Exported source
@patch
def _repr_markdown_(self:ChatCompletion):
    det = '\n- '.join(f'{k}: {v}' for k,v in dict(self).items())
    res = contents(self)
    if not res: return f"- {det}"
    return f"""{contents(self)}

<details>

- {det}

</details>"""
r
Hello, Jeremy! How can I assist you today?
id: chatcmpl-AxxDzLirSVOpB1Fa5YINYyvmVNJtj
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Hello, Jeremy! How can I assist you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
def usage(inp=0,  # Number of prompt tokens
          out=0   # Number of completion tokens
         ):
    "Slightly more concise version of `CompletionUsage`."
    return CompletionUsage(prompt_tokens=inp, completion_tokens=out, total_tokens=inp+out)
Add together each of `input_tokens` and `output_tokens`
Exported source
@patch
def __add__(self:CompletionUsage, b):
    "Add together each of `input_tokens` and `output_tokens`"
    return usage(self.prompt_tokens+b.prompt_tokens, self.completion_tokens+b.completion_tokens)
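Since `usage` computes the total from its inputs, adding two usage objects sums each field. A quick illustrative check:

# prompt 5+3, completion 1+2 -> total 11
usage(5, 1) + usage(3, 2)  # CompletionUsage(prompt_tokens=8, completion_tokens=3, total_tokens=11)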
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Hello, Jeremy! How can I assist you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
We can pass more than just text messages to OpenAI. As we’ll see later we can also pass images, SDK objects, etc. To handle these different data types we need to pass the type along with our content to OpenAI.
Here’s an example of a multimodal message containing text and images.
mk_msg infers the type automatically and creates the appropriate data structure.
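For instance, passing raw image bytes to mk_msg yields an image_url content part. A minimal sketch (the file path is hypothetical and the base64 payload is elided):

# Hypothetical image file; mk_msg detects the bytes and base64-encodes them
img = Path('samples/puppy.jpg').read_bytes()
mk_msg(img)
# roughly: {'role': 'user',
#           'content': [{'type': 'image_url',
#                        'image_url': {'url': 'data:image/jpeg;base64,<...>'}}]}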
LLMs don't actually have state; instead, dialogs are created by passing back all previous prompts and responses on every call. With OpenAI, these always alternate between user and assistant. We'll use mk_msgs from msglm to make it easier to build up these dialog lists.
msgs = mk_msgs([prompt, r, "I forgot my name. Can you remind me please?"])
msgs
[{'role': 'user', 'content': "I'm Jeremy"},
ChatCompletionMessage(content='Hello, Jeremy! How can I assist you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None),
{'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]
It sounds like you’re having a bit of a memory lapse! You just mentioned that your name is Jeremy. If there’s anything else you need help with, feel free to ask.
id: chatcmpl-AxxE28BxbRMXrKAMxiNw0dHXT9h6r
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content="It sounds like you're having a bit of a memory lapse! You just mentioned that your name is Jeremy. If there's anything else you need help with, feel free to ask.", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
class Client:
    def __init__(self, model, cli=None):
        "Basic LLM messages client."
        self.model,self.use = model,usage(0,0)
        self.text_only = model in text_only_models
        self.c = (cli or OpenAI()).chat.completions
c = Client(model)
c.use
In: 0; Out: 0; Total: 0
Exported source
@patch
def _r(self:Client, r:ChatCompletion):
    "Store the result of the message and accrue total usage."
    self.result = r
    if getattr(r,'usage',None): self.use += r.usage
    return r
Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method.
| Name | Type | Default |
|------|------|---------|
| extra_query | Query \| None | None |
| extra_body | Body \| None | None |
| timeout | float \| httpx.Timeout \| None \| NotGiven | NOT_GIVEN |
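These are standard OpenAI SDK arguments, so once `Client.__call__` is defined below they pass straight through to the underlying create call. For example (values illustrative):

# e.g. cap the request at 30s and add a query parameter
c([mk_msg('Hi')], timeout=30, extra_query={'debug': '1'})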
Exported source
@patch
@delegates(Completions.create)
def __call__(self:Client,
             msgs:list,          # List of messages in the dialog
             sp:str='',          # System prompt
             maxtok=4096,        # Maximum tokens
             stream:bool=False,  # Stream response?
             **kwargs):
    "Make a call to LLM."
    if 'tools' in kwargs: assert not self.text_only, "Tool use is not supported by the current model type."
    if any(c['type'] == 'image_url' for msg in msgs if isinstance(msg, dict) and isinstance(msg.get('content'), list) for c in msg['content']):
        assert not self.text_only, "Images are not supported by the current model type."
    if stream: kwargs['stream_options'] = {"include_usage": True}
    if sp and self.model in has_system_prompt_models: msgs = [mk_msg(sp, 'system')] + list(msgs)
    r = self.c.create(
        model=self.model, messages=msgs, max_completion_tokens=maxtok, stream=stream, **kwargs)
    if not stream: return self._r(r)
    else: return get_stream(map(self._r, r))
msgs = [mk_msg('Hi')]
c(msgs)
Hello! How can I assist you today?
id: chatcmpl-AxxE4eN6uGVZpCt4DqZpeqAfeVcVF
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Hello! How can I assist you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
def sums(
    a:int,  # First thing to sum
    b:int   # Second thing to sum
) -> int:   # The sum of the inputs
    "Adds a + b."
    print(f"Finding the sum of {a} and {b}")
    return a + b
def mk_toolres(
    r:abc.Mapping,                 # Tool use request response
    ns:Optional[abc.Mapping]=None, # Namespace to search for tools
    obj:Optional=None              # Class to search for tools
    ):
    "Create a `tool_result` message from response `r`."
    r = mk_msg(r)
    tcs = getattr(r, 'tool_calls', [])
    res = [r]
    if ns is None: ns = globals()
    if obj is not None: ns = mk_ns(obj)
    for tc in (tcs or []):
        func = tc.function
        cts = str(call_func_openai(func, ns=ns))
        res.append(mk_msg(str(cts), 'tool', tool_call_id=tc.id, name=func.name))
    return res
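A typical round trip then looks like the following sketch. It assumes mk_openai_func (used later in this notebook) converts a Python function into OpenAI's tool schema:

pr = "What is 604542+6458932?"
tools = [mk_openai_func(sums)]
r = c([mk_msg(pr)], tools=tools)     # model replies with a tool-call request
msgs = [mk_msg(pr)] + mk_toolres(r)  # runs `sums` and appends its result
c(msgs, tools=tools)                 # model now answers using the tool result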
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The sum of 604542 and 6458932 is 7,063,474.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
class Dummy:
    def sums(self,
             a:int,   # First thing to sum
             b:int=1  # Second thing to sum
            ) -> int: # The sum of the inputs
        "Adds a + b."
        print(f"Finding the sum of {a} and {b}")
        return a + b
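Methods can serve as tools too: pass the instance via obj and mk_toolres will search it for the called function. A sketch, reusing pr from above:

tools = [mk_openai_func(Dummy.sums)]
r = c([mk_msg(pr)], tools=tools)
mk_toolres(r, obj=Dummy())  # `sums` is looked up on the Dummy instance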
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Hello Jeremy! What can I do for you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
[{'role': 'user', 'content': "I'm Jeremy"},
ChatCompletionMessage(content='Hi Jeremy! How can I assist you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None)]
def _mock_id(): return 'call_'+''.join(choices(ascii_letters+digits, k=24))

def mock_tooluse(name:str,   # The name of the called function
                 res,        # The result of calling the function
                 **kwargs):  # The arguments to the function
    "Mock the messages for a call to tool `name` with arguments `kwargs` returning `res`."
    id = _mock_id()
    func = dict(arguments=json.dumps(kwargs), name=name)
    tc = dict(id=id, function=func, type='function')
    req = dict(content=None, role='assistant', tool_calls=[tc])
    resp = mk_msg('' if res is None else str(res), 'tool', tool_call_id=id, name=name)
    return [req,resp]
This function mocks the messages needed to implement tool use, for situations where you want to insert tool use messages into a dialog without actually calling into the model.
tu = mock_tooluse(name='sums', res=7063474, a=604542, b=6458932)
r = c([mk_msg(pr)]+tu, tools=tools)
r
The sum of 604542 and 6458932 is 7063474.
id: chatcmpl-AxxEEgwmhB6rTQiEHNbmN6HdLGFDg
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The sum of 604542 and 6458932 is 7063474.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method.
| Name | Type | Default |
|------|------|---------|
| extra_query | Query \| None | None |
| extra_body | Body \| None | None |
| timeout | float \| httpx.Timeout \| None \| NotGiven | NOT_GIVEN |
Exported source
@patch
@delegates(Client.__call__)
def structured(self:Client,
               msgs: list,                    # Prompt
               tools:Optional[list]=None,     # List of tools to make available to OpenAI model
               obj:Optional=None,             # Class to search for tools
               ns:Optional[abc.Mapping]=None, # Namespace to search for tools
               **kwargs):
    "Return the value of all tool calls (generally used for structured outputs)"
    tools = listify(tools)
    if ns is None: ns=mk_ns(*tools)
    tools = [mk_openai_func(o) for o in tools]
    if obj is not None: ns = mk_ns(obj)
    res = self(msgs, tools=tools, tool_choice='required', **kwargs)
    cts = getattr(res, 'choices', [])
    tcs = [call_func_openai(t.function, ns=ns) for o in cts for t in (o.message.tool_calls or [])]
    return tcs
OpenAI’s API doesn’t natively support response formats, so we introduce a structured method to handle tool calling for this purpose. In this setup, the tool’s result is sent directly to the user without being passed back to the model.
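Reusing the sums tool from above, a sketch of structured in action; the returned list holds each tool call's value:

c.structured([mk_msg("What is 604542+6458932?")], tools=[sums])
# expected: [7063474]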
class Chat:
    def __init__(self,
                 model:Optional[str]=None,        # Model to use (leave empty if passing `cli`)
                 cli:Optional[Client]=None,       # Client to use (leave empty if passing `model`)
                 sp='',                           # Optional system prompt
                 tools:Optional[list]=None,       # List of tools to make available
                 tool_choice:Optional[str]=None): # Forced tool choice
        "OpenAI chat client."
        assert model or cli
        self.c = (cli or Client(model))
        self.h,self.sp,self.tools,self.tool_choice = [],sp,tools,tool_choice

    @property
    def use(self): return self.c.use
sp ="Never mention what tools you use."chat = Chat(model, sp=sp)chat.c.use, chat.h
Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method.
| Name | Type | Default |
|------|------|---------|
| extra_query | Query \| None | None |
| extra_body | Body \| None | None |
| timeout | float \| httpx.Timeout \| None \| NotGiven | NOT_GIVEN |
Exported source
@patch
@delegates(Completions.create)
def __call__(self:Chat,
             pr=None,            # Prompt / message
             stream:bool=False,  # Stream response?
             **kwargs):
    "Add prompt `pr` to dialog and get a response"
    if isinstance(pr,str): pr = pr.strip()
    if pr: self.h.append(mk_msg(pr))
    if self.tools: kwargs['tools'] = [mk_openai_func(o) for o in self.tools]
    if self.tool_choice: kwargs['tool_choice'] = mk_tool_choice(self.tool_choice)
    res = self.c(self.h, sp=self.sp, stream=stream, **kwargs)
    self.h += mk_toolres(res, ns=self.tools)
    return res
chat("I'm Jeremy")chat("What's my name?")
Your name is Jeremy. How can I help you today?
id: chatcmpl-AxxEGWAHp292zs25lAzsUAW68RaQm
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Your name is Jeremy. How can I help you today?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The sum of 604542 and 6458932 is 7063474.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]
msglm automatically detects if a message is an image, encodes it, and generates the data structure above. All we need to do is create a list containing our image and a query, then pass it to mk_msg.
Let’s try it out…
q ="In brief, what color flowers are in this image?"msg = [mk_msg(img), mk_msg(q)]
c = Chat(model)
c([img, q])
The flowers in the image are purple.
id: chatcmpl-AxxEQLbPnVVEmTjyVmmU5TWTIddeW
choices: [Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The flowers in the image are purple.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))]