# Cosette
---
# Source: https://answerdotai.github.io/cosette/core.html.md
# Cosette’s source
## Setup
``` python
from IPython.display import display,Image,Markdown
from datetime import datetime
from pprint import pprint
```
``` python
from openai import OpenAI

def print_columns(items, cols=3, width=30):
    for i in range(0, len(items), cols):
        row = items[i:i+cols]
        print(''.join(item[:width-1].ljust(width) for item in row))

client = OpenAI()
model_list = client.models.list()
print(f"Available models as of {datetime.now().strftime('%Y-%m-%d')}:\n")
print_columns(sorted([m.id for m in model_list]))
```
Available models as of 2025-12-18:
babbage-002 chatgpt-4o-latest chatgpt-image-latest
codex-mini-latest dall-e-2 dall-e-3
davinci-002 gpt-3.5-turbo gpt-3.5-turbo-0125
gpt-3.5-turbo-1106 gpt-3.5-turbo-16k gpt-3.5-turbo-instruct
gpt-3.5-turbo-instruct-0914 gpt-4 gpt-4-0125-preview
gpt-4-0613 gpt-4-1106-preview gpt-4-turbo
gpt-4-turbo-2024-04-09 gpt-4-turbo-preview gpt-4.1
gpt-4.1-2025-04-14 gpt-4.1-mini gpt-4.1-mini-2025-04-14
gpt-4.1-nano gpt-4.1-nano-2025-04-14 gpt-4o
gpt-4o-2024-05-13 gpt-4o-2024-08-06 gpt-4o-2024-11-20
gpt-4o-audio-preview gpt-4o-audio-preview-2024-12- gpt-4o-audio-preview-2025-06-
gpt-4o-mini gpt-4o-mini-2024-07-18 gpt-4o-mini-audio-preview
gpt-4o-mini-audio-preview-202 gpt-4o-mini-realtime-preview gpt-4o-mini-realtime-preview-
gpt-4o-mini-search-preview gpt-4o-mini-search-preview-20 gpt-4o-mini-transcribe
gpt-4o-mini-transcribe-2025-0 gpt-4o-mini-transcribe-2025-1 gpt-4o-mini-tts
gpt-4o-mini-tts-2025-03-20 gpt-4o-mini-tts-2025-12-15 gpt-4o-realtime-preview
gpt-4o-realtime-preview-2024- gpt-4o-realtime-preview-2025- gpt-4o-search-preview
gpt-4o-search-preview-2025-03 gpt-4o-transcribe gpt-4o-transcribe-diarize
gpt-5 gpt-5-2025-08-07 gpt-5-chat-latest
gpt-5-codex gpt-5-mini gpt-5-mini-2025-08-07
gpt-5-nano gpt-5-nano-2025-08-07 gpt-5-pro
gpt-5-pro-2025-10-06 gpt-5-search-api gpt-5-search-api-2025-10-14
gpt-5.1 gpt-5.1-2025-11-13 gpt-5.1-chat-latest
gpt-5.1-codex gpt-5.1-codex-max gpt-5.1-codex-mini
gpt-5.2 gpt-5.2-2025-12-11 gpt-5.2-chat-latest
gpt-5.2-pro gpt-5.2-pro-2025-12-11 gpt-audio
gpt-audio-2025-08-28 gpt-audio-mini gpt-audio-mini-2025-10-06
gpt-audio-mini-2025-12-15 gpt-image-1 gpt-image-1-mini
gpt-image-1.5 gpt-realtime gpt-realtime-2025-08-28
gpt-realtime-mini gpt-realtime-mini-2025-10-06 gpt-realtime-mini-2025-12-15
o1 o1-2024-12-17 o1-pro
o1-pro-2025-03-19 o3 o3-2025-04-16
o3-deep-research o3-deep-research-2025-06-26 o3-mini
o3-mini-2025-01-31 o3-pro o3-pro-2025-06-10
o4-mini o4-mini-2025-04-16 o4-mini-deep-research
o4-mini-deep-research-2025-06 omni-moderation-2024-09-26 omni-moderation-latest
sora-2 sora-2-pro text-embedding-3-large
text-embedding-3-small text-embedding-ada-002 tts-1
tts-1-1106 tts-1-hd tts-1-hd-1106
whisper-1
Exported source
``` python
models = 'gpt-5.2', 'gpt-5.2-pro', 'gpt-5.2-chat-latest', 'gpt-5.1-codex', 'gpt-5-mini', 'gpt-5-nano', 'o1-preview', 'o1-mini', 'gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-4', 'gpt-4-32k', 'gpt-3.5-turbo', 'gpt-3.5-turbo-instruct', 'o1', 'o3-mini', 'chatgpt-4o-latest', 'o1-pro', 'o3', 'o4-mini', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4.1-nano'
```
`o1` should support images, while `o1-mini` and `o3-mini` do not.
------------------------------------------------------------------------
source
### can_set_temp
``` python
def can_set_temp(
m
):
```
Exported source
``` python
text_only_models = 'o1-preview', 'o1-mini', 'o3-mini'
```
Exported source
``` python
has_streaming_models = set(models) - set(('o1-mini', 'o3-mini'))
has_sp_models = set(models) - set(('o1-mini', 'o3-mini'))
has_temp_models = set(models) - set(('o1', 'o1-mini', 'o3-mini'))
```
Exported source
``` python
def can_stream(m): return m in has_streaming_models
def can_set_sp(m): return m in has_sp_models
def can_set_temp(m): return m in has_temp_models
```
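`can_set_sp` and `can_set_temp` can be checked the same way as `can_stream` below; for instance, `o1` accepts a system prompt but not a custom temperature:
``` python
assert can_set_temp("gpt-4o")
assert not can_set_temp("o1")
assert can_set_sp("o1")
```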
------------------------------------------------------------------------
source
### can_set_sp
``` python
def can_set_sp(
m
):
```
------------------------------------------------------------------------
source
### can_stream
``` python
def can_stream(
m
):
```
``` python
assert can_stream("gpt-4o")
assert not can_stream("o1-mini")
```
``` python
model = first(m for m in models if 'mini' in m)
model
```
'gpt-5-mini'
## OpenAI SDK
``` python
cli = OpenAI().responses
```
``` python
m = {'role': 'user', 'content': "I'm Jeremy"}
r = cli.create(
input=[m], model=model, max_output_tokens=100,
text={ "verbosity": "low" },
reasoning={ "effort": "minimal" }
)
print(r)
```
Response(id='resp_0265a51280da05ce006943fbff17f48193b07e2e882e2f3fed', created_at=1766063103.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-5-mini-2025-08-07', object='response', output=[ResponseReasoningItem(id='rs_0265a51280da05ce006943fbff642481939bf2e1300bc13f18', summary=[], type='reasoning', content=None, encrypted_content=None, status=None), ResponseOutputMessage(id='msg_0265a51280da05ce006943fbff9e5481938d162f29a6c301a2', content=[ResponseOutputText(annotations=[], text='Hi Jeremy — nice to meet you. How can I help today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, conversation=None, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, prompt_cache_retention=None, reasoning=Reasoning(effort='minimal', generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='low'), top_logprobs=0, truncation='disabled', usage=In: 8; Out: 20; Total: 28, user=None, billing={'payer': 'openai'}, store=True)
### Formatting output
Exported source
``` python
@patch
def _repr_markdown_(self:Response):
    det = '\n- '.join(f'{k}: {v}' for k,v in dict(self).items())
    res = self.output_text
    if not res: return f"- {det}"
    return f"""{res}
- {det}
"""
```
``` python
r
```
Hi Jeremy — nice to meet you. How can I help today?
- id: resp_0265a51280da05ce006943fbff17f48193b07e2e882e2f3fed
- created_at: 1766063103.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0265a51280da05ce006943fbff642481939bf2e1300bc13f18’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0265a51280da05ce006943fbff9e5481938d162f29a6c301a2’,
content=\[ResponseOutputText(annotations=\[\], text=‘Hi Jeremy — nice
to meet you. How can I help today?’, type=‘output_text’,
logprobs=\[\])\], role=‘assistant’, status=‘completed’,
type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 100
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=8,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=20,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=28)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
``` python
r.usage
```
In: 8; Out: 20; Total: 28
------------------------------------------------------------------------
source
### usage
``` python
def usage(
inp:int=0, # Number of prompt tokens
out:int=0, # Number of completion tokens
):
```
*Slightly more concise version of `ResponseUsage`.*
Exported source
``` python
def usage(inp=0, # Number of prompt tokens
          out=0 # Number of completion tokens
         ):
    "Slightly more concise version of `ResponseUsage`."
    return ResponseUsage(input_tokens=inp, output_tokens=out, total_tokens=inp+out, input_tokens_details={'cached_tokens':0}, output_tokens_details={'cached_tokens':0, 'reasoning_tokens':0})
```
``` python
usage(5)
```
In: 5; Out: 0; Total: 5
------------------------------------------------------------------------
source
### ResponseUsage.\_\_repr\_\_
``` python
def __repr__(
):
```
*Return repr(self).*
Exported source
``` python
@patch
def __repr__(self:ResponseUsage): return f'In: {self.input_tokens}; Out: {self.output_tokens}; Total: {self.total_tokens}'
```
``` python
r.usage
```
In: 8; Out: 20; Total: 28
------------------------------------------------------------------------
source
### ResponseUsage.\_\_add\_\_
``` python
def __add__(
b
):
```
*Add together each of `input_tokens` and `output_tokens`*
Exported source
``` python
@patch
def __add__(self:ResponseUsage, b):
    "Add together each of `input_tokens` and `output_tokens`"
    return usage(self.input_tokens+b.input_tokens, self.output_tokens+b.output_tokens)
```
``` python
r.usage+r.usage
```
In: 16; Out: 40; Total: 56
------------------------------------------------------------------------
source
### wrap_latex
``` python
def wrap_latex(
text
):
```
*Replace OpenAI LaTeX codes with markdown-compatible ones*
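The exported implementation isn’t shown here, but the idea is simple: models often emit LaTeX with `\(...\)` and `\[...\]` delimiters, which many markdown renderers don’t handle. A minimal sketch of such a transformation (an illustration under that assumption, not the exported source):
``` python
import re

def wrap_latex_sketch(text):
    "Convert `\\(...\\)` and `\\[...\\]` delimiters to `$...$` and `$$...$$`."
    text = re.sub(r'\\\((.*?)\\\)', r'$\1$', text)                # inline math
    return re.sub(r'\\\[(.*?)\\\]', r'$$\1$$', text, flags=re.S)  # display math

wrap_latex_sketch(r"The area is \(\pi r^2\).")  # -> 'The area is $\pi r^2$.'
```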
### Creating messages
Creating correctly formatted `dict`s from scratch every time isn’t very
handy, so we’ll import a couple of helper functions from the `msglm`
library.
Let’s use `mk_msg` to recreate our msg
`{'role': 'user', 'content': "I'm Jeremy"}` from earlier.
``` python
rkw = dict(
    text={ "verbosity": "low" },
    reasoning={ "effort": "minimal" }
)
```
``` python
prompt = "I'm Jeremy"
m = mk_msg(prompt)
r = cli.create(input=[m], model=model, max_output_tokens=400, **rkw)
r
```
Nice to meet you, Jeremy. How can I help you today?
- id: resp_0234b44bdd8bf5d2006943fc008fe88190970de1236e42e2f0
- created_at: 1766063104.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0234b44bdd8bf5d2006943fc00d530819098ee80460a88d72d’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0234b44bdd8bf5d2006943fc00ffcc8190b0df6e7599b23188’,
content=\[ResponseOutputText(annotations=\[\], text=‘Nice to meet you,
Jeremy. How can I help you today?’, type=‘output_text’,
logprobs=\[\])\], role=‘assistant’, status=‘completed’,
type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 400
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=8,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=20,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=28)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
``` python
print(r)
```
Response(id='resp_0234b44bdd8bf5d2006943fc008fe88190970de1236e42e2f0', created_at=1766063104.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-5-mini-2025-08-07', object='response', output=[ResponseReasoningItem(id='rs_0234b44bdd8bf5d2006943fc00d530819098ee80460a88d72d', summary=[], type='reasoning', content=None, encrypted_content=None, status=None), ResponseOutputMessage(id='msg_0234b44bdd8bf5d2006943fc00ffcc8190b0df6e7599b23188', content=[ResponseOutputText(annotations=[], text='Nice to meet you, Jeremy. How can I help you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, conversation=None, max_output_tokens=400, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, prompt_cache_retention=None, reasoning=Reasoning(effort='minimal', generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='low'), top_logprobs=0, truncation='disabled', usage=In: 8; Out: 20; Total: 28, user=None, billing={'payer': 'openai'}, store=True)
We can pass more than just text messages to OpenAI. As we’ll see later
we can also pass images, SDK objects, etc. To handle these different
data types we need to pass the type along with our content to OpenAI.
`mk_msg` infers the type automatically and creates the appropriate data
structure.
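For a plain-text prompt, `mk_msg` simply returns the dict we wrote by hand earlier:
``` python
mk_msg("I'm Jeremy")
```
{'role': 'user', 'content': "I'm Jeremy"}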
LLMs don’t actually have state; instead, a dialog is built up by passing back all previous prompts and responses on every call. With OpenAI, these always alternate between *user* and *assistant*. We’ll use `mk_msgs` from `msglm` to make it easier to build up these dialog lists.
``` python
msgs = mk_msgs([prompt, r, "I forgot my name. Can you remind me please?"])
msgs
```
[{'role': 'user', 'content': "I'm Jeremy"},
ResponseReasoningItem(id='rs_0234b44bdd8bf5d2006943fc00d530819098ee80460a88d72d', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_0234b44bdd8bf5d2006943fc00ffcc8190b0df6e7599b23188', content=[ResponseOutputText(annotations=[], text='Nice to meet you, Jeremy. How can I help you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message'),
{'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]
``` python
cli.create(input=msgs, model=model, max_output_tokens=400, **rkw)
```
You told me your name is Jeremy.
- id: resp_0234b44bdd8bf5d2006943fc0198d481909981aa9c1e32488c
- created_at: 1766063105.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0234b44bdd8bf5d2006943fc01ec0881908c7b8f1fea4a4c59’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0234b44bdd8bf5d2006943fc0255948190973533dd978af3ad’,
content=\[ResponseOutputText(annotations=\[\], text=‘You told me your
name is Jeremy.’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 400
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=43,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=14,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=57)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
## Client
### Basics
------------------------------------------------------------------------
source
### Client
``` python
def Client(
model, cli:NoneType=None, api_key_env:NoneType=None, base_url:NoneType=None
):
```
*Basic LLM messages client.*
Exported source
``` python
class Client:
    def __init__(self, model, cli=None, api_key_env=None, base_url=None):
        "Basic LLM messages client."
        self.model,self.use = model,usage(0,0)
        self.text_only = model in text_only_models
        if not cli:
            cli = OpenAI(api_key=os.getenv(api_key_env or "OPENAI_API_KEY"), base_url=base_url)
        self.c = cli.responses
```
``` python
c = Client(model)
c.use
```
In: 0; Out: 0; Total: 0
Exported source
``` python
@patch
def _r(self:Client, r):
    "Store the result of the message and accrue total usage."
    self.result = r
    if getattr(r,'usage',None): self.use += r.usage
    return r
```
``` python
c._r(r)
c.use
```
In: 8; Out: 20; Total: 28
------------------------------------------------------------------------
source
### mk_openai_func
``` python
def mk_openai_func(
f
):
```
------------------------------------------------------------------------
source
### mk_tool_choice
``` python
def mk_tool_choice(
f
):
```
Our `mk_tool_choice` converts falsy values to `NOT_GIVEN`, which omits the value completely from the API call. It treats any string other than `'required'` or `'none'` as a tool name, converting it to the dictionary
``` python
{"type": "function", "function": {"name": f}}
```
The remaining option, `'auto'`, is the default, so we simply recommend passing `None`, which translates to `NOT_GIVEN`.
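Based on that description, we’d expect mappings like these:
``` python
mk_tool_choice(None), mk_tool_choice('required'), mk_tool_choice('sums')
```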
------------------------------------------------------------------------
source
### get_stream
``` python
def get_stream(
o, r, cli, cb:NoneType=None
):
```
------------------------------------------------------------------------
source
### Client.\_\_call\_\_
``` python
def __call__(
msgs:list, # List of messages in the dialog
sp:str='', # System prompt
maxtok:int=4096, # Maximum tokens
stream:bool=False, # Stream response?
tools:Optional=None, # List of tools to make available
tool_choice:Optional=None, # Forced tool choice
cb:callable=None, # Callback after completion
background:Optional[bool] | Omit=,
conversation:Optional[response_create_params.Conversation] | Omit=,
include:Optional[List[ResponseIncludable]] | Omit=,
input:Union[str, ResponseInputParam] | Omit=,
instructions:Optional[str] | Omit=,
max_output_tokens:Optional[int] | Omit=,
max_tool_calls:Optional[int] | Omit=,
metadata:Optional[Metadata] | Omit=,
model:ResponsesModel | Omit=,
parallel_tool_calls:Optional[bool] | Omit=,
previous_response_id:Optional[str] | Omit=,
prompt:Optional[ResponsePromptParam] | Omit=,
prompt_cache_key:str | Omit=,
prompt_cache_retention:Optional[Literal['in-memory', '24h']] | Omit=,
reasoning:Optional[Reasoning] | Omit=,
safety_identifier:str | Omit=,
service_tier:Optional[Literal['auto', 'default', 'flex', 'scale', 'priority']] | Omit=,
store:Optional[bool] | Omit=,
stream_options:Optional[response_create_params.StreamOptions] | Omit=,
temperature:Optional[float] | Omit=,
text:ResponseTextConfigParam | Omit=,
top_logprobs:Optional[int] | Omit=,
top_p:Optional[float] | Omit=,
truncation:Optional[Literal['auto', 'disabled']] | Omit=,
user:str | Omit=,
extra_headers:Headers | None=None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method.
extra_query:Query | None=None, extra_body:Body | None=None,
timeout:float | httpx.Timeout | None | NotGiven=NOT_GIVEN
):
```
*Make a call to LLM.*
Exported source
``` python
@patch
@delegates(Responses.create)
def __call__(self:Client,
             msgs:list, # List of messages in the dialog
             sp:str='', # System prompt
             maxtok=4096, # Maximum tokens
             stream:bool=False, # Stream response?
             tools:Optional[list]=None, # List of tools to make available
             tool_choice:Optional[str]=None, # Forced tool choice
             cb:callable=None, # Callback after completion
             **kwargs):
    "Make a call to LLM."
    if tools: assert not self.text_only, "Tool use is not supported by the current model type."
    if any(c['type'] == 'image_url' for msg in msgs if isinstance(msg, dict) and isinstance(msg.get('content'), list) for c in msg['content']): assert not self.text_only, "Images are not supported by the current model type."
    tools = [mk_openai_func(o) for o in listify(tools)]
    r = self.c.create(
        model=self.model, input=msgs, max_output_tokens=maxtok, stream=stream, instructions=sp,
        tools=tools, tool_choice=mk_tool_choice(tool_choice), **kwargs)
    if stream: return get_stream(r, self, cb=cb)
    else:
        res = self._r(r)
        if cb: cb(res)
        return res
``` python
msgs = 'Hi'
```
``` python
c(msgs)
```
Hi! How can I help you today?
- id: resp_065c2e7e7bef6bde006943fc03236c8195b210bffc9db409ef
- created_at: 1766063107.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_065c2e7e7bef6bde006943fc0373d08195b33048fb368515ca’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_065c2e7e7bef6bde006943fc04b9a081958509334f1f7d9aaf’,
content=\[ResponseOutputText(annotations=\[\], text=‘Hi! How can I
help you today?’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=7,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=79,
output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
total_tokens=86)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
``` python
c.use
```
In: 15; Out: 99; Total: 114
``` python
r = c(msgs, stream=True)
for o in r: print(o, end='')
```
Hi — how can I help you today?
``` python
r.value
```
Hi — how can I help you today?
- id: resp_0fa910a22542c20a006943fc0536708195b5bc51bfb1e09461
- created_at: 1766063109.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0fa910a22542c20a006943fc057d6881959dc6d6773a9e11e3’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0fa910a22542c20a006943fc069ef88195b4bbe832ae4bed0a’,
content=\[ResponseOutputText(annotations=\[\], text=‘Hi — how can I
help you today?’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=7,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=79,
output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
total_tokens=86)
- user: None
- store: True
``` python
len(r.events)
```
19
``` python
c.use
```
In: 22; Out: 178; Total: 200
``` python
c(msgs, sp='Talk like GLaDOS.', **rkw)
```
Hello. It’s… delightful that you’ve decided to communicate. State your
purpose so we may proceed with minimal wasted time.
- id: resp_0193b4fcbba4639a006943fc0711c48197bdd2e56dc80b6e6e
- created_at: 1766063111.0
- error: None
- incomplete_details: None
- instructions: Talk like GLaDOS.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0193b4fcbba4639a006943fc0759408197b8630a59417d0a52’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0193b4fcbba4639a006943fc0791648197accd6a3c0557678b’,
content=\[ResponseOutputText(annotations=\[\], text=“Hello. It’s…
delightful that you’ve decided to communicate. State your purpose so
we may proceed with minimal wasted time.”, type=‘output_text’,
logprobs=\[\])\], role=‘assistant’, status=‘completed’,
type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=17,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=29,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=46)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
### Images
As everyone knows, when testing image APIs you have to use a cute puppy.
``` python
# Image is Cute_dog.jpg from Wikimedia
fn = Path('samples/puppy.jpg')
Image(filename=fn, width=200)
```
``` python
img = fn.read_bytes()
```
OpenAI expects an image message to have the following structure
``` js
{
    "type": "image_url",
    "image_url": {
        "url": f"data:{MEDIA_TYPE};base64,{IMG}"
    }
}
```
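To make this concrete, here’s how we could build that structure by hand for our puppy image (a sketch of what `msglm` automates; the `MEDIA_TYPE` and `IMG` names mirror the placeholders above):
``` python
import base64

MEDIA_TYPE = 'image/jpeg'
IMG = base64.b64encode(img).decode()
manual_msg = {
    "type": "image_url",
    "image_url": {"url": f"data:{MEDIA_TYPE};base64,{IMG}"}
}
```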
`msglm` automatically detects if a message is an image, encodes it, and generates the data structure above. All we need to do is create a message for the image and one for the query with `mk_msg`, then pass both in a list.
Let’s try it out…
``` python
q = "In brief, what color flowers are in this image?"
msg = [mk_msg(img), mk_msg(q)]
```
``` python
c = Client(model)
c(msg, **rkw)
```
Light purple (lavender) flowers.
- id: resp_06c91e5a9021060d006943fc0859c08194a4697c2a89807169
- created_at: 1766063112.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_06c91e5a9021060d006943fc08ba9c8194905c9143163f1283’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_06c91e5a9021060d006943fc08e2a48194989a0193da86fdcf’,
content=\[ResponseOutputText(annotations=\[\], text=‘Light purple
(lavender) flowers.’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=107,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=14,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=121)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
## Tool use
### Basic tool calling
``` python
def sums(
    a:int, # First thing to sum
    b:int # Second thing to sum
) -> int: # The sum of the inputs
    "Adds a + b."
    print(f"Finding the sum of {a} and {b}")
    return a + b
```
``` python
def add(x: int, y:int):
    "adds x and y"
    return x + y

mk_openai_func(add)
```
{'type': 'function',
'name': 'add',
'description': 'adds x and y',
'parameters': {'type': 'object',
'properties': {'x': {'type': 'integer', 'description': ''},
'y': {'type': 'integer', 'description': ''}},
'required': ['x', 'y']}}
``` python
sysp = "You are a helpful assistant. When using tools, be sure to pass all required parameters. Don't use tools unless needed for the provided prompt."
```
``` python
a,b = 604542,6458932
pr = f"What is {a}+{b}?"
tools=sums
tool_choice="sums"
```
``` python
msgs = [mk_msg(pr)]
r = c(msgs, sp=sysp, tools=tools, tool_choice='required', **rkw)
```
``` python
tc = [o for o in r.output if isinstance(o, ResponseFunctionToolCall)]
tc
```
[ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_uZ9bpRTk2Rnr9vMKUGXh5gOZ', name='sums', type='function_call', id='fc_0278bf5ab7665d17006943fc0a0f508194aa69f150e7e7c560', status='completed')]
``` python
func = tc[0]
func
```
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_uZ9bpRTk2Rnr9vMKUGXh5gOZ', name='sums', type='function_call', id='fc_0278bf5ab7665d17006943fc0a0f508194aa69f150e7e7c560', status='completed')
------------------------------------------------------------------------
source
### call_func_openai
``` python
def call_func_openai(
func, ns:Optional=None
):
```
Exported source
``` python
def call_func_openai(func, ns:Optional[abc.Mapping]=None):
    try: return call_func(func.name, json.loads(func.arguments), ns, raise_on_err=False)
    except KeyError as e: return f"Error - tool not defined in the tool_schemas: {func.name}"
```
``` python
ns = mk_ns(sums)
res = call_func_openai(func, ns=ns)
res
```
Finding the sum of 604542 and 6458932
7063474
------------------------------------------------------------------------
source
### allowed_tools
``` python
def allowed_tools(
specs:Optional=None, choice:Union=None
):
```
Exported source
``` python
def _get_name(f):
    if isinstance(f,str): return f
    if isinstance(f, dict): return f['name']
    if callable(f) and hasattr(f, '__name__'): return f.__name__

def allowed_tools(specs:Optional[list]=None, choice:Optional[Union[dict,str]]=None):
    if choice:
        choice = mk_tool_choice(choice)
        if isinstance(choice, dict) and choice['type'] == 'function':
            return {choice['function']['name']}
    return {_get_name(v) for v in specs or []}
```
``` python
allowed_tools([sums, add], 'add')
```
{'add'}
------------------------------------------------------------------------
source
### limit_ns
``` python
def limit_ns(
ns:Optional=None, # Namespace to search for tools
specs:Union=None, # List of the tools that are allowed for llm to call, if None no tools are allowed
choice:Union=None, # Tool choice as defined by the OpenAI API
):
```
*Filter namespace `ns` to only include tools allowed by `specs` and
`choice`*
Exported source
``` python
def limit_ns(
    ns:Optional[abc.Mapping]=None, # Namespace to search for tools
    specs:Optional[Union[str,abc.Callable]]=None, # List of the tools that are allowed for llm to call, if None no tools are allowed
    choice:Optional[Union[dict,str]]=None # Tool choice as defined by the OpenAI API
):
    "Filter namespace `ns` to only include tools allowed by `specs` and `choice`"
    if ns is None: ns = globals()
    if not isinstance(ns, abc.Mapping): ns = mk_ns(ns)
    ns = {k:v for k,v in ns.items() if k in allowed_tools(specs, choice)}
    return ns
```
``` python
limit_ns([sums, add], None, None)
```
{}
``` python
limit_ns([sums, add], ['sums'], None)
```
{'sums': <function sums(a: int, b: int) -> int>}
``` python
limit_ns([sums, add], ['sums', add], 'add')
```
{'add': <function add(x: int, y: int)>}
------------------------------------------------------------------------
source
### mk_toolres
``` python
def mk_toolres(
r:Mapping, # Response containing tool use request
ns:Optional=None, # Namespace to search for tools
):
```
*Create a `tool_result` message from response `r`.*
Exported source
``` python
def _toolres(r, ns):
    "Create a result dict from `tcs`."
    if ns is None: ns = globals()
    tcs = [o for o in getattr(r, 'output', []) if isinstance(o, ResponseFunctionToolCall)]
    return { tc.call_id: call_func_openai(tc, ns=mk_ns(ns)) for tc in tcs }
```
Exported source
``` python
def mk_toolres(
    r:abc.Mapping, # Response containing tool use request
    ns:Optional[abc.Mapping]=None, # Namespace to search for tools
):
    "Create a `tool_result` message from response `r`."
    tr = _toolres(r, ns)
    r = mk_msg(r)
    res = [r] if isinstance(r, dict) else listify(r)
    for k,v in tr.items(): res.append(dict(type="function_call_output", call_id=k, output=str(v)))
    return res
```
``` python
tr = mk_toolres(r, ns=ns)
tr
```
Finding the sum of 604542 and 6458932
[ResponseReasoningItem(id='rs_0278bf5ab7665d17006943fc09c3e8819499322942022e9de6', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_uZ9bpRTk2Rnr9vMKUGXh5gOZ', name='sums', type='function_call', id='fc_0278bf5ab7665d17006943fc0a0f508194aa69f150e7e7c560', status='completed'),
{'type': 'function_call_output',
'call_id': 'call_uZ9bpRTk2Rnr9vMKUGXh5gOZ',
'output': '7063474'}]
``` python
m2 = msgs + tr
```
``` python
res = c(mk_msgs(m2), sp=sysp, tools=tools)
res
```
604542 + 6,458,932 = 7,063,474
- id: resp_0278bf5ab7665d17006943fc0ae8f081949ba1ce578dc4a48b
- created_at: 1766063114.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure
to pass all required parameters. Don’t use tools unless needed for the
provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseOutputMessage(id=‘msg_0278bf5ab7665d17006943fc0b33f081948ac26c67ad12dcef’,
content=\[ResponseOutputText(annotations=\[\], text=‘604542 +
6,458,932 = 7,063,474’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘sums’, parameters={‘type’: ‘object’,
‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to
sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to
sum’}}, ‘required’: \[‘a’, ‘b’\], ‘additionalProperties’: False},
strict=True, type=‘function’, description=‘Adds a + b.:- type:
integer’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=157,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=20,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=177)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
``` python
tr = mk_toolres(r, ns=limit_ns([sums, add], [sums, add], 'add'))
tr
```
[ResponseReasoningItem(id='rs_0278bf5ab7665d17006943fc09c3e8819499322942022e9de6', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_uZ9bpRTk2Rnr9vMKUGXh5gOZ', name='sums', type='function_call', id='fc_0278bf5ab7665d17006943fc0a0f508194aa69f150e7e7c560', status='completed'),
{'type': 'function_call_output',
'call_id': 'call_uZ9bpRTk2Rnr9vMKUGXh5gOZ',
'output': 'Error - tool not defined in the tool_schemas: sums'}]
This should also work in situations where no tool use is required:
``` python
msgs = mk_toolres("I'm Jeremy")
c(msgs, sp=sysp, tools=tools, **rkw)
```
Nice to meet you, Jeremy. How can I help you today?
- id: resp_01af5661a0ba543e006943fc0c01ac8196b66f21a944c1337e
- created_at: 1766063116.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure
to pass all required parameters. Don’t use tools unless needed for the
provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_01af5661a0ba543e006943fc0c4e588196934dd914a099574c’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_01af5661a0ba543e006943fc0c77d481969ca90d07baa45c08’,
content=\[ResponseOutputText(annotations=\[\], text=‘Nice to meet you,
Jeremy. How can I help you today?’, type=‘output_text’,
logprobs=\[\])\], role=‘assistant’, status=‘completed’,
type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘sums’, parameters={‘type’: ‘object’,
‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to
sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to
sum’}}, ‘required’: \[‘a’, ‘b’\], ‘additionalProperties’: False},
strict=True, type=‘function’, description=‘Adds a + b.:- type:
integer’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=96,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=20,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=116)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
------------------------------------------------------------------------
source
### Client.structured
``` python
def structured(
msgs:list, # Prompt
tools:Optional=None, # List of tools to make available to OpenAI model
ns:Optional=None, # Namespace to search for tools
sp:str='', # System prompt
maxtok:int=4096, # Maximum tokens
stream:bool=False, # Stream response?
tool_choice:Optional=None, # Forced tool choice
cb:callable=None, # Callback after completion
background:Optional[bool] | Omit=,
conversation:Optional[response_create_params.Conversation] | Omit=,
include:Optional[List[ResponseIncludable]] | Omit=,
input:Union[str, ResponseInputParam] | Omit=,
instructions:Optional[str] | Omit=,
max_output_tokens:Optional[int] | Omit=,
max_tool_calls:Optional[int] | Omit=,
metadata:Optional[Metadata] | Omit=,
model:ResponsesModel | Omit=,
parallel_tool_calls:Optional[bool] | Omit=,
previous_response_id:Optional[str] | Omit=,
prompt:Optional[ResponsePromptParam] | Omit=,
prompt_cache_key:str | Omit=,
prompt_cache_retention:Optional[Literal['in-memory', '24h']] | Omit=,
reasoning:Optional[Reasoning] | Omit=,
safety_identifier:str | Omit=,
service_tier:Optional[Literal['auto', 'default', 'flex', 'scale', 'priority']] | Omit=,
store:Optional[bool] | Omit=,
stream_options:Optional[response_create_params.StreamOptions] | Omit=,
temperature:Optional[float] | Omit=,
text:ResponseTextConfigParam | Omit=,
top_logprobs:Optional[int] | Omit=,
top_p:Optional[float] | Omit=,
truncation:Optional[Literal['auto', 'disabled']] | Omit=,
user:str | Omit=,
extra_headers:Headers | None=None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method.
extra_query:Query | None=None, extra_body:Body | None=None,
timeout:float | httpx.Timeout | None | NotGiven=NOT_GIVEN
):
```
*Return the value of all tool calls (generally used for structured
outputs)*
Exported source
``` python
@patch
@delegates(Client.__call__)
def structured(self:Client,
               msgs: list, # Prompt
               tools:Optional[list]=None, # List of tools to make available to OpenAI model
               ns:Optional[abc.Mapping]=None, # Namespace to search for tools
               **kwargs):
    "Return the value of all tool calls (generally used for structured outputs)"
    if ns is None: ns = mk_ns(tools)
    r = self(msgs, tools=tools, tool_choice='required', **kwargs)
    return first(_toolres(r, ns).values())
```
``` python
class PrimeMinister(BasicRepr):
    "An Australian prime minister"
    def __init__(
        self,
        firstname:str, # First name
        surname:str, # Surname
        dob:str, # Date of birth
        year_entered:int, # Year first became PM
    ): store_attr()
```
``` python
c1 = Client(model)
c1.structured('Who was the first prime minister of Australia?', [PrimeMinister], **rkw)
```
PrimeMinister(firstname='Edmund', surname='Barton', dob='1849-01-18', year_entered=1901)
### Streaming tool calling
``` python
msgs = [mk_msg(pr)]
r = c(msgs, sp=sysp, tools=tools, stream=True, **rkw)
```
We can stream back any tool call text (which may be empty):
``` python
for o in r: print(o, end='')
```
After streaming is complete, `value.output` will contain the tool calls:
``` python
r.value.output
```
[ResponseReasoningItem(id='rs_0e3b419c50c1b99f006943fc0f02488190b01c0e4f5473ad74', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_mBzrloUYhxSVlhuJLQx52BJT', name='sums', type='function_call', id='fc_0e3b419c50c1b99f006943fc0f60bc81909d88ed289f143273', status='completed')]
Therefore we can repeat the same process as before, but using the
`value` attr:
``` python
tr = mk_toolres(r.value, ns=ns)
msgs += tr
c(mk_msgs(msgs), sp=sysp, tools=tools, **rkw)
```
Finding the sum of 604542 and 6458932
7,063,474
- id: resp_0e3b419c50c1b99f006943fc1025148190baac3f22eb1fc289
- created_at: 1766063120.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure
to pass all required parameters. Don’t use tools unless needed for the
provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseOutputMessage(id=‘msg_0e3b419c50c1b99f006943fc1079f88190ad05b366136e4ff1’,
content=\[ResponseOutputText(annotations=\[\], text=‘7,063,474’,
type=‘output_text’, logprobs=\[\])\], role=‘assistant’,
status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘sums’, parameters={‘type’: ‘object’,
‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to
sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to
sum’}}, ‘required’: \[‘a’, ‘b’\], ‘additionalProperties’: False},
strict=True, type=‘function’, description=‘Adds a + b.:- type:
integer’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=157,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=9,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=166)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
## Chat
### Basic chat
------------------------------------------------------------------------
source
### Chat
``` python
def Chat(
model:Optional=None, # Model to use (leave empty if passing `cli`)
cli:Optional=None, # Client to use (leave empty if passing `model`)
sp:str='', # Optional system prompt
tools:Optional=None, # List of tools to make available
hist:list=None, # Initialize history
tool_choice:Optional=None, # Forced tool choice
ns:Optional=None, # Namespace to search for tools
kw:VAR_KEYWORD
):
```
*OpenAI chat client.*
Exported source
``` python
class Chat:
def __init__(self,
model:Optional[str]=None, # Model to use (leave empty if passing `cli`)
cli:Optional[Client]=None, # Client to use (leave empty if passing `model`)
sp='', # Optional system prompt
tools:Optional[list]=None, # List of tools to make available
hist: list = None, # Initialize history
tool_choice:Optional[str]=None, # Forced tool choice
ns:Optional[abc.Mapping]=None, # Namespace to search for tools
**kw):
"OpenAI chat client."
assert model or cli
self.c = (cli or Client(model))
self.h = hist if hist else []
if ns is None: ns=tools
self.sp,self.tools,self.tool_choice,self.ns,self.kw = sp,tools,tool_choice,ns,kw
@property
def use(self): return self.c.use
```
``` python
chat = Chat(model, sp=sysp, **rkw)
chat.c.use, chat.h
```
(In: 0; Out: 0; Total: 0, [])
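Because `hist` seeds the dialog, a `Chat` can also resume an earlier conversation. A minimal sketch (reusing `model`, `sysp`, and `rkw` from above):
``` python
prev = [mk_msg("I'm Jeremy")]
chat2 = Chat(model, sp=sysp, hist=prev, **rkw)
chat2("What's my name?")
```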
------------------------------------------------------------------------
source
### Chat.\_\_call\_\_
``` python
def __call__(
pr:NoneType=None, # Prompt / message
stream:bool=False, # Stream response?
tools:NoneType=None, # Tools to use
tool_choice:NoneType=None, # Required tools to use
background:Optional[bool] | Omit=,
conversation:Optional[response_create_params.Conversation] | Omit=,
include:Optional[List[ResponseIncludable]] | Omit=,
input:Union[str, ResponseInputParam] | Omit=,
instructions:Optional[str] | Omit=,
max_output_tokens:Optional[int] | Omit=,
max_tool_calls:Optional[int] | Omit=,
metadata:Optional[Metadata] | Omit=,
model:ResponsesModel | Omit=,
parallel_tool_calls:Optional[bool] | Omit=,
previous_response_id:Optional[str] | Omit=,
prompt:Optional[ResponsePromptParam] | Omit=,
prompt_cache_key:str | Omit=,
prompt_cache_retention:Optional[Literal['in-memory', '24h']] | Omit=,
reasoning:Optional[Reasoning] | Omit=,
safety_identifier:str | Omit=,
service_tier:Optional[Literal['auto', 'default', 'flex', 'scale', 'priority']] | Omit=,
store:Optional[bool] | Omit=,
stream_options:Optional[response_create_params.StreamOptions] | Omit=,
temperature:Optional[float] | Omit=,
text:ResponseTextConfigParam | Omit=,
top_logprobs:Optional[int] | Omit=,
top_p:Optional[float] | Omit=,
truncation:Optional[Literal['auto', 'disabled']] | Omit=,
user:str | Omit=,
extra_headers:Headers | None=None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method.
extra_query:Query | None=None, extra_body:Body | None=None,
timeout:float | httpx.Timeout | None | NotGiven=NOT_GIVEN
):
```
*Add prompt `pr` to dialog and get a response*
Exported source
``` python
@patch
@delegates(Responses.create)
def __call__(self:Chat,
pr=None, # Prompt / message
stream:bool=False, # Stream response?
tools=None, # Tools to use
tool_choice=None, # Required tools to use
**kwargs):
"Add prompt `pr` to dialog and get a response"
if isinstance(pr,str): pr = pr.strip()
if pr: self.h.append(mk_msg(pr))
if not tools: tools = self.tools
if not tool_choice: tool_choice = self.tool_choice
kw = self.kw | kwargs
def _cb(v):
self.last = mk_toolres(v, ns=limit_ns(self.ns, self.tools, tool_choice))
self.h += self.last
res = self.c(self.h, sp=self.sp, stream=stream, cb=_cb, tools=tools, **kw)
return res
```
``` python
chat("I'm Jeremy")
chat("What's my name?")
```
You said your name is Jeremy.
- id: resp_0fad0aabf9d158d7006943fc124d84819785a5290dc6c01b91
- created_at: 1766063122.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure
to pass all required parameters. Don’t use tools unless needed for the
provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0fad0aabf9d158d7006943fc12f2b48197a746f0750976bf02’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0fad0aabf9d158d7006943fc131c8081979f047c455a339294’,
content=\[ResponseOutputText(annotations=\[\], text=‘You said your
name is Jeremy.’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=64,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=13,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=77)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
``` python
chat = Chat(model, sp=sysp, **rkw)
for o in chat("I'm Jeremy", stream=True): print(o, end='')
```
Hi Jeremy — nice to meet you. How can I help today?
``` python
r = chat("What's my name?", stream=True, **rkw)
for o in r: print(o, end='')
```
You told me your name is Jeremy.
``` python
r.value
```
You told me your name is Jeremy.
- id: resp_0543b2d5231a1965006943fc14ef008190aaec7f2241f09349
- created_at: 1766063124.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure
to pass all required parameters. Don’t use tools unless needed for the
provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0543b2d5231a1965006943fc153b188190b243d5ebcad39ee6’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0543b2d5231a1965006943fc1564bc81909b4a13499aca14fe’,
content=\[ResponseOutputText(annotations=\[\], text=‘You told me your
name is Jeremy.’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=68,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=14,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=82)
- user: None
- store: True
History is stored in the `h` attr:
``` python
chat.h
```
[{'role': 'user', 'content': "I'm Jeremy"},
ResponseReasoningItem(id='rs_0543b2d5231a1965006943fc143c408190832fd504f8544e47', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_0543b2d5231a1965006943fc147de8819096cf364e6b837d18', content=[ResponseOutputText(annotations=[], text='Hi Jeremy — nice to meet you. How can I help today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message'),
{'role': 'user', 'content': "What's my name?"},
ResponseReasoningItem(id='rs_0543b2d5231a1965006943fc153b188190b243d5ebcad39ee6', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_0543b2d5231a1965006943fc1564bc81909b4a13499aca14fe', content=[ResponseOutputText(annotations=[], text='You told me your name is Jeremy.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
### Chat tool use
``` python
pr = f"What is {a}+{b}?"
pr
```
'What is 604542+6458932?'
``` python
chat = Chat(model, sp=sysp, tools=[sums], **rkw)
r = chat(pr)
r.output
```
[ResponseReasoningItem(id='rs_05f8244e8d805b77006943fc16df2c819581d1abf604e76779', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_05f8244e8d805b77006943fc170a848195a62a36f7b5af8774', content=[ResponseOutputText(annotations=[], text='7063474', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
``` python
chat()
```
Finding the sum of 604542 and 6458932
- id: resp_05f8244e8d805b77006943fc1815d88195966a897b6bf45f32
- created_at: 1766063128.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure
to pass all required parameters. Don’t use tools unless needed for the
provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseFunctionToolCall(arguments=‘{“a”:604542,“b”:6458932}’,
call_id=‘call_xgNdloSrer0Bze0O7MtyYjyZ’, name=‘sums’,
type=‘function_call’,
id=‘fc_05f8244e8d805b77006943fc187a9481958e2ec6c71d8c7a4c’,
status=‘completed’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘sums’, parameters={‘type’: ‘object’,
‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to
sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to
sum’}}, ‘required’: \[‘a’, ‘b’\], ‘additionalProperties’: False},
strict=True, type=‘function’, description=‘Adds a + b.:- type:
integer’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=127,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=25,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=152)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
The [`Chat`](https://AnswerDotAI.github.io/cosette/core.html#chat) class
automatically validates tool calls against the provided `tools` list. If
the model attempts to call a tool that isn’t in the allowed set (whether
due to hallucination or a mismatch between `tools` and `ns`), the tool
call will fail with an error message rather than executing arbitrary
code.
This provides an important safety mechanism: even if the model invents a function name or tries to call a tool that shouldn’t be available, [`Chat`](https://AnswerDotAI.github.io/cosette/core.html#chat) ensures only explicitly allowed tools can be executed.
``` python
chat = Chat(model, sp=sysp, tools=[sums, add], **rkw)
chat.ns={} # Quick way to simulate call to tool that does not exist in ns or tools
r = chat(pr)
r.output
```
[ResponseReasoningItem(id='rs_07858df65bb0139a006943fc1979b48194bbeb443a6dc2345c', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_3ilZzNeuNYVwm3NoqKV5DATp', name='sums', type='function_call', id='fc_07858df65bb0139a006943fc19bd788194a11707b54eb3eb61', status='completed')]
``` python
chat.h
```
[{'role': 'user', 'content': 'What is 604542+6458932?'},
ResponseReasoningItem(id='rs_07858df65bb0139a006943fc1979b48194bbeb443a6dc2345c', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":604542,"b":6458932}', call_id='call_3ilZzNeuNYVwm3NoqKV5DATp', name='sums', type='function_call', id='fc_07858df65bb0139a006943fc19bd788194a11707b54eb3eb61', status='completed'),
{'type': 'function_call_output',
'call_id': 'call_3ilZzNeuNYVwm3NoqKV5DATp',
'output': 'Error - tool not defined in the tool_schemas: sums'}]
Chat handles image prompts too.
``` python
q = "In brief, what color flowers are in this image?"
chat([img, q])
```
The flowers are purple.
- id: resp_07858df65bb0139a006943fc1a3a18819486051bea70b615a4
- created_at: 1766063130.0
- error: None
- incomplete_details: None
- instructions: You are a helpful assistant. When using tools, be sure
to pass all required parameters. Don’t use tools unless needed for the
provided prompt.
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_07858df65bb0139a006943fc1aa2b08194b7f22154fb9c9712’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_07858df65bb0139a006943fc1acef88194884b2b85ff6d5aba’,
content=\[ResponseOutputText(annotations=\[\], text=‘The flowers are
purple.’, type=‘output_text’, logprobs=\[\])\], role=‘assistant’,
status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘sums’, parameters={‘type’: ‘object’,
‘properties’: {‘a’: {‘type’: ‘integer’, ‘description’: ‘First thing to
sum’}, ‘b’: {‘type’: ‘integer’, ‘description’: ‘Second thing to
sum’}}, ‘required’: \[‘a’, ‘b’\], ‘additionalProperties’: False},
strict=True, type=‘function’, description=‘Adds a + b.:- type:
integer’), FunctionTool(name=‘add’, parameters={‘type’: ‘object’,
‘properties’: {‘x’: {‘type’: ‘integer’, ‘description’: ’‘}, ’y’:
{‘type’: ‘integer’, ‘description’: ’‘}}, ’required’: \[‘x’, ‘y’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=‘adds x and y’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=277,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=11,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=288)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
## Third Party Providers
### Azure OpenAI Service
Example Azure usage:
``` python
import os
from openai import AzureOpenAI

azure_client = AzureOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version="2024-08-01-preview"
)
client = Client(models_azure[0], azure_client)
chat = Chat(cli=client)
chat("Hi.")
```
## Other Providers
Here’s an example of using the library with OpenRouter:
``` python
openrouter_c = Client(
    model="openai/gpt-oss-20b",
    api_key_env="OPENROUTER_API_KEY",
    base_url="https://openrouter.ai/api/v1"
)
openrouter_c("Hello! What's 2+2?")
```
4
- id: gen-1766063131-zacuMF6yJHRUIPUXB48G
- created_at: 1766063131.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: openai/gpt-oss-20b
- object: response
- output: \[ResponseReasoningItem(id=‘rs_tmp_u56eghazyi9’, summary=\[\],
type=‘reasoning’, content=\[Content(text=‘We need to answer: 2+2 = 4.
Also maybe friendly.’, type=‘reasoning_text’)\],
encrypted_content=None, status=None),
ResponseOutputMessage(id=‘msg_tmp_htb9r0aougc’,
content=\[ResponseOutputText(annotations=\[\], text=‘4’,
type=‘output_text’, logprobs=None)\], role=‘assistant’,
status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: None
- tool_choice: auto
- tools: \[\]
- top_p: None
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: None
- safety_identifier: None
- service_tier: auto
- status: None
- text: None
- top_logprobs: None
- truncation: None
- usage: ResponseUsage(input_tokens=75,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=28,
output_tokens_details=OutputTokensDetails(reasoning_tokens=12),
total_tokens=103, cost=9.35e-06, is_byok=False,
cost_details={‘upstream_inference_cost’: None,
‘upstream_inference_input_cost’: 3.75e-06,
‘upstream_inference_output_cost’: 5.6e-06})
- user: None
- output_text:
- store: False
Here’s an example of using the library with Groq:
``` python
groq_c = Client(
    model="openai/gpt-oss-20b",
    api_key_env="GROQ_KEY",
    base_url="https://api.groq.com/openai/v1"
)
groq_c("Hello! What's 2+2?")
```
``` python
gchat = Chat(cli=groq_c)
gchat("Hello! I'm Jeremy")
```
``` python
gchat("What's my name?")
```
---
# Source: https://answerdotai.github.io/cosette/toolloop.html.md
# Tool loop
``` python
from IPython.display import display, Markdown, clear_output
from pprint import pprint
```
``` python
' '.join(models)
```
'gpt-5 gpt-5-mini gpt-5-nano o1-preview o1-mini gpt-4o gpt-4o-mini gpt-4-turbo gpt-4 gpt-4-32k gpt-3.5-turbo gpt-3.5-turbo-instruct o1 o3-mini chatgpt-4o-latest o1-pro o3 o4-mini gpt-4.1 gpt-4.1-mini gpt-4.1-nano'
``` python
model = first(m for m in models if 'mini' in m)
model
```
'gpt-5-mini'
## Sample Data
``` python
def _get_orders_customers():
    orders = {
        "O1": dict(id="O1", product="Widget A", quantity=2, price=19.99, status="Shipped"),
        "O2": dict(id="O2", product="Gadget B", quantity=1, price=49.99, status="Processing"),
        "O3": dict(id="O3", product="Gadget B", quantity=2, price=49.99, status="Shipped")}
    customers = {
        "C1": dict(name="John Doe", email="john@example.com", phone="123-456-7890",
                   orders=[orders['O1'], orders['O2']]),
        "C2": dict(name="Jane Smith", email="jane@example.com", phone="987-654-3210",
                   orders=[orders['O3']])
    }
    return orders, customers
```
``` python
orders, customers = _get_orders_customers()
```
``` python
def get_customer_info(
    customer_id:str # ID of the customer
): # Customer's name, email, phone number, and list of orders
    "Retrieves a customer's information and their orders based on the customer ID"
    print(f'- Retrieving customer {customer_id}')
    return customers.get(customer_id, "Customer not found")

def get_order_details(
    order_id:str # ID of the order
): # Order's ID, product name, quantity, price, and order status
    "Retrieves the details of a specific order based on the order ID"
    print(f'- Retrieving order {order_id}')
    return orders.get(order_id, "Order not found")

def cancel_order(
    order_id:str # ID of the order to cancel
)->bool: # True if the cancellation is successful
    "Cancels an order based on the provided order ID"
    print(f'- Cancelling order {order_id}')
    if order_id not in orders: return False
    orders[order_id]['status'] = 'Cancelled'
    return True
``` python
chatkw = dict(
    text={ "verbosity": "low" },
    reasoning={ "effort": "minimal" }
)
```
``` python
tools = [get_customer_info, get_order_details, cancel_order]
chat = Chat(model, tools=tools, **chatkw)
```
``` python
r = chat('Hi.')
r
```
Hello! How can I help you today?
- id: resp_0610e51711a17c8b006943fce162a0819396584fc43c687fba
- created_at: 1766063329.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0610e51711a17c8b006943fce1b7e08193a742dd433b62cbcb’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0610e51711a17c8b006943fce1dfc081939b2f726e067bd88c’,
content=\[ResponseOutputText(annotations=\[\], text=‘Hello! How can I
help you today?’, type=‘output_text’, logprobs=\[\])\],
role=‘assistant’, status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘get_customer_info’, parameters={‘type’:
‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’,
‘description’: ‘ID of the customer’}}, ‘required’: \[‘customer_id’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=“Retrieves a customer’s information and their orders based
on the customer ID”), FunctionTool(name=‘get_order_details’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Retrieves the details of a specific
order based on the order ID’), FunctionTool(name=‘cancel_order’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Cancels an order based on the provided
order ID:- type: boolean’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=136,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=15,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=151)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
``` python
r = chat('Can you tell me the email address for customer C2?')
r.output
```
- Retrieving customer C2
[ResponseReasoningItem(id='rs_0610e51711a17c8b006943fce2fb408193a0298d1762a4b19e', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"customer_id":"C2"}', call_id='call_1iQyH2m7zBT6AxtxpVfgOARS', name='get_customer_info', type='function_call', id='fc_0610e51711a17c8b006943fce34518819385d6cbeb29b1f63f', status='completed')]
``` python
r = chat()
r.output
```
[ResponseOutputMessage(id='msg_0610e51711a17c8b006943fce4a59c8193b7914069038e07b0', content=[ResponseOutputText(annotations=[], text='The email address for customer C2 is jane@example.com.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
``` python
chat = Chat(model, tools=tools)
r = chat('Please cancel all orders for customer C1 for me.')
r.output
```
- Retrieving customer C1
[ResponseReasoningItem(id='rs_067a83d17b75c4ea006943fce594008194904f7f4b1710ab11', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_5MXLQEl4LdzFyRHIR3DynD9I', name='get_customer_info', type='function_call', id='fc_067a83d17b75c4ea006943fce6c9848194b442cb73ffe02035', status='completed')]
``` python
r = chat()
r.output
```
- Cancelling order O1
- Cancelling order O2
[ResponseReasoningItem(id='rs_067a83d17b75c4ea006943fce7b6648194aa8afa477dc81c42', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_y3LWRVEn8X80nCQf50EOVOu5', name='cancel_order', type='function_call', id='fc_067a83d17b75c4ea006943fce992f88194b8de77d411f78e44', status='completed'),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_oPiTCOxwXjo8uac0mzXK3rEv', name='cancel_order', type='function_call', id='fc_067a83d17b75c4ea006943fce9f498819495f077de035b7454', status='completed')]
## `toolloop` implementation
------------------------------------------------------------------------
source
### Chat.toolloop
``` python
def toolloop(
    pr, # Prompt to pass to the model
    max_steps:int=10, # Maximum number of tool requests to loop through
    cont_func:callable=noop, # Function that stops loop if returns False
    final_prompt:str='You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed.', # Prompt to add if last message is a tool call
    stream:bool=False, # Stream response?
    tools:NoneType=None, # Tools to use
    tool_choice:NoneType=None, # Required tools to use
    background:Optional[bool] | Omit=omit,
    conversation:Optional[response_create_params.Conversation] | Omit=omit,
    include:Optional[List[ResponseIncludable]] | Omit=omit,
    input:Union[str, ResponseInputParam] | Omit=omit,
    instructions:Optional[str] | Omit=omit,
    max_output_tokens:Optional[int] | Omit=omit,
    max_tool_calls:Optional[int] | Omit=omit,
    metadata:Optional[Metadata] | Omit=omit,
    model:ResponsesModel | Omit=omit,
    parallel_tool_calls:Optional[bool] | Omit=omit,
    previous_response_id:Optional[str] | Omit=omit,
    prompt:Optional[ResponsePromptParam] | Omit=omit,
    prompt_cache_key:str | Omit=omit,
    prompt_cache_retention:Optional[Literal['in-memory', '24h']] | Omit=omit,
    reasoning:Optional[Reasoning] | Omit=omit,
    safety_identifier:str | Omit=omit,
    service_tier:Optional[Literal['auto', 'default', 'flex', 'scale', 'priority']] | Omit=omit,
    store:Optional[bool] | Omit=omit,
    stream_options:Optional[response_create_params.StreamOptions] | Omit=omit,
    temperature:Optional[float] | Omit=omit,
    text:ResponseTextConfigParam | Omit=omit,
    top_logprobs:Optional[int] | Omit=omit,
    top_p:Optional[float] | Omit=omit,
    truncation:Optional[Literal['auto', 'disabled']] | Omit=omit,
    user:str | Omit=omit,
    extra_headers:Headers | None=None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method.
    extra_query:Query | None=None, extra_body:Body | None=None,
    timeout:float | httpx.Timeout | None | NotGiven=NOT_GIVEN
):
```
*Add prompt `pr` to dialog and get a response from the model,
automatically following up with tool call messages*
Exported source
``` python
_final_prompt = "You have no more tool uses. Please summarize your findings. If you did not complete your goal please tell the user what further work needs to be done so they can choose how best to proceed."
```
Exported source
``` python
@patch
@delegates(Chat.__call__)
def toolloop(self:Chat,
             pr, # Prompt to pass to the model
             max_steps=10, # Maximum number of tool requests to loop through
             cont_func:callable=noop, # Function that stops loop if returns False
             final_prompt=_final_prompt, # Prompt to add if last message is a tool call
             **kwargs):
    "Add prompt `pr` to dialog and get a response from the model, automatically following up with tool call messages"
    @save_iter
    def _f(o):
        init_n = len(self.h)
        r = self(pr, **kwargs)
        yield r
        if len(self.last)>1: yield from self.last[1:]
        for i in range(max_steps-1):
            x = self.h[-1]
            if not (isinstance(x, dict) and x['type']=='function_call_output'): break
            r = self(final_prompt if i==max_steps-2 else None, **kwargs)
            yield r
            if len(self.last)>1: yield from self.last[1:]
            if not cont_func(*self.h[-3:]): break
        o.value = self.h[init_n+1:]
    return _f()
```
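The `save_iter` decorator is defined elsewhere; what `toolloop` relies on is that the returned generator object also carries attributes set by the generator body, such as `.value`. A minimal stand-in illustrating that contract (assumed behavior, not cosette's real `save_iter`) looks like:
``` python
# Hypothetical stand-in illustrating the contract `toolloop` relies on
# (not cosette's real `save_iter`): the wrapped generator function receives
# the returned object itself as its first argument, so the body can set
# attributes (like `.value`) that the caller reads once iteration finishes.
def _save_iter_sketch(genf):
    class _Saved:
        def __iter__(self): return self._it
    def wrapper(*args, **kwargs):
        o = _Saved()
        o._it = genf(o, *args, **kwargs)
        return o
    return wrapper

@_save_iter_sketch
def _count(o, n):
    yield from range(n)
    o.value = 'done'   # surfaced to the caller as `r.value`

r = _count(3)
print(list(r), r.value)  # [0, 1, 2] done
```
This is why the examples below can consume the loop and then read `r.value` afterwards.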
### Test Customer Dataset
``` python
def show(x):
    if getattr(x, 'output_text', None): r = x
    else: r = getattr(x,'output',x)
    display(r)
```
``` python
chat = Chat(model, tools=tools)
pr = 'Can you tell me the email address for customer C1?'
r = chat.toolloop(pr)
res = list(r)
for o in res: show(o)
```
- Retrieving customer C1
The email address for customer C1 (John Doe) is john@example.com.
- id: resp_0787bac936d9204f006943fcee2ba48195ad60e3c1ec52d1d7
- created_at: 1766063342.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_0787bac936d9204f006943fcee7cd881959b82edf88db5e70d’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_0787bac936d9204f006943fcef3ee88195832b056ec436b34f’,
content=\[ResponseOutputText(annotations=\[\], text=‘The email address
for customer C1 (John Doe) is john@example.com.’, type=‘output_text’,
logprobs=\[\])\], role=‘assistant’, status=‘completed’,
type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘get_customer_info’, parameters={‘type’:
‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’,
‘description’: ‘ID of the customer’}}, ‘required’: \[‘customer_id’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=“Retrieves a customer’s information and their orders based
on the customer ID”), FunctionTool(name=‘get_order_details’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Retrieves the details of a specific
order based on the order ID’), FunctionTool(name=‘cancel_order’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Cancels an order based on the provided
order ID:- type: boolean’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=316,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=86,
output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
total_tokens=402)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
ResponseOutputMessage(id='msg_0787bac936d9204f006943fcef3ee88195832b056ec436b34f', content=[ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')
------------------------------------------------------------------------
source
### loop_outputs
``` python
def loop_outputs(
    res
):
```
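*Flatten the results of a `toolloop` run into a list of plain dicts, expanding each response into its output items*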
Exported source
``` python
def loop_outputs(res):
    return [dict(p) for o in res for p in ([o] if isinstance(o,dict) else getattr(o,'output',[]))]
```
``` python
cl = loop_outputs(res)
cl
```
[{'id': 'rs_0787bac936d9204f006943fceb6f088195a44ade2155966909',
'summary': [],
'type': 'reasoning',
'content': None,
'encrypted_content': None,
'status': None},
{'arguments': '{"customer_id":"C1"}',
'call_id': 'call_wqbYqGHnvgMg8lSY9JMUrwzU',
'name': 'get_customer_info',
'type': 'function_call',
'id': 'fc_0787bac936d9204f006943fcebe6d08195b9cd3ca97a01b7d2',
'status': 'completed'},
{'type': 'function_call_output',
'call_id': 'call_wqbYqGHnvgMg8lSY9JMUrwzU',
'output': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Cancelled'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Cancelled'}]}"},
{'id': 'rs_0787bac936d9204f006943fcec9c108195b41ad9735c482bc4',
'summary': [],
'type': 'reasoning',
'content': None,
'encrypted_content': None,
'status': None},
{'id': 'msg_0787bac936d9204f006943fced6e3c8195b5b4c9710ecb6485',
'content': [ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=[])],
'role': 'assistant',
'status': 'completed',
'type': 'message'}]
``` python
def disp_tc(x):
    if x['type']=='function_call': return f"- `{x['name']}({x['arguments']})`\n"
    elif x['type']=='function_call_output': return f" - `{x['output']}`\n\n"
    else: return ''.join(o.text for o in x['content'])
```
``` python
# Markdown(''.join(map(disp_tc, cl)))
```
``` python
pprint(r.value)
```
[ResponseReasoningItem(id='rs_0787bac936d9204f006943fcee7cd881959b82edf88db5e70d', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_0787bac936d9204f006943fcef3ee88195832b056ec436b34f', content=[ResponseOutputText(annotations=[], text='The email address for customer C1 (John Doe) is john@example.com.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
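Because `toolloop` sets `o.value = self.h[init_n+1:]` before finishing, `r.value` holds every history item added after the initial prompt, i.e. the full transcript of tool calls, tool outputs, and responses for the loop.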
``` python
orders, customers = _get_orders_customers()
```
``` python
chat = Chat(model, tools=tools)
r = chat.toolloop('What is the status of order O2?')
for o in r: display(getattr(o,'output',o))
```
- Retrieving order O2
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcf092688194adde78fdb74e90d3', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_ARnBd6xSrlcAfn3wBQGdLtiu', name='get_order_details', type='function_call', id='fc_080cc194cfa17c94006943fcf1ed8c8194adc4f0474afe98cc', status='completed')]
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_ARnBd6xSrlcAfn3wBQGdLtiu', name='get_order_details', type='function_call', id='fc_080cc194cfa17c94006943fcf1ed8c8194adc4f0474afe98cc', status='completed')
{'type': 'function_call_output',
'call_id': 'call_ARnBd6xSrlcAfn3wBQGdLtiu',
'output': "{'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Processing'}"}
[ResponseOutputMessage(id='msg_080cc194cfa17c94006943fcf3101081948ac3faafe2dcac65', content=[ResponseOutputText(annotations=[], text='Order O2 is currently: Processing.', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
``` python
r = chat.toolloop('Please cancel all orders for customer C1 for me.')
res = list(r)
for o in res: display(getattr(o,'output',o))
```
- Retrieving customer C1
- Cancelling order O1
- Cancelling order O2
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcf412208194a4c7b87ce81c4736', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_Btl5F6UrkBoJJaDQzke1VvVN', name='get_customer_info', type='function_call', id='fc_080cc194cfa17c94006943fcf82ba08194a65b676e302e9aa6', status='completed')]
ResponseFunctionToolCall(arguments='{"customer_id":"C1"}', call_id='call_Btl5F6UrkBoJJaDQzke1VvVN', name='get_customer_info', type='function_call', id='fc_080cc194cfa17c94006943fcf82ba08194a65b676e302e9aa6', status='completed')
{'type': 'function_call_output',
'call_id': 'call_Btl5F6UrkBoJJaDQzke1VvVN',
'output': "{'name': 'John Doe', 'email': 'john@example.com', 'phone': '123-456-7890', 'orders': [{'id': 'O1', 'product': 'Widget A', 'quantity': 2, 'price': 19.99, 'status': 'Shipped'}, {'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Processing'}]}"}
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcf913188194ab017b95ba08a13e', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_4yaVagsUWe86YMk25kyCU9x2', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfa8bd081949731f7da8c5adc86', status='completed'),
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_Vy0mCP8ocOYR31qk4sbVgRw5', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfab9688194a97ce8bb7ff46706', status='completed')]
ResponseFunctionToolCall(arguments='{"order_id":"O1"}', call_id='call_4yaVagsUWe86YMk25kyCU9x2', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfa8bd081949731f7da8c5adc86', status='completed')
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_Vy0mCP8ocOYR31qk4sbVgRw5', name='cancel_order', type='function_call', id='fc_080cc194cfa17c94006943fcfab9688194a97ce8bb7ff46706', status='completed')
{'type': 'function_call_output',
'call_id': 'call_4yaVagsUWe86YMk25kyCU9x2',
'output': 'True'}
{'type': 'function_call_output',
'call_id': 'call_Vy0mCP8ocOYR31qk4sbVgRw5',
'output': 'True'}
[ResponseReasoningItem(id='rs_080cc194cfa17c94006943fcfbd120819490c3ad1d75350865', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseOutputMessage(id='msg_080cc194cfa17c94006943fd043f948194ad1bb58d51d2de71', content=[ResponseOutputText(annotations=[], text='Done — I canceled all orders for customer C1 (John Doe).\n\nSummary:\n- O1 — Widget A — previous status: Shipped — cancellation: Success\n- O2 — Gadget B — previous status: Processing — cancellation: Success\n\nWould you like me to check refund status, send a confirmation to john@example.com, or do anything else?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]
ResponseOutputMessage(id='msg_080cc194cfa17c94006943fd043f948194ad1bb58d51d2de71', content=[ResponseOutputText(annotations=[], text='Done — I canceled all orders for customer C1 (John Doe).\n\nSummary:\n- O1 — Widget A — previous status: Shipped — cancellation: Success\n- O2 — Gadget B — previous status: Processing — cancellation: Success\n\nWould you like me to check refund status, send a confirmation to john@example.com, or do anything else?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')
``` python
# cl = loop_outputs(res)
# Markdown('\n'.join(map(disp_tc, cl)))
```
``` python
for o in chat.toolloop('What is the status of order O2?'): display(o)
```
- Retrieving order O2
- id: resp_080cc194cfa17c94006943fd06606c8194acbf56e9291c70ca
- created_at: 1766063366.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_080cc194cfa17c94006943fd06bbd4819499041c0e2a896d43’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None), ResponseFunctionToolCall(arguments=‘{“order_id”:“O2”}’,
call_id=‘call_47iqPnLAFKxbJ4JJ9EQwWw71’, name=‘get_order_details’,
type=‘function_call’,
id=‘fc_080cc194cfa17c94006943fd07c3308194a643843cb1720a9e’,
status=‘completed’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘get_customer_info’, parameters={‘type’:
‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’,
‘description’: ‘ID of the customer’}}, ‘required’: \[‘customer_id’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=“Retrieves a customer’s information and their orders based
on the customer ID”), FunctionTool(name=‘get_order_details’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Retrieves the details of a specific
order based on the order ID’), FunctionTool(name=‘cancel_order’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Cancels an order based on the provided
order ID:- type: boolean’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=521,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=87,
output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
total_tokens=608)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
ResponseFunctionToolCall(arguments='{"order_id":"O2"}', call_id='call_47iqPnLAFKxbJ4JJ9EQwWw71', name='get_order_details', type='function_call', id='fc_080cc194cfa17c94006943fd07c3308194a643843cb1720a9e', status='completed')
{'type': 'function_call_output',
'call_id': 'call_47iqPnLAFKxbJ4JJ9EQwWw71',
'output': "{'id': 'O2', 'product': 'Gadget B', 'quantity': 1, 'price': 49.99, 'status': 'Cancelled'}"}
Order O2 is now: Cancelled.
- id: resp_080cc194cfa17c94006943fd0841e88194b12aa8c8f30b4e64
- created_at: 1766063368.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseOutputMessage(id=‘msg_080cc194cfa17c94006943fd08ad488194a093653ee36021db’,
content=\[ResponseOutputText(annotations=\[\], text=‘Order O2 is now:
Cancelled.’, type=‘output_text’, logprobs=\[\])\], role=‘assistant’,
status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘get_customer_info’, parameters={‘type’:
‘object’, ‘properties’: {‘customer_id’: {‘type’: ‘string’,
‘description’: ‘ID of the customer’}}, ‘required’: \[‘customer_id’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=“Retrieves a customer’s information and their orders based
on the customer ID”), FunctionTool(name=‘get_order_details’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Retrieves the details of a specific
order based on the order ID’), FunctionTool(name=‘cancel_order’,
parameters={‘type’: ‘object’, ‘properties’: {‘order_id’: {‘type’:
‘string’, ‘description’: ‘ID of the order to cancel’}}, ‘required’:
\[‘order_id’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘Cancels an order based on the provided
order ID:- type: boolean’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘medium’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘medium’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=676,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=13,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=689)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
### Test Math Example
``` python
def add(x: int, y: int) -> int:
    "adds x and y."
    return x + y

def mul(x: int, y: int) -> int:
    "multiplies x and y."
    return x * y
```
``` python
chat = Chat(model, tools=[add, mul], **chatkw)
pr = 'Can you add 1258585825128 to 34959234595, multiply by 93, and then add (-12439149)?'
r = chat.toolloop(pr)
for o in r: show(o)
```
[ResponseReasoningItem(id='rs_02621606c3b513dc006943fd09ad40819781fb7b32d5e5996a', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_Fg3v2kfgWIogSl1IEB0w1Y0K', name='add', type='function_call', id='fc_02621606c3b513dc006943fd09fdf881979ba71de45af8d588', status='completed')]
ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_Fg3v2kfgWIogSl1IEB0w1Y0K', name='add', type='function_call', id='fc_02621606c3b513dc006943fd09fdf881979ba71de45af8d588', status='completed')
{'type': 'function_call_output',
'call_id': 'call_Fg3v2kfgWIogSl1IEB0w1Y0K',
'output': '1293545059723'}
[ResponseFunctionToolCall(arguments='{"x":1293545059723,"y":93}', call_id='call_9rimpAnaVInF4ssbHosAfTi8', name='mul', type='function_call', id='fc_02621606c3b513dc006943fd0bae9c81978026c7bfaae4a759', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_9rimpAnaVInF4ssbHosAfTi8',
'output': '120299690554239'}
[ResponseFunctionToolCall(arguments='{"x":120299690554239,"y":-12439149}', call_id='call_OS43dlWKEYtYAocZpHJ94RXM', name='add', type='function_call', id='fc_02621606c3b513dc006943fd0c9f5c8197a11bc188b0174d6d', status='completed')]
{'type': 'function_call_output',
'call_id': 'call_OS43dlWKEYtYAocZpHJ94RXM',
'output': '120299678115090'}
120299678115090
- id: resp_02621606c3b513dc006943fd0d30f48197a911787dca9e5d00
- created_at: 1766063373.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseOutputMessage(id=‘msg_02621606c3b513dc006943fd0d80f88197b58e845951e295e1’,
content=\[ResponseOutputText(annotations=\[\], text=‘120299678115090’,
type=‘output_text’, logprobs=\[\])\], role=‘assistant’,
status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘add’, parameters={‘type’: ‘object’,
‘properties’: {‘x’: {‘type’: ‘integer’, ‘description’: ’‘}, ’y’:
{‘type’: ‘integer’, ‘description’: ’‘}}, ’required’: \[‘x’, ‘y’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=‘adds x and y.:- type: integer’), FunctionTool(name=‘mul’,
parameters={‘type’: ‘object’, ‘properties’: {‘x’: {‘type’: ‘integer’,
‘description’: ’‘}, ’y’: {‘type’: ‘integer’, ‘description’: ’‘}},
’required’: \[‘x’, ‘y’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘multiplies x and y.:- type: integer’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=250,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=9,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=259)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
``` python
(1258585825128 + 34959234595) * 93 - 12439149
```
120299678115090
``` python
chat = Chat(model, tools=[add, mul], **chatkw)
r = chat.toolloop(pr, stream=True)
for o in r:
    if isinstance(o, dict): print('- ', o)
    else:
        for p in o: print(p, end='')
        if hasattr(o, 'value'): show(o.value)
```
[ResponseReasoningItem(id='rs_0944024f48105103006943fd116be8819483df3a9f25e3d328', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"x":1258585825128,"y":34959234595}', call_id='call_B6xGJDpQJm5GrS2hnngybWTe', name='add', type='function_call', id='fc_0944024f48105103006943fd11abcc81949200a4eabf12e97b', status='completed')]
('arguments', '{"x":1258585825128,"y":34959234595}')('call_id', 'call_B6xGJDpQJm5GrS2hnngybWTe')('name', 'add')('type', 'function_call')('id', 'fc_0944024f48105103006943fd11abcc81949200a4eabf12e97b')('status', 'completed')- {'type': 'function_call_output', 'call_id': 'call_B6xGJDpQJm5GrS2hnngybWTe', 'output': '1293545059723'}
[ResponseFunctionToolCall(arguments='{"x":1293545059723,"y":93}', call_id='call_Vb3v1awZxvoZuUjbq9RRZrfM', name='mul', type='function_call', id='fc_0944024f48105103006943fd1402188194a2adccf222c76591', status='completed')]
- {'type': 'function_call_output', 'call_id': 'call_Vb3v1awZxvoZuUjbq9RRZrfM', 'output': '120299690554239'}
[ResponseFunctionToolCall(arguments='{"x":120299690554239,"y":-12439149}', call_id='call_Dd1Uzt8rMfWJCH0irbMa5OVV', name='add', type='function_call', id='fc_0944024f48105103006943fd14e54481948906bd955b405418', status='completed')]
- {'type': 'function_call_output', 'call_id': 'call_Dd1Uzt8rMfWJCH0irbMa5OVV', 'output': '120299678115090'}
120299678115090
120299678115090
- id: resp_0944024f48105103006943fd1583ec8194863012f06a95bb9a
- created_at: 1766063381.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseOutputMessage(id=‘msg_0944024f48105103006943fd16463c81949df34dd0158af4c1’,
content=\[ResponseOutputText(annotations=\[\], text=‘120299678115090’,
type=‘output_text’, logprobs=\[\])\], role=‘assistant’,
status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘add’, parameters={‘type’: ‘object’,
‘properties’: {‘x’: {‘type’: ‘integer’, ‘description’: ’‘}, ’y’:
{‘type’: ‘integer’, ‘description’: ’‘}}, ’required’: \[‘x’, ‘y’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=‘adds x and y.:- type: integer’), FunctionTool(name=‘mul’,
parameters={‘type’: ‘object’, ‘properties’: {‘x’: {‘type’: ‘integer’,
‘description’: ’‘}, ’y’: {‘type’: ‘integer’, ‘description’: ’‘}},
’required’: \[‘x’, ‘y’\], ‘additionalProperties’: False}, strict=True,
type=‘function’, description=‘multiplies x and y.:- type: integer’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=250,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=9,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=259)
- user: None
- store: True
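The `cont_func` argument gives another way to stop the loop early: after each follow-up step `toolloop` calls it with the last few history items and breaks if it returns False. A sketch of such a predicate (the `stop_after_first` helper is hypothetical, not part of cosette):
``` python
# Hypothetical early-stop predicate: return False as soon as the most recent
# history items still contain a tool output, so the loop breaks instead of
# letting the model keep following up with more tool calls.
def stop_after_first(*recent):
    return not any(isinstance(x, dict) and x.get('type')=='function_call_output'
                   for x in recent)

chat = Chat(model, tools=[add, mul], **chatkw)
for o in chat.toolloop(pr, cont_func=stop_after_first): show(o)
```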
### Error Conditions: Out of Iterations, Exception During Tool Invocation
``` python
def mydiv(a:float, b:float):
    "Divide two numbers"
    return a / b
```
``` python
chat = Chat(model, tools=[mydiv], **chatkw)
r = chat.toolloop('Please calculate this sequence using your tools: 43/23454; 652/previous result; 6843/previous result; 321/previous result', max_steps=2)
for o in r: show(o)
```
[ResponseReasoningItem(id='rs_02f9e0725e9fb6a5006943fd16f3dc81979a3aa89dfcb98aca', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":43,"b":23454}', call_id='call_Fnsc5iNP8rMuj0rqeBLzXVsO', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd177c6c8197a5ad34f31755933a', status='completed'),
ResponseFunctionToolCall(arguments='{"a":652,"b":0}', call_id='call_k7APDmfK4dy57LeiHYhBCNq6', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17b4d08197ada7efed9792c679', status='completed'),
ResponseFunctionToolCall(arguments='{"a":6843,"b":0}', call_id='call_ZpzLBH2zM3Wcy9aoFCbnZ0mN', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17e5bc8197b3e6f2903db6ce9d', status='completed'),
ResponseFunctionToolCall(arguments='{"a":321,"b":0}', call_id='call_JGS2KeOU7ku3n56kqbSrvEGy', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd181dd88197bd6492c3465327ea', status='completed')]
ResponseFunctionToolCall(arguments='{"a":43,"b":23454}', call_id='call_Fnsc5iNP8rMuj0rqeBLzXVsO', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd177c6c8197a5ad34f31755933a', status='completed')
ResponseFunctionToolCall(arguments='{"a":652,"b":0}', call_id='call_k7APDmfK4dy57LeiHYhBCNq6', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17b4d08197ada7efed9792c679', status='completed')
ResponseFunctionToolCall(arguments='{"a":6843,"b":0}', call_id='call_ZpzLBH2zM3Wcy9aoFCbnZ0mN', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd17e5bc8197b3e6f2903db6ce9d', status='completed')
ResponseFunctionToolCall(arguments='{"a":321,"b":0}', call_id='call_JGS2KeOU7ku3n56kqbSrvEGy', name='mydiv', type='function_call', id='fc_02f9e0725e9fb6a5006943fd181dd88197bd6492c3465327ea', status='completed')
{'type': 'function_call_output',
'call_id': 'call_Fnsc5iNP8rMuj0rqeBLzXVsO',
'output': '0.001833375969983798'}
{'type': 'function_call_output',
'call_id': 'call_k7APDmfK4dy57LeiHYhBCNq6',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
{'type': 'function_call_output',
'call_id': 'call_ZpzLBH2zM3Wcy9aoFCbnZ0mN',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
{'type': 'function_call_output',
'call_id': 'call_JGS2KeOU7ku3n56kqbSrvEGy',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
I computed the first division successfully:

- 43 / 23454 = 0.001833375969983798

I attempted the next steps but they failed because I tried to divide by
zero (I passed 0 as the “previous result” for subsequent operations),
causing errors. To complete the sequence you want, I need to perform
these successive calculations using the preceding result each time:

1. 652 / (43/23454)
2. 6843 / (result of step 2)
3. 321 / (result of step 3)

If you want, I can now:

- Recompute the chain without tool limits and give all four results, or
- Compute them step-by-step here directly (no tools needed). Which do you prefer?
- id: resp_02f9e0725e9fb6a5006943fd18bff081979e13bb5930ee32b7
- created_at: 1766063384.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseReasoningItem(id=‘rs_02f9e0725e9fb6a5006943fd19334c8197b4b20beb1ae09493’,
summary=\[\], type=‘reasoning’, content=None, encrypted_content=None,
status=None),
ResponseOutputMessage(id=‘msg_02f9e0725e9fb6a5006943fd195a9c81978fb9bcca07f80c87’,
content=\[ResponseOutputText(annotations=\[\], text=‘I computed the
first division successfully:- 43 / 23454 =
0.001833375969983798attempted the next steps but they failed because I
tried to divide by zero (I passed 0 as the “previous result” for
subsequent operations), causing errors. To complete the sequence you
want, I need to perform these successive calculations using the
preceding result each time:. 652 / (43/23454). 6843 / (result of step
2). 321 / (result of step 3)you want, I can now:- Recompute the chain
without tool limits and give all four results, or- Compute them
step-by-step here directly (no tools needed). Which do you prefer?’,
type=‘output_text’, logprobs=\[\])\], role=‘assistant’,
status=‘completed’, type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘mydiv’, parameters={‘type’: ‘object’,
‘properties’: {‘a’: {‘type’: ‘number’, ‘description’: ’‘}, ’b’:
{‘type’: ‘number’, ‘description’: ’‘}}, ’required’: \[‘a’, ‘b’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=‘Divide two numbers’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=537,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=163,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=700)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True
ResponseOutputMessage(id='msg_02f9e0725e9fb6a5006943fd195a9c81978fb9bcca07f80c87', content=[ResponseOutputText(annotations=[], text='I computed the first division successfully:\n- 43 / 23454 = 0.001833375969983798\n\nI attempted the next steps but they failed because I tried to divide by zero (I passed 0 as the “previous result” for subsequent operations), causing errors. To complete the sequence you want, I need to perform these successive calculations using the preceding result each time:\n1. 652 / (43/23454)\n2. 6843 / (result of step 2)\n3. 321 / (result of step 3)\n\nIf you want, I can now:\n- Recompute the chain without tool limits and give all four results, or\n- Compute them step-by-step here directly (no tools needed). Which do you prefer?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')
This tests the `raise_on_err=False` change to the `toolslm.call_func`
invocation. We should see the error returned to the model as a string
instead of crashing:
``` python
chat = Chat(model, tools=[mydiv], **chatkw)
r = chat.toolloop('Try dividing 1 by 0 and see what the error result is')
for o in r: show(o)
```
[ResponseReasoningItem(id='rs_0af57bb05fb6f746006943fd1bd07c81958a319ad4cb70eae8', summary=[], type='reasoning', content=None, encrypted_content=None, status=None),
ResponseFunctionToolCall(arguments='{"a":1,"b":0}', call_id='call_0FGtqnXi4rLuPC0ivZcaRw24', name='mydiv', type='function_call', id='fc_0af57bb05fb6f746006943fd1c29a08195989bf0c4f670baca', status='completed')]
ResponseFunctionToolCall(arguments='{"a":1,"b":0}', call_id='call_0FGtqnXi4rLuPC0ivZcaRw24', name='mydiv', type='function_call', id='fc_0af57bb05fb6f746006943fd1c29a08195989bf0c4f670baca', status='completed')
{'type': 'function_call_output',
'call_id': 'call_0FGtqnXi4rLuPC0ivZcaRw24',
'output': 'Traceback (most recent call last):\n File "/usr/local/lib/python3.12/site-packages/toolslm/funccall.py", line 215, in call_func\n try: return func(**inps)\n ^^^^^^^^^^^^\n File "/tmp/ipykernel_5385/246724137.py", line 3, in mydiv\n return a / b\n ~~^~~\nZeroDivisionError: division by zero\n'}
ZeroDivisionError: division by zero
- id: resp_0af57bb05fb6f746006943fd1cbdac8195b5d480c66c146b06
- created_at: 1766063388.0
- error: None
- incomplete_details: None
- instructions: None
- metadata: {}
- model: gpt-5-mini-2025-08-07
- object: response
- output:
\[ResponseOutputMessage(id=‘msg_0af57bb05fb6f746006943fd1d335881958929318eb6ad6c40’,
content=\[ResponseOutputText(annotations=\[\],
text=‘ZeroDivisionError: division by zero’, type=‘output_text’,
logprobs=\[\])\], role=‘assistant’, status=‘completed’,
type=‘message’)\]
- parallel_tool_calls: True
- temperature: 1.0
- tool_choice: auto
- tools: \[FunctionTool(name=‘mydiv’, parameters={‘type’: ‘object’,
‘properties’: {‘a’: {‘type’: ‘number’, ‘description’: ’‘}, ’b’:
{‘type’: ‘number’, ‘description’: ’‘}}, ’required’: \[‘a’, ‘b’\],
‘additionalProperties’: False}, strict=True, type=‘function’,
description=‘Divide two numbers’)\]
- top_p: 1.0
- background: False
- conversation: None
- max_output_tokens: 4096
- max_tool_calls: None
- previous_response_id: None
- prompt: None
- prompt_cache_key: None
- prompt_cache_retention: None
- reasoning: Reasoning(effort=‘minimal’, generate_summary=None,
summary=None)
- safety_identifier: None
- service_tier: default
- status: completed
- text: ResponseTextConfig(format=ResponseFormatText(type=‘text’),
verbosity=‘low’)
- top_logprobs: 0
- truncation: disabled
- usage: ResponseUsage(input_tokens=198,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=11,
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
total_tokens=209)
- user: None
- billing: {‘payer’: ‘openai’}
- store: True