Code Sandbox

The GLM-4-AllTools model provides a Code Interpreter tool that can accurately understand programming requirements described in natural language and automatically generate code snippets to solve practical problems. It also supports Sandbox, a secure code sandbox that simulates code execution results in a real environment.

Enabling Code Sandbox

When using the model, if the Code Interpreter tool is selected, the sandbox environment will be invoked by default, corresponding to the parameter sandbox = auto.

Example Call

from zhipuai import ZhipuAI

# Create the SDK client; supply your own key below.
client = ZhipuAI(api_key="") # Please fill in your own APIKey

# User request asking the model to chart the holiday travel data.
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "The national tourism travel data during the May Day holiday over the years is [100, 200, 300, 400, 500]. Create a bar chart to show the data trend.",
            }
        ],
    }
]

# Selecting the code_interpreter tool enables the sandbox by default
# (equivalent to sandbox = "auto").
tools = [{"type": "code_interpreter"}]

response = client.chat.completions.create(
    model="glm-4-alltools",  # Fill in the name of the model to be called
    messages=messages,
    stream=True,
    tools=tools,
)

# Print each streamed chunk as it arrives.
for chunk in response:
    print(chunk)

Response Example

After the code sandbox is enabled, the model automatically generates and executes the code, then continues reasoning based on the execution output.

ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_87601986062581749962', function=None, type='code_interpreter', code_interpreter={'input': ' the'})]), finish_reason=None, index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_87601986062581749962', function=None, type='code_interpreter', code_interpreter={'input': ' chart'})]), finish_reason=None, index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_87601986062581749962', function=None, type='code_interpreter', code_interpreter={'input': '\n'})]), finish_reason=None, index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_87601986062581749962', function=None, type='code_interpreter', code_interpreter={'input': 'plt'})]), finish_reason=None, index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_87601986062581749962', function=None, type='code_interpreter', code_interpreter={'input': '.'})]), finish_reason=None, index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_87601986062581749962', function=None, type='code_interpreter', code_interpreter={'input': 'show'})]), finish_reason=None, index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_87601986062581749962', function=None, type='code_interpreter', code_interpreter={'input': '()'})]), finish_reason=None, index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=None), finish_reason='tool_calls', index=0)], created=1718687730, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='8760198606258174996', choices=[Choice(delta=ChoiceDelta(content=None, role='tool', tool_calls=[ChoiceDeltaToolCall(index=None, id=None, function=None, type='code_interpreter', code_interpreter={'outputs': [{'type': 'file', 'file': 'http://all-tool-interpreter.cn-wlcb.ufileos.com/10571a86-9194-43f7-ab2c-274ba29b9835_fig.png'}]})]), finish_reason=None, index=0)], created=1718687735, model='glm-4-alltools', usage=None, extra_json=None)

Disabling Code Sandbox

If you only need the model to generate code without using the code sandbox to run it, set the parameter sandbox = none. After code generation, the response will return status = requires_action, requiring the user to submit the code execution result.

Example Call

from zhipuai import ZhipuAI

# Create the SDK client; supply your own key below.
client = ZhipuAI(api_key="") # Please fill in your own APIKey

# User request asking the model to chart the holiday travel data.
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "The national tourism travel data during the May Day holiday over the years is [100, 200, 300, 400, 500]. Create a bar chart to show the data trend.",
            }
        ],
    }
]

# Disable the sandbox so the model only generates code and returns
# status = requires_action instead of executing it.
tools = [
    {
        "type": "code_interpreter",
        "code_interpreter": {"sandbox": "none"},
    }
]

response = client.chat.completions.create(
    model="glm-4-alltools",  # Fill in the name of the model to be called
    messages=messages,
    stream=True,
    tools=tools,
)

# Print each streamed chunk as it arrives.
for chunk in response:
    print(chunk)

Response Example

After disabling the sandbox, the model generates code and returns status = requires_action.

ChatCompletionChunk(id='60955153-fcff-4c7b-b610-166cab49a92b', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_Oiwh-QgVVajIUhafnJsFG', function=None, type='code_interpreter', code_interpreter={'input': '\n'})]), finish_reason=None, index=0)], created=1719802220, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='60955153-fcff-4c7b-b610-166cab49a92b', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_Oiwh-QgVVajIUhafnJsFG', function=None, type='code_interpreter', code_interpreter={'input': 'plt'})]), finish_reason=None, index=0)], created=1719802220, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='60955153-fcff-4c7b-b610-166cab49a92b', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_Oiwh-QgVVajIUhafnJsFG', function=None, type='code_interpreter', code_interpreter={'input': '.'})]), finish_reason=None, index=0)], created=1719802220, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='60955153-fcff-4c7b-b610-166cab49a92b', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_Oiwh-QgVVajIUhafnJsFG', function=None, type='code_interpreter', code_interpreter={'input': 'show'})]), finish_reason=None, index=0)], created=1719802220, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='60955153-fcff-4c7b-b610-166cab49a92b', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_Oiwh-QgVVajIUhafnJsFG', function=None, type='code_interpreter', code_interpreter={'input': '()'})]), finish_reason=None, index=0)], created=1719802220, model='glm-4-alltools', usage=None, extra_json=None)
ChatCompletionChunk(id='60955153-fcff-4c7b-b610-166cab49a92b', choices=[Choice(delta=ChoiceDelta(content=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=None, id='call_Oiwh-QgVVajIUhafnJsFG', function=None, type='code_interpreter', code_interpreter={'input': ''})]), finish_reason='tool_calls', index=0)], created=1719802220, model='glm-4-alltools', usage=CompletionUsage(prompt_tokens=510, completion_tokens=226, total_tokens=736), extra_json=None, status='requires_action')

Submitting Sandbox Results

Next, the user needs to submit the code generated by the model (as an Assistant Message) together with the execution result produced by their own code sandbox (as a Tool Message). After submission, the model will continue to reason.

from zhipuai import ZhipuAI

# Create the SDK client; supply your own key below.
client = ZhipuAI(api_key="") # Please fill in your own APIKey

# Conversation history: the original user request, the code the model
# generated (assistant message), and the sandbox execution result we
# obtained ourselves (tool message). Submitting these lets the model
# continue reasoning.
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "The national tourism travel data during the May Day holiday over the years is [100, 200, 300, 400, 500]. Create a bar chart to show the data trend.",
            }
        ],
    },
    {
        # Code previously generated by the model.
        "role": "assistant",
        "content":"""
                import matplotlib.pyplot as plt
                
                # Data received from the API
                years = ["2018", "2019", "2020", "2021", "2022", "2023", "2024"]
                tourist_data = [100, 100, 200, 200, 300, 400, 500]  # Assuming the data for 2023 and 2024 based on the trend
                
                # Creating a bar chart
                plt.figure(figsize=(10, 6))
                plt.bar(years, tourist_data, color='skyblue')
                plt.xlabel('Year')
                plt.ylabel('Tourist Count')
                plt.title('National Tourism Travel Data Trend During May Day Holiday from 2018 to 2024')
                plt.grid(axis='y')
                
                # Show the chart
                plt.show()
            """
    },
    {
        # Output produced by running the code in our own sandbox.
        "role": "tool",
        "content":"http://all-tool-interpreter.cn-wlcb.ufileos.com/e01459c3-ddd6-4963-adf7-163513184f0c_fig.png"
    },
]

# Sandbox stays disabled; we supply execution results manually.
tools = [
    {
        "type": "code_interpreter",
        "code_interpreter": {"sandbox": "none"},
    }
]

response = client.chat.completions.create(
    model="glm-4-alltools",  # Fill in the name of the model to be called
    messages=messages,
    stream=True,
    tools=tools,
)

# Print each streamed chunk as it arrives.
for chunk in response:
    print(chunk)