Skip to content

Commit 904bb6f

Browse files
author
mengqian
committed
Merge remote-tracking branch 'origin/main' into feat-fileparsing
2 parents b155301 + d0c8ad2 commit 904bb6f

File tree

11 files changed

+153
-35
lines changed

11 files changed

+153
-35
lines changed

examples/basic_usage.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,9 @@ def completion():
66

77
# Create chat completion
88
response = client.chat.completions.create(
9-
model='glm-4',
9+
model='glm-4.6',
1010
messages=[{'role': 'user', 'content': 'Hello, Z.ai!'}],
11-
temperature=0.7,
11+
temperature=1.0,
1212
)
1313
print(response.choices[0].message.content)
1414

@@ -19,7 +19,7 @@ def completion_with_stream():
1919

2020
# Create chat completion
2121
response = client.chat.completions.create(
22-
model='glm-4',
22+
model='glm-4.6',
2323
messages=[
2424
{'role': 'system', 'content': 'You are a helpful assistant.'},
2525
{'role': 'user', 'content': 'Tell me a story about AI.'},
@@ -38,7 +38,7 @@ def completion_with_websearch():
3838

3939
# Create chat completion
4040
response = client.chat.completions.create(
41-
model='glm-4',
41+
model='glm-4.6',
4242
messages=[
4343
{'role': 'system', 'content': 'You are a helpful assistant.'},
4444
{'role': 'user', 'content': 'What is artificial intelligence?'},
@@ -52,7 +52,7 @@ def completion_with_websearch():
5252
},
5353
}
5454
],
55-
temperature=0.5,
55+
temperature=1.0,
5656
max_tokens=2000,
5757
)
5858

@@ -234,10 +234,10 @@ def ofZhipu():
234234
print(response.choices[0].message.content)
235235

236236
if __name__ == '__main__':
237-
# completion()
238-
# completion_with_stream()
237+
completion()
238+
completion_with_stream()
239239
# completion_with_websearch()
240-
multi_modal_chat()
240+
# multi_modal_chat()
241241
# role_play()
242242
# assistant_conversation()
243243
# video_generation()

examples/stream_tools.py

Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
1+
from zai import ZhipuAiClient


def main():
    """Stream a chat completion that may trigger tool calls.

    Demonstrates the ``tool_stream=True`` option: reasoning text, answer
    text, and tool-call argument fragments all arrive incrementally and
    are accumulated while being echoed to stdout.
    """
    client = ZhipuAiClient()

    # Create chat completion with tool calls and streaming enabled.
    response = client.chat.completions.create(
        model="glm-4.6",
        messages=[
            {"role": "user", "content": "How is the weather in Beijing and Shanghai? Please provide the answer in Celsius."},
        ],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the weather information for a specific location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string", "description": "City, eg: Beijing, Shanghai"},
                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                        },
                        "required": ["location"],
                    },
                },
            }
        ],
        stream=True,  # enable streaming
        tool_stream=True,  # enable tool call streaming
    )

    # Accumulators for the streamed pieces.
    reasoning_content = ""  # model "thinking" text seen so far
    content = ""  # answer text seen so far
    final_tool_calls = {}  # tool-call index -> accumulated tool call
    reasoning_started = False  # header printed for the reasoning section
    content_started = False  # header printed for the answer section

    # Process the streaming response chunk by chunk.
    for chunk in response:
        if not chunk.choices:
            continue

        delta = chunk.choices[0].delta

        # Streamed reasoning output.
        if hasattr(delta, 'reasoning_content') and delta.reasoning_content:
            if not reasoning_started and delta.reasoning_content.strip():
                print("\n🧠 Thinking: ")
                reasoning_started = True
            reasoning_content += delta.reasoning_content
            print(delta.reasoning_content, end="", flush=True)

        # Streamed answer content.
        if hasattr(delta, 'content') and delta.content:
            if not content_started and delta.content.strip():
                print("\n\n💬 Answer: ")
                content_started = True
            content += delta.content
            print(delta.content, end="", flush=True)

        # Streamed tool-call fragments: the first chunk for an index carries
        # the call skeleton (name + first argument fragment); later chunks
        # only append argument text.
        # Fix: guard with hasattr, consistent with the delta checks above.
        if hasattr(delta, 'tool_calls') and delta.tool_calls:
            for tool_call in delta.tool_calls:
                index = tool_call.index
                if index not in final_tool_calls:
                    # First fragment for this index becomes the accumulator.
                    # (Dropped the original's redundant self-assignment of
                    # function.arguments right after this line.)
                    final_tool_calls[index] = tool_call
                elif tool_call.function.arguments:
                    # Fix: skip None/empty fragments so `+=` never tries to
                    # concatenate str with None.
                    final_tool_calls[index].function.arguments += tool_call.function.arguments

    # Output the fully reassembled tool-call info, if any calls were triggered.
    if final_tool_calls:
        print("\n📋 Function Calls Triggered:")
        for index, tool_call in final_tool_calls.items():
            print(f"  {index}: Function Name: {tool_call.function.name}, Params: {tool_call.function.arguments}")


if __name__ == "__main__":
    main()

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "zai-sdk"
3-
version = "0.0.3.6"
3+
version = "0.0.4"
44
description = "A SDK library for accessing big model apis from Z.ai"
55
authors = ["Z.ai"]
66
readme = "README.md"

src/zai/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
__title__ = 'Z.ai'
2-
__version__ = '0.0.3.6'
2+
__version__ = '0.0.4'

src/zai/api_resource/audio/audio.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@
2222
from zai.types.sensitive_word_check import SensitiveWordCheckRequest
2323

2424
from .transcriptions import Transcriptions
25+
from zai.core._streaming import StreamResponse
26+
from zai.types.audio import AudioSpeechChunk
2527

2628
if TYPE_CHECKING:
2729
from zai._client import ZaiClient
@@ -55,9 +57,11 @@ def speech(
5557
extra_headers: Headers | None = None,
5658
extra_body: Body | None = None,
5759
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
60+
encode_format: str = None,
5861
speed: float | None = 1.0,
5962
volume: float | None = 1.0,
60-
) -> HttpxBinaryResponseContent:
63+
stream: bool | None = False
64+
) -> HttpxBinaryResponseContent | StreamResponse[AudioSpeechChunk]:
6165
"""
6266
Generate speech audio from text input
6367
@@ -79,18 +83,21 @@ def speech(
7983
'input': input,
8084
'voice': voice,
8185
'response_format': response_format,
82-
'sensitive_word_check': sensitive_word_check,
86+
'encode_format': encode_format,
8387
'request_id': request_id,
8488
'user_id': user_id,
8589
'speed': speed,
8690
'volume': volume,
91+
'stream': stream
8792
}
8893
)
8994
return self._post(
9095
'/audio/speech',
9196
body=maybe_transform(body, AudioSpeechParams),
9297
options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout),
9398
cast_type=HttpxBinaryResponseContent,
99+
stream=stream or False,
100+
stream_cls=StreamResponse[AudioSpeechChunk]
94101
)
95102

96103
def customization(

src/zai/api_resource/chat/async_completions.py

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -88,25 +88,6 @@ def create(
8888
watermark_enabled (Optional[bool]): Whether to enable watermark on generated audio
8989
"""
9090
_cast_type = AsyncTaskStatus
91-
logger.debug(f'temperature:{temperature}, top_p:{top_p}')
92-
if temperature is not None and temperature != NOT_GIVEN:
93-
if temperature <= 0:
94-
do_sample = False
95-
temperature = 0.01
96-
# logger.warning("temperature: value range is (0.0, 1.0) open interval,"
97-
# "do_sample rewritten as false (parameters top_p temperature do not take effect)")
98-
if temperature >= 1:
99-
temperature = 0.99
100-
# logger.warning("temperature: value range is (0.0, 1.0) open interval")
101-
if top_p is not None and top_p != NOT_GIVEN:
102-
if top_p >= 1:
103-
top_p = 0.99
104-
# logger.warning("top_p: value range is (0.0, 1.0) open interval, cannot equal 0 or 1")
105-
if top_p <= 0:
106-
top_p = 0.01
107-
# logger.warning("top_p: value range is (0.0, 1.0) open interval, cannot equal 0 or 1")
108-
109-
logger.debug(f'temperature:{temperature}, top_p:{top_p}')
11091
if isinstance(messages, List):
11192
for item in messages:
11293
if item.get('content'):

src/zai/api_resource/chat/completions.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ def create(
6666
response_format: object | None = None,
6767
thinking: object | None = None,
6868
watermark_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
69+
tool_stream: bool | NotGiven = NOT_GIVEN,
6970
) -> Completion | StreamResponse[ChatCompletionChunk]:
7071
"""
7172
Create a chat completion
@@ -93,6 +94,7 @@ def create(
9394
response_format (object): Response format specification
9495
thinking (Optional[object]): Configuration parameters for model reasoning
9596
watermark_enabled (Optional[bool]): Whether to enable watermark on generated audio
97+
tool_stream (Optional[bool]): Whether to enable tool streaming
9698
"""
9799
logger.debug(f'temperature:{temperature}, top_p:{top_p}')
98100
if temperature is not None and temperature != NOT_GIVEN:
@@ -141,6 +143,7 @@ def create(
141143
'response_format': response_format,
142144
'thinking': thinking,
143145
'watermark_enabled': watermark_enabled,
146+
'tool_stream': tool_stream,
144147
}
145148
)
146149
return self._post(

src/zai/types/audio/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
from .audio_customization_param import AudioCustomizationParam
2+
from .audio_speech_chunk import AudioSpeechChunk
23
from .audio_speech_params import AudioSpeechParams
34
from .transcriptions_create_param import TranscriptionsParam
45

5-
__all__ = ['AudioSpeechParams', 'AudioCustomizationParam', 'TranscriptionsParam']
6+
__all__ = ['AudioSpeechParams', 'AudioCustomizationParam', 'TranscriptionsParam', 'AudioSpeechChunk']
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
"""Pydantic-style models for chunks of a streaming audio speech response."""

from typing import List, Optional, Dict, Any

from ...core import BaseModel

__all__ = [
    "AudioSpeechChunk",
    "AudioError",
    "AudioSpeechChoice",
    "AudioSpeechDelta"
]


class AudioSpeechDelta(BaseModel):
    """Incremental payload carried by one streamed speech chunk."""

    # Fragment of synthesized audio for this chunk; None when the chunk
    # carries no content (exact encoding of the string is not visible
    # here — presumably base64 audio data; TODO confirm against the API).
    content: Optional[str] = None
    # Role associated with the delta, if the server reports one.
    role: Optional[str] = None


class AudioSpeechChoice(BaseModel):
    """One choice entry inside a streamed speech chunk."""

    # The incremental content for this choice.
    delta: AudioSpeechDelta
    # Why generation stopped for this choice; None while streaming continues.
    finish_reason: Optional[str] = None
    # Position of this choice in the chunk's choices list.
    index: int

class AudioError(BaseModel):
    """Error details attached to a streamed speech chunk, if any."""

    code: Optional[str] = None
    message: Optional[str] = None


class AudioSpeechChunk(BaseModel):
    """A single server-sent chunk of a streaming audio speech response."""

    # Choices delivered in this chunk.
    choices: List[AudioSpeechChoice]
    # Identifier correlating the chunk with the originating request.
    request_id: Optional[str] = None
    # Creation timestamp reported by the server (epoch seconds, presumably
    # — not established by this file; verify against the API docs).
    created: Optional[int] = None
    # Populated instead of/alongside choices when the server reports an error.
    error: Optional[AudioError] = None

src/zai/types/audio/audio_speech_params.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,3 +29,7 @@ class AudioSpeechParams(TypedDict, total=False):
2929
sensitive_word_check: Optional[SensitiveWordCheckRequest]
3030
request_id: str
3131
user_id: str
32+
encode_format: str
33+
speed: float
34+
volume: float
35+
stream: bool

0 commit comments

Comments
 (0)