You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
```csharp
//MaxCompletionTokens = 800, // o1 series models support this property instead of MaxTokens
Temperature = 0.7
};
});
```
Currently available models are:

- gpt-4-turbo
- gpt-4o
- gpt-4o-mini
- o1-preview
- o1-mini

They have fixed names, available in the [OpenAIChatGptModels.cs file](https://github.com/marcominerva/ChatGptNet/blob/master/src/ChatGptNet/Models/OpenAIChatGptModels.cs).
The configuration can be automatically read from [IConfiguration](https://learn.microsoft.com/dotnet/api/microsoft.extensions.configuration.iconfiguration):

```json
// "Temperature": 0.8,
// "TopP": 1,
// "MaxTokens": 500,
// "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens
// "PresencePenalty": 0,
// "FrequencyPenalty": 0,
// "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object
```
Copy file name to clipboardExpand all lines: src/ChatGptNet/Models/ChatGptParameters.cs
+13Lines changed: 13 additions & 0 deletions
Original file line number
Diff line number
Diff line change
@@ -40,8 +40,21 @@ public class ChatGptParameters
40
40
/// <summary>
/// Gets or sets the maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.
/// </summary>
/// <remarks>
/// This value is now deprecated in favor of <see cref="MaxCompletionTokens"/>, and is not compatible with <see href="https://platform.openai.com/docs/guides/reasoning">o1 series models</see>.
/// </remarks>
/// <seealso cref="MaxCompletionTokens"/>
[JsonPropertyName("max_tokens")]
public int? MaxTokens { get; set; }

/// <summary>
/// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and <see href="https://platform.openai.com/docs/guides/reasoning">reasoning tokens</see>.
/// </summary>
/// <remarks>o1 series models must use this property instead of <see cref="MaxTokens"/>.</remarks>
/// <seealso cref="MaxTokens"/>
[JsonPropertyName("max_completion_tokens")]
public int? MaxCompletionTokens { get; set; }
57
+
45
58
/// <summary>
46
59
/// Gets or sets the presence penalties for chat completion. Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics (default: 0).
Copy file name to clipboardExpand all lines: src/ChatGptNet/Models/ChatGptRequest.cs
+12Lines changed: 12 additions & 0 deletions
Original file line number
Diff line number
Diff line change
@@ -126,9 +126,21 @@ internal class ChatGptRequest
126
126
/// <summary>
/// Gets or sets the maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.
/// </summary>
/// <remarks>
/// This value is now deprecated in favor of <see cref="MaxCompletionTokens"/>, and is not compatible with <see href="https://platform.openai.com/docs/guides/reasoning">o1 series models</see>.
/// </remarks>
/// <seealso cref="MaxCompletionTokens"/>
[JsonPropertyName("max_tokens")]
public int? MaxTokens { get; set; }

/// <summary>
/// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and <see href="https://platform.openai.com/docs/guides/reasoning">reasoning tokens</see>.
/// </summary>
/// <remarks>o1 series models must use this property instead of <see cref="MaxTokens"/>.</remarks>
/// <seealso cref="MaxTokens"/>
[JsonPropertyName("max_completion_tokens")]
public int? MaxCompletionTokens { get; set; }
143
+
132
144
/// <summary>
133
145
/// Gets or sets the presence penalties for chat completion. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics (default: 0).
0 commit comments