Commit 460521a, authored by Copilot, markwallace-microsoft, and rogerbarreto
.Net: Add ThinkingLevel parameter to GeminiThinkingConfig for Gemini 3.0+ (#13396)
### Motivation and Context

Gemini 3.0 introduces a new `ThinkingLevel` string parameter with the values "none", "low", "medium", and "high". The existing `ThinkingBudget` parameter remains valid for older Gemini 2.5 models.

Fixes #11920

### Description

- Added a `ThinkingLevel` property to `GeminiThinkingConfig`
- Kept the `ThinkingBudget` property as-is (still applicable to older Gemini 2.5 models)
- Updated `GeminiRequestThinkingConfig` to serialize both properties
- Added unit tests for `ThinkingLevel` serialization and deserialization
- Converted `PromptExecutionSettingsCloneWorksAsExpected` to a Theory that verifies cloning works for both `ThinkingBudget` and `ThinkingLevel`
- Kept the existing `ThinkingBudget` unit tests unchanged
- Renamed the sample file to `Google_GeminiChatCompletionWithThinking.cs` to demonstrate both APIs

**Usage:**

```csharp
// Gemini 3.0+
var gemini3Settings = new GeminiPromptExecutionSettings
{
    ThinkingConfig = new GeminiThinkingConfig { ThinkingLevel = "high" }
};

// Gemini 2.5
var gemini25Settings = new GeminiPromptExecutionSettings
{
    ThinkingConfig = new GeminiThinkingConfig { ThinkingBudget = 2000 }
};
```

### Contribution Checklist

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone 😄

<details>
<summary>Original prompt</summary>

> *This section details the original issue to resolve.*
>
> <issue_title>New Feature: .NET Gemini 3.0 Thinking Level parameter</issue_title>
> <issue_description>In Gemini 3.0 the thinking budget is deprecated; instead there is a new string parameter called ThinkingLevel, where we can specify the thinking level we want (high or low); the default is high.</issue_description>
>
> <agent_instructions>Update GeminiThinkingConfig.cs to support ThinkingLevel. Mark the existing ThinkingBudget parameter as deprecated.</agent_instructions>

</details>

- Fixes #13377

💡 You can make Copilot smarter by setting up custom instructions, customizing its development environment, and configuring Model Context Protocol (MCP) servers. Learn more in the [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips) docs.

---

Co-authored-by: copilot-swe-agent[bot] <[email protected]>
Co-authored-by: markwallace-microsoft <[email protected]>
Co-authored-by: rogerbarreto <[email protected]>
1 parent 60685a7

5 files changed: +123 additions, -7 deletions
Google_GeminiChatCompletionWithThinking.cs (sample, renamed from Google_GeminiChatCompletionWithThinkingBudget.cs)

Lines changed: 36 additions & 2 deletions

@@ -9,11 +9,12 @@ namespace ChatCompletion;
 /// <summary>
 /// These examples demonstrate different ways of using chat completion with Google AI API.
 /// <para>
-/// Currently thinking budget is only supported in Google AI Gemini 2.5+ models
+/// Currently thinking is only supported in Google AI Gemini 2.5+ models.
+/// For Gemini 2.5 models, use ThinkingBudget. For Gemini 3.0 and later, use ThinkingLevel.
 /// See: https://developers.googleblog.com/en/start-building-with-gemini-25-flash/#:~:text=thinking%20budgets
 /// </para>
 /// </summary>
-public sealed class Google_GeminiChatCompletionWithThinkingBudget(ITestOutputHelper output) : BaseTest(output)
+public sealed class Google_GeminiChatCompletionWithThinking(ITestOutputHelper output) : BaseTest(output)
 {
     [Fact]
     public async Task GoogleAIChatCompletionUsingThinkingBudget()
@@ -47,6 +48,39 @@ public async Task GoogleAIChatCompletionUsingThinkingBudget()
         await MessageOutputAsync(chatHistory);
     }

+    [Fact]
+    public async Task GoogleAIChatCompletionUsingThinkingLevel()
+    {
+        Console.WriteLine("============= Google AI - Gemini 3.0 Chat Completion using Thinking Level =============");
+
+        Assert.NotNull(TestConfiguration.GoogleAI.ApiKey);
+        string geminiModelId = "gemini-3.0-flash";
+
+        Kernel kernel = Kernel.CreateBuilder()
+            .AddGoogleAIGeminiChatCompletion(
+                modelId: geminiModelId,
+                apiKey: TestConfiguration.GoogleAI.ApiKey)
+            .Build();
+
+        var chatHistory = new ChatHistory("You are an expert in the tool shop.");
+        var chat = kernel.GetRequiredService<IChatCompletionService>();
+        var executionSettings = new GeminiPromptExecutionSettings
+        {
+            // This parameter specifies the thinking level for Gemini 3.0+ models.
+            // Possible values are "none", "low", "medium", and "high".
+            ThinkingConfig = new() { ThinkingLevel = "high" }
+        };
+
+        // First user message
+        chatHistory.AddUserMessage("Hi, I'm looking for new power tools, any suggestion?");
+        await MessageOutputAsync(chatHistory);
+
+        // First assistant message
+        var reply = await chat.GetChatMessageContentAsync(chatHistory, executionSettings);
+        chatHistory.Add(reply);
+        await MessageOutputAsync(chatHistory);
+    }
+
     /// <summary>
     /// Outputs the last message of the chat history
     /// </summary>
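Not part of this commit: a short sketch of how the same thinking-level settings could be used on the streaming path, reusing the `chat`, `chatHistory`, and `executionSettings` variables from the sample above. The follow-up question is illustrative.

```csharp
// Sketch only: continues the GoogleAIChatCompletionUsingThinkingLevel sample above and
// assumes the streaming API accepts the same GeminiPromptExecutionSettings instance.
chatHistory.AddUserMessage("Which cordless drill would you recommend for a beginner?");

await foreach (StreamingChatMessageContent chunk in chat.GetStreamingChatMessageContentsAsync(chatHistory, executionSettings))
{
    Console.Write(chunk.Content);
}

Console.WriteLine();
```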

dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/GeminiRequestTests.cs

Lines changed: 18 additions & 0 deletions

@@ -721,6 +721,24 @@ public void FromPromptAndExecutionSettingsWithThinkingConfigReturnsInGenerationConfig()
         Assert.Equal(executionSettings.ThinkingConfig.ThinkingBudget, request.Configuration?.ThinkingConfig?.ThinkingBudget);
     }

+    [Fact]
+    public void FromPromptAndExecutionSettingsWithThinkingLevelReturnsInGenerationConfig()
+    {
+        // Arrange
+        var prompt = "prompt-example";
+        var executionSettings = new GeminiPromptExecutionSettings
+        {
+            ModelId = "gemini-3.0-flash",
+            ThinkingConfig = new GeminiThinkingConfig { ThinkingLevel = "high" }
+        };
+
+        // Act
+        var request = GeminiRequest.FromPromptAndExecutionSettings(prompt, executionSettings);
+
+        // Assert
+        Assert.Equal(executionSettings.ThinkingConfig.ThinkingLevel, request.Configuration?.ThinkingConfig?.ThinkingLevel);
+    }
+
     private sealed class DummyContent(object? innerContent, string? modelId = null, IReadOnlyDictionary<string, object?>? metadata = null) :
         KernelContent(innerContent, modelId, metadata);

dotnet/src/Connectors/Connectors.Google.UnitTests/GeminiPromptExecutionSettingsTests.cs

Lines changed: 51 additions & 4 deletions

@@ -137,32 +137,79 @@ public void ItCreatesGeminiExecutionSettingsFromJsonSnakeCase()
     }

     [Fact]
-    public void PromptExecutionSettingsCloneWorksAsExpected()
+    public void ItCreatesGeminiExecutionSettingsFromJsonSnakeCaseWithThinkingLevel()
     {
         // Arrange
         var category = GeminiSafetyCategory.Harassment;
         var threshold = GeminiSafetyThreshold.BlockOnlyHigh;
         string json = $$"""
             {
-                "model_id": "gemini-pro",
                 "temperature": 0.7,
                 "top_p": 0.7,
                 "top_k": 25,
                 "candidate_count": 2,
-                "audio_timestamp": true,
                 "stop_sequences": [ "foo", "bar" ],
                 "max_tokens": 128,
+                "audio_timestamp": true,
                 "safety_settings": [
                     {
                         "category": "{{category.Label}}",
                         "threshold": "{{threshold.Label}}"
                     }
                 ],
                 "thinking_config": {
-                    "thinking_budget": 1000
+                    "thinking_level": "high"
                 }
             }
             """;
+        var actualSettings = JsonSerializer.Deserialize<PromptExecutionSettings>(json);
+
+        // Act
+        GeminiPromptExecutionSettings executionSettings = GeminiPromptExecutionSettings.FromExecutionSettings(actualSettings);
+
+        // Assert
+        Assert.NotNull(executionSettings);
+        Assert.Equal(0.7, executionSettings.Temperature);
+        Assert.Equal(0.7, executionSettings.TopP);
+        Assert.Equal(25, executionSettings.TopK);
+        Assert.Equal(2, executionSettings.CandidateCount);
+        Assert.Equal(["foo", "bar"], executionSettings.StopSequences);
+        Assert.Equal(128, executionSettings.MaxTokens);
+        Assert.True(executionSettings.AudioTimestamp);
+        Assert.Single(executionSettings.SafetySettings!, settings =>
+            settings.Category.Equals(category) &&
+            settings.Threshold.Equals(threshold));
+
+        Assert.Equal("high", executionSettings.ThinkingConfig?.ThinkingLevel);
+    }
+
+    [Theory]
+    [InlineData("""{ "thinking_budget": 1000 }""")]
+    [InlineData("""{ "thinking_level": "high" }""")]
+    public void PromptExecutionSettingsCloneWorksAsExpected(string thinkingConfigJson)
+    {
+        // Arrange
+        var category = GeminiSafetyCategory.Harassment;
+        var threshold = GeminiSafetyThreshold.BlockOnlyHigh;
+        string json = $$"""
+            {
+                "model_id": "gemini-pro",
+                "temperature": 0.7,
+                "top_p": 0.7,
+                "top_k": 25,
+                "candidate_count": 2,
+                "audio_timestamp": true,
+                "stop_sequences": [ "foo", "bar" ],
+                "max_tokens": 128,
+                "safety_settings": [
+                    {
+                        "category": "{{category.Label}}",
+                        "threshold": "{{threshold.Label}}"
+                    }
+                ],
+                "thinking_config": {{thinkingConfigJson}}
+            }
+            """;
         var executionSettings = JsonSerializer.Deserialize<GeminiPromptExecutionSettings>(json);

         // Act
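The new snake_case test above also shows the path that settings supplied as plain JSON take into the connector. A minimal, self-contained sketch of that round trip, mirroring the test's `Deserialize` and `FromExecutionSettings` steps (the JSON payload here is trimmed down and illustrative):

```csharp
using System;
using System.Text.Json;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Connectors.Google;

string json = """
    {
        "thinking_config": { "thinking_level": "high" }
    }
    """;

// Deserialize into the base settings type, then convert to the Gemini-specific type.
var baseSettings = JsonSerializer.Deserialize<PromptExecutionSettings>(json);
var geminiSettings = GeminiPromptExecutionSettings.FromExecutionSettings(baseSettings);

Console.WriteLine(geminiSettings.ThinkingConfig?.ThinkingLevel); // "high"
```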

dotnet/src/Connectors/Connectors.Google/Core/Gemini/Models/GeminiRequest.cs

Lines changed: 9 additions & 1 deletion

@@ -498,7 +498,11 @@ private static void AddAdditionalBodyFields(GeminiPromptExecutionSettings executionSettings
         if (executionSettings.ThinkingConfig is not null)
         {
             request.Configuration ??= new ConfigurationElement();
-            request.Configuration.ThinkingConfig = new GeminiRequestThinkingConfig { ThinkingBudget = executionSettings.ThinkingConfig.ThinkingBudget };
+            request.Configuration.ThinkingConfig = new GeminiRequestThinkingConfig
+            {
+                ThinkingBudget = executionSettings.ThinkingConfig.ThinkingBudget,
+                ThinkingLevel = executionSettings.ThinkingConfig.ThinkingLevel
+            };
         }
     }

@@ -550,5 +554,9 @@ internal sealed class GeminiRequestThinkingConfig
     [JsonPropertyName("thinkingBudget")]
     [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
     public int? ThinkingBudget { get; set; }
+
+    [JsonPropertyName("thinkingLevel")]
+    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
+    public string? ThinkingLevel { get; set; }
 }
}
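For reference, a sketch of the wire format this serialization produces. `GeminiRequestThinkingConfig` is internal to the connector, so the type below is a hypothetical stand-in that copies the JSON attributes from the diff above; it shows the camelCase property names and how `WhenWritingNull` keeps an unset property out of the payload.

```csharp
using System;
using System.Text.Json;
using System.Text.Json.Serialization;

// Only the property that is set appears in the output.
Console.WriteLine(JsonSerializer.Serialize(new ThinkingConfigWireSample { ThinkingLevel = "high" }));
// {"thinkingLevel":"high"}

Console.WriteLine(JsonSerializer.Serialize(new ThinkingConfigWireSample { ThinkingBudget = 2000 }));
// {"thinkingBudget":2000}

// Hypothetical stand-in mirroring the attributes on GeminiRequestThinkingConfig.
internal sealed class ThinkingConfigWireSample
{
    [JsonPropertyName("thinkingBudget")]
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public int? ThinkingBudget { get; set; }

    [JsonPropertyName("thinkingLevel")]
    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
    public string? ThinkingLevel { get; set; }
}
```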

dotnet/src/Connectors/Connectors.Google/GeminiThinkingConfig.cs

Lines changed: 9 additions & 0 deletions

@@ -22,6 +22,15 @@ public class GeminiThinkingConfig
     [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
     public int? ThinkingBudget { get; set; }

+    /// <summary>The thinking level parameter specifies the amount of thinking the model should use for its thinking process.</summary>
+    /// <remarks>
+    /// <para>Possible values are "none", "low", "medium", and "high". The default is "medium".</para>
+    /// This parameter is specific to Gemini 3.0 and later models.
+    /// </remarks>
+    [JsonPropertyName("thinking_level")]
+    [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
+    public string? ThinkingLevel { get; set; }
+
     /// <summary>
     /// Clones this instance.
     /// </summary>
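The converted Theory verifies that cloning preserves whichever thinking property is set. A minimal sketch of the same check from application code, assuming the settings `Clone()` override returns a `GeminiPromptExecutionSettings` (the cast is illustrative):

```csharp
using System;
using Microsoft.SemanticKernel.Connectors.Google;

var settings = new GeminiPromptExecutionSettings
{
    ThinkingConfig = new GeminiThinkingConfig { ThinkingLevel = "high" }
};

var clone = (GeminiPromptExecutionSettings)settings.Clone();
Console.WriteLine(clone.ThinkingConfig?.ThinkingLevel); // "high"
```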
