
Commit 3bd7155

fix(api-nodes): edge cases in responses for Gemini models (#10860)
1 parent f66183a

2 files changed: +14 -13 lines

comfy_api_nodes/apis/gemini_api.py

Lines changed: 3 additions & 3 deletions
@@ -113,9 +113,9 @@ class GeminiGenerationConfig(BaseModel):
     maxOutputTokens: int | None = Field(None, ge=16, le=8192)
     seed: int | None = Field(None)
     stopSequences: list[str] | None = Field(None)
-    temperature: float | None = Field(1, ge=0.0, le=2.0)
-    topK: int | None = Field(40, ge=1)
-    topP: float | None = Field(0.95, ge=0.0, le=1.0)
+    temperature: float | None = Field(None, ge=0.0, le=2.0)
+    topK: int | None = Field(None, ge=1)
+    topP: float | None = Field(None, ge=0.0, le=1.0)


 class GeminiImageConfig(BaseModel):
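
A note on why the None defaults matter (editorial sketch, not part of the commit): with hard-coded defaults such as temperature=1, the config always carries explicit sampling values, so every request overrides whatever per-model defaults the Gemini API would apply. With None defaults, unset fields can be dropped from the payload, for example via Pydantic's model_dump(exclude_none=True), assuming the request body is serialized that way; the class name below is a hypothetical stand-in:

from pydantic import BaseModel, Field


class GenerationConfigSketch(BaseModel):
    # Hypothetical stand-in mirroring GeminiGenerationConfig after this change.
    temperature: float | None = Field(None, ge=0.0, le=2.0)
    topK: int | None = Field(None, ge=1)
    topP: float | None = Field(None, ge=0.0, le=1.0)


# Unset sampling parameters drop out of the serialized payload entirely,
# letting the Gemini API fall back to its own model-specific defaults.
print(GenerationConfigSketch().model_dump(exclude_none=True))                 # {}
print(GenerationConfigSketch(temperature=0.7).model_dump(exclude_none=True))  # {'temperature': 0.7}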

comfy_api_nodes/nodes_gemini.py

Lines changed: 11 additions & 10 deletions
@@ -104,14 +104,14 @@ def get_parts_by_type(response: GeminiGenerateContentResponse, part_type: Litera
         List of response parts matching the requested type.
     """
     if response.candidates is None:
-        if response.promptFeedback.blockReason:
+        if response.promptFeedback and response.promptFeedback.blockReason:
             feedback = response.promptFeedback
             raise ValueError(
                 f"Gemini API blocked the request. Reason: {feedback.blockReason} ({feedback.blockReasonMessage})"
             )
-        raise NotImplementedError(
-            "Gemini returned no response candidates. "
-            "Please report to ComfyUI repository with the example of workflow to reproduce this."
+        raise ValueError(
+            "Gemini API returned no response candidates. If you are using the `IMAGE` modality, "
+            "try changing it to `IMAGE+TEXT` to view the model's reasoning and understand why image generation failed."
         )
     parts = []
     for part in response.candidates[0].content.parts:

@@ -182,11 +182,12 @@ def calculate_tokens_price(response: GeminiGenerateContentResponse) -> float | N
     else:
         return None
     final_price = response.usageMetadata.promptTokenCount * input_tokens_price
-    for i in response.usageMetadata.candidatesTokensDetails:
-        if i.modality == Modality.IMAGE:
-            final_price += output_image_tokens_price * i.tokenCount  # for Nano Banana models
-        else:
-            final_price += output_text_tokens_price * i.tokenCount
+    if response.usageMetadata.candidatesTokensDetails:
+        for i in response.usageMetadata.candidatesTokensDetails:
+            if i.modality == Modality.IMAGE:
+                final_price += output_image_tokens_price * i.tokenCount  # for Nano Banana models
+            else:
+                final_price += output_text_tokens_price * i.tokenCount
     if response.usageMetadata.thoughtsTokenCount:
         final_price += output_text_tokens_price * response.usageMetadata.thoughtsTokenCount
     return final_price / 1_000_000.0

@@ -645,7 +646,7 @@ def define_schema(cls):
                     options=["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"],
                     default="auto",
                     tooltip="If set to 'auto', matches your input image's aspect ratio; "
-                    "if no image is provided, generates a 1:1 square.",
+                    "if no image is provided, a 16:9 square is usually generated.",
                 ),
                 IO.Combo.Input(
                     "resolution",
