5 changes: 4 additions & 1 deletion README.md
@@ -41,7 +41,10 @@ A [Telegram bot](https://core.telegram.org/bots/api) that integrates with OpenAI
 - [x] (NEW!) Text-to-speech support [announced on November 6, 2023](https://platform.openai.com/docs/guides/text-to-speech) - by [@gilcu3](https://github.com/gilcu3)
 - [x] (NEW!) Vision support [announced on November 6, 2023](https://platform.openai.com/docs/guides/vision) - by [@gilcu3](https://github.com/gilcu3)
 - [x] (NEW!) GPT-4o model support [announced on May 12, 2024](https://openai.com/index/hello-gpt-4o/) - by [@err09r](https://github.com/err09r)
+- [x] (NEW!) GPT-4.1, gpt-4.1-mini and gpt-4.1-nano model support [(docs)](https://platform.openai.com/docs/models/)
 - [x] (NEW!) o1 and o1-mini model preliminary support
+- [x] (NEW!) o3 and o3-mini model preliminary support
+- [x] (NEW!) o4 and o4-mini model preliminary support
 
 ## Additional features - help needed!
 If you'd like to help, check out the [issues](https://github.com/n3d1117/chatgpt-telegram-bot/issues) section and contribute!
@@ -94,7 +97,7 @@ Check out the [Budget Manual](https://github.com/n3d1117/chatgpt-telegram-bot/di
 | `PROXY` | Proxy to be used for OpenAI and Telegram bot (e.g. `http://localhost:8080`) | - |
 | `OPENAI_PROXY` | Proxy to be used only for OpenAI (e.g. `http://localhost:8080`) | - |
 | `TELEGRAM_PROXY` | Proxy to be used only for Telegram bot (e.g. `http://localhost:8080`) | - |
-| `OPENAI_MODEL` | The OpenAI model to use for generating responses. You can find all available models [here](https://platform.openai.com/docs/models/) | `gpt-4o` |
+| `OPENAI_MODEL` | The OpenAI model to use for generating responses. You can find all available models [here](https://platform.openai.com/docs/models/) | `gpt-4.1` |
 | `OPENAI_BASE_URL` | Endpoint URL for unofficial OpenAI-compatible APIs (e.g., LocalAI or text-generation-webui) | Default OpenAI API URL |
 | `ASSISTANT_PROMPT` | A system message that sets the tone and controls the behavior of the assistant | `You are a helpful assistant.` |
 | `SHOW_USAGE` | Whether to show OpenAI token usage information after each response | `false` |
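
With the default model now `gpt-4.1`, a minimal `.env` might look like the sketch below. This is illustrative only: `OPENAI_API_KEY` and `TELEGRAM_BOT_TOKEN` are assumed from the bot's standard setup and are not part of the table rows changed in this diff.

```env
# Illustrative values — OPENAI_API_KEY and TELEGRAM_BOT_TOKEN are assumed
# required variables from the setup docs, not defined in this diff
OPENAI_API_KEY=sk-your-key-here
TELEGRAM_BOT_TOKEN=123456:ABC-DEF
OPENAI_MODEL=gpt-4.1
ASSISTANT_PROMPT=You are a helpful assistant.
SHOW_USAGE=false
```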
46 changes: 38 additions & 8 deletions bot/openai_helper.py

@@ -26,8 +26,33 @@
 GPT_4_VISION_MODELS = ("gpt-4o",)
 GPT_4_128K_MODELS = ("gpt-4-1106-preview", "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09")
 GPT_4O_MODELS = ("gpt-4o", "gpt-4o-mini", "chatgpt-4o-latest")
-O_MODELS = ("o1", "o1-mini", "o1-preview")
-GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS + GPT_4O_MODELS + O_MODELS
+GPT_4_1_MODELS = (
+    "gpt-4.1",
+    "gpt-4.1-mini",
+    "gpt-4.1-nano",
+)
+O_MODELS = (
+    "o1",
+    "o1-mini",
+    "o1-preview",
+    "o3",
+    "o3-mini",
+    "o3-preview",
+    "o4",
+    "o4-mini",
+    "o4-preview",
+)
+GPT_ALL_MODELS = (
+    GPT_3_MODELS
+    + GPT_3_16K_MODELS
+    + GPT_4_MODELS
+    + GPT_4_32K_MODELS
+    + GPT_4_VISION_MODELS
+    + GPT_4_128K_MODELS
+    + GPT_4O_MODELS
+    + GPT_4_1_MODELS
+    + O_MODELS
+)
 
 def default_max_tokens(model: str) -> int:
     """
@@ -52,6 +77,8 @@ def default_max_tokens(model: str) -> int:
         return 4096
     elif model in GPT_4O_MODELS:
         return 4096
+    elif model in GPT_4_1_MODELS:
+        return 4096
     elif model in O_MODELS:
         return 4096
 
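Since `GPT_ALL_MODELS` now folds in the GPT-4.1 and extended o-series tuples, it can keep serving as the single gate for whatever `OPENAI_MODEL` a user configures. A minimal sketch of that use (tuples abbreviated; `validate_model` is a hypothetical helper, not part of the codebase):

```python
GPT_4_1_MODELS = ("gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano")
O_MODELS = ("o1", "o1-mini", "o1-preview", "o3", "o3-mini", "o3-preview",
            "o4", "o4-mini", "o4-preview")
# Abbreviated: the real GPT_ALL_MODELS also concatenates the GPT-3/GPT-4 tuples.
GPT_ALL_MODELS = GPT_4_1_MODELS + O_MODELS

def validate_model(model: str) -> str:
    """Hypothetical helper: reject models the bot has no token tables for."""
    if model not in GPT_ALL_MODELS:
        raise ValueError(f"Unsupported OPENAI_MODEL: {model!r}")
    return model

validate_model("gpt-4.1-mini")   # passes
# validate_model("gpt-5")        # would raise ValueError
```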
@@ -632,14 +659,14 @@ def __max_model_tokens(self):
             return base * 31
         if self.config['model'] in GPT_4O_MODELS:
             return base * 31
+        if self.config['model'] in GPT_4_1_MODELS:
+            return base * 256  # 1,048,576 token context
         elif self.config['model'] in O_MODELS:
-            # https://platform.openai.com/docs/models#o1
             if self.config['model'] == "o1":
                 return 100_000
-            elif self.config['model'] == "o1-preview":
-                return 32_768
+            if self.config['model'] == "o1-preview":
+                return 128_000
-            else:
-                return 65_536
+            return 200_000
         raise NotImplementedError(
             f"Max tokens for model {self.config['model']} is not implemented yet."
         )
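The arithmetic in the new branch checks out: with `base = 4096`, `base * 256` is exactly 1,048,576 tokens, matching GPT-4.1's advertised one-million-token context, while o1-preview keeps 128,000 and the remaining o-series models get 200,000. A standalone sketch of the same lookup (only the numbers come from this diff; the function shape and prefix check are illustrative):

```python
BASE = 4096  # the bot's base token budget

def max_model_tokens(model: str) -> int:
    # Mirrors the patched __max_model_tokens branches for the new families.
    if model in ("gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"):
        return BASE * 256  # 4096 * 256 = 1,048,576
    if model == "o1":
        return 100_000
    if model == "o1-preview":
        return 128_000
    if model.startswith(("o1", "o3", "o4")):
        return 200_000  # o1-mini and the o3/o4 variants listed above
    raise NotImplementedError(f"Max tokens for model {model} is not implemented yet.")

assert max_model_tokens("gpt-4.1") == 1_048_576
assert max_model_tokens("o4-mini") == 200_000
```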
@@ -655,7 +682,10 @@ def __count_tokens(self, messages) -> int:
         try:
             encoding = tiktoken.encoding_for_model(model)
         except KeyError:
-            encoding = tiktoken.get_encoding("o200k_base")
+            if model in O_MODELS or model in GPT_4O_MODELS or model in GPT_4_1_MODELS:
+                encoding = tiktoken.get_encoding("o200k_base")
+            else:
+                encoding = tiktoken.get_encoding("cl100k_base")
 
         if model in GPT_ALL_MODELS:
             tokens_per_message = 3
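
tiktoken only ships encodings for models it knows about, so `encoding_for_model` raises `KeyError` for anything newer than the installed release. Rather than assuming `o200k_base` for every unknown model, the patch now picks the vocabulary by family: `o200k_base` for the 4o/4.1/o-series, `cl100k_base` for the older ones. A self-contained sketch of that fallback, with the tuple membership replaced by a prefix check for brevity:

```python
import tiktoken

def resolve_encoding(model: str):
    """Illustrative stand-in for the patched fallback in __count_tokens."""
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # Newer families share the o200k_base vocabulary; older ones use
        # cl100k_base. The prefix check approximates the O_MODELS /
        # GPT_4O_MODELS / GPT_4_1_MODELS tuples defined earlier in the file.
        if model.startswith(("o1", "o3", "o4", "gpt-4o", "gpt-4.1")):
            return tiktoken.get_encoding("o200k_base")
        return tiktoken.get_encoding("cl100k_base")

enc = resolve_encoding("gpt-4.1-nano")
print(len(enc.encode("hello world")))  # token count under o200k_base
```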