Skip to content
This repository was archived by the owner on Jun 9, 2024. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
7c7affb
Sophie Telegram Plugin
Wladastic Jun 3, 2023
193616b
Update README.md
Wladastic Jun 3, 2023
0e4fd60
Merge remote-tracking branch 'upstream/master'
Wladastic Jun 17, 2023
0eb9011
Sophie Telegram Plugin
Wladastic Jun 26, 2023
1ae1cc6
Merge remote-tracking branch 'upstream/master'
Wladastic Jun 26, 2023
d32cf53
remove
Wladastic Jul 2, 2023
4eeaab0
log message
Wladastic Jul 2, 2023
8f9d098
fix
Wladastic Jul 2, 2023
c6bb426
add feedback message has been sent.
Wladastic Jul 7, 2023
9ef8f1f
sophie: add longterm memory and massive refactor
Wladastic Jul 8, 2023
1ff28bb
sophie improvements
Wladastic Aug 6, 2023
adbf070
fix error when someone else texts the bot
Wladastic Aug 11, 2023
d2d5843
update readme
Wladastic Aug 11, 2023
1f2e19c
Merge branch 'master' of https://github.com/Wladastic/Auto-GPT-Plugins
Wladastic Aug 12, 2023
85d0b62
more improvements
Wladastic Aug 12, 2023
9516df4
Merge branch 'master' of https://github.com/Wladastic/Auto-GPT-Plugins
Wladastic Aug 18, 2023
77ceb6b
disallow strangers to use the bot
Wladastic Aug 22, 2023
299ffc7
Merge remote-tracking branch 'upstream/master'
Wladastic Sep 5, 2023
bfb3915
fix with latest auto-gpt changes
Wladastic Sep 5, 2023
29c07e7
timeout not necessary to be printed.
Wladastic Sep 5, 2023
98d49ce
Merge branch 'master' of https://github.com/Wladastic/Auto-GPT-Plugins
Wladastic Sep 6, 2023
65ebb9f
fix to fit new auto-gpt master
Wladastic Oct 9, 2023
de99736
Merge branch 'Significant-Gravitas:master' into master
Wladastic Apr 21, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,7 @@ twine
validators
wheel
wolframalpha==5.0.0
# NOTE(review): 'traceback' and 'pathlib' are Python standard-library modules,
# not PyPI packages — the PyPI 'pathlib' package is an obsolete Python-2
# backport that can shadow the stdlib module. These should not be installed:
# traceback
# pathlib
torch
torchaudio
25 changes: 25 additions & 0 deletions src/autogpt_plugins/sophie/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# AutoGPT Sophie Plugin
Telegram Chat companion plugin for AutoGPT

![image](https://github.com/Wladastic/Auto-GPT-Plugins/assets/11997278/16c7cb86-0a1f-4aae-a485-f40a1aa6506c)
![image](https://github.com/Wladastic/Auto-GPT-Plugins/assets/11997278/249ae4de-e3c6-4c8a-95b3-93348472978d)


### Getting started
1. Install the plugin
```

Remember to also update your .env to include

```
ALLOWLISTED_PLUGINS=SophieTelegram
TELEGRAM_SOPHIE_API_KEY=<your telegram bot api key>
TELEGRAM_SOPHIE_CHAT_ID=<your telegram chat id>
```

2. Modify your yaml file to include the following
```
- Prefer ask_user and send_message to remain in the conversation unless the User gave you a task; then you have to update them on what you are doing.
- As the User might be multilingual, if you use ask_user_voice or send_message_voice, then speak English, but if you use ask_user or send_message, then respond in the language the User used.


290 changes: 290 additions & 0 deletions src/autogpt_plugins/sophie/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,290 @@
"""This is a Chat companion plugin that turns auto-gpt into a real assisstant.
It allows you to interact with your bot via Telegram in a more natural way.
It is based on the telegram plugin I made earlier but instead of forwarding the logs to a telegram chat, it allows Auto-GPT to interact with you.

built by @wladastic on github"""

import os
import re
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar

from auto_gpt_plugin_template import AutoGPTPluginTemplate

from .sophie_chat import TelegramUtils

PromptGenerator = TypeVar("PromptGenerator")

# A single chat message exchanged with the model: a role ("system"/"user"/
# "assistant") plus its text content. Functional TypedDict form — equivalent
# to the class-based declaration.
Message = TypedDict("Message", {"role": str, "content": str})


def remove_color_codes(s: str) -> str:
    """Return *s* with all ANSI escape (color) sequences stripped.

    Handles both two-character escapes (ESC followed by one of ``@-Z\\-_``)
    and full CSI sequences (``ESC [`` parameters, intermediates, final byte).
    """
    return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)


class SophieTelegram(AutoGPTPluginTemplate):
    """
    This is a companion plugin for Auto-GPT that allows you to interact with
    your bot via Telegram in a more natural way.

    It registers Telegram-backed commands (ask_user, send_message, their voice
    variants, message history and an idle/sleep command) on the prompt
    generator via ``post_prompt``. All other plugin hooks are declared
    unhandled (their ``can_handle_*`` methods return False).
    """

    def __init__(self):
        """Read Telegram credentials from the environment and build the
        Telegram helper.

        Environment variables:
            TELEGRAM_SOPHIE_API_KEY: the Telegram bot API key.
            TELEGRAM_SOPHIE_CHAT_ID: the chat id the bot talks to.

        Either may be None if unset; TelegramUtils receives them as-is.
        """
        super().__init__()
        self._name = "Auto-GPT-Sophie"
        self._version = "0.3.0"
        self._description = (
            "This integrates a Telegram chat bot with your autogpt instance."
        )
        self.telegram_sophie_api_key = os.getenv("TELEGRAM_SOPHIE_API_KEY", None)
        self.telegram_sophie_chat_id = os.getenv("TELEGRAM_SOPHIE_CHAT_ID", None)
        self.telegram_sophie_utils = TelegramUtils(
            chat_id=self.telegram_sophie_chat_id,
            api_key=self.telegram_sophie_api_key,
        )

    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
        """This method is called just after the generate_prompt is called,
        but actually before the prompt is generated.

        Registers the Telegram interaction commands on the prompt generator.

        Args:
            prompt (PromptGenerator): The prompt generator.
        Returns:
            PromptGenerator: The prompt generator with the commands added.
        """

        prompt.add_command(
            "get_previous_message_history",
            "Get the previous messages from the chat when you start.",
            {},
            self.telegram_sophie_utils.get_previous_message_history,
        )

        prompt.add_command(
            "ask_user",
            # Fixed dangling "if" at the end of this user-visible description.
            "Ask the user for input or tell them something and wait for their response.",
            {
                "prompt": "string",
            },
            self.telegram_sophie_utils.ask_user,
        )

        prompt.add_command(
            "ask_user_voice",
            "Same as ask_user but also sends a friendly voice message.",
            {
                "prompt": "string",
            },
            self.telegram_sophie_utils.ask_user_voice,
        )

        prompt.add_command(
            "send_message",
            "Send a message to the user without awaiting response.",
            {
                "message": "string",
            },
            self.telegram_sophie_utils.send_message,
        )

        prompt.add_command(
            "send_voice_message",
            "Same as send_message but also sends a friendly voice message.",
            {
                "message": "string",
            },
            # NOTE: command name "send_voice_message" maps to the helper's
            # send_message_and_speak implementation.
            self.telegram_sophie_utils.send_message_and_speak,
        )

        prompt.add_command(
            "sleep_until_interaction",
            "To be used to pause the conversation for next user interaction. Similar to ask but without a prompt, user is only notified that you sleep.",
            {},
            self.telegram_sophie_utils.idle_until_interaction,
        )

        return prompt

    def can_handle_post_prompt(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_prompt method.
        Returns:
            bool: True if the plugin can handle the post_prompt method."""
        return True

    def can_handle_on_response(self) -> bool:
        """This method is called to check that the plugin can
        handle the on_response method.
        Returns:
            bool: True if the plugin can handle the on_response method."""
        return False

    def on_response(self, response: str, *args, **kwargs) -> str:
        """This method is called when a response is received from the model.
        Unhandled (see can_handle_on_response)."""
        pass

    def can_handle_on_planning(self) -> bool:
        """This method is called to check that the plugin can
        handle the on_planning method.
        Returns:
            bool: True if the plugin can handle the on_planning method."""
        return False

    def on_planning(
        self, prompt: PromptGenerator, messages: List[Message]
    ) -> Optional[str]:
        """This method is called before the planning chat completion is done.
        Unhandled (see can_handle_on_planning).
        Args:
            prompt (PromptGenerator): The prompt generator.
            messages (List[Message]): The list of messages.
        """
        pass

    def can_handle_post_planning(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_planning method.
        Returns:
            bool: True if the plugin can handle the post_planning method."""
        return False

    def post_planning(self, response: str) -> str:
        """This method is called after the planning chat completion is done.
        Unhandled (see can_handle_post_planning).
        Args:
            response (str): The response.
        Returns:
            str: The resulting response.
        """
        pass

    def can_handle_pre_instruction(self) -> bool:
        """This method is called to check that the plugin can
        handle the pre_instruction method.
        Returns:
            bool: True if the plugin can handle the pre_instruction method."""
        return False

    def pre_instruction(self, messages: List[Message]) -> List[Message]:
        """This method is called before the instruction chat is done.
        Unhandled (see can_handle_pre_instruction).
        Args:
            messages (List[Message]): The list of context messages.
        Returns:
            List[Message]: The resulting list of messages.
        """
        pass

    def can_handle_on_instruction(self) -> bool:
        """This method is called to check that the plugin can
        handle the on_instruction method.
        Returns:
            bool: True if the plugin can handle the on_instruction method."""
        return False

    def on_instruction(self, messages: List[Message]) -> Optional[str]:
        """This method is called when the instruction chat is done.
        Unhandled (see can_handle_on_instruction).
        Args:
            messages (List[Message]): The list of context messages.
        Returns:
            Optional[str]: The resulting message.
        """
        pass

    def can_handle_post_instruction(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_instruction method.
        Returns:
            bool: True if the plugin can handle the post_instruction method."""
        return False

    def post_instruction(self, response: str) -> str:
        """This method is called after the instruction chat is done.
        Unhandled (see can_handle_post_instruction).
        Args:
            response (str): The response.
        Returns:
            str: The resulting response.
        """
        pass

    def can_handle_pre_command(self) -> bool:
        """This method is called to check that the plugin can
        handle the pre_command method.
        Returns:
            bool: True if the plugin can handle the pre_command method."""
        return False

    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        """This method is called before the command is executed.
        Unhandled (see can_handle_pre_command).
        Args:
            command_name (str): The command name.
            arguments (Dict[str, Any]): The arguments.
        Returns:
            Tuple[str, Dict[str, Any]]: The command name and the arguments.
        """
        pass

    def can_handle_post_command(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_command method.
        Returns:
            bool: True if the plugin can handle the post_command method."""
        return False

    def post_command(self, command_name: str, response: str) -> str:
        """This method is called after the command is executed.
        Unhandled (see can_handle_post_command).
        Args:
            command_name (str): The command name.
            response (str): The response.
        Returns:
            str: The resulting response.
        """
        pass

    def can_handle_chat_completion(
        self, messages: List[Message], model: str, temperature: float, max_tokens: int
    ) -> bool:
        """This method is called to check that the plugin can
        handle the chat_completion method.
        Args:
            messages (List[Message]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.
        Returns:
            bool: True if the plugin can handle the chat_completion method."""
        return False

    def handle_chat_completion(
        self, messages: List[Message], model: str, temperature: float, max_tokens: int
    ) -> str:
        """This method is called when the chat completion is done.
        Unhandled (see can_handle_chat_completion).
        Args:
            messages (List[Message]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.
        Returns:
            str: The resulting response.
        """
        pass

    def can_handle_text_embedding(self, text: str) -> bool:
        """Whether this plugin handles text embedding. It does not."""
        return False

    def handle_text_embedding(self, text: str) -> list:
        """Compute a text embedding. Unhandled (see can_handle_text_embedding)."""
        pass

    def can_handle_user_input(self, user_input: str) -> bool:
        """Whether this plugin intercepts console user input. It does not."""
        return False

    def user_input(self, user_input: str) -> str:
        """Pass user input through unchanged."""
        return user_input

    def can_handle_report(self) -> bool:
        """Whether this plugin handles report messages. It does not."""
        return False

    def report(self, message: str) -> None:
        """Receive a report message. Unhandled (see can_handle_report)."""
        pass
Loading