Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 28 additions & 1 deletion solidgpt/src/manager/autogenmanager.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
import logging
import agentops
from dotenv import load_dotenv
import os

import autogen
from autogen import oai
Expand All @@ -8,18 +11,23 @@

from solidgpt.src.manager.promptresource import DEFAULT_SYSTEM_MESSAGE, ASSISTANT_SYSTEM_MESSAGE

# Load environment variables
load_dotenv()

# Initialize AgentOps
agentops.init(os.getenv('AGENTOPS_API_KEY'))

def colored(x, *args, **kwargs):
    """Fallback stand-in for termcolor.colored: return the text unchanged.

    Extra positional and keyword arguments (color, attrs, ...) are accepted
    and ignored so call sites written for termcolor keep working.
    """
    del args, kwargs  # accepted for API compatibility only
    return x


class SolidUserProxyAgent(autogen.UserProxyAgent):

manager = None
callback_map = {

}

@agentops.record_function('SolidUserProxyAgent_init')
def __init__(
self,
name: str,
Expand All @@ -44,6 +52,7 @@ def __init__(
default_auto_reply=default_auto_reply,
)

@agentops.record_function('SolidUserProxyAgent_print_received_message')
def _print_received_message(self, message: Union[Dict, str], sender):
# print the message received
self.manager.add_message(sender.name, "(to", f"{self.name}):\n")
Expand Down Expand Up @@ -71,6 +80,7 @@ def _print_received_message(self, message: Union[Dict, str], sender):
self.manager.add_message("")
self.manager.add_message("-" * 80)

@agentops.record_function('SolidUserProxyAgent_get_human_input')
def get_human_input(self, prompt: str) -> str:
reply = ""
# get reply from frontend
Expand All @@ -92,6 +102,7 @@ class SolidAssistantAgent(autogen.AssistantAgent):

manager = None

@agentops.record_function('SolidAssistantAgent_init')
def __init__(
self,
name: str,
Expand All @@ -114,6 +125,7 @@ def __init__(
**kwargs,
)

@agentops.record_function('SolidAssistantAgent_print_received_message')
def _print_received_message(self, message: Union[Dict, str], sender):
# print the message received
self.manager.add_message(sender.name, "(to", f"{self.name}):\n")
Expand Down Expand Up @@ -141,6 +153,7 @@ def _print_received_message(self, message: Union[Dict, str], sender):
self.manager.add_message("")
self.manager.add_message("-" * 80)

@agentops.record_function('SolidAssistantAgent_get_human_input')
def get_human_input(self, prompt: str) -> str:
print(prompt)
reply = ""
Expand All @@ -151,6 +164,7 @@ def get_human_input(self, prompt: str) -> str:
class AutoGenManager:
cumulative_message = ""

@agentops.record_function('AutoGenManager_init')
def __init__(self, if_show_reply=False):
# read api key from config file
global_openai_key = ConfigReader().get_property("openai_api_key")
Expand All @@ -164,6 +178,7 @@ def __init__(self, if_show_reply=False):
self.assistant = None
self.user_proxy = None

@agentops.record_function('AutoGenManager_run')
def run(self, requirement, relatived_code):
self.construct_agents(relatived_code)
self.user_proxy.initiate_chat(
Expand All @@ -172,6 +187,7 @@ def run(self, requirement, relatived_code):
)

@staticmethod
@agentops.record_function('AutoGenManager_get_customized_assistant_agent')
def get_customized_assistant_agent(name: str,
system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
llm_config: Optional[Union[Dict, bool]] = None,
Expand All @@ -193,6 +209,7 @@ def get_customized_assistant_agent(name: str,
**kwargs)

@staticmethod
@agentops.record_function('AutoGenManager_get_customized_user_proxy_agent')
def get_customized_user_proxy_agent(name: str,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
Expand All @@ -215,6 +232,7 @@ def get_customized_user_proxy_agent(name: str,
default_auto_reply=default_auto_reply,
)

@agentops.record_function('AutoGenManager_construct_agents')
def construct_agents(self, relatived_code):
self.planner = self.generate_default_planner()
self.planner_user = self.generate_default_planner_user()
Expand All @@ -226,16 +244,19 @@ def construct_agents(self, relatived_code):
self.user_proxy.manager = self
return

@agentops.record_function('AutoGenManager_add_message')
def add_message(self, *args):
# Joining all arguments with a space after converting each to a string
messages = ' '.join(map(str, args))
self.cumulative_message += messages + "\n"

@agentops.record_function('AutoGenManager_retrieve_message')
def retrieve_message(self):
msg = self.cumulative_message
self.cumulative_message = ""
return msg

@agentops.record_function('AutoGenManager_generate_default_planner')
def generate_default_planner(self):
# todo: update callback function
planner = SolidAssistantAgent(
Expand All @@ -245,6 +266,7 @@ def generate_default_planner(self):
system_message=DEFAULT_SYSTEM_MESSAGE)
return planner

@agentops.record_function('AutoGenManager_generate_default_planner_user')
def generate_default_planner_user(self):
# todo: update callback function
planner_user = SolidUserProxyAgent(
Expand All @@ -254,12 +276,14 @@ def generate_default_planner_user(self):
)
return planner_user

@agentops.record_function('AutoGenManager_ask_planner')
def ask_planner(self, message):
    """Relay *message* to the planner agent and return its final reply.

    Starts a chat between the planner-user proxy and the planner agent,
    caches the planner's last reply on self.planner_msg, and returns it.
    NOTE(review): last_message() is called twice; presumably it is a pure
    accessor with no side effects -- confirm against autogen's API.
    """
    self.planner_user.initiate_chat(self.planner, message=message)
    self.planner_msg = self.planner_user.last_message()["content"]
    # return the last message received from the planner
    return self.planner_user.last_message()["content"]

@agentops.record_function('AutoGenManager_generate_default_assistant')
def generate_default_assistant(self, relatived_code: str):
# todo: update callback function
assistant = SolidAssistantAgent(
Expand Down Expand Up @@ -291,6 +315,7 @@ def generate_default_assistant(self, relatived_code: str):
)
return assistant

@agentops.record_function('AutoGenManager_generate_default_user_proxy')
def generate_default_user_proxy(self):
# todo: update callback function
user_proxy = SolidUserProxyAgent(
Expand All @@ -304,3 +329,5 @@ def generate_default_user_proxy(self):
)
return user_proxy

# End of program
agentops.end_session('Success')
26 changes: 24 additions & 2 deletions solidgpt/src/manager/gptmanager.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,27 @@
import openai
from solidgpt.src.configuration.configreader import ConfigReader
import agentops
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Initialize AgentOps
agentops.init(os.getenv('AGENTOPS_API_KEY'))

class GPTManager:

_instance = None

@agentops.record_function('GPTManager_new')
def __new__(cls):
    """Create or return the process-wide singleton GPTManager instance.

    The first call allocates the instance; every later call returns the
    same object. NOTE(review): __init__ still runs on every instantiation,
    re-reading the config -- confirm this is intended singleton behavior.
    """
    if cls._instance is None:
        cls._instance = super(GPTManager, cls).__new__(cls)
        # You can initialize the instance attributes here
    return cls._instance

@agentops.record_function('GPTManager_init')
def __init__(self, if_show_reply = False):
# read api key from config file
global_openai_key = ConfigReader().get_property("openai_api_key")
Expand All @@ -20,24 +31,29 @@ def __init__(self, if_show_reply = False):
self.gpt_models_container = {}
self.if_show_reply = if_show_reply

@agentops.record_function('GPTManager_create_model')
def create_model(self, prompt, gpt_model_label, temperature=1.0, model=None):
    """Build a GPTModel, register it under *gpt_model_label*, and return it.

    Args:
        prompt: system prompt for the new model.
        gpt_model_label: key under which the model is stored in
            self.gpt_models_container.
        temperature: sampling temperature forwarded to the model.
        model: model name to use; falls back to the manager default when None.

    Returns:
        The newly created GPTModel.
    """
    if model is None:
        model = self.__default_model
    # Bug fix: forward the resolved `model`; previously the `model` argument
    # was ignored and the default model was always passed to GPTModel.
    gpt_model = GPTModel(prompt, model, self.if_show_reply, temperature)
    self.gpt_models_container[gpt_model_label] = gpt_model
    return gpt_model

@agentops.record_function('GPTManager_create_and_chat_with_model')
def create_and_chat_with_model(self, prompt, gpt_model_label, input_message, temperature=0.1, model=None, is_stream = False):
    """Convenience wrapper: create a model and immediately chat with it.

    Registers a new model under *gpt_model_label* and returns the reply
    (or the stream object when is_stream is True).
    """
    new_model = self.create_model(prompt, gpt_model_label, temperature, model)
    reply = new_model.chat_with_model(input_message, is_stream=is_stream)
    return reply

@agentops.record_function('GPTManager_get_gpt_model')
def get_gpt_model(self, gpt_model_label):
return self.completions_container[gpt_model_label]

@agentops.record_function('GPTManager_remove_gpt_model')
def remove_gpt_model(self, gpt_model_label):
self.completions_container.pop(gpt_model_label)

class GPTModel:
@agentops.record_function('GPTModel_init')
def __init__(self, prompt, model, if_show_reply = True, temperature = 0.1):
self.prompt = prompt
self.model = model
Expand All @@ -46,6 +62,7 @@ def __init__(self, prompt, model, if_show_reply = True, temperature = 0.1):
self.if_show_reply = if_show_reply
self.temperature = temperature

@agentops.record_function('GPTModel_chat_with_model')
def chat_with_model(self, input_message, is_stream=False):
self.messages.append({"role": "user", "content": input_message})
if not is_stream:
Expand All @@ -54,6 +71,7 @@ def chat_with_model(self, input_message, is_stream=False):
return self._run_model_stream()
return self.last_reply

@agentops.record_function('GPTModel_run_model')
def _run_model(self):
chat = openai.ChatCompletion.create(
model=self.model,
Expand All @@ -66,6 +84,7 @@ def _run_model(self):
self.messages.append({"role": "assistant", "content": reply})
self.last_reply = reply

@agentops.record_function('GPTModel_run_model_stream')
def _run_model_stream(self):
stream = openai.ChatCompletion.create(
model=self.model,
Expand All @@ -75,6 +94,9 @@ def _run_model_stream(self):
)
return stream


@agentops.record_function('GPTModel_add_background')
def add_background(self, background_message):
self.messages.append({"role": "assistant", "content": background_message})
self.messages.append({"role": "assistant", "content": background_message})

# End of program
agentops.end_session('Success')
20 changes: 20 additions & 0 deletions solidgpt/src/manager/llamanager.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,39 +2,54 @@
from solidgpt.src.configuration.configreader import ConfigReader
from text_generation import Client
from solidgpt.src.manager.promptresource import llama_v2_prompt
import agentops
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Initialize AgentOps
agentops.init(os.getenv('AGENTOPS_API_KEY'))

class LLAManager:
_instance = None

@agentops.record_function('LLAManager_new')
def __new__(cls):
    """Create or return the process-wide singleton LLAManager instance.

    NOTE(review): __init__ still runs on every instantiation, re-reading
    config properties -- confirm this is intended.
    """
    if cls._instance is None:
        cls._instance = super(LLAManager, cls).__new__(cls)
    return cls._instance

@agentops.record_function('LLAManager_init')
def __init__(self, if_show_reply=False):
    """Read LLAMA2 endpoint settings from config and set up the registry.

    Args:
        if_show_reply: when True, models created by this manager echo replies.
    """
    # Endpoint URL and API key come from the shared configuration file.
    self.llama2_base_url = ConfigReader().get_property("HF_API_LLAMA2_BASE")
    self.llama2_api_key = ConfigReader().get_property("HF_API_KEY")
    # label -> LLamaModel instances created via create_model()
    self.llama_models_container = {}
    self.if_show_reply = if_show_reply

@agentops.record_function('LLAManager_create_model')
def create_model(self, prompt, llama_api, llama_model_label, temperature=1, model=None):
    """Build a LLamaModel, register it under *llama_model_label*, and return it.

    Args:
        prompt: system prompt for the new model.
        llama_api: unused; kept so the signature stays backward-compatible.
        llama_model_label: registry key for the new model.
        temperature: sampling temperature.
        model: inference endpoint to use; defaults to the configured
            LLAMA2 base URL when None.

    Returns:
        The newly created LLamaModel.
    """
    if model is None:
        model = self.llama2_base_url  # Use LLAMA2 base URL as the model
    # Bug fix: forward the resolved `model`; previously the parameter was
    # ignored and the base URL was always passed to LLamaModel.
    llama_model = LLamaModel(prompt, self.llama2_api_key, model, self.if_show_reply, temperature)
    self.llama_models_container[llama_model_label] = llama_model
    return llama_model

@agentops.record_function('LLAManager_create_and_chat_with_model')
def create_and_chat_with_model(self, prompt, llama_model_label, input_message, temperature=0.1, model=None):
    """Create a model under *llama_model_label* and send *input_message* to it.

    Returns:
        The model's reply text.
    """
    # Bug fix: create_model's second positional parameter is `llama_api`;
    # the previous call shifted every argument left by one (label passed as
    # the api, temperature as the label, model as the temperature).
    llama_model = self.create_model(prompt, self.llama2_api_key, llama_model_label, temperature, model)
    return llama_model.chat_with_model(input_message)

@agentops.record_function('LLAManager_get_llama_model')
def get_llama_model(self, llama_model_label):
return self.llama_models_container.get(llama_model_label)

@agentops.record_function('LLAManager_remove_llama_model')
def remove_llama_model(self, llama_model_label):
self.llama_models_container.pop(llama_model_label, None)

class LLamaModel:
@agentops.record_function('LLamaModel_init')
def __init__(self, prompt, api, model, if_show_reply=True, temperature=0.1):
self.prompt = prompt
self.api = api
Expand All @@ -44,11 +59,13 @@ def __init__(self, prompt, api, model, if_show_reply=True, temperature=0.1):
self.if_show_reply = if_show_reply
self.temperature = temperature

@agentops.record_function('LLamaModel_chat_with_model')
def chat_with_model(self, input_message):
    """Record *input_message* as the user turn, run the model, return its reply."""
    user_turn = {"role": "user", "content": input_message}
    self.messages.append(user_turn)
    self._run_model()
    return self.last_reply

@agentops.record_function('LLamaModel_run_model')
def _run_model(self):
client = Client(self.model, headers={"Authorization": f"Bearer {self.api}"}, timeout=120)
chat = client.generate(
Expand All @@ -62,6 +79,9 @@ def _run_model(self):
self.messages.append({"role": "assistant", "content": reply})
self.last_reply = reply

@agentops.record_function('LLamaModel_add_background')
def add_background(self, background_message):
self.messages.append({"role": "assistant", "content": background_message})

# End of program
agentops.end_session('Success')