diff --git a/CompanyConfig/Art/ChatChainConfig.json b/CompanyConfig/Art/ChatChainConfig.json
index d11a849..28a4489 100644
--- a/CompanyConfig/Art/ChatChainConfig.json
+++ b/CompanyConfig/Art/ChatChainConfig.json
@@ -116,5 +116,6 @@
   "gui_design": "True",
   "git_management": "False",
   "self_improve": "False",
-  "incremental_develop": "False"
+  "incremental_develop": "False",
+  "background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
 }
\ No newline at end of file
diff --git a/CompanyConfig/Default/ChatChainConfig.json b/CompanyConfig/Default/ChatChainConfig.json
index d9d8a62..c022053 100644
--- a/CompanyConfig/Default/ChatChainConfig.json
+++ b/CompanyConfig/Default/ChatChainConfig.json
@@ -97,5 +97,6 @@
   "gui_design": "True",
   "git_management": "False",
   "self_improve": "False",
-  "incremental_develop": "False"
+  "incremental_develop": "False",
+  "background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
 }
diff --git a/CompanyConfig/Human/ChatChainConfig.json b/CompanyConfig/Human/ChatChainConfig.json
index 7571149..986bf6e 100644
--- a/CompanyConfig/Human/ChatChainConfig.json
+++ b/CompanyConfig/Human/ChatChainConfig.json
@@ -110,5 +110,6 @@
   "gui_design": "True",
   "git_management": "False",
   "self_improve": "False",
-  "incremental_develop": "False"
+  "incremental_develop": "False",
+  "background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
 }
diff --git a/CompanyConfig/Incremental/ChatChainConfig.json b/CompanyConfig/Incremental/ChatChainConfig.json
index d3ee2fc..de36346 100644
--- a/CompanyConfig/Incremental/ChatChainConfig.json
+++ b/CompanyConfig/Incremental/ChatChainConfig.json
@@ -91,5 +91,6 @@
   "gui_design": "True",
   "git_management": "False",
   "self_improve": "False",
-  "incremental_develop": "True"
+  "incremental_develop": "True",
+  "background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
 }
diff --git a/camel/agents/role_playing.py b/camel/agents/role_playing.py
index 3844a1a..c694bb0 100644
--- a/camel/agents/role_playing.py
+++ b/camel/agents/role_playing.py
@@ -90,6 +90,7 @@ class RolePlaying:
             sys_msg_generator_kwargs: Optional[Dict] = None,
             extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
             extend_task_specify_meta_dict: Optional[Dict] = None,
+            background_prompt: Optional[str] = ""
     ) -> None:
         self.with_task_specify = with_task_specify
         self.with_task_planner = with_task_planner
@@ -131,9 +132,7 @@ class RolePlaying:
 
         self.task_prompt = task_prompt
 
-        chatdev_prompt_template = "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of \"changing the digital world through programming\"."
-
-        sys_msg_meta_dicts = [dict(chatdev_prompt=chatdev_prompt_template, task=task_prompt)] * 2
+        sys_msg_meta_dicts = [dict(chatdev_prompt=background_prompt, task=task_prompt)] * 2
         if (extend_sys_msg_meta_dicts is None and self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT, TaskType.CHATDEV]):
             extend_sys_msg_meta_dicts = [dict(assistant_role=assistant_role_name, user_role=user_role_name)] * 2
 
@@ -200,7 +199,7 @@
 
         # here we concatenate to store the real message in the log
         log_visualize(self.user_agent.role_name,
-                          "**[Start Chat]**\n\n[" + self.assistant_agent.system_message.content + "]\n\n" + content)
+                      "**[Start Chat]**\n\n[" + self.assistant_agent.system_message.content + "]\n\n" + content)
         return None, user_msg
 
     def process_messages(
diff --git a/chatdev/chat_chain.py b/chatdev/chat_chain.py
index 3940757..b883db0 100644
--- a/chatdev/chat_chain.py
+++ b/chatdev/chat_chain.py
@@ -67,7 +67,8 @@ class ChatChain:
         self.chat_env_config = ChatEnvConfig(clear_structure=check_bool(self.config["clear_structure"]),
                                              gui_design=check_bool(self.config["gui_design"]),
                                              git_management=check_bool(self.config["git_management"]),
-                                             incremental_develop=check_bool(self.config["incremental_develop"]))
+                                             incremental_develop=check_bool(self.config["incremental_develop"]),
+                                             background_prompt=self.config["background_prompt"])
         self.chat_env = ChatEnv(self.chat_env_config)
 
         # the user input prompt will be self-improved (if set "self_improve": "True" in ChatChainConfig.json)
diff --git a/chatdev/chat_env.py b/chatdev/chat_env.py
index 63ab159..1a75e28 100644
--- a/chatdev/chat_env.py
+++ b/chatdev/chat_env.py
@@ -27,11 +27,13 @@ class ChatEnvConfig:
     def __init__(self, clear_structure,
                  gui_design,
                  git_management,
-                 incremental_develop):
+                 incremental_develop,
+                 background_prompt):
         self.clear_structure = clear_structure  # Whether to clear non-software files in the WareHouse and cache files in generated software path
         self.gui_design = gui_design  # Encourage ChatDev generate software with GUI
         self.git_management = git_management  # Whether to use git to manage the creation and changes of generated software
         self.incremental_develop = incremental_develop  # Whether to use incremental develop on an existing project
+        self.background_prompt = background_prompt  # background prompt that will be added to every inquiry to LLM
 
     def __str__(self):
         string = ""
@@ -39,6 +41,7 @@
         string += "ChatEnvConfig.git_management: {}\n".format(self.git_management)
         string += "ChatEnvConfig.gui_design: {}\n".format(self.gui_design)
         string += "ChatEnvConfig.incremental_develop: {}\n".format(self.incremental_develop)
+        string += "ChatEnvConfig.background_prompt: {}\n".format(self.background_prompt)
         return string
diff --git a/chatdev/phase.py b/chatdev/phase.py
index 15a6ee4..1bc2017 100644
--- a/chatdev/phase.py
+++ b/chatdev/phase.py
@@ -65,7 +65,7 @@ class Phase(ABC):
         """
 
         Args:
-            chat_env: global chatchain environment TODO: only for employee detection, can be deleted
+            chat_env: global chatchain environment
             task_prompt: user query prompt for building the software
             assistant_role_name: who receives the chat
             user_role_name: who starts the chat
@@ -103,6 +103,7 @@ class Phase(ABC):
             task_type=task_type,
             with_task_specify=with_task_specify,
             model_type=model_type,
+            background_prompt=chat_env.config.background_prompt
         )
 
         # log_visualize("System", role_play_session.assistant_sys_msg)
diff --git a/chatdev/statistics.py b/chatdev/statistics.py
index 42dcc4f..82a08fb 100644
--- a/chatdev/statistics.py
+++ b/chatdev/statistics.py
@@ -105,7 +105,7 @@ def get_info(dir, log_filepath):
             if len(sublines) > 0:
                 model_type = sublines[0].split("| **model_type** | ModelType.")[-1].split(" | ")[0]
                 model_type = model_type[:-2]
-                if model_type == "GPT_3_5_TURBO":
+                if model_type == "GPT_3_5_TURBO" or model_type == "GPT_3_5_TURBO_NEW":
                     model_type = "gpt-3.5-turbo"
                 elif model_type == "GPT_4":
                     model_type = "gpt-4"
@@ -114,7 +114,7 @@ def get_info(dir, log_filepath):
                 elif model_type == "GPT_4_TURBO":
                     model_type = "gpt-4-1106-preview"
             # print("model_type:", model_type)
-    
+
     lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
     start_lines = [line for line in lines if "**[Start Chat]**" in line]
     chat_lines = [line for line in lines if "<->" in line]
diff --git a/wiki.md b/wiki.md
index 518cea5..b10a515 100644
--- a/wiki.md
+++ b/wiki.md
@@ -277,6 +277,7 @@ then go to [Visualizer Website](http://127.0.0.1:8000/) to see an online visuali
   - *incremental_develop*: Whether to use incremental develop on an existing project.
   - *self_improve*: flag for self-improvement on user input prompt. It is a special chatting that LLM plays as a prompt engineer to improve the user input prompt. **⚠️ Attention** Model generated prompts contains uncertainty and there may be a deviation from the requirement meaning contained in the original prompt.
+  - *background_prompt*: background prompt that will be added to every inquiry to LLM
 - params in SimplePhase:
   - *max_turn_step*: Max number of chatting turn. You can increase max_turn_step for better performance but it will take longer time to finish the phase.