Resolve #302: Hardcoded chatdev_prompt_template --> background_prompt parameter in ChatChainConfig.json

This commit is contained in:
thinkwee 2023-12-21 11:06:23 +08:00
parent 4cbd741a42
commit 262976083b
10 changed files with 22 additions and 13 deletions

View File

@ -116,5 +116,6 @@
"gui_design": "True",
"git_management": "False",
"self_improve": "False",
"incremental_develop": "False"
"incremental_develop": "False",
"background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
}

View File

@ -97,5 +97,6 @@
"gui_design": "True",
"git_management": "False",
"self_improve": "False",
"incremental_develop": "False"
"incremental_develop": "False",
"background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
}

View File

@ -110,5 +110,6 @@
"gui_design": "True",
"git_management": "False",
"self_improve": "False",
"incremental_develop": "False"
"incremental_develop": "False",
"background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
}

View File

@ -91,5 +91,6 @@
"gui_design": "True",
"git_management": "False",
"self_improve": "False",
"incremental_develop": "True"
"incremental_develop": "True",
"background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
}

View File

@ -90,6 +90,7 @@ class RolePlaying:
sys_msg_generator_kwargs: Optional[Dict] = None,
extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
extend_task_specify_meta_dict: Optional[Dict] = None,
background_prompt: Optional[str] = ""
) -> None:
self.with_task_specify = with_task_specify
self.with_task_planner = with_task_planner
@ -131,9 +132,7 @@ class RolePlaying:
self.task_prompt = task_prompt
chatdev_prompt_template = "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of \"changing the digital world through programming\"."
sys_msg_meta_dicts = [dict(chatdev_prompt=chatdev_prompt_template, task=task_prompt)] * 2
sys_msg_meta_dicts = [dict(chatdev_prompt=background_prompt, task=task_prompt)] * 2
if (extend_sys_msg_meta_dicts is None and self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT,
TaskType.CHATDEV]):
extend_sys_msg_meta_dicts = [dict(assistant_role=assistant_role_name, user_role=user_role_name)] * 2
@ -200,7 +199,7 @@ class RolePlaying:
# here we concatenate to store the real message in the log
log_visualize(self.user_agent.role_name,
"**[Start Chat]**\n\n[" + self.assistant_agent.system_message.content + "]\n\n" + content)
"**[Start Chat]**\n\n[" + self.assistant_agent.system_message.content + "]\n\n" + content)
return None, user_msg
def process_messages(

View File

@ -67,7 +67,8 @@ class ChatChain:
self.chat_env_config = ChatEnvConfig(clear_structure=check_bool(self.config["clear_structure"]),
gui_design=check_bool(self.config["gui_design"]),
git_management=check_bool(self.config["git_management"]),
incremental_develop=check_bool(self.config["incremental_develop"]))
incremental_develop=check_bool(self.config["incremental_develop"]),
background_prompt=self.config["background_prompt"])
self.chat_env = ChatEnv(self.chat_env_config)
# the user input prompt will be self-improved (if set "self_improve": "True" in ChatChainConfig.json)

View File

@ -27,11 +27,13 @@ class ChatEnvConfig:
def __init__(self, clear_structure,
gui_design,
git_management,
incremental_develop):
incremental_develop,
background_prompt):
self.clear_structure = clear_structure # Whether to clear non-software files in the WareHouse and cache files in generated software path
self.gui_design = gui_design # Encourage ChatDev generate software with GUI
self.git_management = git_management # Whether to use git to manage the creation and changes of generated software
self.incremental_develop = incremental_develop # Whether to use incremental develop on an existing project
self.background_prompt = background_prompt # background prompt that will be added to every inquiry to LLM
def __str__(self):
string = ""
@ -39,6 +41,7 @@ class ChatEnvConfig:
string += "ChatEnvConfig.git_management: {}\n".format(self.git_management)
string += "ChatEnvConfig.gui_design: {}\n".format(self.gui_design)
string += "ChatEnvConfig.incremental_develop: {}\n".format(self.incremental_develop)
string += "ChatEnvConfig.background_prompt: {}\n".format(self.background_prompt)
return string

View File

@ -65,7 +65,7 @@ class Phase(ABC):
"""
Args:
chat_env: global chatchain environment TODO: only for employee detection, can be deleted
chat_env: global chatchain environment
task_prompt: user query prompt for building the software
assistant_role_name: who receives the chat
user_role_name: who starts the chat
@ -103,6 +103,7 @@ class Phase(ABC):
task_type=task_type,
with_task_specify=with_task_specify,
model_type=model_type,
background_prompt=chat_env.config.background_prompt
)
# log_visualize("System", role_play_session.assistant_sys_msg)

View File

@ -105,7 +105,7 @@ def get_info(dir, log_filepath):
if len(sublines) > 0:
model_type = sublines[0].split("| **model_type** | ModelType.")[-1].split(" | ")[0]
model_type = model_type[:-2]
if model_type == "GPT_3_5_TURBO":
if model_type == "GPT_3_5_TURBO" or model_type == "GPT_3_5_TURBO_NEW":
model_type = "gpt-3.5-turbo"
elif model_type == "GPT_4":
model_type = "gpt-4"
@ -114,7 +114,7 @@ def get_info(dir, log_filepath):
elif model_type == "GPT_4_TURBO":
model_type = "gpt-4-1106-preview"
# print("model_type:", model_type)
lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
start_lines = [line for line in lines if "**[Start Chat]**" in line]
chat_lines = [line for line in lines if "<->" in line]

View File

@ -277,6 +277,7 @@ then go to [Visualizer Website](http://127.0.0.1:8000/) to see an online visuali
- *incremental_develop*: Whether to use incremental development on an existing project.
- *self_improve*: flag for self-improvement of the user input prompt. It is a special chat in which the LLM plays the role of a prompt engineer and improves the user input prompt. **⚠️ Attention** Model-generated prompts contain uncertainty, and the improved prompt may
deviate from the requirement meaning contained in the original prompt.
- *background_prompt*: background prompt that will be added to every inquiry sent to the LLM.
- params in SimplePhase:
  - *max_turn_step*: Max number of chatting turns. You can increase max_turn_step for better performance, but it will
    take a longer time to finish the phase.