mirror of
https://github.com/OpenBMB/ChatDev.git
synced 2024-12-25 12:52:29 +03:00
Merge pull request #337 from NA-Wen/main
Integrate Experiential Co-Learning Module
This commit is contained in:
commit
dc6dd4284d
@ -1,103 +1,99 @@
|
|||||||
{
|
{
|
||||||
"chain": [
|
"chain": [{
|
||||||
{
|
"phase": "DemandAnalysis",
|
||||||
"phase": "DemandAnalysis",
|
"phaseType": "SimplePhase",
|
||||||
"phaseType": "SimplePhase",
|
"max_turn_step": -1,
|
||||||
"max_turn_step": -1,
|
"need_reflect": "True"
|
||||||
"need_reflect": "True"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"phase": "LanguageChoose",
|
|
||||||
"phaseType": "SimplePhase",
|
|
||||||
"max_turn_step": -1,
|
|
||||||
"need_reflect": "True"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"phase": "Coding",
|
|
||||||
"phaseType": "SimplePhase",
|
|
||||||
"max_turn_step": 1,
|
|
||||||
"need_reflect": "False"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"phase": "CodeCompleteAll",
|
|
||||||
"phaseType": "ComposedPhase",
|
|
||||||
"cycleNum": 10,
|
|
||||||
"Composition": [
|
|
||||||
{
|
|
||||||
"phase": "CodeComplete",
|
|
||||||
"phaseType": "SimplePhase",
|
|
||||||
"max_turn_step": 1,
|
|
||||||
"need_reflect": "False"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"phase": "CodeReview",
|
|
||||||
"phaseType": "ComposedPhase",
|
|
||||||
"cycleNum": 3,
|
|
||||||
"Composition": [
|
|
||||||
{
|
|
||||||
"phase": "CodeReviewComment",
|
|
||||||
"phaseType": "SimplePhase",
|
|
||||||
"max_turn_step": 1,
|
|
||||||
"need_reflect": "False"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "CodeReviewModification",
|
"phase": "LanguageChoose",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": -1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "True"
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"phase": "Test",
|
|
||||||
"phaseType": "ComposedPhase",
|
|
||||||
"cycleNum": 3,
|
|
||||||
"Composition": [
|
|
||||||
{
|
|
||||||
"phase": "TestErrorSummary",
|
|
||||||
"phaseType": "SimplePhase",
|
|
||||||
"max_turn_step": 1,
|
|
||||||
"need_reflect": "False"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "TestModification",
|
"phase": "Coding",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"phase": "CodeCompleteAll",
|
||||||
|
"phaseType": "ComposedPhase",
|
||||||
|
"cycleNum": 10,
|
||||||
|
"Composition": [{
|
||||||
|
"phase": "CodeComplete",
|
||||||
|
"phaseType": "SimplePhase",
|
||||||
|
"max_turn_step": 1,
|
||||||
|
"need_reflect": "False"
|
||||||
|
}]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"phase": "CodeReview",
|
||||||
|
"phaseType": "ComposedPhase",
|
||||||
|
"cycleNum": 3,
|
||||||
|
"Composition": [{
|
||||||
|
"phase": "CodeReviewComment",
|
||||||
|
"phaseType": "SimplePhase",
|
||||||
|
"max_turn_step": 1,
|
||||||
|
"need_reflect": "False"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"phase": "CodeReviewModification",
|
||||||
|
"phaseType": "SimplePhase",
|
||||||
|
"max_turn_step": 1,
|
||||||
|
"need_reflect": "False"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"phase": "Test",
|
||||||
|
"phaseType": "ComposedPhase",
|
||||||
|
"cycleNum": 3,
|
||||||
|
"Composition": [{
|
||||||
|
"phase": "TestErrorSummary",
|
||||||
|
"phaseType": "SimplePhase",
|
||||||
|
"max_turn_step": 1,
|
||||||
|
"need_reflect": "False"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"phase": "TestModification",
|
||||||
|
"phaseType": "SimplePhase",
|
||||||
|
"max_turn_step": 1,
|
||||||
|
"need_reflect": "False"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"phase": "EnvironmentDoc",
|
||||||
|
"phaseType": "SimplePhase",
|
||||||
|
"max_turn_step": 1,
|
||||||
|
"need_reflect": "True"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"phase": "Manual",
|
||||||
|
"phaseType": "SimplePhase",
|
||||||
|
"max_turn_step": 1,
|
||||||
|
"need_reflect": "False"
|
||||||
}
|
}
|
||||||
]
|
],
|
||||||
},
|
"recruitments": [
|
||||||
{
|
"Chief Executive Officer",
|
||||||
"phase": "EnvironmentDoc",
|
"Counselor",
|
||||||
"phaseType": "SimplePhase",
|
"Chief Human Resource Officer",
|
||||||
"max_turn_step": 1,
|
"Chief Product Officer",
|
||||||
"need_reflect": "True"
|
"Chief Technology Officer",
|
||||||
},
|
"Programmer",
|
||||||
{
|
"Code Reviewer",
|
||||||
"phase": "Manual",
|
"Software Test Engineer",
|
||||||
"phaseType": "SimplePhase",
|
"Chief Creative Officer"
|
||||||
"max_turn_step": 1,
|
],
|
||||||
"need_reflect": "False"
|
"clear_structure": "True",
|
||||||
}
|
"gui_design": "True",
|
||||||
],
|
"git_management": "False",
|
||||||
"recruitments": [
|
"web_spider": "False",
|
||||||
"Chief Executive Officer",
|
"self_improve": "False",
|
||||||
"Counselor",
|
"incremental_develop": "False",
|
||||||
"Chief Human Resource Officer",
|
"with_memory": "False",
|
||||||
"Chief Product Officer",
|
"background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
|
||||||
"Chief Technology Officer",
|
|
||||||
"Programmer",
|
|
||||||
"Code Reviewer",
|
|
||||||
"Software Test Engineer",
|
|
||||||
"Chief Creative Officer"
|
|
||||||
],
|
|
||||||
"clear_structure": "True",
|
|
||||||
"gui_design": "True",
|
|
||||||
"git_management": "False",
|
|
||||||
"web_spider": "False",
|
|
||||||
"self_improve": "False",
|
|
||||||
"incremental_develop": "False",
|
|
||||||
"background_prompt": "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of 'changing the digital world through programming'."
|
|
||||||
}
|
}
|
@ -27,7 +27,8 @@
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
## 🎉 News
|
## 🎉 News
|
||||||
* **December 28, 2023: We present Experiential Co-Learning, an innovative approach where instructor and assistant agents accumulate shortcut-oriented experiences to effectively solve new tasks, reducing repetitive errors and enhancing efficiency. Check out our preprint paper at https://arxiv.org/abs/2312.17025 and this technique will soon be integrated into ChatDev.**
|
* **January 25, 2024: We integrate Experiential Co-Learning Module into ChatDev. Please see the [Experiential Co-Learning Guide](wiki.md#co-tracking).**
|
||||||
|
* December 28, 2023: We present Experiential Co-Learning, an innovative approach where instructor and assistant agents accumulate shortcut-oriented experiences to effectively solve new tasks, reducing repetitive errors and enhancing efficiency. Check out our preprint paper at https://arxiv.org/abs/2312.17025 and this technique will soon be integrated into ChatDev.
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src='./misc/ecl.png' width=860>
|
<img src='./misc/ecl.png' width=860>
|
||||||
</p>
|
</p>
|
||||||
|
Binary file not shown.
@ -1 +0,0 @@
|
|||||||
Subproject commit e0396448114be2e320564cdfbe6bcf4082dd4e42
|
|
Binary file not shown.
@ -1,120 +1,120 @@
|
|||||||
{
|
{
|
||||||
"chain": [
|
"chain": [
|
||||||
{
|
{
|
||||||
"phase": "DemandAnalysis",
|
"phase": "DemandAnalysis",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": -1,
|
"max_turn_step": -1,
|
||||||
"need_reflect": "True"
|
"need_reflect": "True"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "LanguageChoose",
|
"phase": "LanguageChoose",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": -1,
|
"max_turn_step": -1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "Coding",
|
"phase": "Coding",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "Art",
|
"phase": "Art",
|
||||||
"phaseType": "ComposedPhase",
|
"phaseType": "ComposedPhase",
|
||||||
"cycleNum": 1,
|
"cycleNum": 1,
|
||||||
"Composition": [
|
"Composition": [
|
||||||
{
|
{
|
||||||
"phase": "ArtDesign",
|
"phase": "ArtDesign",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "ArtIntegration",
|
"phase": "ArtIntegration",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "CodeCompleteAll",
|
"phase": "CodeCompleteAll",
|
||||||
"phaseType": "ComposedPhase",
|
"phaseType": "ComposedPhase",
|
||||||
"cycleNum": 10,
|
"cycleNum": 10,
|
||||||
"Composition": [
|
"Composition": [
|
||||||
{
|
{
|
||||||
"phase": "CodeComplete",
|
"phase": "CodeComplete",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "CodeReview",
|
"phase": "CodeReview",
|
||||||
"phaseType": "ComposedPhase",
|
"phaseType": "ComposedPhase",
|
||||||
"cycleNum": 3,
|
"cycleNum": 3,
|
||||||
"Composition": [
|
"Composition": [
|
||||||
{
|
{
|
||||||
"phase": "CodeReviewComment",
|
"phase": "CodeReviewComment",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "CodeReviewModification",
|
"phase": "CodeReviewModification",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "Test",
|
"phase": "Test",
|
||||||
"phaseType": "ComposedPhase",
|
"phaseType": "ComposedPhase",
|
||||||
"cycleNum": 3,
|
"cycleNum": 3,
|
||||||
"Composition": [
|
"Composition": [
|
||||||
{
|
{
|
||||||
"phase": "TestErrorSummary",
|
"phase": "TestErrorSummary",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "TestModification",
|
"phase": "TestModification",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "EnvironmentDoc",
|
"phase": "EnvironmentDoc",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "True"
|
"need_reflect": "True"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"phase": "Manual",
|
"phase": "Manual",
|
||||||
"phaseType": "SimplePhase",
|
"phaseType": "SimplePhase",
|
||||||
"max_turn_step": 1,
|
"max_turn_step": 1,
|
||||||
"need_reflect": "False"
|
"need_reflect": "False"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"recruitments": [
|
"recruitments": [
|
||||||
"Chief Executive Officer",
|
"Chief Executive Officer",
|
||||||
"Counselor",
|
"Counselor",
|
||||||
"Chief Human Resource Officer",
|
"Chief Human Resource Officer",
|
||||||
"Chief Product Officer",
|
"Chief Product Officer",
|
||||||
"Chief Technology Officer",
|
"Chief Technology Officer",
|
||||||
"Programmer",
|
"Programmer",
|
||||||
"Code Reviewer",
|
"Code Reviewer",
|
||||||
"Software Test Engineer",
|
"Software Test Engineer",
|
||||||
"Chief Creative Officer"
|
"Chief Creative Officer"
|
||||||
],
|
],
|
||||||
"clear_structure": "True",
|
"clear_structure": "True",
|
||||||
"brainstorming": "False",
|
"brainstorming": "False",
|
||||||
"gui_design": "True",
|
"gui_design": "True",
|
||||||
"git_management": "False",
|
"git_management": "False",
|
||||||
"self_improve": "False"
|
"self_improve": "False"
|
||||||
}
|
}
|
@ -1,301 +1,301 @@
|
|||||||
{
|
{
|
||||||
"DemandAnalysis": {
|
"DemandAnalysis": {
|
||||||
"assistant_role_name": "Chief Product Officer",
|
"assistant_role_name": "Chief Product Officer",
|
||||||
"user_role_name": "Chief Executive Officer",
|
"user_role_name": "Chief Executive Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"ChatDev has made products in the following form before:",
|
"ChatDev has made products in the following form before:",
|
||||||
"Image: can present information in line chart, bar chart, flow chart, cloud chart, Gantt chart, etc.",
|
"Image: can present information in line chart, bar chart, flow chart, cloud chart, Gantt chart, etc.",
|
||||||
"Document: can present information via .docx files.",
|
"Document: can present information via .docx files.",
|
||||||
"PowerPoint: can present information via .pptx files.",
|
"PowerPoint: can present information via .pptx files.",
|
||||||
"Excel: can present information via .xlsx files.",
|
"Excel: can present information via .xlsx files.",
|
||||||
"PDF: can present information via .pdf files.",
|
"PDF: can present information via .pdf files.",
|
||||||
"Website: can present personal resume, tutorial, products, or ideas, via .html files.",
|
"Website: can present personal resume, tutorial, products, or ideas, via .html files.",
|
||||||
"Application: can implement visualized game, software, tool, etc, via python.",
|
"Application: can implement visualized game, software, tool, etc, via python.",
|
||||||
"Dashboard: can display a panel visualizing real-time information.",
|
"Dashboard: can display a panel visualizing real-time information.",
|
||||||
"Mind Map: can represent ideas, with related concepts arranged around a core concept.",
|
"Mind Map: can represent ideas, with related concepts arranged around a core concept.",
|
||||||
"As the {assistant_role}, to satisfy the new user's demand and the product should be realizable, you should keep discussing with me to decide which product modality do we want the product to be?",
|
"As the {assistant_role}, to satisfy the new user's demand and the product should be realizable, you should keep discussing with me to decide which product modality do we want the product to be?",
|
||||||
"Note that we must ONLY discuss the product modality and do not discuss anything else! Once we all have expressed our opinion(s) and agree with the results of the discussion unanimously, any of us must actively terminate the discussion by replying with only one line, which starts with a single word <INFO>, followed by our final product modality without any other words, e.g., \"<INFO> PowerPoint\"."
|
"Note that we must ONLY discuss the product modality and do not discuss anything else! Once we all have expressed our opinion(s) and agree with the results of the discussion unanimously, any of us must actively terminate the discussion by replying with only one line, which starts with a single word <INFO>, followed by our final product modality without any other words, e.g., \"<INFO> PowerPoint\"."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"LanguageChoose": {
|
"LanguageChoose": {
|
||||||
"assistant_role_name": "Chief Technology Officer",
|
"assistant_role_name": "Chief Technology Officer",
|
||||||
"user_role_name": "Chief Executive Officer",
|
"user_role_name": "Chief Executive Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"According to the new user's task and some creative brainstorm ideas listed below: ",
|
"According to the new user's task and some creative brainstorm ideas listed below: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Ideas: \"{ideas}\".",
|
"Ideas: \"{ideas}\".",
|
||||||
"We have decided to complete the task through a executable software implemented via a programming language. ",
|
"We have decided to complete the task through a executable software implemented via a programming language. ",
|
||||||
"As the {assistant_role}, to satisfy the new user's demand and make the software realizable, you should propose a concrete programming language. If python can complete this task via Python, please answer Python; otherwise, answer another programming language (e.g., Java, C++, etc,).",
|
"As the {assistant_role}, to satisfy the new user's demand and make the software realizable, you should propose a concrete programming language. If python can complete this task via Python, please answer Python; otherwise, answer another programming language (e.g., Java, C++, etc,).",
|
||||||
"Note that we must ONLY discuss the target programming language and do not discuss anything else! Once we all have expressed our opinion(s) and agree with the results of the discussion unanimously, any of us must actively terminate the discussion and conclude the best programming language we have discussed without any other words or reasons, return only one line using the format: \"<INFO> *\" where \"*\" represents a programming language."
|
"Note that we must ONLY discuss the target programming language and do not discuss anything else! Once we all have expressed our opinion(s) and agree with the results of the discussion unanimously, any of us must actively terminate the discussion and conclude the best programming language we have discussed without any other words or reasons, return only one line using the format: \"<INFO> *\" where \"*\" represents a programming language."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"Coding": {
|
"Coding": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Chief Technology Officer",
|
"user_role_name": "Chief Technology Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"According to the new user's task and our software designs listed below: ",
|
"According to the new user's task and our software designs listed below: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Ideas:\"{ideas}\"",
|
"Ideas:\"{ideas}\"",
|
||||||
"We have decided to complete the task through a executable software with multiple files implemented via {language}. As the {assistant_role}, to satisfy the new user's demands, you should write one or multiple files and make sure that every detail of the architecture is, in the end, implemented as code. {gui}",
|
"We have decided to complete the task through a executable software with multiple files implemented via {language}. As the {assistant_role}, to satisfy the new user's demands, you should write one or multiple files and make sure that every detail of the architecture is, in the end, implemented as code. {gui}",
|
||||||
"Think step by step and reason yourself to the right decisions to make sure we get it right.",
|
"Think step by step and reason yourself to the right decisions to make sure we get it right.",
|
||||||
"You will first lay out the names of the core classes, functions, methods that will be necessary, as well as a quick comment on their purpose.",
|
"You will first lay out the names of the core classes, functions, methods that will be necessary, as well as a quick comment on their purpose.",
|
||||||
"Then you will output the content of each file including complete code. Each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
"Then you will output the content of each file including complete code. Each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
||||||
"FILENAME",
|
"FILENAME",
|
||||||
"```LANGUAGE",
|
"```LANGUAGE",
|
||||||
"'''",
|
"'''",
|
||||||
"DOCSTRING",
|
"DOCSTRING",
|
||||||
"'''",
|
"'''",
|
||||||
"CODE",
|
"CODE",
|
||||||
"```",
|
"```",
|
||||||
"You will start with the \"main\" file, then go to the ones that are imported by that file, and so on.",
|
"You will start with the \"main\" file, then go to the ones that are imported by that file, and so on.",
|
||||||
"Please note that the code should be fully functional. Ensure to implement all functions. No placeholders (such as 'pass' in Python)."
|
"Please note that the code should be fully functional. Ensure to implement all functions. No placeholders (such as 'pass' in Python)."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"ArtDesign": {
|
"ArtDesign": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Chief Creative Officer",
|
"user_role_name": "Chief Creative Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"Our developed source codes and corresponding test reports are listed below: ",
|
"Our developed source codes and corresponding test reports are listed below: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Source Codes:",
|
"Source Codes:",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Note that each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
"Note that each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
||||||
"FILENAME",
|
"FILENAME",
|
||||||
"```LANGUAGE",
|
"```LANGUAGE",
|
||||||
"'''",
|
"'''",
|
||||||
"DOCSTRING",
|
"DOCSTRING",
|
||||||
"'''",
|
"'''",
|
||||||
"CODE",
|
"CODE",
|
||||||
"```",
|
"```",
|
||||||
"As the {assistant_role}, to satisfy the new user's demand and equip the software with a beautiful graphical user interface (GUI), we will discuss and design many decorative images for GUI decoration. Now, we keep discussing the GUI beautification by listing some functionally independent elements in GUI that are being considered to be decorated by different pictures. For example, ten digits (0-9) in a calculator are functionally independent.",
|
"As the {assistant_role}, to satisfy the new user's demand and equip the software with a beautiful graphical user interface (GUI), we will discuss and design many decorative images for GUI decoration. Now, we keep discussing the GUI beautification by listing some functionally independent elements in GUI that are being considered to be decorated by different pictures. For example, ten digits (0-9) in a calculator are functionally independent.",
|
||||||
"To answer, use the format: \" FILENAME.png: DESCRIPTION\" where \"FILENAME\" is the filename of the image and \"DESCRIPTION\" denotes the detailed description of the independent elements. For example:",
|
"To answer, use the format: \" FILENAME.png: DESCRIPTION\" where \"FILENAME\" is the filename of the image and \"DESCRIPTION\" denotes the detailed description of the independent elements. For example:",
|
||||||
"'''",
|
"'''",
|
||||||
"button_1.png: The button with the number \"1\" on it.",
|
"button_1.png: The button with the number \"1\" on it.",
|
||||||
"button_multiply.png: The button with the multiplication symbol (\"*\") on it.",
|
"button_multiply.png: The button with the multiplication symbol (\"*\") on it.",
|
||||||
"background.png: the background color to decorate the Go game",
|
"background.png: the background color to decorate the Go game",
|
||||||
"'''",
|
"'''",
|
||||||
"Now, list all functionally independent elements as much as possible."
|
"Now, list all functionally independent elements as much as possible."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"ArtIntegration": {
|
"ArtIntegration": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Chief Creative Officer",
|
"user_role_name": "Chief Creative Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"Our developed source codes and corresponding test reports are listed below: ",
|
"Our developed source codes and corresponding test reports are listed below: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Source Codes:",
|
"Source Codes:",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Note that each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
"Note that each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
||||||
"FILENAME",
|
"FILENAME",
|
||||||
"```LANGUAGE",
|
"```LANGUAGE",
|
||||||
"'''",
|
"'''",
|
||||||
"DOCSTRING",
|
"DOCSTRING",
|
||||||
"'''",
|
"'''",
|
||||||
"CODE",
|
"CODE",
|
||||||
"```",
|
"```",
|
||||||
"As the {assistant_role}, to satisfy the new user's demand and equip the software with a beautiful graphical user interface (GUI), you will incorporate our designed images for GUI decoration. Here are some ready-made high-quality pictures and corresponding descriptions:",
|
"As the {assistant_role}, to satisfy the new user's demand and equip the software with a beautiful graphical user interface (GUI), you will incorporate our designed images for GUI decoration. Here are some ready-made high-quality pictures and corresponding descriptions:",
|
||||||
"{images}",
|
"{images}",
|
||||||
"Note that the designed images have a fixed size of 256x256 pixels and the images are located in the same directory as all the Python files; please dynamically scaling these images according to the size of GUI, and use \"self.*\" to avoid displaying-related problems caused by automatic garbage collection. For example:",
|
"Note that the designed images have a fixed size of 256x256 pixels and the images are located in the same directory as all the Python files; please dynamically scaling these images according to the size of GUI, and use \"self.*\" to avoid displaying-related problems caused by automatic garbage collection. For example:",
|
||||||
"```",
|
"```",
|
||||||
"self.image = ImageTk.PhotoImage(Image.open(\"./image.png\").resize((50, 50)))",
|
"self.image = ImageTk.PhotoImage(Image.open(\"./image.png\").resize((50, 50)))",
|
||||||
"```",
|
"```",
|
||||||
"Now, use some or all of the pictures into the GUI to make it more beautiful and creative. Output codes strictly following the required format mentioned above."
|
"Now, use some or all of the pictures into the GUI to make it more beautiful and creative. Output codes strictly following the required format mentioned above."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"CodeComplete": {
|
"CodeComplete": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Chief Technology Officer",
|
"user_role_name": "Chief Technology Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"According to the new user's task and our software designs listed below: ",
|
"According to the new user's task and our software designs listed below: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Codes:",
|
"Codes:",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Unimplemented File:",
|
"Unimplemented File:",
|
||||||
"\"{unimplemented_file}\"",
|
"\"{unimplemented_file}\"",
|
||||||
"In our software, each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
"In our software, each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" in the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
||||||
"FILENAME",
|
"FILENAME",
|
||||||
"```LANGUAGE",
|
"```LANGUAGE",
|
||||||
"'''",
|
"'''",
|
||||||
"DOCSTRING",
|
"DOCSTRING",
|
||||||
"'''",
|
"'''",
|
||||||
"CODE",
|
"CODE",
|
||||||
"```",
|
"```",
|
||||||
"As the {assistant_role}, to satisfy the complete function of our developed software, you have to implement all methods in the {unimplemented_file} file which contains a unimplemented class. Now, implement all methods of the {unimplemented_file} and all other codes needed, then output the fully implemented codes, strictly following the required format."
|
"As the {assistant_role}, to satisfy the complete function of our developed software, you have to implement all methods in the {unimplemented_file} file which contains a unimplemented class. Now, implement all methods of the {unimplemented_file} and all other codes needed, then output the fully implemented codes, strictly following the required format."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"CodeReviewComment": {
|
"CodeReviewComment": {
|
||||||
"assistant_role_name": "Code Reviewer",
|
"assistant_role_name": "Code Reviewer",
|
||||||
"user_role_name": "Programmer",
|
"user_role_name": "Programmer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"According to the new user's task and our software designs: ",
|
"According to the new user's task and our software designs: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Ideas: \"{ideas}\"",
|
"Ideas: \"{ideas}\"",
|
||||||
"Codes:",
|
"Codes:",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"As the {assistant_role}, to make the software directly operable without further coding, ChatDev has formulated the following regulations:",
|
"As the {assistant_role}, to make the software directly operable without further coding, ChatDev has formulated the following regulations:",
|
||||||
"1) all referenced classes should be imported;",
|
"1) all referenced classes should be imported;",
|
||||||
"2) all methods should be implemented;",
|
"2) all methods should be implemented;",
|
||||||
"3) all methods need to have the necessary comments;",
|
"3) all methods need to have the necessary comments;",
|
||||||
"4) no potential bugs;",
|
"4) no potential bugs;",
|
||||||
"5) The entire project conforms to the tasks proposed by the user;",
|
"5) The entire project conforms to the tasks proposed by the user;",
|
||||||
"6) most importantly, do not only check the errors in the code, but also the logic of code. Make sure that user can interact with generated software without losing any feature in the requirement;",
|
"6) most importantly, do not only check the errors in the code, but also the logic of code. Make sure that user can interact with generated software without losing any feature in the requirement;",
|
||||||
"Now, you should check the above regulations one by one and review the codes in detail, propose one comment with the highest priority about the codes, and give me instructions on how to fix. Tell me your comment with the highest priority and corresponding suggestions on revision. If the codes are perfect and you have no comment on them, return only one line like \"<INFO> Finished\"."
|
"Now, you should check the above regulations one by one and review the codes in detail, propose one comment with the highest priority about the codes, and give me instructions on how to fix. Tell me your comment with the highest priority and corresponding suggestions on revision. If the codes are perfect and you have no comment on them, return only one line like \"<INFO> Finished\"."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"CodeReviewModification": {
|
"CodeReviewModification": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Code Reviewer",
|
"user_role_name": "Code Reviewer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"According to the new user's task, our designed product modality, languages and ideas, our developed first-edition source codes are listed below: ",
|
"According to the new user's task, our designed product modality, languages and ideas, our developed first-edition source codes are listed below: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Ideas: \"{ideas}\"",
|
"Ideas: \"{ideas}\"",
|
||||||
"Codes: ",
|
"Codes: ",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Comments on Codes:",
|
"Comments on Codes:",
|
||||||
"\"{comments}\"",
|
"\"{comments}\"",
|
||||||
"In the software, each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" is the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code. Format:",
|
"In the software, each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" is the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code. Format:",
|
||||||
"FILENAME",
|
"FILENAME",
|
||||||
"```LANGUAGE",
|
"```LANGUAGE",
|
||||||
"'''",
|
"'''",
|
||||||
"DOCSTRING",
|
"DOCSTRING",
|
||||||
"'''",
|
"'''",
|
||||||
"CODE",
|
"CODE",
|
||||||
"```",
|
"```",
|
||||||
"As the {assistant_role}, to satisfy the new user's demand and make the software creative, executive and robust, you should modify corresponding codes according to the comments. Then, output the full and complete codes with all bugs fixed based on the comments. Return all codes strictly following the required format."
|
"As the {assistant_role}, to satisfy the new user's demand and make the software creative, executive and robust, you should modify corresponding codes according to the comments. Then, output the full and complete codes with all bugs fixed based on the comments. Return all codes strictly following the required format."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"CodeReviewHuman": {
|
"CodeReviewHuman": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Code Reviewer",
|
"user_role_name": "Code Reviewer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"According to the new user's task, our designed product modality and three creative ideas, our developed first-edition source codes are listed below: ",
|
"According to the new user's task, our designed product modality and three creative ideas, our developed first-edition source codes are listed below: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Ideas: \"{ideas}\"",
|
"Ideas: \"{ideas}\"",
|
||||||
"Codes: ",
|
"Codes: ",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Comments on Codes:",
|
"Comments on Codes:",
|
||||||
"\"{comments}\"",
|
"\"{comments}\"",
|
||||||
"In the software, each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" is the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code. Format:",
|
"In the software, each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" is the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code. Format:",
|
||||||
"FILENAME",
|
"FILENAME",
|
||||||
"```LANGUAGE",
|
"```LANGUAGE",
|
||||||
"'''",
|
"'''",
|
||||||
"DOCSTRING",
|
"DOCSTRING",
|
||||||
"'''",
|
"'''",
|
||||||
"CODE",
|
"CODE",
|
||||||
"```",
|
"```",
|
||||||
"As the {assistant_role}, to satisfy the new user's demand and make the software creative, executive and robust, you should modify corresponding codes according to the comments. Then, output the fixed codes strictly following the required format."
|
"As the {assistant_role}, to satisfy the new user's demand and make the software creative, executive and robust, you should modify corresponding codes according to the comments. Then, output the fixed codes strictly following the required format."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"TestErrorSummary": {
|
"TestErrorSummary": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Software Test Engineer",
|
"user_role_name": "Software Test Engineer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"Our developed source codes and corresponding test reports are listed below: ",
|
"Our developed source codes and corresponding test reports are listed below: ",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Source Codes:",
|
"Source Codes:",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Test Reports of Source Codes:",
|
"Test Reports of Source Codes:",
|
||||||
"\"{test_reports}\"",
|
"\"{test_reports}\"",
|
||||||
"According to my test reports, please locate and summarize the bugs that cause the problem."
|
"According to my test reports, please locate and summarize the bugs that cause the problem."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"TestModification": {
|
"TestModification": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Software Test Engineer",
|
"user_role_name": "Software Test Engineer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"Our developed source codes and corresponding test reports are listed below: ",
|
"Our developed source codes and corresponding test reports are listed below: ",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Source Codes:",
|
"Source Codes:",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Test Reports of Source Codes:",
|
"Test Reports of Source Codes:",
|
||||||
"\"{test_reports}\"",
|
"\"{test_reports}\"",
|
||||||
"Error Summary of Test Reports:",
|
"Error Summary of Test Reports:",
|
||||||
"\"{error_summary}\"",
|
"\"{error_summary}\"",
|
||||||
"Note that each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" is the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
"Note that each file must strictly follow a markdown code block format, where the following tokens must be replaced such that \"FILENAME\" is the lowercase file name including the file extension, \"LANGUAGE\" is the programming language, \"DOCSTRING\" is a string literal specified in source code that is used to document a specific segment of code, and \"CODE\" is the original code:",
|
||||||
"FILENAME",
|
"FILENAME",
|
||||||
"```LANGUAGE",
|
"```LANGUAGE",
|
||||||
"'''",
|
"'''",
|
||||||
"DOCSTRING",
|
"DOCSTRING",
|
||||||
"'''",
|
"'''",
|
||||||
"CODE",
|
"CODE",
|
||||||
"```",
|
"```",
|
||||||
"As the {assistant_role}, to satisfy the new user's demand and make the software execute smoothly and robustly, you should modify the codes based on the error summary. Now, use the format exemplified above and modify the problematic codes based on the error summary. Output the codes that you fixed based on the test reports and corresponding explanations (strictly follow the format defined above, including FILENAME, LANGUAGE, DOCSTRING and CODE; incomplete \"TODO\" codes are strictly prohibited). If no bugs are reported, please return only one line like \"<INFO> Finished\"."
|
"As the {assistant_role}, to satisfy the new user's demand and make the software execute smoothly and robustly, you should modify the codes based on the error summary. Now, use the format exemplified above and modify the problematic codes based on the error summary. Output the codes that you fixed based on the test reports and corresponding explanations (strictly follow the format defined above, including FILENAME, LANGUAGE, DOCSTRING and CODE; incomplete \"TODO\" codes are strictly prohibited). If no bugs are reported, please return only one line like \"<INFO> Finished\"."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"EnvironmentDoc": {
|
"EnvironmentDoc": {
|
||||||
"assistant_role_name": "Programmer",
|
"assistant_role_name": "Programmer",
|
||||||
"user_role_name": "Chief Technology Officer",
|
"user_role_name": "Chief Technology Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"The new user's task and our developed codes are listed: ",
|
"The new user's task and our developed codes are listed: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Ideas: \"{ideas}\"",
|
"Ideas: \"{ideas}\"",
|
||||||
"Codes: ",
|
"Codes: ",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"As the {assistant_role}, you should write a requirements.txt file, which is commonly used in Python projects to specify the dependencies or packages required for the project to run properly. It serves as a way to document and manage the project's dependencies in a standardized format. For example:",
|
"As the {assistant_role}, you should write a requirements.txt file, which is commonly used in Python projects to specify the dependencies or packages required for the project to run properly. It serves as a way to document and manage the project's dependencies in a standardized format. For example:",
|
||||||
"requirements.txt",
|
"requirements.txt",
|
||||||
"```",
|
"```",
|
||||||
"numpy==1.19.2",
|
"numpy==1.19.2",
|
||||||
"pandas>=1.1.4",
|
"pandas>=1.1.4",
|
||||||
"```",
|
"```",
|
||||||
"According to the codes and file format listed above, write a requirements.txt file to specify the dependencies or packages required for the project to run properly."
|
"According to the codes and file format listed above, write a requirements.txt file to specify the dependencies or packages required for the project to run properly."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"Manual": {
|
"Manual": {
|
||||||
"assistant_role_name": "Chief Product Officer",
|
"assistant_role_name": "Chief Product Officer",
|
||||||
"user_role_name": "Chief Executive Officer",
|
"user_role_name": "Chief Executive Officer",
|
||||||
"phase_prompt": [
|
"phase_prompt": [
|
||||||
"The new user's task, our developed codes and required dependencies are listed: ",
|
"The new user's task, our developed codes and required dependencies are listed: ",
|
||||||
"Task: \"{task}\".",
|
"Task: \"{task}\".",
|
||||||
"Modality: \"{modality}\".",
|
"Modality: \"{modality}\".",
|
||||||
"Programming Language: \"{language}\"",
|
"Programming Language: \"{language}\"",
|
||||||
"Ideas: \"{ideas}\"",
|
"Ideas: \"{ideas}\"",
|
||||||
"Codes: ",
|
"Codes: ",
|
||||||
"\"{codes}\"",
|
"\"{codes}\"",
|
||||||
"Requirements:",
|
"Requirements:",
|
||||||
"\"{requirements}\"",
|
"\"{requirements}\"",
|
||||||
"As the {assistant_role}, by using Markdown, you should write a manual.md file which is a detailed user manual to use the software, including introducing main functions of the software, how to install environment dependencies and how to use/play it. For example:",
|
"As the {assistant_role}, by using Markdown, you should write a manual.md file which is a detailed user manual to use the software, including introducing main functions of the software, how to install environment dependencies and how to use/play it. For example:",
|
||||||
"manual.md",
|
"manual.md",
|
||||||
"```",
|
"```",
|
||||||
"# LangChain",
|
"# LangChain",
|
||||||
"Building applications with LLMs through composability",
|
"Building applications with LLMs through composability",
|
||||||
"Looking for the JS/TS version? Check out LangChain.js.",
|
"Looking for the JS/TS version? Check out LangChain.js.",
|
||||||
"**Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.",
|
"**Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.",
|
||||||
"Please fill out this form and we'll set up a dedicated support Slack channel.",
|
"Please fill out this form and we'll set up a dedicated support Slack channel.",
|
||||||
"## Quick Install",
|
"## Quick Install",
|
||||||
"`pip install langchain`",
|
"`pip install langchain`",
|
||||||
"or",
|
"or",
|
||||||
"`conda install langchain -c conda-forge`",
|
"`conda install langchain -c conda-forge`",
|
||||||
"## 🤔 What is this?",
|
"## 🤔 What is this?",
|
||||||
"Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.",
|
"Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.",
|
||||||
"This library aims to assist in the development of those types of applications. Common examples of these applications include:",
|
"This library aims to assist in the development of those types of applications. Common examples of these applications include:",
|
||||||
"**❓ Question Answering over specific documents**",
|
"**❓ Question Answering over specific documents**",
|
||||||
"- Documentation",
|
"- Documentation",
|
||||||
"- End-to-end Example: Question Answering over Notion Database",
|
"- End-to-end Example: Question Answering over Notion Database",
|
||||||
"**🤖 Agents**",
|
"**🤖 Agents**",
|
||||||
"- Documentation",
|
"- Documentation",
|
||||||
"- End-to-end Example: GPT+WolframAlpha",
|
"- End-to-end Example: GPT+WolframAlpha",
|
||||||
"## 📖 Documentation",
|
"## 📖 Documentation",
|
||||||
"Please see [here](https://python.langchain.com) for full documentation on:",
|
"Please see [here](https://python.langchain.com) for full documentation on:",
|
||||||
"- Getting started (installation, setting up the environment, simple examples)",
|
"- Getting started (installation, setting up the environment, simple examples)",
|
||||||
"- How-To examples (demos, integrations, helper functions)",
|
"- How-To examples (demos, integrations, helper functions)",
|
||||||
"- Reference (full API docs)",
|
"- Reference (full API docs)",
|
||||||
"- Resources (high-level explanation of core concepts)",
|
"- Resources (high-level explanation of core concepts)",
|
||||||
"```"
|
"```"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -1,65 +1,65 @@
|
|||||||
{
|
{
|
||||||
"Chief Executive Officer": [
|
"Chief Executive Officer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Chief Executive Officer. Now, we are both working at ChatDev and we share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Chief Executive Officer. Now, we are both working at ChatDev and we share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"Your main responsibilities include being an active decision-maker on users' demands and other key policy issues, leader, manager, and executor. Your decision-making role involves high-level decisions about policy and strategy; and your communicator role can involve speaking to the organization's management and employees.",
|
"Your main responsibilities include being an active decision-maker on users' demands and other key policy issues, leader, manager, and executor. Your decision-making role involves high-level decisions about policy and strategy; and your communicator role can involve speaking to the organization's management and employees.",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, I will give you one or more instructions, and you must help me to write a specific solution that appropriately solves the requested instruction based on your expertise and my needs."
|
"To complete the task, I will give you one or more instructions, and you must help me to write a specific solution that appropriately solves the requested instruction based on your expertise and my needs."
|
||||||
],
|
],
|
||||||
"Chief Product Officer": [
|
"Chief Product Officer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Chief Product Officer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Chief Product Officer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"You are responsible for all product-related matters in ChatDev. Usually includes product design, product strategy, product vision, product innovation, project management and product marketing.",
|
"You are responsible for all product-related matters in ChatDev. Usually includes product design, product strategy, product vision, product innovation, project management and product marketing.",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
||||||
],
|
],
|
||||||
"Counselor": [
|
"Counselor": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Counselor. Now, we share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Counselor. Now, we share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"Your main responsibilities include asking what user and customer think and provide your valuable suggestions. ",
|
"Your main responsibilities include asking what user and customer think and provide your valuable suggestions. ",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, I will give you one or more instructions, and you must help me to write a specific solution that appropriately solves the requested instruction based on your expertise and my needs."
|
"To complete the task, I will give you one or more instructions, and you must help me to write a specific solution that appropriately solves the requested instruction based on your expertise and my needs."
|
||||||
],
|
],
|
||||||
"Chief Technology Officer": [
|
"Chief Technology Officer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Chief Technology Officer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Chief Technology Officer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"You are very familiar with information technology. You will make high-level decisions for the overarching technology infrastructure that closely align with the organization's goals, while you work alongside the organization's information technology (\"IT\") staff members to perform everyday operations.",
|
"You are very familiar with information technology. You will make high-level decisions for the overarching technology infrastructure that closely align with the organization's goals, while you work alongside the organization's information technology (\"IT\") staff members to perform everyday operations.",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
||||||
],
|
],
|
||||||
"Chief Human Resource Officer": [
|
"Chief Human Resource Officer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Chief Human Resource Officer. Now, we are both working at ChatDev and we share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Chief Human Resource Officer. Now, we are both working at ChatDev and we share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"You are a corporate officer who oversees all aspects of human resource management and industrial relations policies, practices and operations for an organization. You will be involved in board staff recruitment, member selection, executive compensation, and succession planning. Besides, you report directly to the chief executive officer (CEO) and are a member of the most senior-level committees of a company (e.g., executive committee or office of CEO).",
|
"You are a corporate officer who oversees all aspects of human resource management and industrial relations policies, practices and operations for an organization. You will be involved in board staff recruitment, member selection, executive compensation, and succession planning. Besides, you report directly to the chief executive officer (CEO) and are a member of the most senior-level committees of a company (e.g., executive committee or office of CEO).",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
||||||
],
|
],
|
||||||
"Programmer": [
|
"Programmer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Programmer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Programmer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"You can write/create computer software or applications by providing a specific programming language to the computer. You have extensive computing and coding experience in many varieties of programming languages and platforms, such as Python, Java, C, C++, HTML, CSS, JavaScript, XML, SQL, PHP, etc.",
|
"You can write/create computer software or applications by providing a specific programming language to the computer. You have extensive computing and coding experience in many varieties of programming languages and platforms, such as Python, Java, C, C++, HTML, CSS, JavaScript, XML, SQL, PHP, etc.",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
||||||
],
|
],
|
||||||
"Code Reviewer": [
|
"Code Reviewer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Code Reviewer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Code Reviewer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"You can help programmers to assess source codes for software troubleshooting, fix bugs to increase code quality and robustness, and offer proposals to improve the source codes.",
|
"You can help programmers to assess source codes for software troubleshooting, fix bugs to increase code quality and robustness, and offer proposals to improve the source codes.",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
||||||
],
|
],
|
||||||
"Software Test Engineer": [
|
"Software Test Engineer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Software Test Engineer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Software Test Engineer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"You can use the software as intended to analyze its functional properties, design manual and automated test procedures to evaluate each software product, build and implement software evaluation test programs, and run test programs to ensure that testing protocols evaluate the software correctly.",
|
"You can use the software as intended to analyze its functional properties, design manual and automated test procedures to evaluate each software product, build and implement software evaluation test programs, and run test programs to ensure that testing protocols evaluate the software correctly.",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
||||||
],
|
],
|
||||||
"Chief Creative Officer": [
|
"Chief Creative Officer": [
|
||||||
"{chatdev_prompt}",
|
"{chatdev_prompt}",
|
||||||
"You are Chief Creative Officer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
"You are Chief Creative Officer. we are both working at ChatDev. We share a common interest in collaborating to successfully complete a task assigned by a new customer.",
|
||||||
"You direct ChatDev's creative software and develop the artistic design strategy that defines the company's brand. You create the unique image or music of our produced software and deliver this distinctive design to consumers to create a clear brand image which is a fundamental and essential work throughout the company.",
|
"You direct ChatDev's creative software and develop the artistic design strategy that defines the company's brand. You create the unique image or music of our produced software and deliver this distinctive design to consumers to create a clear brand image which is a fundamental and essential work throughout the company.",
|
||||||
"Here is a new customer's task: {task}.",
|
"Here is a new customer's task: {task}.",
|
||||||
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
"To complete the task, you must write a response that appropriately solves the requested instruction based on your expertise and customer's needs."
|
||||||
]
|
]
|
||||||
}
|
}
|
@ -1,51 +1,51 @@
|
|||||||
'''
|
'''
|
||||||
This file handles the game logic for the Gomoku game.
|
This file handles the game logic for the Gomoku game.
|
||||||
'''
|
'''
|
||||||
import pygame
|
import pygame
|
||||||
class Board:
|
class Board:
|
||||||
def __init__(self, rows, cols, player_1_icon, player_2_icon, width, height):
|
def __init__(self, rows, cols, player_1_icon, player_2_icon, width, height):
|
||||||
self.rows = rows
|
self.rows = rows
|
||||||
self.cols = cols
|
self.cols = cols
|
||||||
self.width = width
|
self.width = width
|
||||||
self.height = height
|
self.height = height
|
||||||
self.board = [[None for _ in range(cols)] for _ in range(rows)]
|
self.board = [[None for _ in range(cols)] for _ in range(rows)]
|
||||||
self.current_player = 1
|
self.current_player = 1
|
||||||
self.player_1_icon =player_1_icon
|
self.player_1_icon =player_1_icon
|
||||||
self.player_2_icon =player_2_icon
|
self.player_2_icon =player_2_icon
|
||||||
self.game_state = 'ongoing'
|
self.game_state = 'ongoing'
|
||||||
self.winner = None
|
self.winner = None
|
||||||
def place_stone(self, x, y):
|
def place_stone(self, x, y):
|
||||||
if self.game_state == 'ended':
|
if self.game_state == 'ended':
|
||||||
return
|
return
|
||||||
row, col = self.get_board_position(x, y)
|
row, col = self.get_board_position(x, y)
|
||||||
if row >= 0 and row < self.rows and col >= 0 and col < self.cols:
|
if row >= 0 and row < self.rows and col >= 0 and col < self.cols:
|
||||||
if self.board[row][col] is None:
|
if self.board[row][col] is None:
|
||||||
self.board[row][col] = self.current_player
|
self.board[row][col] = self.current_player
|
||||||
if self.check_win(row, col):
|
if self.check_win(row, col):
|
||||||
self.game_state = 'ended'
|
self.game_state = 'ended'
|
||||||
self.winner = self.current_player
|
self.winner = self.current_player
|
||||||
self.current_player = 1 if self.current_player == 2 else 2
|
self.current_player = 1 if self.current_player == 2 else 2
|
||||||
def get_board_position(self, x, y):
|
def get_board_position(self, x, y):
|
||||||
row = y // (self.height // self.rows)
|
row = y // (self.height // self.rows)
|
||||||
col = x // (self.width // self.cols)
|
col = x // (self.width // self.cols)
|
||||||
return row, col
|
return row, col
|
||||||
def check_win(self, row, col):
|
def check_win(self, row, col):
|
||||||
# Check horizontal, vertical and diagonal lines for a win
|
# Check horizontal, vertical and diagonal lines for a win
|
||||||
directions = [(0, 1), (1, 0), (1, 1), (1, -1)]
|
directions = [(0, 1), (1, 0), (1, 1), (1, -1)]
|
||||||
for dx, dy in directions:
|
for dx, dy in directions:
|
||||||
if self.count_stones(row, col, dx, dy) + self.count_stones(row, col, -dx, -dy) - 1 >= 5:
|
if self.count_stones(row, col, dx, dy) + self.count_stones(row, col, -dx, -dy) - 1 >= 5:
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
def count_stones(self, row, col, dx, dy):
|
def count_stones(self, row, col, dx, dy):
|
||||||
count = 0
|
count = 0
|
||||||
while 0 <= row < self.rows and 0 <= col < self.cols and self.board[row][col] == self.current_player:
|
while 0 <= row < self.rows and 0 <= col < self.cols and self.board[row][col] == self.current_player:
|
||||||
count += 1
|
count += 1
|
||||||
row += dx
|
row += dx
|
||||||
col += dy
|
col += dy
|
||||||
return count
|
return count
|
||||||
def draw(self, window):
|
def draw(self, window):
|
||||||
for row in range(self.rows):
|
for row in range(self.rows):
|
||||||
for col in range(self.cols):
|
for col in range(self.cols):
|
||||||
if self.board[row][col] is not None:
|
if self.board[row][col] is not None:
|
||||||
icon = self.player_1_icon if self.board[row][col] == 1 else self.player_2_icon
|
icon = self.player_1_icon if self.board[row][col] == 1 else self.player_2_icon
|
||||||
window.blit(icon, (col * (self.width // self.cols), row * (self.height // self.rows)))
|
window.blit(icon, (col * (self.width // self.cols), row * (self.height // self.rows)))
|
File diff suppressed because one or more lines are too long
@ -1,45 +1,45 @@
|
|||||||
'''
|
'''
|
||||||
This is the main file for the Gomoku game. It handles the game loop and user interaction.
|
This is the main file for the Gomoku game. It handles the game loop and user interaction.
|
||||||
'''
|
'''
|
||||||
import pygame
|
import pygame
|
||||||
import board
|
import board
|
||||||
import os
|
import os
|
||||||
from pygame import image, font
|
from pygame import image, font
|
||||||
# Initialize Pygame
|
# Initialize Pygame
|
||||||
pygame.init()
|
pygame.init()
|
||||||
# Set the width and height of the game window
|
# Set the width and height of the game window
|
||||||
WIDTH, HEIGHT = 800, 800
|
WIDTH, HEIGHT = 800, 800
|
||||||
# Set the dimensions of the game board
|
# Set the dimensions of the game board
|
||||||
BOARD_ROWS, BOARD_COLS = 15, 15
|
BOARD_ROWS, BOARD_COLS = 15, 15
|
||||||
# Create the game window
|
# Create the game window
|
||||||
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
|
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
|
||||||
# Load images
|
# Load images
|
||||||
bg_image = pygame.transform.scale(image.load('board_background.png'),(800,800))
|
bg_image = pygame.transform.scale(image.load('board_background.png'),(800,800))
|
||||||
player_1_icon = pygame.transform.scale(image.load('player_1_icon.png'),(50,50))
|
player_1_icon = pygame.transform.scale(image.load('player_1_icon.png'),(50,50))
|
||||||
player_2_icon = pygame.transform.scale(image.load('player_2_icon.png'),(50,50))
|
player_2_icon = pygame.transform.scale(image.load('player_2_icon.png'),(50,50))
|
||||||
# Create a game board
|
# Create a game board
|
||||||
game_board = board.Board(BOARD_ROWS, BOARD_COLS, player_1_icon, player_2_icon, WIDTH, HEIGHT)
|
game_board = board.Board(BOARD_ROWS, BOARD_COLS, player_1_icon, player_2_icon, WIDTH, HEIGHT)
|
||||||
# Create a font object
|
# Create a font object
|
||||||
font = font.Font(None, 36)
|
font = font.Font(None, 36)
|
||||||
def main():
|
def main():
|
||||||
clock = pygame.time.Clock()
|
clock = pygame.time.Clock()
|
||||||
running = True
|
running = True
|
||||||
while running:
|
while running:
|
||||||
for event in pygame.event.get():
|
for event in pygame.event.get():
|
||||||
if event.type == pygame.QUIT:
|
if event.type == pygame.QUIT:
|
||||||
running = False
|
running = False
|
||||||
if event.type == pygame.MOUSEBUTTONDOWN and game_board.game_state == 'ongoing':
|
if event.type == pygame.MOUSEBUTTONDOWN and game_board.game_state == 'ongoing':
|
||||||
x, y = pygame.mouse.get_pos()
|
x, y = pygame.mouse.get_pos()
|
||||||
game_board.place_stone(x, y)
|
game_board.place_stone(x, y)
|
||||||
WINDOW.blit(bg_image, (0, 0))
|
WINDOW.blit(bg_image, (0, 0))
|
||||||
game_board.draw(WINDOW)
|
game_board.draw(WINDOW)
|
||||||
text = font.render(f"Player {game_board.current_player}'s turn", True, (255, 255, 255))
|
text = font.render(f"Player {game_board.current_player}'s turn", True, (255, 255, 255))
|
||||||
WINDOW.blit(text, (20, 20))
|
WINDOW.blit(text, (20, 20))
|
||||||
if game_board.game_state == 'ended':
|
if game_board.game_state == 'ended':
|
||||||
text = font.render(f"Player {game_board.winner} wins!", True, (255, 255, 255))
|
text = font.render(f"Player {game_board.winner} wins!", True, (255, 255, 255))
|
||||||
WINDOW.blit(text, (WIDTH // 2 - text.get_width() // 2, HEIGHT // 2 - text.get_height() // 2))
|
WINDOW.blit(text, (WIDTH // 2 - text.get_width() // 2, HEIGHT // 2 - text.get_height() // 2))
|
||||||
pygame.display.update()
|
pygame.display.update()
|
||||||
clock.tick(60)
|
clock.tick(60)
|
||||||
pygame.quit()
|
pygame.quit()
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
@ -1,43 +1,43 @@
|
|||||||
manual.md
|
manual.md
|
||||||
|
|
||||||
```
|
```
|
||||||
# Gomoku Game
|
# Gomoku Game
|
||||||
|
|
||||||
A fun and interactive Gomoku game developed using Python and Pygame.
|
A fun and interactive Gomoku game developed using Python and Pygame.
|
||||||
|
|
||||||
## Quick Install
|
## Quick Install
|
||||||
|
|
||||||
Before you can run the game, you need to install Python and Pygame. Here are the steps to do so:
|
Before you can run the game, you need to install Python and Pygame. Here are the steps to do so:
|
||||||
|
|
||||||
1. Install Python: Download and install the latest version of Python from the official website: https://www.python.org/downloads/
|
1. Install Python: Download and install the latest version of Python from the official website: https://www.python.org/downloads/
|
||||||
|
|
||||||
2. Install Pygame: Once Python is installed, open your terminal or command prompt and type the following command to install Pygame:
|
2. Install Pygame: Once Python is installed, open your terminal or command prompt and type the following command to install Pygame:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install pygame
|
pip install pygame
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🤔 What is this?
|
## 🤔 What is this?
|
||||||
|
|
||||||
Gomoku, also known as Five in a Row, is a strategy board game. Two players alternate turns placing a piece of their color on an empty intersection. The winner is the first player to form an unbroken chain of five pieces horizontally, vertically, or diagonally.
|
Gomoku, also known as Five in a Row, is a strategy board game. Two players alternate turns placing a piece of their color on an empty intersection. The winner is the first player to form an unbroken chain of five pieces horizontally, vertically, or diagonally.
|
||||||
|
|
||||||
This application is a digital version of the Gomoku game. It has a simple and user-friendly interface, and it allows two players to play the game on the same device.
|
This application is a digital version of the Gomoku game. It has a simple and user-friendly interface, and it allows two players to play the game on the same device.
|
||||||
|
|
||||||
## 🎮 How to Play
|
## 🎮 How to Play
|
||||||
|
|
||||||
1. Run the game: Navigate to the directory where the game files are located and run the following command:
|
1. Run the game: Navigate to the directory where the game files are located and run the following command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python main.py
|
python main.py
|
||||||
```
|
```
|
||||||
|
|
||||||
2. The game board will appear on the screen. Player 1 starts the game by clicking on an empty spot on the board to place their piece.
|
2. The game board will appear on the screen. Player 1 starts the game by clicking on an empty spot on the board to place their piece.
|
||||||
|
|
||||||
3. Players take turns placing their pieces on the board. The goal is to be the first to form a line of five pieces horizontally, vertically, or diagonally.
|
3. Players take turns placing their pieces on the board. The goal is to be the first to form a line of five pieces horizontally, vertically, or diagonally.
|
||||||
|
|
||||||
4. The game ends when one player has formed a line of five pieces, or when all spots on the board have been filled and no player has won. The game will display a message indicating the winner or a draw.
|
4. The game ends when one player has formed a line of five pieces, or when all spots on the board have been filled and no player has won. The game will display a message indicating the winner or a draw.
|
||||||
|
|
||||||
5. To play again, simply close and rerun the game.
|
5. To play again, simply close and rerun the game.
|
||||||
|
|
||||||
Enjoy the game!
|
Enjoy the game!
|
||||||
```
|
```
|
@ -1,29 +1,29 @@
|
|||||||
Task:
|
Task:
|
||||||
develop a gomoku game
|
develop a gomoku game
|
||||||
|
|
||||||
Config:
|
Config:
|
||||||
ChatEnvConfig.clear_structure: True
|
ChatEnvConfig.clear_structure: True
|
||||||
ChatEnvConfig.brainstorming: False
|
ChatEnvConfig.brainstorming: False
|
||||||
|
|
||||||
|
|
||||||
Roster:
|
Roster:
|
||||||
Chief Executive Officer, Counselor, Chief Human Resource Officer, Chief Product Officer, Chief Technology Officer, Programmer, Code Reviewer, Software Test Engineer, Chief Creative Officer
|
Chief Executive Officer, Counselor, Chief Human Resource Officer, Chief Product Officer, Chief Technology Officer, Programmer, Code Reviewer, Software Test Engineer, Chief Creative Officer
|
||||||
|
|
||||||
Modality:
|
Modality:
|
||||||
application
|
application
|
||||||
|
|
||||||
Ideas:
|
Ideas:
|
||||||
|
|
||||||
|
|
||||||
Language:
|
Language:
|
||||||
Considering the task at hand, Python would be a suitable choice due to its simplicity and the availability of numerous libraries that can aid in the development of the game. Python's simplicity will allow for rapid development and testing, while its libraries can help with creating the game interface and handling game logic. Therefore, I propose Python as the programming language for this task.
|
Considering the task at hand, Python would be a suitable choice due to its simplicity and the availability of numerous libraries that can aid in the development of the game. Python's simplicity will allow for rapid development and testing, while its libraries can help with creating the game interface and handling game logic. Therefore, I propose Python as the programming language for this task.
|
||||||
|
|
||||||
Code_Version:
|
Code_Version:
|
||||||
8.0
|
8.0
|
||||||
|
|
||||||
Proposed_images:
|
Proposed_images:
|
||||||
14
|
14
|
||||||
|
|
||||||
Incorporated_images:
|
Incorporated_images:
|
||||||
3
|
3
|
||||||
|
|
||||||
|
File diff suppressed because one or more lines are too long
@ -1,97 +1,97 @@
|
|||||||
'''
|
'''
|
||||||
This is the main file for the Music Player application. It uses the tkinter library for the GUI, pygame for playing the music, and mutagen for handling the metadata of mp3 files. The changes include a directory selection dialog, metadata display on the GUI, improved error handling, and a method to unpause the music. Now, it also handles the case where the user selects a non-mp3 file. The bug related to the os module has been fixed by removing the os.setsid() line and using the selected directory directly when loading the mp3 files.
|
This is the main file for the Music Player application. It uses the tkinter library for the GUI, pygame for playing the music, and mutagen for handling the metadata of mp3 files. The changes include a directory selection dialog, metadata display on the GUI, improved error handling, and a method to unpause the music. Now, it also handles the case where the user selects a non-mp3 file. The bug related to the os module has been fixed by removing the os.setsid() line and using the selected directory directly when loading the mp3 files.
|
||||||
'''
|
'''
|
||||||
import os
|
import os
|
||||||
import pygame
|
import pygame
|
||||||
from tkinter import *
|
from tkinter import *
|
||||||
from tkinter import filedialog
|
from tkinter import filedialog
|
||||||
from mutagen.mp3 import MP3
|
from mutagen.mp3 import MP3
|
||||||
from mutagen.id3 import ID3, TIT2
|
from mutagen.id3 import ID3, TIT2
|
||||||
class MusicPlayer:
|
class MusicPlayer:
|
||||||
def __init__(self, root):
|
def __init__(self, root):
|
||||||
self.root = root
|
self.root = root
|
||||||
self.root.title("MusicPlayer")
|
self.root.title("MusicPlayer")
|
||||||
self.root.geometry("1000x200+200+200")
|
self.root.geometry("1000x200+200+200")
|
||||||
pygame.init()
|
pygame.init()
|
||||||
pygame.mixer.init()
|
pygame.mixer.init()
|
||||||
self.track = StringVar()
|
self.track = StringVar()
|
||||||
self.status = StringVar()
|
self.status = StringVar()
|
||||||
self.metadata = StringVar()
|
self.metadata = StringVar()
|
||||||
self.volume = DoubleVar()
|
self.volume = DoubleVar()
|
||||||
self.volume.set(pygame.mixer.music.get_volume())
|
self.volume.set(pygame.mixer.music.get_volume())
|
||||||
trackframe = LabelFrame(self.root, text="Song Track", font=("times new roman", 15, "bold"), bg="Navyblue",
|
trackframe = LabelFrame(self.root, text="Song Track", font=("times new roman", 15, "bold"), bg="Navyblue",
|
||||||
fg="white", bd=5, relief=GROOVE)
|
fg="white", bd=5, relief=GROOVE)
|
||||||
trackframe.place(x=0, y=0, width=600, height=100)
|
trackframe.place(x=0, y=0, width=600, height=100)
|
||||||
songtrack = Label(trackframe, textvariable=self.track, width=20, font=("times new roman", 24, "bold"),
|
songtrack = Label(trackframe, textvariable=self.track, width=20, font=("times new roman", 24, "bold"),
|
||||||
bg="Orange", fg="gold").grid(row=0, column=0, padx=10, pady=5)
|
bg="Orange", fg="gold").grid(row=0, column=0, padx=10, pady=5)
|
||||||
trackstatus = Label(trackframe, textvariable=self.status, font=("times new roman", 24, "bold"), bg="orange",
|
trackstatus = Label(trackframe, textvariable=self.status, font=("times new roman", 24, "bold"), bg="orange",
|
||||||
fg="gold").grid(row=0, column=1, padx=10, pady=5)
|
fg="gold").grid(row=0, column=1, padx=10, pady=5)
|
||||||
metadata_label = Label(trackframe, textvariable=self.metadata, font=("times new roman", 16, "bold"),
|
metadata_label = Label(trackframe, textvariable=self.metadata, font=("times new roman", 16, "bold"),
|
||||||
bg="orange", fg="gold")
|
bg="orange", fg="gold")
|
||||||
metadata_label.grid(row=1, column=0, padx=10, pady=5)
|
metadata_label.grid(row=1, column=0, padx=10, pady=5)
|
||||||
volumecontrol = Scale(trackframe, variable=self.volume, from_=0.0, to=1.0, orient=HORIZONTAL, resolution=0.1,
|
volumecontrol = Scale(trackframe, variable=self.volume, from_=0.0, to=1.0, orient=HORIZONTAL, resolution=0.1,
|
||||||
command=self.change_volume)
|
command=self.change_volume)
|
||||||
volumecontrol.grid(row=0, column=2, padx=10, pady=5)
|
volumecontrol.grid(row=0, column=2, padx=10, pady=5)
|
||||||
buttonframe = LabelFrame(self.root, text="Control Panel", font=("times new roman", 15, "bold"), bg="grey",
|
buttonframe = LabelFrame(self.root, text="Control Panel", font=("times new roman", 15, "bold"), bg="grey",
|
||||||
fg="white", bd=5, relief=GROOVE)
|
fg="white", bd=5, relief=GROOVE)
|
||||||
buttonframe.place(x=0, y=100, width=600, height=100)
|
buttonframe.place(x=0, y=100, width=600, height=100)
|
||||||
playbtn = Button(buttonframe, text="PLAY", command=self.play_music, width=10, height=1,
|
playbtn = Button(buttonframe, text="PLAY", command=self.play_music, width=10, height=1,
|
||||||
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=0,
|
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=0,
|
||||||
padx=10, pady=5)
|
padx=10, pady=5)
|
||||||
pausebtn = Button(buttonframe, text="PAUSE", command=self.pause_music, width=8, height=1,
|
pausebtn = Button(buttonframe, text="PAUSE", command=self.pause_music, width=8, height=1,
|
||||||
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=1,
|
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=1,
|
||||||
padx=10, pady=5)
|
padx=10, pady=5)
|
||||||
stopbtn = Button(buttonframe, text="STOP", command=self.stop_music, width=10, height=1,
|
stopbtn = Button(buttonframe, text="STOP", command=self.stop_music, width=10, height=1,
|
||||||
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=2,
|
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=2,
|
||||||
padx=10, pady=5)
|
padx=10, pady=5)
|
||||||
unpausebtn = Button(buttonframe, text="UNPAUSE", command=self.unpause_music, width=10, height=1,
|
unpausebtn = Button(buttonframe, text="UNPAUSE", command=self.unpause_music, width=10, height=1,
|
||||||
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=3,
|
font=("times new roman", 16, "bold"), fg="navyblue", bg="pink").grid(row=0, column=3,
|
||||||
padx=10, pady=5)
|
padx=10, pady=5)
|
||||||
songsframe = LabelFrame(self.root, text="Song Playlist", font=("times new roman", 15, "bold"), bg="grey",
|
songsframe = LabelFrame(self.root, text="Song Playlist", font=("times new roman", 15, "bold"), bg="grey",
|
||||||
fg="white", bd=5, relief=GROOVE)
|
fg="white", bd=5, relief=GROOVE)
|
||||||
songsframe.place(x=600, y=0, width=400, height=200)
|
songsframe.place(x=600, y=0, width=400, height=200)
|
||||||
scrol_y = Scrollbar(songsframe, orient=VERTICAL)
|
scrol_y = Scrollbar(songsframe, orient=VERTICAL)
|
||||||
self.playlist = Listbox(songsframe, yscrollcommand=scrol_y.set, selectbackground="gold", selectmode=SINGLE,
|
self.playlist = Listbox(songsframe, yscrollcommand=scrol_y.set, selectbackground="gold", selectmode=SINGLE,
|
||||||
font=("times new roman", 12, "bold"), bg="silver", fg="navyblue", bd=5, relief=GROOVE)
|
font=("times new roman", 12, "bold"), bg="silver", fg="navyblue", bd=5, relief=GROOVE)
|
||||||
scrol_y.pack(side=RIGHT, fill=Y)
|
scrol_y.pack(side=RIGHT, fill=Y)
|
||||||
scrol_y.config(command=self.playlist.yview)
|
scrol_y.config(command=self.playlist.yview)
|
||||||
self.playlist.pack(fill=BOTH)
|
self.playlist.pack(fill=BOTH)
|
||||||
self.select_directory()
|
self.select_directory()
|
||||||
def select_directory(self):
|
def select_directory(self):
|
||||||
self.directory = filedialog.askdirectory()
|
self.directory = filedialog.askdirectory()
|
||||||
self.load_mp3_files()
|
self.load_mp3_files()
|
||||||
def load_mp3_files(self):
|
def load_mp3_files(self):
|
||||||
songtracks = os.listdir(self.directory)
|
songtracks = os.listdir(self.directory)
|
||||||
self.playlist.delete(0, END)
|
self.playlist.delete(0, END)
|
||||||
for track in songtracks:
|
for track in songtracks:
|
||||||
if track.endswith('.mp3'):
|
if track.endswith('.mp3'):
|
||||||
self.playlist.insert(END, track)
|
self.playlist.insert(END, track)
|
||||||
def play_music(self):
|
def play_music(self):
|
||||||
try:
|
try:
|
||||||
selected_track = self.playlist.get(ACTIVE)
|
selected_track = self.playlist.get(ACTIVE)
|
||||||
if not selected_track.endswith('.mp3'):
|
if not selected_track.endswith('.mp3'):
|
||||||
self.metadata.set("Error: Selected file is not an mp3 file.")
|
self.metadata.set("Error: Selected file is not an mp3 file.")
|
||||||
return
|
return
|
||||||
self.track.set(selected_track)
|
self.track.set(selected_track)
|
||||||
self.status.set("-Playing")
|
self.status.set("-Playing")
|
||||||
pygame.mixer.music.load(os.path.join(self.directory, selected_track))
|
pygame.mixer.music.load(os.path.join(self.directory, selected_track))
|
||||||
pygame.mixer.music.play()
|
pygame.mixer.music.play()
|
||||||
audio = MP3(os.path.join(self.directory, selected_track), ID3=ID3)
|
audio = MP3(os.path.join(self.directory, selected_track), ID3=ID3)
|
||||||
self.metadata.set('Title: ' + audio["TIT2"].text[0] + ', Artist: ' + audio["TPE1"].text[0] + ', Duration: ' +
|
self.metadata.set('Title: ' + audio["TIT2"].text[0] + ', Artist: ' + audio["TPE1"].text[0] + ', Duration: ' +
|
||||||
str(audio.info.length))
|
str(audio.info.length))
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.metadata.set("An error occurred: " + str(e))
|
self.metadata.set("An error occurred: " + str(e))
|
||||||
def stop_music(self):
|
def stop_music(self):
|
||||||
self.status.set("-Stopped")
|
self.status.set("-Stopped")
|
||||||
pygame.mixer.music.stop()
|
pygame.mixer.music.stop()
|
||||||
def pause_music(self):
|
def pause_music(self):
|
||||||
self.status.set("-Paused")
|
self.status.set("-Paused")
|
||||||
pygame.mixer.music.pause()
|
pygame.mixer.music.pause()
|
||||||
def unpause_music(self):
|
def unpause_music(self):
|
||||||
self.status.set("-Playing")
|
self.status.set("-Playing")
|
||||||
pygame.mixer.music.unpause()
|
pygame.mixer.music.unpause()
|
||||||
def change_volume(self, v):
|
def change_volume(self, v):
|
||||||
pygame.mixer.music.set_volume(self.volume.get())
|
pygame.mixer.music.set_volume(self.volume.get())
|
||||||
root = Tk()
|
root = Tk()
|
||||||
MusicPlayer(root)
|
MusicPlayer(root)
|
||||||
root.mainloop()
|
root.mainloop()
|
@ -1,39 +1,39 @@
|
|||||||
# MusicPlayer
|
# MusicPlayer
|
||||||
|
|
||||||
A user-friendly application to play local mp3 files with essential functionalities and metadata display.
|
A user-friendly application to play local mp3 files with essential functionalities and metadata display.
|
||||||
|
|
||||||
## Quick Install
|
## Quick Install
|
||||||
|
|
||||||
Before running the application, ensure you have the following Python libraries installed:
|
Before running the application, ensure you have the following Python libraries installed:
|
||||||
|
|
||||||
- tkinter
|
- tkinter
|
||||||
- pygame
|
- pygame
|
||||||
- mutagen
|
- mutagen
|
||||||
|
|
||||||
You can install these libraries using pip:
|
You can install these libraries using pip:
|
||||||
|
|
||||||
```
|
```
|
||||||
pip install python-tk pygame mutagen
|
pip install python-tk pygame mutagen
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🤔 What is this?
|
## 🤔 What is this?
|
||||||
|
|
||||||
MusicPlayer is a software application that enables users to play local mp3 files with an intuitive and user-friendly graphical user interface (GUI). The software is designed to run on various operating systems and provides essential functionalities such as play, pause, stop, and volume control. It supports the playback of mp3 files stored locally on the user's device. The user interface allows users to easily browse and select mp3 files from their local directories. Additionally, the software displays relevant metadata information for each mp3 file, such as title, artist, album, and duration. The application provides a visually appealing and responsive interface, ensuring smooth playback without any glitches or delays. The software also includes error handling mechanisms to gracefully handle any exceptions or invalid file formats encountered during playback.
|
MusicPlayer is a software application that enables users to play local mp3 files with an intuitive and user-friendly graphical user interface (GUI). The software is designed to run on various operating systems and provides essential functionalities such as play, pause, stop, and volume control. It supports the playback of mp3 files stored locally on the user's device. The user interface allows users to easily browse and select mp3 files from their local directories. Additionally, the software displays relevant metadata information for each mp3 file, such as title, artist, album, and duration. The application provides a visually appealing and responsive interface, ensuring smooth playback without any glitches or delays. The software also includes error handling mechanisms to gracefully handle any exceptions or invalid file formats encountered during playback.
|
||||||
|
|
||||||
## 📖 How to use it?
|
## 📖 How to use it?
|
||||||
|
|
||||||
1. Run the `main.py` file to start the application.
|
1. Run the `main.py` file to start the application.
|
||||||
|
|
||||||
2. The application will open a directory selection dialog. Navigate to the directory containing your mp3 files and select it.
|
2. The application will open a directory selection dialog. Navigate to the directory containing your mp3 files and select it.
|
||||||
|
|
||||||
3. The application will load all mp3 files from the selected directory into the playlist.
|
3. The application will load all mp3 files from the selected directory into the playlist.
|
||||||
|
|
||||||
4. Select a song from the playlist and use the control panel to play, pause, stop, or adjust the volume of the playback.
|
4. Select a song from the playlist and use the control panel to play, pause, stop, or adjust the volume of the playback.
|
||||||
|
|
||||||
5. The application will display the title, artist, and duration of the currently playing song.
|
5. The application will display the title, artist, and duration of the currently playing song.
|
||||||
|
|
||||||
6. In case of an error (e.g., if the selected file is not an mp3 file), the application will display an error message.
|
6. In case of an error (e.g., if the selected file is not an mp3 file), the application will display an error message.
|
||||||
|
|
||||||
## 📚 Documentation
|
## 📚 Documentation
|
||||||
|
|
||||||
Please refer to the comments in the `main.py` file for a detailed explanation of the code. The application uses the tkinter library for the GUI, pygame for playing the music, and mutagen for handling the metadata of mp3 files. The application includes a directory selection dialog, metadata display on the GUI, improved error handling, and a method to unpause the music. It also handles the case where the user selects a non-mp3 file.
|
Please refer to the comments in the `main.py` file for a detailed explanation of the code. The application uses the tkinter library for the GUI, pygame for playing the music, and mutagen for handling the metadata of mp3 files. The application includes a directory selection dialog, metadata display on the GUI, improved error handling, and a method to unpause the music. It also handles the case where the user selects a non-mp3 file.
|
@ -1,29 +1,29 @@
|
|||||||
Task:
|
Task:
|
||||||
develop a software application that enables users to play local mp3 files with an intuitive and user-friendly graphical user interface (gui). the software should be designed to run on various operating systems and provide essential functionalities such as play, pause, stop, and volume control. it should support the playback of mp3 files stored locally on the user's device. the user interface should allow users to easily browse and select mp3 files from their local directories. additionally, the software should display relevant metadata information for each mp3 file, such as title, artist, album, and duration. the application should provide a visually appealing and responsive interface, ensuring smooth playback without any glitches or delays. the software should also include error handling mechanisms to gracefully handle any exceptions or invalid file formats encountered during playback.
|
develop a software application that enables users to play local mp3 files with an intuitive and user-friendly graphical user interface (gui). the software should be designed to run on various operating systems and provide essential functionalities such as play, pause, stop, and volume control. it should support the playback of mp3 files stored locally on the user's device. the user interface should allow users to easily browse and select mp3 files from their local directories. additionally, the software should display relevant metadata information for each mp3 file, such as title, artist, album, and duration. the application should provide a visually appealing and responsive interface, ensuring smooth playback without any glitches or delays. the software should also include error handling mechanisms to gracefully handle any exceptions or invalid file formats encountered during playback.
|
||||||
|
|
||||||
Config:
|
Config:
|
||||||
ChatEnvConfig.clear_structure: True
|
ChatEnvConfig.clear_structure: True
|
||||||
ChatEnvConfig.brainstorming: False
|
ChatEnvConfig.brainstorming: False
|
||||||
|
|
||||||
|
|
||||||
Roster:
|
Roster:
|
||||||
Chief Executive Officer, Counselor, Chief Human Resource Officer, Chief Product Officer, Chief Technology Officer, Programmer, Code Reviewer, Software Test Engineer, Chief Creative Officer
|
Chief Executive Officer, Counselor, Chief Human Resource Officer, Chief Product Officer, Chief Technology Officer, Programmer, Code Reviewer, Software Test Engineer, Chief Creative Officer
|
||||||
|
|
||||||
Modality:
|
Modality:
|
||||||
application
|
application
|
||||||
|
|
||||||
Ideas:
|
Ideas:
|
||||||
|
|
||||||
|
|
||||||
Language:
|
Language:
|
||||||
Python
|
Python
|
||||||
|
|
||||||
Code_Version:
|
Code_Version:
|
||||||
12.0
|
12.0
|
||||||
|
|
||||||
Proposed_images:
|
Proposed_images:
|
||||||
0
|
0
|
||||||
|
|
||||||
Incorporated_images:
|
Incorporated_images:
|
||||||
0
|
0
|
||||||
|
|
||||||
|
@ -28,7 +28,7 @@ from camel.utils import (
|
|||||||
num_tokens_from_messages,
|
num_tokens_from_messages,
|
||||||
openai_api_key_required,
|
openai_api_key_required,
|
||||||
)
|
)
|
||||||
|
from chatdev.utils import log_visualize
|
||||||
try:
|
try:
|
||||||
from openai.types.chat import ChatCompletion
|
from openai.types.chat import ChatCompletion
|
||||||
|
|
||||||
@ -74,6 +74,7 @@ class ChatAgent(BaseAgent):
|
|||||||
|
|
||||||
Args:
|
Args:
|
||||||
system_message (SystemMessage): The system message for the chat agent.
|
system_message (SystemMessage): The system message for the chat agent.
|
||||||
|
with_memory(bool): The memory setting of the chat agent.
|
||||||
model (ModelType, optional): The LLM model to use for generating
|
model (ModelType, optional): The LLM model to use for generating
|
||||||
responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
|
responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
|
||||||
model_config (Any, optional): Configuration options for the LLM model.
|
model_config (Any, optional): Configuration options for the LLM model.
|
||||||
@ -86,6 +87,7 @@ class ChatAgent(BaseAgent):
|
|||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
system_message: SystemMessage,
|
system_message: SystemMessage,
|
||||||
|
memory = None,
|
||||||
model: Optional[ModelType] = None,
|
model: Optional[ModelType] = None,
|
||||||
model_config: Optional[Any] = None,
|
model_config: Optional[Any] = None,
|
||||||
message_window_size: Optional[int] = None,
|
message_window_size: Optional[int] = None,
|
||||||
@ -102,6 +104,10 @@ class ChatAgent(BaseAgent):
|
|||||||
self.terminated: bool = False
|
self.terminated: bool = False
|
||||||
self.info: bool = False
|
self.info: bool = False
|
||||||
self.init_messages()
|
self.init_messages()
|
||||||
|
if memory !=None and self.role_name in["Code Reviewer","Programmer","Software Test Engineer"]:
|
||||||
|
self.memory = memory.memory_data.get("All")
|
||||||
|
else:
|
||||||
|
self.memory = None
|
||||||
|
|
||||||
def reset(self) -> List[MessageType]:
|
def reset(self) -> List[MessageType]:
|
||||||
r"""Resets the :obj:`ChatAgent` to its initial state and returns the
|
r"""Resets the :obj:`ChatAgent` to its initial state and returns the
|
||||||
@ -159,6 +165,41 @@ class ChatAgent(BaseAgent):
|
|||||||
"""
|
"""
|
||||||
self.stored_messages.append(message)
|
self.stored_messages.append(message)
|
||||||
return self.stored_messages
|
return self.stored_messages
|
||||||
|
def use_memory(self,input_message) -> List[MessageType]:
|
||||||
|
if self.memory is None :
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
if self.role_name == "Programmer":
|
||||||
|
result = self.memory.memory_retrieval(input_message,"code")
|
||||||
|
if result != None:
|
||||||
|
target_memory,distances, mids,task_list,task_dir_list = result
|
||||||
|
if target_memory != None and len(target_memory) != 0:
|
||||||
|
target_memory="".join(target_memory)
|
||||||
|
#self.stored_messages[-1].content = self.stored_messages[-1].content+"Here is some code you've previously completed:"+target_memory+"You can refer to the previous script to complement this task."
|
||||||
|
log_visualize(self.role_name,
|
||||||
|
"thinking back and found some related code: \n--------------------------\n"
|
||||||
|
+ target_memory)
|
||||||
|
else:
|
||||||
|
target_memory = None
|
||||||
|
log_visualize(self.role_name,
|
||||||
|
"thinking back but find nothing useful")
|
||||||
|
|
||||||
|
else:
|
||||||
|
result = self.memory.memory_retrieval(input_message, "text")
|
||||||
|
if result != None:
|
||||||
|
target_memory, distances, mids, task_list, task_dir_list = result
|
||||||
|
if target_memory != None and len(target_memory) != 0:
|
||||||
|
target_memory=";".join(target_memory)
|
||||||
|
#self.stored_messages[-1].content = self.stored_messages[-1].content+"Here are some effective and efficient instructions you have sent to the assistant :"+target_memory+"You can refer to these previous excellent instructions to better instruct assistant here."
|
||||||
|
log_visualize(self.role_name,
|
||||||
|
"thinking back and found some related text: \n--------------------------\n"
|
||||||
|
+ target_memory)
|
||||||
|
else:
|
||||||
|
target_memory = None
|
||||||
|
log_visualize(self.role_name,
|
||||||
|
"thinking back but find nothing useful")
|
||||||
|
|
||||||
|
return target_memory
|
||||||
|
|
||||||
@retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
|
@retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
|
||||||
@openai_api_key_required
|
@openai_api_key_required
|
||||||
|
@ -90,13 +90,16 @@ class RolePlaying:
|
|||||||
sys_msg_generator_kwargs: Optional[Dict] = None,
|
sys_msg_generator_kwargs: Optional[Dict] = None,
|
||||||
extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
|
extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
|
||||||
extend_task_specify_meta_dict: Optional[Dict] = None,
|
extend_task_specify_meta_dict: Optional[Dict] = None,
|
||||||
background_prompt: Optional[str] = ""
|
background_prompt: Optional[str] = "",
|
||||||
|
memory = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
self.with_task_specify = with_task_specify
|
self.with_task_specify = with_task_specify
|
||||||
self.with_task_planner = with_task_planner
|
self.with_task_planner = with_task_planner
|
||||||
self.with_critic_in_the_loop = with_critic_in_the_loop
|
self.with_critic_in_the_loop = with_critic_in_the_loop
|
||||||
self.model_type = model_type
|
self.model_type = model_type
|
||||||
self.task_type = task_type
|
self.task_type = task_type
|
||||||
|
self.memory = memory
|
||||||
|
|
||||||
|
|
||||||
if with_task_specify:
|
if with_task_specify:
|
||||||
task_specify_meta_dict = dict()
|
task_specify_meta_dict = dict()
|
||||||
@ -148,9 +151,9 @@ class RolePlaying:
|
|||||||
meta_dict=sys_msg_meta_dicts[1],
|
meta_dict=sys_msg_meta_dicts[1],
|
||||||
content=user_role_prompt.format(**sys_msg_meta_dicts[1]))
|
content=user_role_prompt.format(**sys_msg_meta_dicts[1]))
|
||||||
|
|
||||||
self.assistant_agent: ChatAgent = ChatAgent(self.assistant_sys_msg, model_type,
|
self.assistant_agent: ChatAgent = ChatAgent(self.assistant_sys_msg, memory, model_type,
|
||||||
**(assistant_agent_kwargs or {}), )
|
**(assistant_agent_kwargs or {}), )
|
||||||
self.user_agent: ChatAgent = ChatAgent(self.user_sys_msg, model_type, **(user_agent_kwargs or {}), )
|
self.user_agent: ChatAgent = ChatAgent(self.user_sys_msg,memory, model_type, **(user_agent_kwargs or {}), )
|
||||||
|
|
||||||
if with_critic_in_the_loop:
|
if with_critic_in_the_loop:
|
||||||
raise ValueError("with_critic_in_the_loop not available")
|
raise ValueError("with_critic_in_the_loop not available")
|
||||||
@ -187,6 +190,9 @@ class RolePlaying:
|
|||||||
content = phase_prompt.format(
|
content = phase_prompt.format(
|
||||||
**({"assistant_role": self.assistant_agent.role_name} | placeholders)
|
**({"assistant_role": self.assistant_agent.role_name} | placeholders)
|
||||||
)
|
)
|
||||||
|
retrieval_memory = self.assistant_agent.use_memory(content)
|
||||||
|
if retrieval_memory!= None:
|
||||||
|
placeholders["examples"] = retrieval_memory
|
||||||
user_msg = UserChatMessage(
|
user_msg = UserChatMessage(
|
||||||
role_name=self.user_sys_msg.role_name,
|
role_name=self.user_sys_msg.role_name,
|
||||||
role="user",
|
role="user",
|
||||||
|
@ -70,7 +70,9 @@ class ChatChain:
|
|||||||
gui_design=check_bool(self.config["gui_design"]),
|
gui_design=check_bool(self.config["gui_design"]),
|
||||||
git_management=check_bool(self.config["git_management"]),
|
git_management=check_bool(self.config["git_management"]),
|
||||||
incremental_develop=check_bool(self.config["incremental_develop"]),
|
incremental_develop=check_bool(self.config["incremental_develop"]),
|
||||||
background_prompt=self.config["background_prompt"])
|
background_prompt=self.config["background_prompt"],
|
||||||
|
with_memory=check_bool(self.config["with_memory"]))
|
||||||
|
|
||||||
self.chat_env = ChatEnv(self.chat_env_config)
|
self.chat_env = ChatEnv(self.chat_env_config)
|
||||||
|
|
||||||
# the user input prompt will be self-improved (if set "self_improve": "True" in ChatChainConfig.json)
|
# the user input prompt will be self-improved (if set "self_improve": "True" in ChatChainConfig.json)
|
||||||
@ -204,6 +206,9 @@ class ChatChain:
|
|||||||
software_path = os.path.join(directory, "_".join([self.project_name, self.org_name, self.start_time]))
|
software_path = os.path.join(directory, "_".join([self.project_name, self.org_name, self.start_time]))
|
||||||
self.chat_env.set_directory(software_path)
|
self.chat_env.set_directory(software_path)
|
||||||
|
|
||||||
|
if self.chat_env.config.with_memory is True:
|
||||||
|
self.chat_env.init_memory()
|
||||||
|
|
||||||
# copy config files to software path
|
# copy config files to software path
|
||||||
shutil.copy(self.config_path, software_path)
|
shutil.copy(self.config_path, software_path)
|
||||||
shutil.copy(self.config_phase_path, software_path)
|
shutil.copy(self.config_phase_path, software_path)
|
||||||
|
@ -13,6 +13,7 @@ from chatdev.codes import Codes
|
|||||||
from chatdev.documents import Documents
|
from chatdev.documents import Documents
|
||||||
from chatdev.roster import Roster
|
from chatdev.roster import Roster
|
||||||
from chatdev.utils import log_visualize
|
from chatdev.utils import log_visualize
|
||||||
|
from ecl.memory import Memory
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
|
from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
|
||||||
@ -28,15 +29,18 @@ class ChatEnvConfig:
|
|||||||
gui_design,
|
gui_design,
|
||||||
git_management,
|
git_management,
|
||||||
incremental_develop,
|
incremental_develop,
|
||||||
background_prompt):
|
background_prompt,
|
||||||
|
with_memory):
|
||||||
self.clear_structure = clear_structure # Whether to clear non-software files in the WareHouse and cache files in generated software path
|
self.clear_structure = clear_structure # Whether to clear non-software files in the WareHouse and cache files in generated software path
|
||||||
self.gui_design = gui_design # Encourage ChatDev generate software with GUI
|
self.gui_design = gui_design # Encourage ChatDev generate software with GUI
|
||||||
self.git_management = git_management # Whether to use git to manage the creation and changes of generated software
|
self.git_management = git_management # Whether to use git to manage the creation and changes of generated software
|
||||||
self.incremental_develop = incremental_develop # Whether to use incremental develop on an existing project
|
self.incremental_develop = incremental_develop # Whether to use incremental develop on an existing project
|
||||||
self.background_prompt = background_prompt # background prompt that will be added to every inquiry to LLM
|
self.background_prompt = background_prompt # background prompt that will be added to every inquiry to LLM
|
||||||
|
self.with_memory = with_memory # Wheter to use memroy in the interaction between agents
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
string = ""
|
string = ""
|
||||||
|
string += "ChatEnvConfig.with_memory: {}\n".format(self.with_memory)
|
||||||
string += "ChatEnvConfig.clear_structure: {}\n".format(self.clear_structure)
|
string += "ChatEnvConfig.clear_structure: {}\n".format(self.clear_structure)
|
||||||
string += "ChatEnvConfig.git_management: {}\n".format(self.git_management)
|
string += "ChatEnvConfig.git_management: {}\n".format(self.git_management)
|
||||||
string += "ChatEnvConfig.gui_design: {}\n".format(self.gui_design)
|
string += "ChatEnvConfig.gui_design: {}\n".format(self.gui_design)
|
||||||
@ -50,6 +54,7 @@ class ChatEnv:
|
|||||||
self.config = chat_env_config
|
self.config = chat_env_config
|
||||||
self.roster: Roster = Roster()
|
self.roster: Roster = Roster()
|
||||||
self.codes: Codes = Codes()
|
self.codes: Codes = Codes()
|
||||||
|
self.memory: Memory = Memory()
|
||||||
self.proposed_images: Dict[str, str] = {}
|
self.proposed_images: Dict[str, str] = {}
|
||||||
self.incorporated_images: Dict[str, str] = {}
|
self.incorporated_images: Dict[str, str] = {}
|
||||||
self.requirements: Documents = Documents()
|
self.requirements: Documents = Documents()
|
||||||
@ -91,6 +96,13 @@ class ChatEnv:
|
|||||||
print("{} Created".format(directory))
|
print("{} Created".format(directory))
|
||||||
else:
|
else:
|
||||||
os.mkdir(self.env_dict['directory'])
|
os.mkdir(self.env_dict['directory'])
|
||||||
|
|
||||||
|
def init_memory(self):
|
||||||
|
self.memory.id_enabled = True
|
||||||
|
self.memory.directory = os.path.join(os.getcwd(),"ecl","memory")
|
||||||
|
if not os.path.exists(self.memory.directory):
|
||||||
|
os.mkdir(self.memory.directory)
|
||||||
|
self.memory.upload()
|
||||||
|
|
||||||
def exist_bugs(self) -> tuple[bool, str]:
|
def exist_bugs(self) -> tuple[bool, str]:
|
||||||
directory = self.env_dict['directory']
|
directory = self.env_dict['directory']
|
||||||
|
@ -59,6 +59,7 @@ class Phase(ABC):
|
|||||||
need_reflect=False,
|
need_reflect=False,
|
||||||
with_task_specify=False,
|
with_task_specify=False,
|
||||||
model_type=ModelType.GPT_3_5_TURBO,
|
model_type=ModelType.GPT_3_5_TURBO,
|
||||||
|
memory=None,
|
||||||
placeholders=None,
|
placeholders=None,
|
||||||
chat_turn_limit=10
|
chat_turn_limit=10
|
||||||
) -> str:
|
) -> str:
|
||||||
@ -102,6 +103,7 @@ class Phase(ABC):
|
|||||||
task_prompt=task_prompt,
|
task_prompt=task_prompt,
|
||||||
task_type=task_type,
|
task_type=task_type,
|
||||||
with_task_specify=with_task_specify,
|
with_task_specify=with_task_specify,
|
||||||
|
memory=memory,
|
||||||
model_type=model_type,
|
model_type=model_type,
|
||||||
background_prompt=chat_env.config.background_prompt
|
background_prompt=chat_env.config.background_prompt
|
||||||
)
|
)
|
||||||
@ -227,6 +229,7 @@ class Phase(ABC):
|
|||||||
user_role_prompt=self.counselor_prompt,
|
user_role_prompt=self.counselor_prompt,
|
||||||
placeholders={"conversations": messages, "question": question},
|
placeholders={"conversations": messages, "question": question},
|
||||||
need_reflect=False,
|
need_reflect=False,
|
||||||
|
memory=chat_env.memory,
|
||||||
chat_turn_limit=1,
|
chat_turn_limit=1,
|
||||||
model_type=self.model_type)
|
model_type=self.model_type)
|
||||||
|
|
||||||
@ -300,6 +303,7 @@ class Phase(ABC):
|
|||||||
user_role_prompt=self.user_role_prompt,
|
user_role_prompt=self.user_role_prompt,
|
||||||
chat_turn_limit=chat_turn_limit,
|
chat_turn_limit=chat_turn_limit,
|
||||||
placeholders=self.phase_env,
|
placeholders=self.phase_env,
|
||||||
|
memory=chat_env.memory,
|
||||||
model_type=self.model_type)
|
model_type=self.model_type)
|
||||||
chat_env = self.update_chat_env(chat_env)
|
chat_env = self.update_chat_env(chat_env)
|
||||||
return chat_env
|
return chat_env
|
||||||
@ -529,6 +533,7 @@ class CodeReviewHuman(Phase):
|
|||||||
user_role_prompt=self.user_role_prompt,
|
user_role_prompt=self.user_role_prompt,
|
||||||
chat_turn_limit=chat_turn_limit,
|
chat_turn_limit=chat_turn_limit,
|
||||||
placeholders=self.phase_env,
|
placeholders=self.phase_env,
|
||||||
|
memory=chat_env.memory,
|
||||||
model_type=self.model_type)
|
model_type=self.model_type)
|
||||||
chat_env = self.update_chat_env(chat_env)
|
chat_env = self.update_chat_env(chat_env)
|
||||||
return chat_env
|
return chat_env
|
||||||
@ -579,6 +584,7 @@ class TestErrorSummary(Phase):
|
|||||||
phase_name=self.phase_name,
|
phase_name=self.phase_name,
|
||||||
assistant_role_prompt=self.assistant_role_prompt,
|
assistant_role_prompt=self.assistant_role_prompt,
|
||||||
user_role_prompt=self.user_role_prompt,
|
user_role_prompt=self.user_role_prompt,
|
||||||
|
memory=chat_env.memory,
|
||||||
chat_turn_limit=chat_turn_limit,
|
chat_turn_limit=chat_turn_limit,
|
||||||
placeholders=self.phase_env)
|
placeholders=self.phase_env)
|
||||||
chat_env = self.update_chat_env(chat_env)
|
chat_env = self.update_chat_env(chat_env)
|
||||||
|
163
ecl/codes.py
Normal file
163
ecl/codes.py
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
import difflib
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
import shutil
|
||||||
|
import time
|
||||||
|
import signal
|
||||||
|
from utils import get_easyDict_from_filepath
|
||||||
|
|
||||||
|
|
||||||
|
class Codes:
|
||||||
|
def __init__(self, generated_content=""):
|
||||||
|
cfg = get_easyDict_from_filepath("./ecl/config.yaml")
|
||||||
|
self.directory: str = cfg.codes.tmp_directory
|
||||||
|
self.main_script: str = cfg.codes.main_script
|
||||||
|
self.generated_content: str = generated_content
|
||||||
|
self.codebooks = {}
|
||||||
|
|
||||||
|
def extract_filename_from_line(lines):
|
||||||
|
file_name = ""
|
||||||
|
for candidate in re.finditer(r"(\w+\.\w+)", lines, re.DOTALL):
|
||||||
|
file_name = candidate.group()
|
||||||
|
file_name = file_name.lower()
|
||||||
|
return file_name
|
||||||
|
|
||||||
|
def extract_filename_from_code(code):
|
||||||
|
file_name = ""
|
||||||
|
regex_extract = r"class (\S+?):\n"
|
||||||
|
matches_extract = re.finditer(regex_extract, code, re.DOTALL)
|
||||||
|
for match_extract in matches_extract:
|
||||||
|
file_name = match_extract.group(1)
|
||||||
|
file_name = file_name.lower().split("(")[0] + ".py"
|
||||||
|
return file_name
|
||||||
|
|
||||||
|
if generated_content != "":
|
||||||
|
regex = r"(.+?)\n```.*?\n(.*?)```"
|
||||||
|
matches = re.finditer(regex, self.generated_content, re.DOTALL)
|
||||||
|
for match in matches:
|
||||||
|
code = match.group(2)
|
||||||
|
if "CODE" in code:
|
||||||
|
continue
|
||||||
|
group1 = match.group(1)
|
||||||
|
filename = extract_filename_from_line(group1)
|
||||||
|
if "__main__" in code:
|
||||||
|
filename = "main.py"
|
||||||
|
if filename == "": # post-processing
|
||||||
|
filename = extract_filename_from_code(code)
|
||||||
|
assert filename != ""
|
||||||
|
if filename is not None and code is not None and len(filename) > 0 and len(code) > 0:
|
||||||
|
self.codebooks[filename] = self._format_code(code)
|
||||||
|
|
||||||
|
def _format_code(self, code):
|
||||||
|
code = "\n".join([line for line in code.split("\n") if len(line.strip()) > 0])
|
||||||
|
return code
|
||||||
|
|
||||||
|
def _update_codes(self, generated_content):
|
||||||
|
new_codes = Codes(generated_content)
|
||||||
|
differ = difflib.Differ()
|
||||||
|
for key in new_codes.codebooks.keys():
|
||||||
|
if key not in self.codebooks.keys() or self.codebooks[key] != new_codes.codebooks[key]:
|
||||||
|
update_codes_content = "**[Update Codes]**\n\n"
|
||||||
|
update_codes_content += "{} updated.\n".format(key)
|
||||||
|
old_codes_content = self.codebooks[key] if key in self.codebooks.keys() else "# None"
|
||||||
|
new_codes_content = new_codes.codebooks[key]
|
||||||
|
|
||||||
|
lines_old = old_codes_content.splitlines()
|
||||||
|
lines_new = new_codes_content.splitlines()
|
||||||
|
|
||||||
|
unified_diff = difflib.unified_diff(lines_old, lines_new, lineterm='', fromfile='Old', tofile='New')
|
||||||
|
unified_diff = '\n'.join(unified_diff)
|
||||||
|
update_codes_content = update_codes_content + "\n\n" + """```
|
||||||
|
'''
|
||||||
|
|
||||||
|
'''\n""" + unified_diff + "\n```"
|
||||||
|
|
||||||
|
self.codebooks[key] = new_codes.codebooks[key]
|
||||||
|
|
||||||
|
def _rewrite_codes(self) -> None:
|
||||||
|
directory = self.directory
|
||||||
|
rewrite_codes_content = "**[Rewrite Codes]**\n"
|
||||||
|
if os.path.exists(directory):
|
||||||
|
shutil.rmtree(self.directory)
|
||||||
|
if not os.path.exists(directory):
|
||||||
|
os.mkdir(self.directory)
|
||||||
|
rewrite_codes_content += "{} Created\n".format(directory)
|
||||||
|
|
||||||
|
for filename in self.codebooks.keys():
|
||||||
|
filepath = os.path.join(directory, filename)
|
||||||
|
with open(filepath, "w", encoding="utf-8") as writer:
|
||||||
|
writer.write(self.codebooks[filename])
|
||||||
|
rewrite_codes_content += os.path.join(directory, filename) + " Wrote\n"
|
||||||
|
# print(rewrite_codes_content)
|
||||||
|
|
||||||
|
def _run_codes(self) -> None:
|
||||||
|
directory = os.path.abspath(self.directory)
|
||||||
|
if self.main_script not in os.listdir(directory):
|
||||||
|
return False, "{} Not Found".format(self.main_script)
|
||||||
|
|
||||||
|
success_info = "The software run successfully without errors."
|
||||||
|
|
||||||
|
try:
|
||||||
|
# check if we are on windows or linux
|
||||||
|
if os.name == 'nt':
|
||||||
|
command = "cd {} && dir && python {}".format(directory, self.main_script)
|
||||||
|
process = subprocess.Popen(
|
||||||
|
command,
|
||||||
|
shell=True,
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.PIPE,
|
||||||
|
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
command = "cd {}; ls -l; python3 {};".format(directory, self.main_script)
|
||||||
|
process = subprocess.Popen(command,
|
||||||
|
shell=True,
|
||||||
|
preexec_fn=os.setsid,
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.PIPE
|
||||||
|
)
|
||||||
|
time.sleep(3)
|
||||||
|
return_code = process.returncode
|
||||||
|
# Check if the software is still running
|
||||||
|
if process.poll() is None:
|
||||||
|
if "killpg" in dir(os):
|
||||||
|
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
|
||||||
|
else:
|
||||||
|
os.kill(process.pid, signal.SIGTERM)
|
||||||
|
if process.poll() is None:
|
||||||
|
os.kill(process.pid, signal.CTRL_BREAK_EVENT)
|
||||||
|
|
||||||
|
if return_code == 0:
|
||||||
|
return False, success_info
|
||||||
|
else:
|
||||||
|
error_output = process.stderr.read().decode('utf-8')
|
||||||
|
if error_output:
|
||||||
|
if "Traceback".lower() in error_output.lower():
|
||||||
|
errs = error_output.replace(directory + "/", "")
|
||||||
|
return True, errs
|
||||||
|
else:
|
||||||
|
return False, success_info
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
return True, f"Error: {e}"
|
||||||
|
except Exception as ex:
|
||||||
|
return True, f"An error occurred: {ex}"
|
||||||
|
|
||||||
|
return False, success_info
|
||||||
|
|
||||||
|
def _get_codes(self) -> str:
|
||||||
|
content = ""
|
||||||
|
for filename in self.codebooks.keys():
|
||||||
|
content += "{}\n```{}\n{}\n```\n\n".format(filename,
|
||||||
|
"python" if filename.endswith(".py") else filename.split(".")[
|
||||||
|
-1], self.codebooks[filename])
|
||||||
|
return content
|
||||||
|
|
||||||
|
def _load_from_hardware(self, directory) -> None:
|
||||||
|
assert len([filename for filename in os.listdir(directory) if filename.endswith(".py")]) > 0
|
||||||
|
for root, directories, filenames in os.walk(directory):
|
||||||
|
for filename in filenames:
|
||||||
|
if filename.endswith(".py"):
|
||||||
|
code = open(os.path.join(directory, filename), "r", encoding="utf-8").read()
|
||||||
|
self.codebooks[filename] = self._format_code(code)
|
||||||
|
print("{} files read from {}".format(len(self.codebooks.keys()), directory))
|
17
ecl/config.yaml
Normal file
17
ecl/config.yaml
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
experience:
|
||||||
|
reap_zombie: True
|
||||||
|
threshold: 0
|
||||||
|
upper_limit: 10
|
||||||
|
|
||||||
|
codes:
|
||||||
|
tmp_directory: "tmp_codes"
|
||||||
|
main_script: "main.py"
|
||||||
|
|
||||||
|
embedding_method: "OpenAI"
|
||||||
|
|
||||||
|
retrieval:
|
||||||
|
top_k_code: 1 # top k target code
|
||||||
|
top_k_text: 1 # top k instructionstar
|
||||||
|
|
||||||
|
searchcode_thresh: 0 # similarity threshold between text query and instructionstar, search for targetcode
|
||||||
|
searchtext_thresh: 0 # similarity threshold between code query and sourcecode, search for instructionstar
|
69
ecl/ecl.py
Normal file
69
ecl/ecl.py
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
|
||||||
|
import argparse
|
||||||
|
from graph import Graph
|
||||||
|
from experience import Experience
|
||||||
|
from utils import get_easyDict_from_filepath,now ,log_and_print_online
|
||||||
|
from memory import Memory
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
sys.path.append(os.path.join(os.getcwd(),"ecl"))
|
||||||
|
|
||||||
|
|
||||||
|
def memorize(directory):
|
||||||
|
print(directory)
|
||||||
|
cfg = get_easyDict_from_filepath("./ecl/config.yaml")
|
||||||
|
|
||||||
|
folder_path = "ecl/logs"
|
||||||
|
if not os.path.exists(folder_path):
|
||||||
|
os.mkdir(folder_path)
|
||||||
|
log_filename = folder_path+"/ecl_{}.log".format(os.path.basename(directory))
|
||||||
|
print(log_filename)
|
||||||
|
root_logger = logging.getLogger()
|
||||||
|
for handler in root_logger.handlers[:]:
|
||||||
|
root_logger.removeHandler(handler)
|
||||||
|
file_handler = logging.FileHandler(log_filename, mode='w', encoding='utf-8')
|
||||||
|
formatter = logging.Formatter('[%(asctime)s %(levelname)s] %(message)s', datefmt='%Y-%d-%m %H:%M:%S')
|
||||||
|
file_handler.setFormatter(formatter)
|
||||||
|
root_logger.addHandler(file_handler)
|
||||||
|
root_logger.setLevel(logging.INFO)
|
||||||
|
|
||||||
|
log_and_print_online("[Config]:"+str(cfg))
|
||||||
|
graph = Graph()
|
||||||
|
graph.create_from_log(directory)
|
||||||
|
graph.print()
|
||||||
|
|
||||||
|
experience = Experience(graph, directory)
|
||||||
|
if len(graph.nodes)==0 or len(graph.edges) == 0:
|
||||||
|
log_and_print_online("No node or no edges constrcuted from the task execution process, maybe due to a unfinished software production or sometimes single node appears")
|
||||||
|
else:
|
||||||
|
if cfg.experience.reap_zombie:
|
||||||
|
experience.reap_zombie()
|
||||||
|
graph.print()
|
||||||
|
experience.estimate()
|
||||||
|
experiences = experience.extract_thresholded_experiences()
|
||||||
|
|
||||||
|
# memory upload
|
||||||
|
memory = Memory()
|
||||||
|
memory.upload()
|
||||||
|
memory.upload_from_experience(experience)
|
||||||
|
|
||||||
|
def process_directory(directory):
|
||||||
|
for root, dirs, files in os.walk(directory):
|
||||||
|
for directory in dirs:
|
||||||
|
file_path = os.path.join(root, directory)
|
||||||
|
memorize(file_path)
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(description="Memorize one software or softwares from the directory.")
|
||||||
|
parser.add_argument("path", help="The file or directory to process")
|
||||||
|
parser.add_argument("-d", "--directory", action="store_true", help="Process all files in the given directory.")
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
if args.directory:
|
||||||
|
process_directory(args.path)
|
||||||
|
else:
|
||||||
|
memorize(args.path)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
84
ecl/embedding.py
Normal file
84
ecl/embedding.py
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
import os
|
||||||
|
import openai
|
||||||
|
from openai import OpenAI
|
||||||
|
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
|
||||||
|
if 'BASE_URL' in os.environ:
|
||||||
|
BASE_URL = os.environ['BASE_URL']
|
||||||
|
else:
|
||||||
|
BASE_URL = None
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from tenacity import (
|
||||||
|
retry,
|
||||||
|
stop_after_attempt,
|
||||||
|
wait_random_exponential,
|
||||||
|
wait_fixed
|
||||||
|
)
|
||||||
|
from utils import log_and_print_online
|
||||||
|
sys.path.append(os.path.join(os.getcwd(),"ecl"))
|
||||||
|
|
||||||
|
class OpenAIEmbedding:
|
||||||
|
def __init__(self, **params):
|
||||||
|
self.code_prompt_tokens = 0
|
||||||
|
self.text_prompt_tokens = 0
|
||||||
|
self.code_total_tokens = 0
|
||||||
|
self.text_total_tokens = 0
|
||||||
|
|
||||||
|
self.prompt_tokens = 0
|
||||||
|
self.total_tokens = 0
|
||||||
|
|
||||||
|
@retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(10))
|
||||||
|
def get_text_embedding(self,text: str):
|
||||||
|
if BASE_URL:
|
||||||
|
client = openai.OpenAI(
|
||||||
|
api_key=OPENAI_API_KEY,
|
||||||
|
base_url=BASE_URL,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
client = openai.OpenAI(
|
||||||
|
api_key=OPENAI_API_KEY
|
||||||
|
)
|
||||||
|
|
||||||
|
if len(text)>8191:
|
||||||
|
text = text[:8190]
|
||||||
|
response = client.embeddings.create(input = text, model="text-embedding-ada-002").model_dump()
|
||||||
|
embedding = response['data'][0]['embedding']
|
||||||
|
log_and_print_online(
|
||||||
|
"Get text embedding from {}:\n**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ntotal_tokens: {}\n".format(
|
||||||
|
response["model"],response["usage"]["prompt_tokens"],response["usage"]["total_tokens"]))
|
||||||
|
self.text_prompt_tokens += response["usage"]["prompt_tokens"]
|
||||||
|
self.text_total_tokens += response["usage"]["total_tokens"]
|
||||||
|
self.prompt_tokens += response["usage"]["prompt_tokens"]
|
||||||
|
self.total_tokens += response["usage"]["total_tokens"]
|
||||||
|
|
||||||
|
return embedding
|
||||||
|
|
||||||
|
@retry(wait=wait_random_exponential(min=10, max=60), stop=stop_after_attempt(10))
|
||||||
|
def get_code_embedding(self,code: str):
|
||||||
|
if BASE_URL:
|
||||||
|
client = openai.OpenAI(
|
||||||
|
api_key=OPENAI_API_KEY,
|
||||||
|
base_url=BASE_URL,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
client = openai.OpenAI(
|
||||||
|
api_key=OPENAI_API_KEY
|
||||||
|
)
|
||||||
|
if len(code) == 0:
|
||||||
|
code = "#"
|
||||||
|
elif len(code) >8191:
|
||||||
|
code = code[0:8190]
|
||||||
|
response = client.embeddings.create(input=code, model="text-embedding-ada-002").model_dump()
|
||||||
|
embedding = response['data'][0]['embedding']
|
||||||
|
log_and_print_online(
|
||||||
|
"Get code embedding from {}:\n**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ntotal_tokens: {}\n".format(
|
||||||
|
response["model"],response["usage"]["prompt_tokens"],response["usage"]["total_tokens"]))
|
||||||
|
|
||||||
|
self.code_prompt_tokens += response["usage"]["prompt_tokens"]
|
||||||
|
self.code_total_tokens += response["usage"]["total_tokens"]
|
||||||
|
self.prompt_tokens += response["usage"]["prompt_tokens"]
|
||||||
|
self.total_tokens += response["usage"]["total_tokens"]
|
||||||
|
|
||||||
|
return embedding
|
||||||
|
|
||||||
|
|
311
ecl/experience.py
Normal file
311
ecl/experience.py
Normal file
@ -0,0 +1,311 @@
|
|||||||
|
import os
|
||||||
|
import time
|
||||||
|
from graph import Graph, Node, Edge
|
||||||
|
import sys
|
||||||
|
import openai
|
||||||
|
import numpy as np
|
||||||
|
from codes import Codes
|
||||||
|
from utils import get_easyDict_from_filepath,OpenAIModel,log_and_print_online
|
||||||
|
from embedding import OpenAIEmbedding
|
||||||
|
sys.path.append(os.path.join(os.getcwd(),"ecl"))
|
||||||
|
class Shortcut:
|
||||||
|
def __init__(self, sourceMID, targetMID, valueGain,instructionStar,edgeIDPath):
|
||||||
|
self.sourceMID = sourceMID
|
||||||
|
self.targetMID = targetMID
|
||||||
|
self.valueGain = valueGain
|
||||||
|
self.embedding = None
|
||||||
|
self.instructionStar = instructionStar
|
||||||
|
self.edgeIDPath = edgeIDPath
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return "{} -> {} valueGain={:.6f} len(instructionPath)={} instructionStar={}".format(self.sourceMID, self.targetMID, self.valueGain, len(self.edgeIDPath), self.instructionStar[:100].replace("\n", ""))
|
||||||
|
|
||||||
|
class Experience:
|
||||||
|
def __init__(self, graph: Graph, directory: str):
|
||||||
|
cfg = get_easyDict_from_filepath("./ecl/config.yaml")
|
||||||
|
self.graph: Graph = graph
|
||||||
|
self.directory = directory
|
||||||
|
self.threshold = cfg.experience.threshold
|
||||||
|
self.upperLimit = cfg.experience.upper_limit
|
||||||
|
self.experiences = []
|
||||||
|
|
||||||
|
self.model = OpenAIModel(model_type="gpt-3.5-turbo-16k")
|
||||||
|
self.embedding_method = OpenAIEmbedding()
|
||||||
|
|
||||||
|
for edge in self.graph.edges:
|
||||||
|
node = self.graph.nodes[edge.targetMID]
|
||||||
|
node.degree += 1
|
||||||
|
assert len(self.graph.edges) * 1 == sum([self.graph.nodes[mid].degree for mid in self.graph.nodes.keys()]) # unidirectional
|
||||||
|
|
||||||
|
for mid in self.graph.nodes.keys():
|
||||||
|
node = self.graph.nodes[mid]
|
||||||
|
node.value = 1.0
|
||||||
|
|
||||||
|
def reap_zombie(self):
    """Remove every node and edge that is not on the graph's main shortest path."""
    pathNodes, pathEdges = self.graph.find_shortest_path()

    # Anything off the backbone path is a "zombie" to be reaped.
    zombieEdges = [e for e in self.graph.edges if e not in pathEdges]
    zombieNodes = [self.graph.nodes[m] for m in self.graph.nodes.keys() if m not in pathNodes]

    edge_report = "ZOMBIE EDGES: \n"
    for e in zombieEdges:
        self.graph.edges.remove(e)
        edge_report += "Zombie Edge {} -> {} Removed\n".format(e.sourceMID, e.targetMID)
    log_and_print_online(edge_report)

    node_report = "ZOMBIE NODES: \n"
    for n in zombieNodes:
        del self.graph.nodes[n.mID]
        node_report += "Zombie Node {} Removed\n".format(n.mID)
    log_and_print_online(node_report)
|
||||||
|
def estimate(self):
    """Re-estimate the value of every node against the final node of the graph.

    No-op when the graph has no edges. Nodes with empty code are zeroed first,
    then each node's value is replaced by its pairwise score against the last
    edge's target node.
    """
    if len(self.graph.edges) == 0:
        return

    # A node with no code carries no value.
    for mid in self.graph.nodes.keys():
        node = self.graph.nodes[mid]
        if len(node.code) == 0:
            node.value *= 0.0

    # NOTE(review): bare call kept from the original — presumably logs a blank
    # separator line; confirm log_and_print_online has a default argument.
    log_and_print_online()

    # BUG FIX: the original built the "Init value" half of the log message
    # AFTER the loop below had overwritten every node's value, so both halves
    # of the message were identical. Snapshot the initial values first.
    init_values = {mid: self.graph.nodes[mid].value for mid in self.graph.nodes.keys()}

    # Score every node against the final node of the evolution chain.
    vn = self.graph.nodes[self.graph.edges[-1].targetMID]
    for mid in self.graph.nodes.keys():
        vi = self.graph.nodes[mid]
        vi.value = self._pairwise_estimate(vi, vn)

    log_and_print_online("Init value:" + str(init_values) + "\n\nEstimated value:" + str({mid: self.graph.nodes[mid].value for mid in self.graph.nodes.keys()}))
|
||||||
|
def get_cosine_similarity(self, embeddingi, embeddingj):
    """Cosine similarity between two embedding vectors (lists or arrays)."""
    a = np.array(embeddingi)
    b = np.array(embeddingj)
    # dot(a, b) / (|a| * |b|); no zero-norm guard, matching the original.
    denominator = np.linalg.norm(a) * np.linalg.norm(b)
    return a.dot(b) / denominator
|
|
||||||
|
def _pairwise_estimate(self, vi: Node, vj: Node):
    """Score snapshot vi against the final snapshot vj.

    The score blends code-to-code similarity, code-to-task similarity,
    version distance, whether vi's code actually runs, and vi's normalised
    in-degree. Any dead signal (zero weight/similarity) short-circuits to 0.0.
    """
    # Dead node: nothing to score.
    if vi.value == 0.0:
        return 0.0

    pathNodes, pathEdges = self.graph.find_shortest_path(vi.mID, vj.mID)
    distance_weight = 1.0 / len(pathEdges) if len(pathEdges) != 0 else 1.0

    # Materialise and execute vi's code; a failing run zeroes the score.
    codes = Codes(vi.code)
    codes._rewrite_codes()
    (exist_bugs_flag, test_reports) = codes._run_codes()
    compile_weight = 0.0 if exist_bugs_flag else 1.0
    if compile_weight == 0.0:
        return 0.0

    # Normalised in-degree of vi.
    maximum_degree = max([self.graph.nodes[mid].degree for mid in self.graph.nodes.keys()])
    degree_weight = vi.degree * 1.0 / maximum_degree
    if degree_weight == 0.0:
        return 0.0

    # Code embedding of vi, cached on the node after the first computation.
    start_time = time.time()
    vi_code_emb = self.embedding_method.get_code_embedding(vi.code) if vi.embedding is None else vi.embedding
    if vi.embedding is None:
        end_time = time.time()
        log_and_print_online("DONE:get node embedding\ntime cost:{}\n".format(end_time - start_time))
        vi.embedding = vi_code_emb

    # Code embedding of vj, cached likewise.
    start_time = time.time()
    vj_code_emb = self.embedding_method.get_code_embedding(vj.code) if vj.embedding is None else vj.embedding
    if vj.embedding is None:
        end_time = time.time()
        log_and_print_online("DONE:get node embedding\ntime cost:{}\n".format(end_time - start_time))
        vj.embedding = vj_code_emb

    code_code_cos_sim = self.get_cosine_similarity(vi_code_emb, vj_code_emb)
    if code_code_cos_sim == 0.0:
        return 0.0

    # Task-prompt embedding, cached on the graph after the first computation.
    filenames = os.listdir(self.directory)
    filename = [f for f in filenames if f.endswith(".prompt")][0]
    task_prompt = open(os.path.join(self.directory, filename), "r").read().strip()
    start_time = time.time()
    task_emb = self.embedding_method.get_text_embedding(task_prompt) if self.graph.task_embedding is None else self.graph.task_embedding
    if self.graph.task_embedding is None:
        end_time = time.time()
        log_and_print_online("DONE:get task prompt embedding\ntime cost:{}\n".format(end_time - start_time))
        self.graph.task = task_prompt
        self.graph.task_embedding = task_emb

    code_text_cos_sim = self.get_cosine_similarity(vi_code_emb, task_emb)
    if code_text_cos_sim == 0.0:
        return 0.0

    assert distance_weight >= 0.0 and distance_weight <= 1.0
    assert compile_weight >= 0.0 and compile_weight <= 1.0
    assert degree_weight >= 0.0 and degree_weight <= 1.0

    # Version distance; identical versions count as a perfect score.
    distance = vj.version - vi.version
    if distance == 0:
        return 1
    else:
        return code_code_cos_sim * 1.0 / distance * code_text_cos_sim * compile_weight * degree_weight
    # return distance_weight * compile_weight * degree_weight
|
|
||||||
|
def get_transitive_closure(self):
    """Reachability matrix over the main shortest path, via Warshall's algorithm.

    Returns a dict-of-dicts: matrix[src][dst] == 1 iff dst is reachable from src
    along the main-path edges.
    """
    def print_matrix(matrix):
        # Dump the square matrix row by row, then a blank separator line.
        for row_key in matrix.keys():
            for col_key in matrix.keys():
                print(matrix[row_key][col_key], end=" ")
            print()
        print()

    # Zero-initialise an NxN adjacency matrix over every node id.
    matrix = {}
    for mid1 in self.graph.nodes:
        matrix[mid1] = {}
        for mid2 in self.graph.nodes:
            matrix[mid1][mid2] = 0
    # print_matrix(matrix)

    # Seed with the edges of the main shortest path only.
    pathNodes, pathEdges = self.graph.find_shortest_path()
    for edge in pathEdges:
        matrix[edge.sourceMID][edge.targetMID] = 1
    print("Init Adjacent Matrix:")
    print_matrix(matrix)

    # Warshall: close the relation under composition.
    for nodek in matrix.keys():
        for nodei in matrix.keys():
            for nodej in matrix.keys():
                if matrix[nodei][nodej] == 1 or (matrix[nodei][nodek] == 1 and matrix[nodek][nodej] == 1):
                    matrix[nodei][nodej] = 1
    print("Transitive Closure:")
    print_matrix(matrix)

    return matrix
||||||
|
|
||||||
|
def extract_thresholded_experiences(self):
    """Mine shortcut experiences whose value gain clears the configured threshold.

    Candidates are non-adjacent, reachable node pairs on the main path whose
    target code looks complete. The list is sorted by gain, truncated to
    self.upperLimit, and each survivor gets a generated instruction. Returns
    the final list (also stored on self.experiences).
    """
    if len(self.graph.edges) == 0:
        return []
    if len(self.graph.nodes) < 2:
        return []
    assert len(self.graph.nodes.keys()) >= 2
    matrix = self.get_transitive_closure()

    # Collect candidate shortcuts along the main path.
    experiences = []
    pathNodes, _ = self.graph.find_shortest_path()
    for id1 in pathNodes:
        for id2 in pathNodes:
            valueGain = self.graph.nodes[id2].value - self.graph.nodes[id1].value
            flag0 = id1 != id2                                  # distinct endpoints
            flag1 = self.graph.exists_edge(id1, id2) == False   # not already adjacent
            flag2 = matrix[id1][id2] == 1                       # reachable
            flag3 = valueGain >= self.threshold                 # worth keeping
            # Target code must not contain placeholder lines ("pass" / "todo").
            code_lines = [line.lower().strip() for line in self.graph.nodes[id2].code.split("\n")]
            flag4 = not ("pass".lower() in code_lines or "TODO".lower() in code_lines)
            if flag0 and flag1 and flag2 and flag3 and flag4:
                _, edges = self.graph.find_shortest_path(uMID=id1, vMID=id2)
                edgeIDPath = [edge.edgeId for edge in edges]
                # FIX: dropped the unused sourcecode/targetcode locals here.
                shortcut = Shortcut(sourceMID=id1, targetMID=id2, valueGain=valueGain, instructionStar="", edgeIDPath=edgeIDPath)
                experiences.append(shortcut)

    experiences = sorted(experiences, key=lambda item: item.valueGain, reverse=True)

    if len(experiences) > self.upperLimit:
        # FIX: corrected the "experieces" typo in the log message.
        log_and_print_online("{} experiences truncated.".format(len(experiences) - self.upperLimit))
        experiences = experiences[:self.upperLimit]

    prompt_template0 = """Provide detailed instructions to generate the following code:
{targetcode}

The instructions should encompass:

Modules and Classes:
- Enumerate necessary modules.
- Detail the classes, their attributes, and methods within these modules.
- Articulate the purpose and operation of each class.

Data Structures:
- Identify the requisite data structures.
- Describe their names, attributes, and operations.

Main Program Flow:
- Outline the principal progression of the program.
- Highlight the sequence for initializing and invoking other modules, classes, and methods within the primary file (e.g., main.py).
- Clarify the logical progression during runtime.

Input and Output:
- Specify the method by which the program accepts input, be it from users or external sources.
- Elaborate on the projected outputs or actions of the software.

Exception Handling:
- Instruct on the approach to manage potential anomalies or exceptions during execution to ascertain stability and robustness.

External Libraries and Dependencies:
- Explicitly list the necessary external libraries or dependencies, their versions, and their functionalities.

Please output the instructions directly."""

    prompt_template1 = """Please provide detailed instructions on how to transition from the initial code version represented by source code to the final version indicated by target code.

Source Code:
{sourcecode}

Target Code:
{targetcode}

The instructions should encompass:

Modules and Classes: Detail the modules to be incorporated, along with the names, attributes, and operations of any classes to be added or amended. Furthermore, describe the intended function and utility of these new or altered classes.

Data Structures: Clearly define any data structures that need introduction or alteration, elucidating their names, attributes, and functionalities.

Main Program Flow: Outline the program's primary sequence of operations, highlighting the procedures to initialize and invoke other modules, classes, and methods in the primary file (e.g., main.py). Describe the program's logic sequence during its execution.

Input and Output: Define the methodology by which the program will acquire input, whether from users or external data sources. Also, characterize the projected outputs or behaviors of the application.

Exception Handling: Provide guidance on managing potential discrepancies or exceptions that might emerge during the software's operation, ensuring its resilience and reliability.

External Libraries and Dependencies: If the implementation requires external libraries or dependencies, specify their names, versions, and their respective purposes explicitly."""

    # Generate an instruction for every surviving shortcut.
    # FIX: both branches previously duplicated the identical model call and
    # print; only the prompt construction differs.
    for shortcut in experiences:
        sourcecode = self.graph.nodes[shortcut.sourceMID].code
        targetcode = self.graph.nodes[shortcut.targetMID].code
        if sourcecode == "":
            prompt = prompt_template0.replace("{targetcode}", targetcode)
        else:
            prompt = prompt_template1.replace("{sourcecode}", sourcecode).replace("{targetcode}", targetcode)
        response = self.model.run(messages=[{"role": "system", "content": prompt}])
        print("instructionstar generated")
        shortcut.instructionStar = response["choices"][0]["message"]["content"]

    output = "Sorted-and-Truncated Experiences (with instructionStar):"
    self.experiences = experiences
    for experience in experiences:
        output += str(experience)
    log_and_print_online(output)
    log_and_print_online("[Conclusion]:\nprompt_tokens:{}, completion_tokens:{}, total_tokens:{}".format(self.model.prompt_tokens, self.model.completion_tokens, self.model.total_tokens))
    log_and_print_online("[Conclusion]:\ntext_prompt_tokens:{}, text_total_tokens:{}\ncode_prompt_tokens:{}, code_total_tokens:{}\nprompt_tokens:{}, total_tokens:{}".format(self.embedding_method.text_prompt_tokens,
                                                                                                                                                                            self.embedding_method.text_total_tokens,
                                                                                                                                                                            self.embedding_method.code_prompt_tokens,
                                                                                                                                                                            self.embedding_method.code_total_tokens,
                                                                                                                                                                            self.embedding_method.prompt_tokens,
                                                                                                                                                                            self.embedding_method.total_tokens))

    return experiences
||||||
|
def to_dict(self):
    """Serialise every mined experience as a plain dict (for JSON dumping)."""
    return [experience.__dict__ for experience in self.experiences]
|
327
ecl/graph.py
Normal file
327
ecl/graph.py
Normal file
@ -0,0 +1,327 @@
|
|||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import hashlib
|
||||||
|
from queue import Queue
|
||||||
|
import re
|
||||||
|
from utils import cmd,log_and_print_online
|
||||||
|
|
||||||
|
class Node:
    """One snapshot (commit) of a software warehouse in the evolution graph."""

    def __init__(self):
        self.code = None           # concatenated source of every .py file
        self.version = None        # numeric version parsed from the commit line
        self.commitMessage = None
        self.mID = None            # md5 digest of self.code
        self.role = None
        self.degree = 0            # in-degree, filled by Experience.__init__
        self.value = 0.0           # estimated value, filled by Experience.estimate
        self.embedding = None      # code embedding cache

    def create_from_warehouse(self, directory) -> None:
        """Populate this node from the currently checked-out state of a git warehouse."""
        def _format_code(code):
            # Strip blank lines so hashing is insensitive to vertical whitespace.
            return "\n".join([line for line in code.split("\n") if len(line.strip()) > 0])

        # Collect every .py file anywhere under the warehouse.
        codebooks = {}
        assert len([f for f in os.listdir(directory) if f.endswith(".py")]) > 0
        for root, directories, filenames in os.walk(directory):
            for filename in filenames:
                if filename.endswith(".py"):
                    codebooks[filename] = _format_code(open(os.path.join(directory, filename), "r", encoding="utf-8").read())

        # Render the codebook as fenced markdown blocks, one per file.
        rendered = ""
        for filename in codebooks.keys():
            rendered += "{}\n```Python\n{}\n```\n\n".format(filename, codebooks[filename])
        self.code = rendered
        self.mID = hashlib.md5(self.code.encode(encoding='UTF-8')).hexdigest()

        # First line of `git log --oneline` gives "<hash> vX.Y <message...>".
        # NOTE(review): the second replace collapses double spaces — the diff
        # rendering mangled the original literal; confirm against upstream.
        content = cmd("cd {} && git log --oneline".format(directory)).replace("(HEAD -> main)", "").replace("  ", " ")
        self.commitMessage = " ".join(content.split("\n")[0].split(" ")[1:])
        self.version = float(content.split("\n")[0].split(" ")[1].replace("v", ""))
|
|
||||||
|
class Edge:
    """A directed transition between two repository snapshots."""

    def __init__(self, sourceMID, targetMID, instruction, role):
        self.sourceMID = sourceMID      # md5 id of the source node
        self.targetMID = targetMID      # md5 id of the target node
        self.instruction = instruction  # instruction that drove the transition
        self.role = role                # agent role that issued the instruction
        self.edgeId = None              # assigned by Graph.addEdge
        self.embedding = None           # instruction embedding cache
|
|
||||||
|
class Graph:
    """Directed graph of repository snapshots (nodes) and transitions (edges)."""

    def __init__(self):
        self.task = ""                # natural-language task prompt
        self.task_embedding = None    # embedding of the task prompt
        self.nodes = {}               # mID -> Node
        self.edges = []               # ordered list of Edge
        self.directory: str = None    # warehouse/log directory this graph came from
|
|
||||||
|
def addNode(self, node: Node):
    """Register node under its md5 id unless an identical snapshot already exists."""
    self.nodes.setdefault(node.mID, node)
|
|
||||||
|
def addEdge(self, edge: Edge):
    """Append edge, assigning a deterministic id derived from its insertion index."""
    label = "edge_{}".format(len(self.edges))
    edge.edgeId = hashlib.md5(label.encode(encoding='UTF-8')).hexdigest()
    self.edges.append(edge)
|
|
||||||
|
def exists_edge(self, mid1: str, mid2: str):
    """Return True iff a directed edge mid1 -> mid2 is present in the graph."""
    return any(edge.sourceMID == mid1 and edge.targetMID == mid2 for edge in self.edges)
||||||
|
|
||||||
|
def create_from_warehouse(self, directory) -> None:
    """Rebuild the evolution graph by replaying a warehouse's git history.

    Every commit becomes a Node (plus a synthetic all-zero empty root), and
    consecutive commits are linked by an Edge. The working tree is restored
    to the newest commit even if node construction fails midway.
    """
    self.directory = directory
    content = cmd("cd {} && git log --oneline".format(directory))
    # assert "log commit" in content
    # Commit ids oldest-first, prefixed with a synthetic "0000000" empty root.
    cIDs = ["0" * 7] + [line.split(" ")[0] for line in content.split("\n") if len(line) > 0][::-1]
    log_cID = cIDs[-1]
    cIDs = cIDs[:-1]
    log_and_print_online("commit history:" + str(cIDs) + "\nlog commit:" + str(log_cID))

    # Construct one node per commit id (commit id -> md5 id).
    try:
        cID2mID = {}
        output = ""
        for cID in cIDs:
            if cID == "0" * 7:
                # Synthetic empty root: no code, fixed md5 of the empty string.
                node = Node()
                node.code = ""
                node.mID = hashlib.md5("".encode(encoding='UTF-8')).hexdigest()
                node.commitMessage = ""
                node.version = "v0.0"
                cID2mID[cID] = node.mID
                self.addNode(node)
                output += ("Node: {} -> {}\n".format("0" * 7, node.mID))
            else:
                # Check out the commit and snapshot the working tree.
                content = cmd("cd {} && git reset --hard {}".format(directory, cID))
                node = Node()
                node.create_from_warehouse(directory)
                cID2mID[cID] = node.mID
                self.addNode(node)
                output += ("Node: {} -> {}\n".format(cID, node.mID))
    finally:
        # Always restore the checkout to the newest commit.
        cmd("cd {} && git reset --hard {}".format(directory, log_cID))
    log_and_print_online(output)

    # Link consecutive commits with empty-instruction edges; the log parser
    # fills in instructions and roles afterwards.
    for i in range(1, len(cIDs), 1):
        sourceMID = cID2mID[cIDs[i - 1]]
        targetMID = cID2mID[cIDs[i]]
        edge = Edge(sourceMID, targetMID, instruction="", role="")
        self.addEdge(edge)
        # print("{} -> {}, {} -> {}".format(sourcecID, targetcID, sourcemID, targetmID))

    self._create_instruction_and_roles_from_log(directory)
||||||
|
|
||||||
|
def create_from_log(self, directory) -> None:
    """Rebuild the evolution graph by replaying a ChatDev .log file.

    Each programmer utterance that contains fenced code updates a running
    codebook; every distinct codebook state becomes a Node, and consecutive
    distinct states are linked by an Edge.
    """

    def update_codebook(utterance, codebook):
        """Merge every fenced code block found in `utterance` into `codebook`."""

        def extract_filename_from_line(lines):
            # Last "name.ext" token wins, lowercased.
            file_name = ""
            for candidate in re.finditer(r"(\w+\.\w+)", lines, re.DOTALL):
                file_name = candidate.group()
                file_name = file_name.lower()
            return file_name

        def extract_filename_from_code(code):
            # Fall back to the last class name, mapped to "<classname>.py".
            file_name = ""
            regex_extract = r"class (\S+?):\n"
            matches_extract = re.finditer(regex_extract, code, re.DOTALL)
            for match_extract in matches_extract:
                file_name = match_extract.group(1)
                # NOTE(review): nesting reconstructed from a garbled diff —
                # assumed inside the loop so an empty result stays ""; confirm.
                file_name = file_name.lower().split("(")[0] + ".py"
            return file_name

        def _format_code(code):
            return "\n".join([line for line in code.split("\n") if len(line.strip()) > 0])

        regex = r"(.+?)\n```.*?\n(.*?)```"
        matches = re.finditer(regex, utterance, re.DOTALL)
        for match in matches:
            code = match.group(2)
            if "CODE" in code:
                continue  # placeholder block, not real code
            group1 = match.group(1)
            filename = extract_filename_from_line(group1)
            if "__main__" in code:
                filename = "main.py"
            if filename == "":
                filename = extract_filename_from_code(code)
            assert filename != ""
            if filename is not None and code is not None and len(filename) > 0 and len(code) > 0:
                codebook[filename] = _format_code(code)

    def get_codes(codebook):
        """Render the codebook as fenced markdown blocks, one per file."""
        content = ""
        for filename in codebook.keys():
            content += "{}\n```{}\n{}\n```\n\n".format(filename, "python" if filename.endswith(".py") else
                                                       filename.split(".")[-1], codebook[filename])
        return content

    self.directory = directory
    logdir = [filename for filename in os.listdir(directory) if filename.endswith(".log")]
    if len(logdir) > 0:
        log_filename = logdir[0]
        print("log_filename:", log_filename)
    else:
        return
    content = open(os.path.join(directory, log_filename), "r", encoding='UTF-8').read()

    # Split the log into timestamped utterances.
    utterances = []
    regex = r"\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \w+)\] ([.\s\S\n\r\d\D\t]*?)(?=\n\[\d|$)"
    matches = re.finditer(regex, content, re.DOTALL)
    for match in matches:
        group1 = match.group(1)
        group2 = match.group(2)
        utterances.append("[{}] {}".format(group1, group2))
    utterances = [utterance for utterance in utterances if
                  "flask app.py" not in utterance and "OpenAI_Usage_Info" not in utterance]
    # Everything from the EnvironmentDoc phase onwards is irrelevant here.
    index = [i for i, utterance in enumerate(utterances) if
             "Programmer<->Chief Technology Officer on : EnvironmentDoc" in utterance]
    if len(index) > 0:
        utterances = utterances[:index[0] - 1]

    # Keep only programmer utterances that can carry code.
    utterances_code = [utterance for utterance in utterances if
                       "Programmer<->" in utterance and "EnvironmentDoc" not in utterance and "TestErrorSummary" not in utterance]
    print("len(utterances_code):", len(utterances_code))

    codebook, fingerprints, pre_mid = {}, set(), ""
    for utterance in utterances_code:
        update_codebook(utterance, codebook)

        # Snapshot the current codebook state as a candidate node.
        node = Node()
        node.mID = hashlib.md5(get_codes(codebook).encode(encoding='UTF-8')).hexdigest()
        node.commitMessage = ""
        node.code = get_codes(codebook)
        node.version = float(len(fingerprints))
        if node.mID not in fingerprints:
            fingerprints.add(node.mID)
            self.addNode(node)

            # Link to the previous distinct state.
            # NOTE(review): nesting reconstructed from a garbled diff — edge
            # construction assumed to run only for newly-seen states; confirm.
            if pre_mid != "":
                sourceMID = pre_mid
                targetMID = node.mID
                edge = Edge(sourceMID, targetMID, instruction="", role="")
                self.addEdge(edge)
            pre_mid = node.mID

    self._create_instruction_and_roles_from_log(directory)
||||||
|
|
||||||
|
def _create_instruction_and_roles_from_log(self, directory) -> None:
    """Parse the ChatDev .log file and attach an instruction and role to each edge."""
    logdir = [filename for filename in os.listdir(directory) if filename.endswith(".log")]
    if len(logdir) > 0:
        log_filename = logdir[0]
        log_and_print_online("log_filename:" + log_filename)
    else:
        return
    content = open(os.path.join(directory, log_filename), "r", encoding='UTF-8').read()

    # Split the log into timestamped utterances (timestamp discarded).
    utterances = []
    regex = r"\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \w+)\] ([.\s\S\n\r\d\D\t]*?)(?=\n\[\d|$)"
    matches = re.finditer(regex, content, re.DOTALL)
    for match in matches:
        utterances.append(match.group(2))

    # Only chat openings from the three instruction-issuing roles matter.
    utterances = [utterance for utterance in utterances if "Chief Technology Officer: **[Start Chat]**" in utterance or "Code Reviewer: **[Start Chat]**" in utterance or "Software Test Engineer: **[Start Chat]**" in utterance]
    if "Test Pass!" in content:
        utterances.append("Software Test Engineer: **[Start Chat]**\n\nTest Pass!")

    instructions, roles = [], []
    for utterance in utterances:
        utterance = utterance.lower()
        instruction = ""
        if "Chief Technology Officer: **[Start Chat]**".lower() in utterance:
            # CTO chats always carry the same canned coding instruction.
            instruction = "write one or multiple files and make sure that every detail of the architecture is implemented as code"
        elif "Code Reviewer: **[Start Chat]**".lower() in utterance:
            instruction = utterance.split("Comments on Codes:".lower())[-1].split("In the software,".lower())[0]
            instruction = instruction.replace("<comment>".lower(), "")
        elif "Software Test Engineer: **[Start Chat]**".lower() in utterance:
            if "Test Pass!".lower() in utterance:
                instruction = "Test Pass!"
            else:
                instruction = utterance.split("Error Summary of Test Reports:".lower())[-1].split("Note that each file must strictly follow a markdown code block format".lower())[0]
        else:
            assert False
        role = utterance.split(": **")[0]

        # Trim whitespace and a single pair of wrapping double quotes.
        instruction = instruction.strip()
        if instruction.startswith("\""):
            instruction = instruction[1:]
        if instruction.endswith("\""):
            instruction = instruction[:-1]
        instruction = instruction.strip()
        instructions.append(instruction)

        role = role.strip()
        roles.append(role)

    # NOTE(review): assumes exactly one parsed instruction per edge, in order —
    # if the log yields fewer utterances than edges this raises IndexError; confirm.
    for i in range(len(self.edges)):
        self.edges[i].instruction = instructions[i]
        self.edges[i].role = roles[i]
||||||
|
|
||||||
|
def find_shortest_path(self, uMID=None, vMID=None):
    """BFS shortest path from uMID to vMID over the directed edges.

    Defaults to the first edge's source and the last edge's target, i.e. the
    graph's main evolution path.

    Returns:
        (pathNodes, pathEdges): node ids from uMID to vMID inclusive, and the
        edges traversed. BUG FIX: returns ([], []) when vMID is unreachable —
        the original fell off the end and returned None, which crashed every
        caller that unpacks the result (e.g. reap_zombie).
    """
    if uMID == None:
        uMID = self.edges[0].sourceMID
    if vMID == None:
        vMID = self.edges[-1].targetMID

    Q, visit, preMID, preEdge = Queue(), {}, {}, {}
    Q.put(uMID)
    visit[uMID] = True
    while not Q.empty():
        mID = Q.get()
        if mID == vMID:
            # Walk the predecessor links back to the source, then reverse.
            id, pathNodes, pathEdges = vMID, [], []
            while id != uMID:
                pathNodes.append(id)
                pathEdges.append(preEdge[id])
                id = preMID[id]
            pathNodes.append(uMID)
            pathNodes = pathNodes[::-1]
            pathEdges = pathEdges[::-1]
            return pathNodes, pathEdges
        # Expand every outgoing edge of the current node.
        nextEdges = [edge for edge in self.edges if edge.sourceMID == mID]
        for nextEdge in nextEdges:
            nextMID = nextEdge.targetMID
            if nextMID not in visit.keys():
                Q.put(nextMID)
                visit[nextMID] = True
                preMID[nextMID] = mID
                preEdge[nextMID] = nextEdge
    # Target unreachable from source: empty path instead of implicit None.
    return [], []
||||||
|
|
||||||
|
def print(self):
    """Log a human-readable dump of every node and edge in the graph."""
    report = "\n" + "*" * 50 + " Graph " + "*" * 50 + "\n"
    report += "{} Nodes:\n".format(len(self.nodes.keys()))
    for node in self.nodes.values():
        report += "{}, {}, {}\n".format(node.mID, node.version, node.commitMessage)
    report += "{} Edges:\n".format(len(self.edges))
    for edge in self.edges:
        # Instructions are truncated to 60 chars to keep the dump compact.
        report += "{}: {} -> {} ({}: {})\n".format(edge.edgeId, edge.sourceMID, edge.targetMID, edge.role, edge.instruction[:60])
    report += "*" * 50 + " Graph " + "*" * 50
    log_and_print_online(report)
||||||
|
|
||||||
|
|
||||||
|
def to_dict(self):
    """Serialise all nodes and edges as plain dicts (for JSON dumping).

    Returns:
        (node_dicts, edge_dicts): two lists of attribute dictionaries.
    """
    merged_node_dict = [node.__dict__ for node in self.nodes.values()]
    merged_edge_dict = [edge.__dict__ for edge in self.edges]
    return merged_node_dict, merged_edge_dict
|
430
ecl/memory.py
Normal file
430
ecl/memory.py
Normal file
@ -0,0 +1,430 @@
|
|||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import math
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import openai
|
||||||
|
import faiss
|
||||||
|
import numpy as np
|
||||||
|
from datetime import datetime
|
||||||
|
sys.path.append(os.path.join(os.getcwd(),"ecl"))
|
||||||
|
#from utils import get_code_embedding,get_text_embedding
|
||||||
|
from utils import get_easyDict_from_filepath,log_and_print_online
|
||||||
|
from embedding import OpenAIEmbedding
|
||||||
|
|
||||||
|
class MemoryBase(ABC):
    """Base class for memory stores backed by a JSON file on disk."""

    def __init__(self, directory: str) -> None:
        self.directory: str = directory

        # Retrieval knobs from the shared ECL config.
        cfg = get_easyDict_from_filepath("./ecl/config.yaml")
        self.top_k_code = cfg.retrieval.top_k_code
        self.top_k_text = cfg.retrieval.top_k_text
        self.code_thresh = cfg.retrieval.searchcode_thresh
        self.text_thresh = cfg.retrieval.searchtext_thresh

        # Only the OpenAI embedding backend is currently wired up.
        self.embedding_method = None
        if cfg.embedding_method == "OpenAI":
            self.embedding_method = OpenAIEmbedding()

        # Load existing memory, or create an empty JSON file on first use.
        self.content = None
        if os.path.exists(self.directory) and self.directory.endswith('.json'):
            with open(self.directory) as file:
                self.content = json.load(file)
        elif os.path.exists(self.directory) is False:
            with open(self.directory, 'w') as file:
                json.dump({}, file)  # create an empty JSON file
            print(f"Now the memory file '{self.directory}' is created")
        if self.content is None:
            print("Empty Memory")

    @abstractmethod
    def memory_retrieval(self) -> str:
        pass

    def _get_memory_count(self) -> int:
        """Number of stored memories; the running total lives on the last card."""
        if isinstance(self.content, list):
            return self.content[-1].get("total")
        return 0
||||||
|
|
||||||
|
|
||||||
|
class AllMemory(MemoryBase):
    """Concrete memory store over the full MemoryCards JSON file."""

    def __init__(self, directory: str):
        super().__init__(directory)
||||||
|
|
||||||
|
# unused; init experience list
|
||||||
|
def _init_explist(self):
|
||||||
|
self.exp_list = None
|
||||||
|
if self.content == None:
|
||||||
|
self.exp_list = None
|
||||||
|
else :
|
||||||
|
for t in self.content:
|
||||||
|
for experience in t.get("experineces"):
|
||||||
|
self.exp_list.append(experience)
|
||||||
|
|
||||||
|
# clear all memory
|
||||||
|
def _memory_clear(self) ->None:
|
||||||
|
if os.path.exists(self.directory) and self.directory.endswith('.json'):
|
||||||
|
with open(self.directory) as file:
|
||||||
|
json.dump({},file)
|
||||||
|
file.close()
|
||||||
|
self.content = None
|
||||||
|
# get code sample
|
||||||
|
def get_codesample(self) -> str:
    """Return the code of the newest node in memory, or None when memory is empty."""
    if self._get_memory_count() < 1:
        return None
    return self.content[-1].get("nodes")[-1]["code"]
|
||||||
|
# get text str sample
|
||||||
|
def get_textsample(self) -> str:
    """Return the instruction of the newest edge in memory, or None when empty."""
    if self._get_memory_count() < 1:
        return None
    return self.content[-1].get("edges")[-1].get("instruction")
|
||||||
|
# get code embedding from code mID
|
||||||
|
def _get_codeembedding(self,mid) :
|
||||||
|
for t in self.content:
|
||||||
|
for node in t["nodes"]:
|
||||||
|
if node["mID"] == mid:
|
||||||
|
return node.get("embedding")
|
||||||
|
# get instructionstar from sourcecode mID
|
||||||
|
def _get_instructionstar(self,mid):
|
||||||
|
max_valueGain = -1
|
||||||
|
for t in self.content:
|
||||||
|
for experience in t["experiences"]:
|
||||||
|
if experience == None :
|
||||||
|
pass
|
||||||
|
elif experience["sourceMID"] == mid:
|
||||||
|
if experience.get("valueGain") >= max_valueGain:
|
||||||
|
instructionstar = experience.get("instructionStar")
|
||||||
|
return instructionstar
|
||||||
|
|
||||||
|
# get experience task and dir from sourcecode mID
|
||||||
|
def _get_task_from_source(self,mid):
|
||||||
|
task = None
|
||||||
|
task_dir = None
|
||||||
|
for t in self.content:
|
||||||
|
for experience in t["experiences"]:
|
||||||
|
if experience == None :
|
||||||
|
pass
|
||||||
|
elif experience["sourceMID"] == mid:
|
||||||
|
task = t["task"]
|
||||||
|
task_dir = t["dir"]
|
||||||
|
return task,task_dir
|
||||||
|
|
||||||
|
# get experience task and dir from targetcode mID
|
||||||
|
def _get_task_from_target(self,mid):
|
||||||
|
task = None
|
||||||
|
task_dir = None
|
||||||
|
for t in self.content:
|
||||||
|
for experience in t["experiences"]:
|
||||||
|
if experience == None :
|
||||||
|
pass
|
||||||
|
elif experience["targetMID"] == mid:
|
||||||
|
task = t["task"]
|
||||||
|
task_dir = t["dir"]
|
||||||
|
return task,task_dir
|
||||||
|
|
||||||
|
# retrieval from MemoryCards
|
||||||
|
def memory_retrieval(self,input_message:str, type:str, k = None) :
|
||||||
|
if k == None:
|
||||||
|
if type == "code":
|
||||||
|
return self.search_code(input_message,self.top_k_code)
|
||||||
|
elif type == "text":
|
||||||
|
return self.search_text(input_message,self.top_k_text)
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
if type == "code":
|
||||||
|
return self.search_code(input_message, k)
|
||||||
|
elif type == "text":
|
||||||
|
return self.search_text(input_message, k)
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def search_text(self, code_query, k:int):
|
||||||
|
"""
|
||||||
|
search instructionStar from a code query
|
||||||
|
|
||||||
|
Keyword arguments:
|
||||||
|
code_query -- code input
|
||||||
|
k -- the number of instructions to search
|
||||||
|
|
||||||
|
Return:
|
||||||
|
(best k instructionStar, k)
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
if self._get_memory_count() == 0 or code_query == None or k == 0:
|
||||||
|
return None
|
||||||
|
|
||||||
|
else :
|
||||||
|
code_query = self.embedding_method.get_code_embedding(code_query)
|
||||||
|
if isinstance(code_query,list):
|
||||||
|
code_query=np.array(code_query,dtype=np.float32)
|
||||||
|
code_query = code_query.reshape(1,-1)
|
||||||
|
|
||||||
|
sourcecodemid_list = []# source code mid
|
||||||
|
code_embeddings = []# code embedding
|
||||||
|
|
||||||
|
for t in self.content :
|
||||||
|
for experience in t["experiences"]:
|
||||||
|
sourcecodemid_list.append(experience.get("sourceMID"))
|
||||||
|
sourcecodemid_list = list(set(sourcecodemid_list))# remove duplicates
|
||||||
|
for mid in sourcecodemid_list:
|
||||||
|
code_embeddings.append(self._get_codeembedding(mid))
|
||||||
|
code_embedding_data = np.array(code_embeddings, dtype=np.float32)
|
||||||
|
|
||||||
|
faiss.normalize_L2(code_embedding_data)
|
||||||
|
faiss.normalize_L2(code_query)
|
||||||
|
# use L2 distance(cosine distance)
|
||||||
|
index = faiss.IndexFlatL2(code_embedding_data.shape[1])
|
||||||
|
index.add(code_embedding_data)
|
||||||
|
|
||||||
|
# In Faiss, the index.search function returns the square of L2 distance by default (Squared L2 Distance)
|
||||||
|
distances, indices = index.search(code_query, k)
|
||||||
|
similarities = 1-(1/2)*distances
|
||||||
|
|
||||||
|
task_list = []
|
||||||
|
task_dir_list = []
|
||||||
|
|
||||||
|
instructionStar_list = []
|
||||||
|
sourceMIDS = []
|
||||||
|
for i in range(k):
|
||||||
|
index = indices[0][i]
|
||||||
|
similarity = similarities[0][i]
|
||||||
|
if index != -1 and similarity >= self.text_thresh:
|
||||||
|
task, task_dir = self._get_task_from_source(sourcecodemid_list[index])
|
||||||
|
sourceMIDS.append(sourcecodemid_list[index])
|
||||||
|
task_list.append(task)
|
||||||
|
task_dir_list.append(task_dir)
|
||||||
|
instructionStar_list.append(self._get_instructionstar(sourcecodemid_list[index]))
|
||||||
|
|
||||||
|
filtered_similarities = np.array2string(similarities[:,:k])
|
||||||
|
return instructionStar_list, filtered_similarities, sourceMIDS, task_list, task_dir_list
|
||||||
|
|
||||||
|
def search_code(self, text_query, k:int):
|
||||||
|
"""search best code from a text query
|
||||||
|
|
||||||
|
Keyword arguments:
|
||||||
|
text_query -- text input
|
||||||
|
k -- the number of code to search
|
||||||
|
Return: (best k code, k)
|
||||||
|
"""
|
||||||
|
|
||||||
|
if self._get_memory_count() == 0 or text_query == None or k == 0:
|
||||||
|
return None
|
||||||
|
|
||||||
|
else :
|
||||||
|
text_query = self.embedding_method.get_text_embedding(text_query)
|
||||||
|
if isinstance(text_query,list):
|
||||||
|
text_query=np.array(text_query,dtype=np.float32)
|
||||||
|
text_query = text_query.reshape(1,-1)
|
||||||
|
|
||||||
|
text_embeddings = [exp.get("embedding") for t in self.content for exp in t["experiences"]]
|
||||||
|
text_embedding_data = np.array(text_embeddings, dtype=np.float32)
|
||||||
|
|
||||||
|
faiss.normalize_L2(text_embedding_data)
|
||||||
|
faiss.normalize_L2(text_query)
|
||||||
|
# use L2 distance(cosine distance)
|
||||||
|
total_instructionStar = text_embedding_data.shape[0]
|
||||||
|
index = faiss.IndexFlatL2(text_embedding_data.shape[1])
|
||||||
|
index.add(text_embedding_data)
|
||||||
|
# In Faiss, the index.search function returns the square of L2 distance by default (Squared L2 Distance)
|
||||||
|
distances, indices = index.search(text_query, total_instructionStar)
|
||||||
|
|
||||||
|
|
||||||
|
similarities = 1-(1/2)*distances
|
||||||
|
|
||||||
|
code_node_list = [node for t in self.content for node in t["nodes"]]
|
||||||
|
targetMIDs = []
|
||||||
|
target_code = []
|
||||||
|
task_list = []
|
||||||
|
task_dir_list = []
|
||||||
|
filtered_similarities = []
|
||||||
|
experience_list = [experience for t in self.content for experience in t["experiences"]]
|
||||||
|
counter = 0
|
||||||
|
|
||||||
|
added_set = set()
|
||||||
|
for i in range(total_instructionStar):
|
||||||
|
index = indices[0][i]
|
||||||
|
similarity = similarities[0][i]
|
||||||
|
if index != -1 and counter < k:
|
||||||
|
if similarity <= self.code_thresh:
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
mid = experience_list[index].get("targetMID")
|
||||||
|
if mid not in added_set:
|
||||||
|
targetMIDs.append(mid)
|
||||||
|
added_set.add(mid)
|
||||||
|
counter += 1
|
||||||
|
filtered_similarities.append(str(similarity))
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
for targetMID in targetMIDs:
|
||||||
|
for code_node in code_node_list:
|
||||||
|
if targetMID == code_node.get("mID"):
|
||||||
|
target_code.append(code_node.get("code"))
|
||||||
|
task, task_dir = self._get_task_from_target(targetMID)
|
||||||
|
task_list.append(task)
|
||||||
|
task_dir_list.append(task_dir)
|
||||||
|
filtered_similarities = ",".join(filtered_similarities)
|
||||||
|
return target_code, filtered_similarities, targetMIDs, task_list, task_dir_list
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Memory:
|
||||||
|
def __init__(self):
|
||||||
|
self.directory: str = None
|
||||||
|
self.id_enabled : bool = False
|
||||||
|
self.user_memory_filepath: str = None
|
||||||
|
self.assistant_memory_filepath: str = None
|
||||||
|
|
||||||
|
self.update_count = 0
|
||||||
|
self.memory_keys: List[str] = ["All"]
|
||||||
|
self.memory_data = {}
|
||||||
|
|
||||||
|
|
||||||
|
def __str__(self) -> str:
|
||||||
|
if self.memory_data.get("All") == None:
|
||||||
|
return "No existed memory"
|
||||||
|
else:
|
||||||
|
return "Current memory length:{}".format(self.memory_data["All"]._get_memory_count())
|
||||||
|
|
||||||
|
def _set_embedding(self,experience):
|
||||||
|
graph = experience.graph
|
||||||
|
edge_start_time = time.time()
|
||||||
|
for edge in graph.edges:
|
||||||
|
if edge.embedding is None:
|
||||||
|
start_time =time.time()
|
||||||
|
edge.embedding = self.memory_data["All"].embedding_method.get_text_embedding(edge.instruction)
|
||||||
|
end_time = time.time()
|
||||||
|
log_and_print_online("DONE: get edge embedding\ntime cost:{}\n".format(end_time-start_time))
|
||||||
|
edge_duration = time.time() - edge_start_time
|
||||||
|
log_and_print_online("DONE: got all EDGE embeddings\nEDGE embedding time cost:{}\n".format(edge_duration))
|
||||||
|
node_start_time = time.time()
|
||||||
|
for node_id in graph.nodes:
|
||||||
|
node = graph.nodes[node_id]
|
||||||
|
if node.embedding is None:
|
||||||
|
start_time = time.time()
|
||||||
|
node.embedding = self.memory_data["All"].embedding_method.get_code_embedding(node.code)
|
||||||
|
end_time = time.time()
|
||||||
|
log_and_print_online("DONE: get node embedding\ntime cost:{}\n".format(end_time-start_time))
|
||||||
|
node_duration = ( time.time() - node_start_time)
|
||||||
|
log_and_print_online("DONE: got all NODE embeddings\nNODE embedding time cost:{}\n".format(node_duration))
|
||||||
|
exp_start_time = time.time()
|
||||||
|
for exp in experience.experiences:
|
||||||
|
if exp.embedding is None:
|
||||||
|
start_time = time.time()
|
||||||
|
exp.embedding = self.memory_data["All"].embedding_method.get_text_embedding(exp.instructionStar)
|
||||||
|
end_time = time.time()
|
||||||
|
log_and_print_online("DONE: get exprience embedding\ntime cost:{}\n".format(end_time-start_time))
|
||||||
|
exp_duration = ( time.time() - exp_start_time)
|
||||||
|
log_and_print_online("DONE: got all EXPERIENCE embeddings\nEXPERIENCE embedding time cost:{}\n".format(exp_duration))
|
||||||
|
duration = edge_duration + node_duration + exp_duration
|
||||||
|
log_and_print_online("All embedding DONE\ntime cost:{}\n".format(duration))
|
||||||
|
|
||||||
|
# create memory path and upload memory from existed memory
|
||||||
|
def upload(self):
|
||||||
|
self.directory = os.path.join(os.getcwd(),"ecl","memory")
|
||||||
|
if os.path.exists(self.directory) is False:
|
||||||
|
os.mkdir(self.directory)
|
||||||
|
for key in self.memory_keys:
|
||||||
|
if key =="All":
|
||||||
|
path = os.path.join(self.directory,"MemoryCards.json")
|
||||||
|
self.memory_data[key] = AllMemory(path)
|
||||||
|
|
||||||
|
# upload experience into memory
|
||||||
|
def upload_from_experience(self, experience):
|
||||||
|
self._set_embedding(experience)
|
||||||
|
with open(self.memory_data["All"].directory, 'w') as file:
|
||||||
|
node_data,edge_data = experience.graph.to_dict()
|
||||||
|
experience_data = experience.to_dict()
|
||||||
|
|
||||||
|
merged_dic = []
|
||||||
|
index = 0
|
||||||
|
previous_memory = []
|
||||||
|
|
||||||
|
if self.memory_data["All"].content != None and len(self.memory_data["All"].content) != 0 :
|
||||||
|
previous_memory = self.memory_data["All"].content
|
||||||
|
log_and_print_online("len(previous_memory)={}".format(len(previous_memory)))
|
||||||
|
if len(previous_memory) != 0 and isinstance(previous_memory,list):
|
||||||
|
for index,t in enumerate(previous_memory):
|
||||||
|
if isinstance(t,list):
|
||||||
|
for subindex,subt in enumerate(t):
|
||||||
|
if len(subt)!=0:
|
||||||
|
merged_dic.append(subt)
|
||||||
|
elif len(t)!=0 :
|
||||||
|
merged_dic.append(t)
|
||||||
|
index = merged_dic[-1]["total"]
|
||||||
|
elif len(previous_memory) != 0 :
|
||||||
|
merged_dic.append(previous_memory)
|
||||||
|
index = 1
|
||||||
|
|
||||||
|
# remove duplication
|
||||||
|
dirList = [t["dir"] for t in merged_dic]
|
||||||
|
|
||||||
|
combined_json_str = {}
|
||||||
|
combined_json_str["index"] = index
|
||||||
|
combined_json_str["dir"] = experience.graph.directory
|
||||||
|
combined_json_str["task"] = experience.graph.task
|
||||||
|
combined_json_str["nodes"] = node_data
|
||||||
|
combined_json_str["edges"] = edge_data
|
||||||
|
combined_json_str["experiences"] = experience_data
|
||||||
|
combined_json_str["total"] = combined_json_str["index"]+1
|
||||||
|
|
||||||
|
if self.memory_data["All"].content != None and len(self.memory_data["All"].content)!=0:
|
||||||
|
merged_dic.append(combined_json_str)
|
||||||
|
else :
|
||||||
|
merged_dic.append(combined_json_str)
|
||||||
|
|
||||||
|
json.dump(merged_dic, file)
|
||||||
|
log_and_print_online("len(merged_dic)={}".format(len(merged_dic))+"\n merged_dic dumped to {}".format(self.memory_data["All"].directory))
|
||||||
|
log_and_print_online("[Conclusion]:\ntext_prompt_tokens:{}, text_total_tokens:{}\ncode_prompt_tokens:{}, code_total_tokens:{}\nprompt_tokens:{}, total_tokens:{}".format(self.memory_data["All"].embedding_method.text_prompt_tokens,
|
||||||
|
self.memory_data["All"].embedding_method.text_total_tokens,
|
||||||
|
self.memory_data["All"].embedding_method.code_prompt_tokens,
|
||||||
|
self.memory_data["All"].embedding_method.code_total_tokens,
|
||||||
|
self.memory_data["All"].embedding_method.prompt_tokens,
|
||||||
|
self.memory_data["All"].embedding_method.total_tokens))
|
||||||
|
file.close()
|
||||||
|
|
||||||
|
# delete memory from index
|
||||||
|
def delete_memroy(self,idx:int):
|
||||||
|
with open(self.memory_data["All"].directory, 'w') as file:
|
||||||
|
merged_dic = []
|
||||||
|
index = 0
|
||||||
|
previous_memory = []
|
||||||
|
|
||||||
|
if self.memory_data["All"].content != None and len(self.memory_data["All"].content) != 0 :
|
||||||
|
previous_memory = self.memory_data["All"].content
|
||||||
|
if len(previous_memory) != 0 and isinstance(previous_memory,list):
|
||||||
|
for index,t in enumerate(previous_memory):
|
||||||
|
if isinstance(t,list):
|
||||||
|
for subindex,subt in enumerate(t):
|
||||||
|
if len(subt)!=0:
|
||||||
|
merged_dic.append(subt)
|
||||||
|
elif len(t)!=0 :
|
||||||
|
merged_dic.append(t)
|
||||||
|
index = merged_dic[-1]["total"]
|
||||||
|
elif len(previous_memory) != 0 :
|
||||||
|
merged_dic.append(previous_memory)
|
||||||
|
index = 1
|
||||||
|
|
||||||
|
if idx >= len(merged_dic):
|
||||||
|
json.dump(merged_dic,file)
|
||||||
|
else :
|
||||||
|
merged_dic.pop(idx)
|
||||||
|
json.dump(merged_dic,file)
|
||||||
|
file.close()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
1
ecl/memory/MemoryCards.json
Normal file
1
ecl/memory/MemoryCards.json
Normal file
@ -0,0 +1 @@
|
|||||||
|
{}
|
50
ecl/post_process/memory_filter.py
Normal file
50
ecl/post_process/memory_filter.py
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
import json
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
filter_threshold = 0.9
|
||||||
|
|
||||||
|
def filter_valuegain(directory, filtered_directory):
|
||||||
|
"""filter memory by experience's valueGain, delete experience whose valueGain is smaller than filter_threshold
|
||||||
|
|
||||||
|
Keyword arguments:
|
||||||
|
directory -- the input directory of MemoryCards, like "./ecl/memory/MemoryCards.json"
|
||||||
|
filtered_directory -- the output directory of filtered MemoryCards, like "./ecl/memory/MemoryCards.json"
|
||||||
|
"""
|
||||||
|
with open(directory) as file:
|
||||||
|
content = json.load(file)
|
||||||
|
new_content = []
|
||||||
|
for memorypiece in content:
|
||||||
|
experiences = memorypiece.get("experiences")
|
||||||
|
filtered_experienceList = []
|
||||||
|
|
||||||
|
if experiences != None:
|
||||||
|
print("origin:",len(experiences))
|
||||||
|
for experience in experiences:
|
||||||
|
valueGain = experience.get("valueGain")
|
||||||
|
print(valueGain)
|
||||||
|
if valueGain >= filter_threshold:
|
||||||
|
filtered_experienceList.append(experience)
|
||||||
|
print(len(experiences))
|
||||||
|
memorypiece["experiences"] = filtered_experienceList
|
||||||
|
new_content.append(memorypiece)
|
||||||
|
else:
|
||||||
|
new_content.append(memorypiece)
|
||||||
|
file.close()
|
||||||
|
with open(filtered_directory, 'w') as file:
|
||||||
|
json.dump(content, file)
|
||||||
|
file.close()
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(description="Process some directories.")
|
||||||
|
parser.add_argument("threshold", type=float, help="The filtered threshold for experiences")
|
||||||
|
parser.add_argument("directory", type = str, help="The directory to process")
|
||||||
|
parser.add_argument("filtered_directory", type= str, help="The directory for output")
|
||||||
|
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
filter_threshold = args.threshold
|
||||||
|
filter_valuegain(args.directory, args.filtered_directory)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
176
ecl/utils.py
Normal file
176
ecl/utils.py
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
import subprocess
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
|
import time
|
||||||
|
import logging
|
||||||
|
from easydict import EasyDict
|
||||||
|
import openai
|
||||||
|
from openai import OpenAI
|
||||||
|
import numpy as np
|
||||||
|
import os
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
import tiktoken
|
||||||
|
from typing import Any, Dict
|
||||||
|
from tenacity import (
|
||||||
|
retry,
|
||||||
|
stop_after_attempt,
|
||||||
|
wait_exponential
|
||||||
|
)
|
||||||
|
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
|
||||||
|
if 'BASE_URL' in os.environ:
|
||||||
|
BASE_URL = os.environ['BASE_URL']
|
||||||
|
else:
|
||||||
|
BASE_URL = None
|
||||||
|
|
||||||
|
def getFilesFromType(sourceDir, filetype):
|
||||||
|
files = []
|
||||||
|
for root, directories, filenames in os.walk(sourceDir):
|
||||||
|
for filename in filenames:
|
||||||
|
if filename.endswith(filetype):
|
||||||
|
files.append(os.path.join(root, filename))
|
||||||
|
return files
|
||||||
|
|
||||||
|
def cmd(command: str):
|
||||||
|
print(">> {}".format(command))
|
||||||
|
text = subprocess.run(command, shell=True, text=True, stdout=subprocess.PIPE).stdout
|
||||||
|
return text
|
||||||
|
|
||||||
|
def get_easyDict_from_filepath(path: str):
|
||||||
|
# print(path)
|
||||||
|
if path.endswith('.json'):
|
||||||
|
with open(path, 'r', encoding="utf-8") as file:
|
||||||
|
config_map = json.load(file, strict=False)
|
||||||
|
config_easydict = EasyDict(config_map)
|
||||||
|
return config_easydict
|
||||||
|
if path.endswith('.yaml'):
|
||||||
|
file_data = open(path, 'r', encoding="utf-8").read()
|
||||||
|
config_map = yaml.load(file_data, Loader=yaml.FullLoader)
|
||||||
|
config_easydict = EasyDict(config_map)
|
||||||
|
return config_easydict
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def calc_max_token(messages, model):
|
||||||
|
string = "\n".join([message["content"] for message in messages])
|
||||||
|
encoding = tiktoken.encoding_for_model(model)
|
||||||
|
num_prompt_tokens = len(encoding.encode(string))
|
||||||
|
gap_between_send_receive = 50
|
||||||
|
num_prompt_tokens += gap_between_send_receive
|
||||||
|
|
||||||
|
num_max_token_map = {
|
||||||
|
"gpt-3.5-turbo": 4096,
|
||||||
|
"gpt-3.5-turbo-16k": 16384,
|
||||||
|
"gpt-3.5-turbo-0613": 4096,
|
||||||
|
"gpt-3.5-turbo-16k-0613": 16384,
|
||||||
|
"gpt-4": 8192,
|
||||||
|
"gpt-4-0613": 8192,
|
||||||
|
"gpt-4-32k": 32768,
|
||||||
|
}
|
||||||
|
num_max_token = num_max_token_map[model]
|
||||||
|
num_max_completion_tokens = num_max_token - num_prompt_tokens
|
||||||
|
return num_max_completion_tokens
|
||||||
|
|
||||||
|
|
||||||
|
class ModelBackend(ABC):
|
||||||
|
r"""Base class for different model backends.
|
||||||
|
May be OpenAI API, a local LLM, a stub for unit tests, etc."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def run(self, *args, **kwargs) -> Dict[str, Any]:
|
||||||
|
r"""Runs the query to the backend model.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
RuntimeError: if the return value from OpenAI API
|
||||||
|
is not a dict that is expected.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict[str, Any]: All backends must return a dict in OpenAI format.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
class OpenAIModel(ModelBackend):
|
||||||
|
r"""OpenAI API in a unified ModelBackend interface."""
|
||||||
|
|
||||||
|
def __init__(self, model_type, model_config_dict: Dict=None) -> None:
|
||||||
|
super().__init__()
|
||||||
|
self.model_type = model_type
|
||||||
|
self.model_config_dict = model_config_dict
|
||||||
|
if self.model_config_dict == None:
|
||||||
|
self.model_config_dict = {"temperature": 0.2,
|
||||||
|
"top_p": 1.0,
|
||||||
|
"n": 1,
|
||||||
|
"stream": False,
|
||||||
|
"frequency_penalty": 0.0,
|
||||||
|
"presence_penalty": 0.0,
|
||||||
|
"logit_bias": {},
|
||||||
|
}
|
||||||
|
self.prompt_tokens = 0
|
||||||
|
self.completion_tokens = 0
|
||||||
|
self.total_tokens = 0
|
||||||
|
|
||||||
|
@retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
|
||||||
|
def run(self, messages) :
|
||||||
|
if BASE_URL:
|
||||||
|
client = openai.OpenAI(
|
||||||
|
api_key=OPENAI_API_KEY,
|
||||||
|
base_url=BASE_URL,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
client = openai.OpenAI(
|
||||||
|
api_key=OPENAI_API_KEY
|
||||||
|
)
|
||||||
|
current_retry = 0
|
||||||
|
max_retry = 5
|
||||||
|
|
||||||
|
string = "\n".join([message["content"] for message in messages])
|
||||||
|
encoding = tiktoken.encoding_for_model(self.model_type)
|
||||||
|
num_prompt_tokens = len(encoding.encode(string))
|
||||||
|
gap_between_send_receive = 15 * len(messages)
|
||||||
|
num_prompt_tokens += gap_between_send_receive
|
||||||
|
|
||||||
|
num_max_token_map = {
|
||||||
|
"gpt-3.5-turbo": 4096,
|
||||||
|
"gpt-3.5-turbo-16k": 16384,
|
||||||
|
"gpt-3.5-turbo-0613": 4096,
|
||||||
|
"gpt-3.5-turbo-16k-0613": 16384,
|
||||||
|
"gpt-4": 8192,
|
||||||
|
"gpt-4-0613": 8192,
|
||||||
|
"gpt-4-32k": 32768,
|
||||||
|
}
|
||||||
|
response = client.chat.completions.create(messages = messages,
|
||||||
|
model = "gpt-3.5-turbo-16k",
|
||||||
|
temperature = 0.2,
|
||||||
|
top_p = 1.0,
|
||||||
|
n = 1,
|
||||||
|
stream = False,
|
||||||
|
frequency_penalty = 0.0,
|
||||||
|
presence_penalty = 0.0,
|
||||||
|
logit_bias = {},
|
||||||
|
).model_dump()
|
||||||
|
response_text = response['choices'][0]['message']['content']
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
num_max_token = num_max_token_map[self.model_type]
|
||||||
|
num_max_completion_tokens = num_max_token - num_prompt_tokens
|
||||||
|
self.model_config_dict['max_tokens'] = num_max_completion_tokens
|
||||||
|
log_and_print_online(
|
||||||
|
"InstructionStar generation:\n**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\n".format(
|
||||||
|
response["usage"]["prompt_tokens"], response["usage"]["completion_tokens"],
|
||||||
|
response["usage"]["total_tokens"]))
|
||||||
|
self.prompt_tokens += response["usage"]["prompt_tokens"]
|
||||||
|
self.completion_tokens += response["usage"]["completion_tokens"]
|
||||||
|
self.total_tokens += response["usage"]["total_tokens"]
|
||||||
|
|
||||||
|
if not isinstance(response, Dict):
|
||||||
|
raise RuntimeError("Unexpected return from OpenAI API")
|
||||||
|
return response
|
||||||
|
|
||||||
|
|
||||||
|
def now():
|
||||||
|
return time.strftime("%Y%m%d%H%M%S", time.localtime())
|
||||||
|
|
||||||
|
def log_and_print_online(content=None):
|
||||||
|
if content is not None:
|
||||||
|
print(content)
|
||||||
|
logging.info(content)
|
66
wiki.md
66
wiki.md
@ -131,6 +131,62 @@ then start building a software by ``python3 run.py`` and go to [Visualizer Websi
|
|||||||
### Official Docker Image
|
### Official Docker Image
|
||||||
- in preparation
|
- in preparation
|
||||||
|
|
||||||
|
## Experiential Co-Learning Guide
|
||||||
|
### Co-Tracking
|
||||||
|
|
||||||
|
- **Start Co-Tracking**: Use the following command to initiate the building of software, replacing `[description_of_your_idea]` with task descirption and `[project_name]` with project name. This is the same as starting ChatDev.
|
||||||
|
```bash
|
||||||
|
python3 run.py --task "[description_of_your_idea]" --name "[project_name]"
|
||||||
|
```
|
||||||
|
The software generated in co-tracking phase is ready for the agents' experience pool in the following steps.
|
||||||
|
### Co-Memorizing
|
||||||
|
- **Initiating Co-Memorizing**: To begin the memorization process for the generated software in a specified directory, run the `ecl.py` script using the following command:
|
||||||
|
```bash
|
||||||
|
python3 ecl/ecl.py "<path>" "[options]"
|
||||||
|
```
|
||||||
|
`<path>`: The path to the file or directory to process.
|
||||||
|
`[options]`: This can be set as `-d`. This flag indicates that the script should process all files in the given directory. If this flag is not set, the script will process the file specified in path.
|
||||||
|
After this process, the experiences have been extracted from the production of software and added to the agents' experience pool in `ecl/memory/MemoryCards.json`.
|
||||||
|
\
|
||||||
|
**For example:**
|
||||||
|
It you want to memorize only one software, you can use:
|
||||||
|
```bash
|
||||||
|
python3 ecl/ecl.py "<Software Path to file>"
|
||||||
|
```
|
||||||
|
And the software path should be like `"WareHouse/project_name_DefaultOrganization_timestamp"`.
|
||||||
|
\
|
||||||
|
If you want to memorize all files in a directory, you can use:
|
||||||
|
```bash
|
||||||
|
python3 ecl/ecl.py "<Software Path to Directory>" -d
|
||||||
|
```
|
||||||
|
the software path should be like `"WareHouse"`.
|
||||||
|
- **Memory Filter**: To get a higher quality experience pool, it is suggested to use `ecl/post_process/memory_filter.py` to filter the `MemoryCards.json`. When running the `memory_filter.py` script, you need to specify three arguments: the filter threshold, the input directory, and the output directory.
|
||||||
|
```bash
|
||||||
|
python3 ecl/post_process/memory_filter.py "<threshold>" "<directory>" "<filtered_directory>"
|
||||||
|
```
|
||||||
|
- `<threshold>`: Require a value within the range of 0 to 1 (exclusive). It is used as the threshold to filter experiences by their 'valuegain'. Only experiences with a 'valuegain' that is equal to or greater than this threshold will be considered.
|
||||||
|
- `<directory>`: The file path to the memory directory that you intend to process.
|
||||||
|
- `<filtered_directory>`: The file path to a directory where you want to store the processed data.
|
||||||
|
|
||||||
|
\
|
||||||
|
**For example:**
|
||||||
|
```bash
|
||||||
|
python3 ecl/post_process/memory_filter.py 0.9 "ecl/memory/MemoryCards.json" "ecl/memory/MemoryCards_filtered.json"
|
||||||
|
```
|
||||||
|
> **Notice:** By default, the `MemoryCards.json` is set to be empty. You can customize your own experience pool for agents following steps above. And we have also provided our `MemoryCards.json` used in our experiment in [MemoryCards.json](https://drive.google.com/drive/folders/1czsR4swQyqpoN8zwN0-rSFcTVl68zTDY?usp=sharing). You can download the json file through the link and put it under `ecl/memory` folder. This allows you to directly proceed to the Co-Reasoning phase without needing to redo the Co-Tracking and Co-Memorizing steps.
|
||||||
|
### Co-Reasoning
|
||||||
|
- **Memory Usage Configuration**:
|
||||||
|
In the `CompanyConfig/Default/ChatChainConfig.json` file, the `with_memory` option should be set **True**. \
|
||||||
|
In the `ecl/config.yaml` file, you can adjust the settings for **top k** and **similarity threshold** for both code and text retrieval.
|
||||||
|
By default, `with_memory` is set as False and the system is configured to retrieve the top 1 result with a similarity threshold of zero for both code and text.
|
||||||
|
- **Start Co-Reasoning**: Once you have completed memory usage configuration, similar to the Co-Tracking phase, you can use the command below to start the software building process. Replace `[description_of_your_idea]` with the task description from the test set and `[project_name]` with the project name from the test set:
|
||||||
|
```
|
||||||
|
python3 run.py --task "[description_of_your_idea]" --name "[project_name]"
|
||||||
|
```
|
||||||
|
In this process of software development, the agents will engage their experience pool(`MemoryCards.json`) into software development!
|
||||||
|
|
||||||
|
Detailed descriptions and experiment results about this **Experiential Co-Learning** Module lies in our preprint paper at https://arxiv.org/abs/2312.17025.
|
||||||
|
|
||||||
## Customization
|
## Customization
|
||||||
|
|
||||||
- You can customize your company in three kinds of granularity:
|
- You can customize your company in three kinds of granularity:
|
||||||
@ -278,6 +334,7 @@ then start building a software by ``python3 run.py`` and go to [Visualizer Websi
|
|||||||
- *self_improve*: flag for self-improvement on user input prompt. It is a special chat that LLM plays as a prompt engineer to improve the user input prompt. **⚠️ Attention** Model generated prompts contain uncertainty and there may
|
- *self_improve*: flag for self-improvement on user input prompt. It is a special chat that LLM plays as a prompt engineer to improve the user input prompt. **⚠️ Attention** Model generated prompts contain uncertainty and there may
|
||||||
be a deviation from the requirement meaning contained in the original prompt.
|
be a deviation from the requirement meaning contained in the original prompt.
|
||||||
- *background_prompt*: background prompt that will be added to every inquiry to LLM
|
- *background_prompt*: background prompt that will be added to every inquiry to LLM
|
||||||
|
- *with_memory*: Whether to utilize the experience pool for agents. The experience pool actually lies in in `ecl/memory/MemoryCards.json`.
|
||||||
- params in SimplePhase:
|
- params in SimplePhase:
|
||||||
- *max_turn_step*: Max number of chatting turn. You can increase max_turn_step for better performance but it will
|
- *max_turn_step*: Max number of chatting turn. You can increase max_turn_step for better performance but it will
|
||||||
take a longer time to finish the phase.
|
take a longer time to finish the phase.
|
||||||
@ -290,10 +347,11 @@ then start building a software by ``python3 run.py`` and go to [Visualizer Websi
|
|||||||
|
|
||||||
```commandline
|
```commandline
|
||||||
├── CompanyConfig # Configuration Files for ChatDev, including ChatChain, Phase and Role config json.
|
├── CompanyConfig # Configuration Files for ChatDev, including ChatChain, Phase and Role config json.
|
||||||
├── WareHouse # Folder for generated software
|
├── WareHouse # Folder for Generated Software
|
||||||
├── camel # Camel RolePlay component
|
├── camel # Camel RolePlay Component
|
||||||
├── chatdev # ChatDev core code
|
├── chatdev # ChatDev Core Code
|
||||||
├── misc # assets of example and demo
|
├── ecl # Experiential Co-Learning Module
|
||||||
|
├── misc # Assets of Example and Demo
|
||||||
├── visualizer # Visualizer Folder
|
├── visualizer # Visualizer Folder
|
||||||
├── run.py # Entry of ChatDev
|
├── run.py # Entry of ChatDev
|
||||||
├── requirements.txt
|
├── requirements.txt
|
||||||
|
Loading…
Reference in New Issue
Block a user