Haolong Li
committed on
Commit
·
e8d999b
1
Parent(s):
96b5976
Upload 3 files
Browse files- ControllerAtomicFlow.py +82 -0
- ControllerAtomicFlow.yaml +91 -0
- __init__.py +10 -0
ControllerAtomicFlow.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from copy import deepcopy
|
| 3 |
+
from typing import Any, Dict, List
|
| 4 |
+
from flow_modules.aiflows.OpenAIChatFlowModule import OpenAIChatAtomicFlow
|
| 5 |
+
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class Command:
    """Descriptor of a single action the controller may ask the executor to run."""

    # Command identifier, exactly as the model must spell it in the "command" field.
    name: str
    # Human-readable explanation of the command, shown to the model in the manual.
    description: str
    # Names of the arguments the command expects in "command_args".
    input_args: List[str]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ControllerAtomicFlow(OpenAIChatAtomicFlow):
    """Chat-based controller flow: asks the LLM which command to execute next.

    Extends ``OpenAIChatAtomicFlow`` by baking a manual of the available
    :class:`Command`\\ s into the system prompt (``{{commands}}`` placeholder)
    and by parsing the model's JSON reply into the flow's output dict.
    """

    def __init__(self, commands: List[Command], **kwargs):
        """Initialize the flow and pre-fill the command manual in the system prompt.

        :param commands: commands the model may choose from; rendered once at
            construction time into the system message template.
        """
        super().__init__(**kwargs)
        # Partially apply the template so {{commands}} is fixed for the flow's lifetime.
        self.system_message_prompt_template = self.system_message_prompt_template.partial(
            commands=self._build_commands_manual(commands)
        )

    @staticmethod
    def _build_commands_manual(commands: List[Command]) -> str:
        """Render a numbered, human-readable manual of the available commands.

        Each line shows a command's name, its description, and a JSON schema of
        its input arguments with ``YOUR_<ARG>`` placeholder values.

        :param commands: commands to document.
        :return: the manual, one line per command (each line newline-terminated).
        """
        lines = []
        for i, command in enumerate(commands, start=1):
            command_input_json_schema = json.dumps(
                {input_arg: f"YOUR_{input_arg.upper()}" for input_arg in command.input_args}
            )
            lines.append(
                f"{i}. {command.name}: {command.description} "
                f"Input arguments (given in the JSON schema): {command_input_json_schema}\n"
            )
        return "".join(lines)

    @classmethod
    def instantiate_from_config(cls, config):
        """Build a ControllerAtomicFlow from a config dict (see the module YAML).

        Expects ``config["commands"]`` to map each command name to a dict with
        ``description`` and ``input_args`` entries.

        :param config: flow configuration; deep-copied, the caller's dict is untouched.
        :return: a fully constructed flow instance.
        """
        flow_config = deepcopy(config)

        kwargs = {"flow_config": flow_config}

        # ~~~ Set up prompts ~~~
        kwargs.update(cls._set_up_prompts(flow_config))

        # ~~~ Set up commands ~~~
        commands = [
            Command(name, command_conf["description"], command_conf["input_args"])
            for name, command_conf in flow_config["commands"].items()
        ]
        kwargs.update({"commands": commands})

        # ~~~ Instantiate flow ~~~
        return cls(**kwargs)

    def run(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Query the model and return its reply parsed as a dict.

        Appends a reminder of the expected JSON response format to ``goal`` /
        ``human_feedback`` (when present), runs the underlying chat flow, and
        parses the reply with ``json.loads``. On a parse failure it retries
        once, telling the model its previous response was not valid JSON.

        :param input_data: flow input; the caller's dict is NOT mutated.
        :return: the model's reply as a dict (thought, reasoning, plan,
            criticism, speak, command, command_args).
        :raises json.JSONDecodeError: if the retried response is still not valid JSON.
        """
        # NOTE(review): in this (non-raw) string the \n inside the "plan" example
        # are real newlines, while the YAML system prompt shows them as literal
        # backslash-n — confirm which rendering the model is meant to see.
        hint_for_model = """
        Make sure your response is in the following format:
        Response Format:
        {
            "thought": "thought",
            "reasoning": "reasoning",
            "plan": "- short bulleted\n- list that conveys\n- long-term plan",
            "criticism": "constructive self-criticism",
            "speak": "thoughts summary to say to user",
            "command": "the python function you would like to call",
            "command_args": {
                "arg name": "value"
            }
        }
        """
        # Shallow-copy so appending the hint does not mutate the caller's dict
        # (repeated run() calls would otherwise stack hints onto the same strings).
        input_data = dict(input_data)
        if 'goal' in input_data:
            input_data['goal'] += hint_for_model
        if 'human_feedback' in input_data:
            input_data['human_feedback'] += hint_for_model
        api_output = super().run(input_data)["api_output"].strip()

        try:
            return json.loads(api_output)
        except json.decoder.JSONDecodeError:
            # Single retry: clear the observation and ask the model to re-emit
            # JSON that json.loads can parse. ("response" fixed from "respond".)
            new_input_data = input_data.copy()
            new_input_data['observation'] = ""
            new_input_data['human_feedback'] = (
                "The previous response cannot be parsed with json.loads, it could be the "
                "backslashes used for escaping single quotes in the string arguments of the "
                "Python code are not properly escaped themselves within the JSON context."
            )
            new_api_output = super().run(new_input_data)["api_output"].strip()
            return json.loads(new_api_output)
|
ControllerAtomicFlow.yaml
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: "ControllerFlow"
|
| 2 |
+
description: "Proposes the next action to take towards achieving the goal, and prepares the input for the executor."
|
| 3 |
+
enable_cache: True
|
| 4 |
+
|
| 5 |
+
#######################################################
|
| 6 |
+
# Input keys
|
| 7 |
+
#######################################################
|
| 8 |
+
|
| 9 |
+
input_interface_non_initialized: # initial input keys
|
| 10 |
+
- "goal"
|
| 11 |
+
|
| 12 |
+
input_interface_initialized: # input_keys
|
| 13 |
+
- "observation"
|
| 14 |
+
|
| 15 |
+
#######################################################
|
| 16 |
+
# Output keys
|
| 17 |
+
#######################################################
|
| 18 |
+
|
| 19 |
+
output_interface:
|
| 20 |
+
- 'thought'
|
| 21 |
+
- 'reasoning'
|
| 22 |
+
- 'plan'
|
| 23 |
+
- 'criticism'
|
| 24 |
+
- 'speak'
|
| 25 |
+
- 'command'
|
| 26 |
+
- 'command_args'
|
| 27 |
+
|
| 28 |
+
#######################################################
|
| 29 |
+
# ToDo: Some parts of the prompt don't make sense -- update them
|
| 30 |
+
system_message_prompt_template:
|
| 31 |
+
_target_: langchain.PromptTemplate
|
| 32 |
+
template: |2-
|
| 33 |
+
You are a smart AI assistant.
|
| 34 |
+
|
| 35 |
+
Your decisions must always be made independently without seeking user assistance.
|
| 36 |
+
Play to your strengths as an LLM and pursue simple strategies with no legal complications.
|
| 37 |
+
If you have completed all your tasks, make sure to use the "finish" command.
|
| 38 |
+
|
| 39 |
+
Constraints:
|
| 40 |
+
1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files
|
| 41 |
+
2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember
|
| 42 |
+
3. No user assistance
|
| 43 |
+
4. Exclusively use the commands listed in double quotes e.g. "command name"
|
| 44 |
+
|
| 45 |
+
Available commands:
|
| 46 |
+
{{commands}}
|
| 47 |
+
|
| 48 |
+
Resources:
|
| 49 |
+
1. Internet access for searches and information gathering.
|
| 50 |
+
2. Long Term memory management.
|
| 51 |
+
3. GPT-3.5 powered Agents for delegation of simple tasks.
|
| 52 |
+
|
| 53 |
+
Performance Evaluation:
|
| 54 |
+
1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
|
| 55 |
+
2. Constructively self-criticize your big-picture behavior constantly.
|
| 56 |
+
3. Reflect on past decisions and strategies to refine your approach.
|
| 57 |
+
4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
|
| 58 |
+
You should only respond in JSON format as described below.
|
| 59 |
+
Response Format:
|
| 60 |
+
{
|
| 61 |
+
"thought": "thought",
|
| 62 |
+
"reasoning": "reasoning",
|
| 63 |
+
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
|
| 64 |
+
"criticism": "constructive self-criticism",
|
| 65 |
+
"speak": "thoughts summary to say to user",
|
| 66 |
+
"command": "command name",
|
| 67 |
+
"command_args": {
|
| 68 |
+
"arg name": "value"
|
| 69 |
+
}
|
| 70 |
+
}
|
| 71 |
+
Ensure your responses can be parsed by Python json.loads
|
| 72 |
+
input_variables: ["commands"]
|
| 73 |
+
template_format: jinja2
|
| 74 |
+
|
| 75 |
+
human_message_prompt_template:
|
| 76 |
+
_target_: langchain.PromptTemplate
|
| 77 |
+
template: |2-
|
| 78 |
+
Here is the response to your last action:
|
| 79 |
+
{{observation}}
|
| 80 |
+
input_variables:
|
| 81 |
+
- "observation"
|
| 82 |
+
template_format: jinja2
|
| 83 |
+
|
| 84 |
+
init_human_message_prompt_template:
|
| 85 |
+
_target_: langchain.PromptTemplate
|
| 86 |
+
template: |2-
|
| 87 |
+
Here is the goal you need to achieve:
|
| 88 |
+
{{goal}}
|
| 89 |
+
input_variables:
|
| 90 |
+
- "goal"
|
| 91 |
+
template_format: jinja2
|
__init__.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ~~~ Specify the dependencies ~~~
# The flow module this package builds on, pinned to an exact revision so that
# dependency syncing is reproducible.
dependencies = [
    {"url": "aiflows/OpenAIChatFlowModule", "revision": "6a1e351a915f00193f18f3da3b61c497df1d31a3"},
]
from flows import flow_verse

# Fetch/refresh the pinned dependency BEFORE importing anything that needs it;
# the relative import below fails if the module has not been synced.
flow_verse.sync_dependencies(dependencies)
# ~~~

from .ControllerAtomicFlow import ControllerAtomicFlow
|