demongodYY committed
Commit 991dafd

init gongyi
Files changed:
- .gitattributes +35 -0
- .gitignore +151 -0
- agent.py +154 -0
- app.py +81 -0
- chains/__init__.py +0 -0
- chains/step1.py +56 -0
- chains/step2.py +46 -0
- chains/step3.py +35 -0
- chains/step4.py +39 -0
- chains/utils.py +25 -0
- model.py +26 -0
- readme.md +12 -0
- requirements.txt +5 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,151 @@
+# Azure az webapp deployment details
+.azure
+*_env
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# NPM
+npm-debug.log*
+node_modules
+static/
+
+.DS_Store
+
+flagged/
agent.py
ADDED
@@ -0,0 +1,154 @@
+from langchain.agents import Tool
+from langchain.prompts import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    AIMessagePromptTemplate,
+    MessagesPlaceholder,
+    SystemMessagePromptTemplate,
+)
+from langchain.tools.render import render_text_description
+
+from langchain.agents.output_parsers import ReActSingleInputOutputParser
+from langchain.agents.format_scratchpad import format_log_to_messages
+from langchain.agents import AgentExecutor
+from langchain.memory import (
+    ConversationSummaryBufferMemory,
+    ConversationBufferWindowMemory,
+)
+
+from model import llm4, llm
+from chains.step1 import step1Tool
+from chains.step2 import step2Tool
+from chains.step3 import step3Tool
+from chains.step4 import step4Tool
+
+PURPOSE = """\
+From a scalable perspective, clearly define the social issue to be addressed. \
+The principal and the team surely have a social issue that they ponder how to solve from morning till night every day. \
+Clearly defining this issue will help the team to:
+
+- Concentrate time and resource investments to solve the problem in a scalable manner.
+- Understand how to find a suitable position to tackle the problem within a larger ecosystem.
+- Identify the beneficiary group you want to focus on.
+- Establish scalable strategies and models.\
+"""
+
+SUGGESTION = """\
+Maintaining a "continual questioning" attitude at all times, being extremely curious about the causes of the issues, \
+and keeping an open attitude towards products and scalable approaches that address social problems on a large scale \
+will help you and your team continuously deepen your understanding of the issues and find more accurate solutions.\
+"""
+
+STEPS = """\
+1. Problem Storming: Participants follow their intuition and experience, \
+recording all the questions lingering in their minds in any way they prefer.
+
+2. Problem Deconstruction: Refine and structure the proposed questions. \
+Attempt to describe the issue in detail from several aspects such as the surface problem, underlying causes, \
+the populations affected by the problem, and the impact that has already been caused.
+
+3. Problem Sharing: Share within the group; besides sharing the problem itself, \
+it is necessary to explain why such a question is raised and how it is considered logically. \
+After sharing is completed, merge similar questions within the group.
+
+4. Problem Reconstruction: Based on feedback, write down the final definition of the problem.
+"""
+
+agentTemplate = """\
+You are a Coach who uses a workshop toolkit to facilitate other organizations in defining their societal problems.
+
+Coach is designed to help me use the workshop toolkit for scalable societal problem definition, \
+using the Socratic method to ask questions that help me learn the toolkit concepts step by step.
+
+Coach is constantly learning and improving, and its capabilities are constantly evolving. \
+It is able to process and understand the current problem and select the right step's response for a given situation.
+
+Here is some context about the toolkit:
+```
+Toolkit purpose: {toolkit_purpose}
+Toolkit suggestion: {toolkit_suggestion}
+Toolkit steps: {toolkit_steps}
+```
+
+TOOLS:
+------
+
+Coach has access to the following tools:
+
+{tools}
+
+To use a tool, you MUST use the following format; do not use a tool repeatedly with the same input:
+
+```
+Thought: Do I need to use a tool? Yes
+Action: the action to take, should be one of [{tool_names}]
+Action Input: the input to the action
+Observation: the result of the action
+```
+
+When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the following format:
+
+```
+Thought: Do I need to use a tool? No
+Final Answer: [your response here; you MUST respond in Chinese]
+```
+
+Response Example:
+```
+User: hi!
+AI:
+Thought: Do I need to use a tool? No
+Final Answer: 你好, 我该如何帮助你?
+```
+
+Begin!\
+"""
+
+
+tools = [step1Tool, step2Tool, step3Tool, step4Tool]
+
+agentPrompt = ChatPromptTemplate.from_messages(
+    [
+        SystemMessagePromptTemplate.from_template(
+            template=agentTemplate,
+            partial_variables={
+                "toolkit_purpose": PURPOSE,
+                "toolkit_suggestion": SUGGESTION,
+                "toolkit_steps": STEPS,
+                "tools": render_text_description(tools),
+                "tool_names": ", ".join([t.name for t in tools]),
+            },
+        ),
+        MessagesPlaceholder(variable_name="chat_history"),
+        HumanMessagePromptTemplate.from_template("{input}"),
+        MessagesPlaceholder(variable_name="agent_scratchpad"),
+    ]
+)
+llm_with_stop = llm4.bind(stop=["\nObservation"])
+
+
+agent = (
+    {
+        "input": lambda x: x["input"],
+        "agent_scratchpad": lambda x: format_log_to_messages(x["intermediate_steps"]),
+        "chat_history": lambda x: x["chat_history"],
+    }
+    | agentPrompt
+    | llm_with_stop
+    | ReActSingleInputOutputParser()
+)
+
+memory = ConversationSummaryBufferMemory(
+    memory_key="chat_history",
+    llm=llm,
+    max_token_limit=500,
+    return_messages=True,
+)
+
+# memory = ConversationBufferWindowMemory(
+#     memory_key="chat_history", return_messages=True, k=6
+# )
+
+agent_executor = AgentExecutor(
+    agent=agent, tools=tools, verbose=True, memory=memory, handle_parsing_errors=True
+)
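
Note (not part of this commit): a minimal sketch of how the executor above can be driven directly, assuming the Azure OpenAI credentials that model.py loads are present; the input question is hypothetical.

```python
# Hypothetical smoke test for the agent; assumes a valid .env as read by model.py.
from agent import agent_executor

result = agent_executor.invoke(
    {"input": "Help us start defining the problem of rural students lacking reading resources."}
)
# AgentExecutor returns a dict; the final text is under the "output" key.
print(result["output"])
```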
app.py
ADDED
@@ -0,0 +1,81 @@
+from queue import Empty, Queue
+from threading import Thread
+from langchain.callbacks.streaming_stdout_final_only import (
+    FinalStreamingStdOutCallbackHandler,
+)
+from agent import agent_executor
+
+from typing import Any, Generator
+
+import gradio as gr
+
+
+class QueueCallback(FinalStreamingStdOutCallbackHandler):
+    def __init__(self, q):
+        super().__init__()
+        self.q = q
+
+    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+        # Remember the last n tokens, where n = len(answer_prefix_tokens)
+        self.append_to_last_tokens(token)
+
+        # Check if the last n tokens match the answer_prefix_tokens list ...
+        if self.check_if_answer_reached():
+            self.answer_reached = True
+            if self.stream_prefix:
+                for t in self.last_tokens:
+                    self.q.put(t)
+            return
+
+        # ... if yes, then stream tokens from now on
+        if self.answer_reached:
+            self.q.put(token)
+
+    def on_llm_end(self, *args, **kwargs: Any) -> None:
+        return self.q.empty()
+
+
+def stream(input_text) -> Generator:
+    # Create a Queue
+    q = Queue()
+    job_done = object()
+
+    # Create a function to call - this will run in a thread
+    def task():
+        agent_executor.invoke(
+            {"input": input_text}, config={"callbacks": [QueueCallback(q)]}
+        )
+        q.put(job_done)
+
+    # Create a thread and start the function
+    t = Thread(target=task)
+    t.start()
+
+    # Get each new token from the queue and yield it for our generator
+    while True:
+        try:
+            next_token = q.get(True, timeout=1)
+            if next_token is job_done:
+                break
+            yield next_token
+        except Empty:
+            continue
+
+
+def predict(message, history):
+    if len(message) == 0:
+        return
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+    partial_message = ""
+
+    for token in stream(message):
+        if len(token) != 0:
+            partial_message = partial_message + token
+            yield partial_message
+
+
+gr.ChatInterface(predict).queue().launch()
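
Note (not part of this commit): the streaming above is the standard queue-plus-sentinel producer/consumer pattern. A self-contained sketch with made-up token data isolates the control flow:

```python
# A worker thread pushes tokens into a Queue; a generator drains it until a
# unique sentinel object signals completion, mirroring stream() above.
from queue import Empty, Queue
from threading import Thread


def stream_tokens(tokens):
    q = Queue()
    done = object()  # unique sentinel, never equal to a real token

    def producer():
        for t in tokens:
            q.put(t)
        q.put(done)

    Thread(target=producer).start()
    while True:
        try:
            item = q.get(True, timeout=1)
        except Empty:
            continue  # producer still busy; keep polling
        if item is done:
            break
        yield item


print("".join(stream_tokens(["Hel", "lo", "!"])))  # prints: Hello!
```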
chains/__init__.py
ADDED
File without changes
chains/step1.py
ADDED
@@ -0,0 +1,54 @@
+from langchain.schema.output_parser import StrOutputParser
+from chains.utils import getToolPromptTemplate
+from model import llm4
+from langchain.tools import tool
+
+step1ToolName = "Problem Storming"
+step1ToolContext = """\
+Follow your intuition and experience, and on the [Problem Storming Board], jot down all the social issues \
+this organization needs to solve, or has yet to solve, that are swirling in your mind. \
+What is a common issue you have observed? Record it in any way you like, for example:
+- One or several keywords
+- A small story with a clear beginning, development, and conclusion
+- A clear problem statement
+- A doodle drawn casually
+- A photograph
+
+The source of the problem could be:
+- A real story from a beneficiary
+- An impressive personal experience
+- A news report that caught your attention
+- A sudden flash of insight
+
+During the problem conceptualization process, you can ask yourself the following questions to better diverge on the problem:
+- Is there a significant gap between the goal you want to achieve and today's reality?
+- Are you stating a problem, or a solution to the problem?
+- Are you focusing on defining the problem itself, rather than how to solve it?
+- If you have identified many problems, can they be merged?
+- Is the problem you identified a common root cause or a manifestation of a category of problems?
+- Is the problem you want to solve unique? Are there others doing the same thing as you?
+- What is the difference between the problem you want to solve and the problems others want to solve?
+
+These reflective questions can help ensure that you have a well-defined, distinct, and actionable problem to work on, \
+which is crucial for devising effective solutions.\
+"""
+step1ToolSuggestion = """\
+1. Problem storming revolves around the [Problem Storming Board], where participants fill in questions on sticky notes according to the description provided.
+2. The electronic version of the [Problem Storming Board] can be found in the attachment; if it is an offline workshop, \
+a physical whiteboard can also be used to create it manually.
+3. The facilitator can directly reiterate the wording on the left to explain to workshop participants how to fill it out.
+4. A series of questions is listed at the bottom left of the problem storming board for participants to refer to for better conceptualization of questions. \
+When participants encounter confusion while filling out the [Problem Storming Board], \
+the facilitator can proactively pose these questions to guide participants' thinking.\
+"""
+
+prompt = getToolPromptTemplate(step1ToolName, step1ToolContext, step1ToolSuggestion)
+
+step1Chain = prompt | llm4 | StrOutputParser()
+
+
+@tool("Problem Storming")
+def step1Tool(context: str) -> str:
+    """Useful for finding detailed advice or examples when working on the "Problem Storming" step. Input the current problem context about Problem Storming."""
+
+    return step1Chain.invoke({"current_situation": context})
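
Note (not part of this commit): steps 2-4 below follow this same tool shape. A hypothetical direct call shows how a decorated step tool can be smoke-tested outside the agent loop; the context string is made up.

```python
# Direct call to the step-1 tool; @tool wraps step1Tool into a LangChain Tool,
# so .run() feeds the string straight into step1Chain (requires live credentials).
from chains.step1 import step1Tool

advice = step1Tool.run(
    "Our nonprofit sees rural students losing interest in reading, "
    "but we have not yet framed the underlying problem."
)
print(advice)
```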
chains/step2.py
ADDED
@@ -0,0 +1,46 @@
+from langchain.schema.output_parser import StrOutputParser
+from chains.utils import getToolPromptTemplate
+from model import llm4
+from langchain.tools import tool
+
+step2ToolName = "Problem Deconstruction"
+step2ToolContext = """\
+For addressing scalable problems, the [Problem Deconstruction Canvas] can help in delving into various \
+aspects of the issue, such as the superficial problem, underlying causes, affected groups, and the resulting impact. \
+Here are some tips:
+1. Distinguishing Superficial Problems from Underlying Causes:
+- Help participants differentiate between the superficial phenomena and the underlying causes of the problem. \
+Superficial problems are observable direct manifestations leading to some impact, for example, pedestrian pathways being occupied by vehicles, \
+higher resume rejection rates for disabled individuals during job applications, \
+or visually impaired users finding it difficult to use a particular brand's app for online orders.
+- Underlying causes focus on the root causes behind superficial problems. \
+Identifying these often requires proposing hypotheses and logical multi-level deduction and research. \
+For instance, the occupation of pedestrian pathways might be linked to a lack of public education on traffic regulations, \
+insufficient regulatory enforcement, or poor road design in specific areas.
+2. Refining Broad Problems:
+- If the problem posed is relatively broad, such as when its solution spans multiple industries or fields \
+and the team lacks the necessary expertise and resources to address it, consider further segmenting the problem. \
+For example, the issue of gender discrimination could be narrowed down to a specific domain like the workplace, \
+transforming it into "gender discrimination in the workplace."
+3. Monitoring Changes:
+- Pay attention to changes over time in the underlying causes. Have they evolved? Will they continue to change in the future?
+
+These guiding suggestions aim to facilitate a more structured and insightful exploration of the problem, \
+enabling teams to delve deeper into understanding the issues at hand and how they might be addressed in a scalable and impactful manner.\
+"""
+step2ToolSuggestion = """\
+1. This step unfolds around the [Problem Deconstruction Canvas], with each participant selecting a problem from the first step to refine and structure.
+2. It is suggested to spend 5 minutes before starting to clarify with everyone the definition of each dimension on the canvas.
+3. Each participant should choose a specific color of sticky note and use the four \
+dimensions divided on the canvas to provide a structured description of the selected problem within 10 minutes.\
+"""
+prompt = getToolPromptTemplate(step2ToolName, step2ToolContext, step2ToolSuggestion)
+
+step2Chain = prompt | llm4 | StrOutputParser()
+
+
+@tool("Problem Deconstruction")
+def step2Tool(context: str) -> str:
+    """Useful for finding detailed advice or examples when working on the "Problem Deconstruction" step. Input the current problem context about Problem Deconstruction."""
+
+    return step2Chain.invoke({"current_situation": context})
chains/step3.py
ADDED
@@ -0,0 +1,35 @@
+from langchain.schema.output_parser import StrOutputParser
+from chains.utils import getToolPromptTemplate
+from model import llm4
+from langchain.tools import tool
+
+step3ToolName = "Problem Sharing"
+step3ToolContext = """\
+After structuring the problem descriptions in the previous step, the team will share the following content within the group:
+- The problem/story described on the [Problem Storming Board].
+- The refined problem description on the [Problem Deconstruction Canvas] based on the aforementioned problem/story.
+- The rationale behind posing such a question and the logical considerations involved.
+- Seek additional information inputs from the team, relevant stakeholders, or even the target users, considering their perspectives.
+
+During the sharing process, the facilitator should guide the group to merge similar problems according to the four dimensions of problem refinement. \
+This collaborative approach ensures that the team consolidates their understanding of the problems at hand, \
+enriches the context with diverse perspectives, and streamlines the focus for more targeted problem-solving efforts.\
+"""
+step3ToolSuggestion = """\
+1. Centering on the [Problem Deconstruction Canvas]: \
+Each participant takes a problem identified from the initial problem-storming phase and uses the canvas to dissect and organize it further.
+2. Clarification of Dimensions: Before diving into the task, \
+it is recommended to take 5 minutes to ensure that all participants have a clear understanding of each dimension on the canvas. \
+This step is crucial for maintaining consistency in how problems are deconstructed and ensuring that all team members are on the same page.\
+"""
+
+prompt = getToolPromptTemplate(step3ToolName, step3ToolContext, step3ToolSuggestion)
+
+step3Chain = prompt | llm4 | StrOutputParser()
+
+
+@tool("Problem Sharing")
+def step3Tool(context: str) -> str:
+    """Useful for finding detailed advice or examples when working on the "Problem Sharing" step. Input the current problem context about Problem Sharing."""
+
+    return step3Chain.invoke({"current_situation": context})
chains/step4.py
ADDED
@@ -0,0 +1,39 @@
+from langchain.schema.output_parser import StrOutputParser
+from chains.utils import getToolPromptTemplate
+from model import llm4
+from langchain.tools import tool
+
+step4ToolName = "Problem Reconstruction"
+step4ToolContext = """\
+In the format provided, the final scalable problem definition would be structured as follows:
+"For [a specific demographic], in the context of [a particular scenario], due to [a certain reason], \
+the specific problem they face is what we need to find answers to in this toolkit. \
+How we can [in a particular manner] achieve [a desired impact] is what we will continue to research."
+
+Here is an example using the format:
+"For [urban adolescents], in the context of [high unemployment rates], \
+due to [a lack of job readiness and skills], \
+the specific problem they face—[difficulty in securing first-time employment]—is what \
+we need to find answers to in this toolkit. How we can [through vocational training and apprenticeship programs] \
+achieve [an increase in youth employment rates] is what we will continue to research."\
+"""
+step4ToolSuggestion = """\
+1. The facilitator needs to provide a new workspace for everyone (a whiteboard or a blank canvas).
+2. Each participant, using a sticky note and the format provided on the right, \
+should combine the supplementary information and feedback obtained from the \
+previous step to reconstruct the description of the problem they have posed.
+3. Different participants who identified similar problems in the previous step can either \
+reconstruct the issue separately according to their own understanding or work in \
+pairs to complete the reconstruction of the same problem.
+"""
+
+prompt = getToolPromptTemplate(step4ToolName, step4ToolContext, step4ToolSuggestion)
+
+step4Chain = prompt | llm4 | StrOutputParser()
+
+
+@tool("Problem Reconstruction")
+def step4Tool(context: str) -> str:
+    """Useful for finding detailed advice or examples when working on the "Problem Reconstruction" step. Input the current problem context about Problem Reconstruction."""
+
+    return step4Chain.invoke({"current_situation": context})
chains/utils.py
ADDED
@@ -0,0 +1,25 @@
+from langchain.prompts import PromptTemplate
+
+
+def getToolPromptTemplate(tool_name, tool_context, tool_suggestion):
+    return PromptTemplate(
+        input_variables=["current_situation"],
+        partial_variables={
+            "tool_name": tool_name,
+            "tool_context": tool_context,
+            "tool_suggestion": tool_suggestion,
+        },
+        template="""\
+You are a coach who needs to provide suggestions or case examples on how to use this tool to facilitate a workshop.
+
+Facilitation tool name: ''''{tool_name}''''
+
+Facilitation tool context: ````{tool_context}````
+
+Facilitation suggestion: ####{tool_suggestion}####
+
+Problem context: {current_situation}
+
+Combining the problem and the facilitation tool context, give me some advice or a case example for this step. Use a concise tone; no more than 300 words.\
+""",
+    )
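
Note (not part of this commit): a sketch for inspecting the exact prompt a step tool sends, without any model call; all argument values here are hypothetical.

```python
# Render the template offline to verify the delimiters and variable slots.
from chains.utils import getToolPromptTemplate

prompt = getToolPromptTemplate(
    tool_name="Problem Storming",
    tool_context="Jot down the social issues swirling in your mind.",
    tool_suggestion="Fill in sticky notes on the storming board.",
)
print(prompt.format(current_situation="We lack a shared problem statement."))
```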
model.py
ADDED
@@ -0,0 +1,26 @@
+import os
+from dotenv import load_dotenv, find_dotenv
+from langchain.callbacks.streaming_stdout_final_only import (
+    FinalStreamingStdOutCallbackHandler,
+)
+
+_ = load_dotenv(find_dotenv())  # read local .env file
+
+openaiAPIVersion = os.getenv("OPENAI_API_VERSION")  # "2023-05-15"
+gpt4Model = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME_GPT4")
+gpt35Model = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME_GPT35")
+
+from langchain.chat_models import AzureChatOpenAI
+
+llm = AzureChatOpenAI(
+    temperature=0,
+    deployment_name=gpt35Model,
+    openai_api_version=openaiAPIVersion,
+)
+
+llm4 = AzureChatOpenAI(
+    temperature=0,
+    deployment_name=gpt4Model,
+    openai_api_version=openaiAPIVersion,
+    streaming=True,
+)
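
Note (not part of this commit): a hypothetical .env matching the variables this module reads. The endpoint/key variable names are an assumption about what AzureChatOpenAI additionally expects in this LangChain version; all values are placeholders.

```
# Variables read explicitly by model.py
OPENAI_API_VERSION=2023-05-15
AZURE_OPENAI_DEPLOYMENT_NAME_GPT4=<your-gpt4-deployment>
AZURE_OPENAI_DEPLOYMENT_NAME_GPT35=<your-gpt35-deployment>
# Assumed to be picked up from the environment by AzureChatOpenAI
OPENAI_API_TYPE=azure
OPENAI_API_BASE=https://<your-resource>.openai.azure.com/
OPENAI_API_KEY=<your-key>
```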
readme.md
ADDED
@@ -0,0 +1,12 @@
+---
+title: Gongyi
+emoji: 🐨
+colorFrom: gray
+colorTo: pink
+sdk: gradio
+sdk_version: 4.8.0
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio
+langchain
+openai
+tiktoken
+python-dotenv