Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- README.md +2 -8
- openai_assistant_langchain_gradio.py +87 -0
- requirements.txt +3 -0
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
|
| 4 |
-
colorFrom: green
|
| 5 |
-
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 4.7.1
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: newWork
|
| 3 |
+
app_file: openai_assistant_langchain_gradio.py
|
|
|
|
|
|
|
| 4 |
sdk: gradio
|
| 5 |
sdk_version: 4.7.1
|
|
|
|
|
|
|
| 6 |
---
|
|
|
|
|
|
openai_assistant_langchain_gradio.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
|
| 2 |
+
from langchain.schema.agent import AgentFinish
|
| 3 |
+
|
| 4 |
+
import gradio as gr
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
# --- Module-level configuration --------------------------------------------
# SECURITY FIX: an OpenAI API key was previously hardcoded on this line (and
# is therefore exposed in git history — it must be revoked). The key is now
# required to arrive via the OPENAI_API_KEY environment variable (e.g. a
# Hugging Face Space secret) so no secret ever lives in source.
if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError(
        "OPENAI_API_KEY is not set; configure it as an environment "
        "variable / Space secret before starting the app."
    )

tools = []                  # no client-side tools; the assistant handles everything
thread_id = None            # OpenAI thread id; created lazily on the first user turn
history = "dummy history"   # unused placeholder (Gradio passes history per call)

# The assistant id is not a secret, but allow overriding it without a code
# change; the default preserves the original behavior.
assistant_id = os.environ.get("ASSISTANT_ID", "asst_6sNVdUmLT12mSOk1eLqnONkw")
agent = OpenAIAssistantRunnable(assistant_id=assistant_id, as_agent=True)

# Quick-start prompts surfaced in the Gradio chat UI.
examples = [
    "Develop a eCommerce website with $10k budget",
    "Search for Freelancers",
    "More Freelancer Recommendations",
    "Help me Interview the Freelancers",
    "Generate new Milestones",
    "Generate new Contract",
    "Evaluate a Milestone",
]
|
| 14 |
+
|
| 15 |
+
def execute_agent(agent, tools, input):
    """Drive the assistant run loop until it yields a final answer.

    Repeatedly invokes *agent*; whenever the response is a batch of tool
    actions (anything other than an ``AgentFinish``), each requested tool is
    executed locally and its output is fed back into the same run/thread.

    Args:
        agent: an ``OpenAIAssistantRunnable`` configured with ``as_agent=True``.
        tools: iterable of tool objects exposing ``.name`` and ``.invoke``.
        input: the payload for the initial ``agent.invoke`` call
            (e.g. ``{"content": ..., "thread_id": ...}``).

    Returns:
        The terminal ``AgentFinish`` response.
    """
    registry = {tool.name: tool for tool in tools}
    response = agent.invoke(input)

    while not isinstance(response, AgentFinish):
        outputs = []
        for action in response:
            result = registry[action.tool].invoke(action.tool_input)
            # Debug trace of every tool round-trip.
            print(action.tool, action.tool_input, result, end="\n\n")
            outputs.append({"output": result, "tool_call_id": action.tool_call_id})
        # All actions in one response share a run/thread, so the last
        # action's identifiers are valid for the whole batch.
        response = agent.invoke(
            {
                "tool_outputs": outputs,
                "run_id": action.run_id,
                "thread_id": action.thread_id,
            }
        )

    return response
|
| 35 |
+
|
| 36 |
+
def openai_response(message, history):
    """Gradio chat handler: route *message* to the OpenAI assistant.

    The first message of a session opens a new assistant thread (storing its
    id in the module-level ``thread_id``); subsequent messages reuse that
    thread. Recognized quick-reply options are mapped to canned prompts, and
    the reply is suffixed with suggested next steps.

    Args:
        message: the user's chat message (or a clicked quick-reply option).
        history: chat history supplied by Gradio; unused here.

    Returns:
        The assistant's text answer followed by a "what next" options menu.
    """
    global thread_id

    # Suggested next actions; "\n" means an empty menu slot.
    option_1 = option_2 = option_3 = "\n"

    # Canned follow-up prompts keyed by the quick-reply option the user
    # clicked, mapped to (assistant prompt, (option_1, option_2, option_3)).
    canned = {
        "Search for Freelancers": (
            "Provide brief recommendations, under 200 words, for suitable freelancers with specific skills, using fictitious names. Summarize why each is chosen based on their profile, skills, past jobs, client feedback, portfolio, and cost, ensuring budget considerations are met.",
            ("-> **More Freelancer Recommendations**", "-> **Help me Interview the Freelancers**", "\n"),
        ),
        "More Freelancer Recommendations": (
            "Kindly offer additional recommendations, under 200 words.",
            ("-> **More Freelancer Recommendations**", "-> **Help me Interview the Freelancers**", "\n"),
        ),
        "Help me Interview the Freelancers": (
            "Create 5 questions for an interview focusing on the skills required for this project.",
            ("-> **Which Freelancer would you like to hire?**", "\n", "\n"),
        ),
        "Generate new Milestones": (
            "Create a list of key stages for this software development project, ensuring there are no more than six milestones.",
            ("-> **Generate new Contract**", "-> **Evaluate a Milestone**", "\n"),
        ),
        "Generate new Contract": (
            "Create a contract for the project, including the key stages, milestones, and payment terms.",
            ("-> **Evaluate a Milestone**", "\n", "\n"),
        ),
        "Evaluate a Milestone": (
            "Ask for code samples to evaluate the quality of their work.",
            ("\n", "\n", "\n"),
        ),
    }

    if not thread_id:
        # First turn: seed a brand-new assistant thread with project context.
        context = "Project is to "
        response = execute_agent(agent, tools, {"content": context + message})
        thread_id = response.return_values["thread_id"]
        option_1 = "-> **Search for Freelancers**"
    elif message in canned:
        context, (option_1, option_2, option_3) = canned[message]
        response = execute_agent(agent, tools, {"content": context, "thread_id": thread_id})
    else:
        # Free-form follow-up on the existing thread.
        response = execute_agent(agent, tools, {"content": message, "thread_id": thread_id})
        option_1 = "-> **Generate new Milestones**"
        option_2 = "-> **Generate new Contract**"
        option_3 = "-> **Evaluate a Milestone**"

    output = response.return_values["output"]
    # FIX: repaired typo ("What would like" -> "What would you like") and the
    # mojibake "π₯" which appears to be a mis-decoded 🔥 emoji — confirm
    # against the deployed UI.
    next_options = (
        "🔥🔥🔥🔥🔥 What would you like to do next? 🔥🔥🔥🔥🔥"
        + "\n\n" + option_1 + "\n" + option_2 + "\n" + option_3
    )
    return output + "\n\n" + next_options
|
| 86 |
+
|
| 87 |
+
if __name__ == "__main__":
    # SECURITY: basic-auth credentials were hardcoded; keep them as defaults
    # for backward compatibility but allow overriding via environment
    # variables (e.g. Space secrets) without a code change.
    gr.ChatInterface(openai_response, examples=examples).launch(
        auth=(
            os.environ.get("APP_USERNAME", "sree"),
            os.environ.get("APP_PASSWORD", "search"),
        )
    )
|
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio==4.7.1
|
| 2 |
+
langchain==0.0.343
|
| 3 |
+
openai==1.3.6
|