Gent (PG/R - Comp Sci & Elec Eng) committed on
Commit
a20ccd6
·
1 Parent(s): f96a97b

add demos

Browse files
.vscode/launch.json CHANGED
@@ -10,7 +10,7 @@
10
  "request": "launch",
11
  "program": "${file}",
12
  "console": "integratedTerminal",
13
- "justMyCode": true
14
  }
15
  ]
16
  }
 
10
  "request": "launch",
11
  "program": "${file}",
12
  "console": "integratedTerminal",
13
+ "justMyCode": false
14
  }
15
  ]
16
  }
demo.py CHANGED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pkgutil
2
+ import interfaces
3
+ import gradio as gr
4
+ import importlib
5
+
6
+ demos = []
7
+ names = []
8
+ for i in list(pkgutil.iter_modules(interfaces.__path__)):
9
+ my_package = importlib.import_module(f'interfaces.{i.name}')
10
+ my_variable = getattr(my_package, 'demo')
11
+ demos.append(my_variable)
12
+ names.append(i.name)
13
+
14
+ gr.TabbedInterface(demos,names).launch(share=True,show_error=True)
demos/character.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import time
3
  from utils.asr import recognize_from_file
4
  from utils.tts import tts
5
- from utils.llm import generate_response
6
 
7
  chat_history = [
8
  ]
 
2
  import time
3
  from utils.asr import recognize_from_file
4
  from utils.tts import tts
5
+ from utils.GPT import generate_response
6
 
7
  chat_history = [
8
  ]
demos/chat.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import time
3
  from utils.asr import recognize_from_file
4
  from utils.tts import tts
5
- from utils.llm import generate_response
6
 
7
  chat_history = []
8
  def convert_chatbox(chat_history):
@@ -19,7 +19,7 @@ with gr.Blocks() as demo:
19
  def respond(message):
20
  # TODO: replace this with real GPT model
21
  chat_history.append({'role': 'user', 'content': message})
22
- result = generate_response(chat_history)
23
  mesg=result['choices'][0]['message']
24
  print("recv: ", mesg)
25
 
 
2
  import time
3
  from utils.asr import recognize_from_file
4
  from utils.tts import tts
5
+ from utils.api.GPT import generate_answers_openai
6
 
7
  chat_history = []
8
  def convert_chatbox(chat_history):
 
19
  def respond(message):
20
  # TODO: replace this with real GPT model
21
  chat_history.append({'role': 'user', 'content': message})
22
+ result = generate_answers_openai(chat_history)
23
  mesg=result['choices'][0]['message']
24
  print("recv: ", mesg)
25
 
demos/scene.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import time
3
  from utils.asr import recognize_from_file
4
  from utils.tts import tts
5
- from utils.llm import generate_response
6
 
7
  chat_history = [
8
  ]
 
2
  import time
3
  from utils.asr import recognize_from_file
4
  from utils.tts import tts
5
+ from utils.GPT import generate_response
6
 
7
  chat_history = [
8
  ]
interfaces/character.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from utils.api.GPT import generate_answers_openai, Session
3
+ import numpy as np
4
+ import os
5
+ import time
6
+
7
+ prompt_waiter="""You are a bot designed to do role-play activities in a scene. The conversation should be natural.
8
+ As the waiter, your role will be to find a table, take the customer's order, answer any questions they may have about the menu, and ensure that they have an enjoyable dining experience. The waiter will help the customer to find a table, order a meal, and pay the bill. Anything in parentheses () signifies the role you are playing. Anything in brackets [] is the action you are taking. Remember, you are the waiter and never respond as a customer. Your response will begin with your character, like "(waiter:) Hello![Greeting]"."""
9
+ def generate_waiter():
10
+ session = Session(prompt_waiter)
11
+ def waiter(customer_input):
12
+ print("(customer:) ", customer_input)
13
+ response = generate_answers_openai(customer_input, session)
14
+ print("(waiter:) ", response)
15
+ return session.to_conversation_pair()
16
+
17
+ interface = gr.Interface(description="waiter in a restaurant", fn=waiter,
18
+ inputs=[gr.Textbox(lines=5, label="input")],
19
+ outputs=[gr.Chatbot(label="conversation")])
20
+ return interface
21
+
22
+ grammer_prompt = """Correct “Text:” to standard English and place the results in “Correct Text:”###Text: The goal of article rewriting is to expres information in a new way. Article rewriting is to make change in a text by replaecing words, phrases, sentencs, and sometimes hole paragraphs to make the text looke unique and more engauging. Correct Text: The goal of article rewriting is to express information in a new way. Article rewriting involves making changes in a text by replacing words, phrases, sentences, and sometimes entire paragraphs to make the text look unique and more engaging."""
23
+
24
+ def generate_interface(prompt, description=None):
25
+ session = Session(prompt)
26
+ def _fun(customer_input):
27
+ print("(Q:) ", customer_input)
28
+ response = generate_answers_openai(customer_input, session)
29
+ print("(A:) ", response)
30
+ return session.to_conversation_pair()
31
+
32
+ interface = gr.Interface(description=description, fn=_fun,
33
+ inputs=[gr.Textbox(lines=1, label="input")],
34
+ outputs=[gr.Chatbot(label="conversation")])
35
+ return interface
36
+
37
+ characters = [
38
+ {"name": "waiter", "prompt": prompt_waiter, "description": "waiter in a restaurant"},
39
+ {"name": "grammer", "prompt": grammer_prompt, "description": "grammer correction"},
40
+ ]
41
+
42
+ interfaces = []
43
+ names = []
44
+ for character in characters:
45
+ interface = generate_interface(character['prompt'], character['description'])
46
+ interfaces.append(interface)
47
+ names.append(character['name'])
48
+
49
+ demo = gr.TabbedInterface(interfaces, names,title="Role Play Bot")
50
+
51
+ if __name__ == '__main__':
52
+ demo.launch()
interfaces/data.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "item":"rice"
3
+ }
interfaces/generator.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
3
+ from langchain.memory import ConversationBufferWindowMemory
4
+
5
+ template = """
6
+ You are designed to be able to play a role as {character} in {scene}, from answering simple questions to providing services and interactions. Remember Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the role in playing.
7
+
8
+ Assistant can take actions to help humans finish tasks step by step. Assistant is constantly playing the role and cannot change the role. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions.
9
+
10
+ The responses of Assistant must follow the rules:
11
+ 1. Anything in parentheses () signifies the role you are playing.
12
+ 2. Anything in brackets [] is the action you are taking.
13
+
14
+ For example, if you are playing the role of a waiter, you can say "(waiter) Please follow me to your table. [lead to table]"
15
+
16
+
17
+ In the conversation, you will guide and assist humans to finish the tasks:
18
+ 1. find a table.
19
+ 2. take an order.
20
+ 3. serve the food.
21
+ 4. pay the bill.
22
+
23
+ {history}
24
+ Human: {human_input}
25
+ Assistant:"""
26
+
27
+ prompt = PromptTemplate(
28
+ input_variables=["history", "human_input","character", "scene"],
29
+ template=template
30
+ )
31
+
32
+
33
+ chatgpt_chain = LLMChain(
34
+ llm=OpenAI(temperature=0),
35
+ prompt=prompt,
36
+ verbose=True,
37
+ memory=ConversationBufferWindowMemory(k=2),
38
+ )
39
+
40
+ # %%
41
+ import gradio as gr
42
+
43
+ chatbox = gr.Chatbot()
44
+ def chat(human_input, character, scene):
45
+ output = chatgpt_chain({"human_input":human_input,"character":character,"scene":scene}) # TODO: bug "One input key expected got ['human_input', 'character', 'scene']"
46
+ chatbox.value.append((human_input,output))
47
+ return output, chatbox.value
48
+ demo = gr.Interface(chat,[gr.inputs.Textbox(label="human_input", placeholder="Enter your message"),
49
+ gr.inputs.Textbox(label="character", placeholder="Enter your character"),
50
+ gr.inputs.Textbox(label="scene", placeholder="Enter your scene")],
51
+ [gr.outputs.Textbox(label="output"),chatbox],
52
+ examples=[["hello","waiter","restaurant"]],title="Chat with role-play",description="Chat with role-play")
53
+
54
+ if __name__ == '__main__':
55
+ demo.launch(show_error=True)
interfaces/language.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from utils.api.GPT import generate_answers_openai, Session
3
+ import numpy as np
4
+ import os
5
+ import time
6
+
7
+ def generate_interface(prompt, description=None):
8
+ session = Session(prompt)
9
+ def _fun(customer_input):
10
+ print("(Q:) ", customer_input)
11
+ response = generate_answers_openai(customer_input, session)
12
+ print("(A:) ", response)
13
+ return session.to_conversation_pair()
14
+
15
+ interface = gr.Interface(description=description, fn=_fun,
16
+ inputs=[gr.Textbox(lines=1, label="input")],
17
+ outputs=[gr.Chatbot(label="conversation")])
18
+ return interface
19
+
20
+ prompts = [
21
+ {"name": "Summary", "prompt": "Based on the language of human in the conversation, you will generate a report to assess the English level, summarize the conversation, and provide suggestions to improve the language. Conservation:{}", "description": "generate report."},
22
+
23
+ ]
24
+
25
+ interfaces = []
26
+ names = []
27
+ for function in prompts:
28
+ interface = generate_interface(function['prompt'], function['description'])
29
+ interfaces.append(interface)
30
+ names.append(function['name'])
31
+
32
+ demo = gr.TabbedInterface(interfaces, names,title="Role Play Bot")
33
+
34
+ if __name__ == '__main__':
35
+ demo.launch()
interfaces/prompt_dev.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from utils.api.GPT import generate_answers_openai, Session
3
+ import numpy as np
4
+ import os
5
+ import time
6
+
7
+
8
+ with gr.Blocks() as demo:
9
+ chatbot = gr.Chatbot()
10
+ df = gr.DataFrame(headers=["Item", "translation (Chinese)", "Price"],
11
+ datatype=["str", "str", "number"],)
12
+ msg = gr.Textbox(lines=1, label="input",placeholder="Enter your message")
13
+ def respond(history, customer_input):
14
+ session = Session("")
15
+ session.from_conversation_pair(history)
16
+ print("(Q:) ", customer_input)
17
+ response = generate_answers_openai(customer_input, session)
18
+ print("(A:) ", response)
19
+ return session.to_conversation_pair(),None
20
+ msg.submit(respond, [chatbot,msg], [chatbot,df])
21
+
22
+ with gr.Column():
23
+ clear = gr.Button("Clear")
24
+ btn = gr.Button("导出", type="button", label="导出")
25
+ outputs = gr.JSON()
26
+
27
+ def export():
28
+ stats_history = {}
29
+ for i,item in enumerate(chatbot.value):
30
+ user,assistant = item
31
+ stats_history[str(i)] = {
32
+ "user": user,
33
+ "assistant": assistant
34
+ }
35
+ chatbot.value = []
36
+ return [], stats_history
37
+
38
+ btn.click(export, None, [ chatbot, outputs, df, msg])
39
+
40
+ if __name__ == '__main__':
41
+ demo.launch(show_error=True)
{demos → interfaces}/realtime_asr.py RENAMED
@@ -1,5 +1,7 @@
 
 
1
  import numpy as np
2
- from utils import recognize_from_stream, generate_response
3
  from azure.cognitiveservices.speech.audio import PushAudioInputStream, AudioStreamFormat
4
 
5
  import gradio as gr
@@ -19,14 +21,13 @@ def rec_cb(evt):
19
  print(evt.result.text)
20
  if evt.result.text:
21
  chat_history.append({'role':'user', 'content':evt.result.text})
22
- response = generate_response(chat_history)
23
- chat_history.append(response['choices'][0]['message'])
24
 
25
  speech_recognizer = recognize_from_stream(stream,rec_cb)
26
 
27
  def transcribe(speech):
28
  sample_rate, speech = speech
29
- # print(time.time(), (sample_rate, len(speech)))
30
 
31
  stream.write(speech.tobytes())
32
 
@@ -46,8 +47,7 @@ demo = gr.Interface(
46
  outputs= [
47
  gr.Chatbot(),
48
  ],
49
- flagging_callback=gr.SimpleCSVLogger(),
50
  live=True)
51
 
52
  if __name__ == '__main__':
53
- demo.launch()
 
1
+ # https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?tabs=linux%2Cterminal&pivots=programming-language-python
2
+
3
  import numpy as np
4
+ from utils import recognize_from_stream
5
  from azure.cognitiveservices.speech.audio import PushAudioInputStream, AudioStreamFormat
6
 
7
  import gradio as gr
 
21
  print(evt.result.text)
22
  if evt.result.text:
23
  chat_history.append({'role':'user', 'content':evt.result.text})
24
+
 
25
 
26
  speech_recognizer = recognize_from_stream(stream,rec_cb)
27
 
28
  def transcribe(speech):
29
  sample_rate, speech = speech
30
+ print(time.time(), (sample_rate, len(speech)))
31
 
32
  stream.write(speech.tobytes())
33
 
 
47
  outputs= [
48
  gr.Chatbot(),
49
  ],
 
50
  live=True)
51
 
52
  if __name__ == '__main__':
53
+ demo.launch(share=True,show_error=True)
interfaces/table.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from langchain import LLMMathChain, OpenAI
3
+
4
+ import pandas as pd
5
+ from langchain.tools import BaseTool, StructuredTool, Tool, tool
6
+
7
+ from pydantic import BaseModel, Field
8
+
9
+ import os
10
+ import yaml
11
+
12
+ from langchain.agents import (
13
+ create_json_agent,
14
+ AgentExecutor
15
+ )
16
+ from langchain.agents.agent_toolkits import JsonToolkit
17
+ from langchain.chains import LLMChain
18
+ from langchain.requests import TextRequestsWrapper
19
+ from langchain.tools.json.tool import JsonSpec
20
+
21
+ template = """Assistant is a large language model owned by Mintyea.
22
+
23
+ Assistant is designed to be able to use tools and coordinate with humans to finish tasks. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
24
+
25
+ Assistant is a powerful to understand and utilize a wide range of tools. Assistant does not handle the task directly, but instead uses tools to help humans finish tasks. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
26
+
27
+ TOOLS:
28
+ ------
29
+
30
+ Assistant has access to the following tools:
31
+
32
+ > Waiter is a tool that can help you to serve in the restaurant. It can help the customer to find a table, take an order, serve the food, and pay the bill. Waiter can take actions to help humans finish tasks step by step. For example, you can say "(waiter) Please follow me to your table. [lead to table]". The customer can only talk to the waiter.
33
+
34
+ > Order List is a tool that can help you to take an order. The customer cannot talk to the order list directly. The customer can only talk to the waiter. The order list can tell the waiter the details if the costomer requires. For example, you can say "(waiter) I want to order a dessert. [order a dessert]". The waiter will use the order list to take the order.
35
+
36
+ To use a tool, please use the following format:
37
+
38
+ ```
39
+ Thought: Do I need to use a tool? Yes
40
+ Action: the action to take, should be one of [The order list, Waiter]
41
+ Action Input: the input to the action
42
+ Observation: the result of the action
43
+ ```
44
+
45
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
46
+
47
+ ```
48
+ Thought: Do I need to use a tool? No
49
+ AI: [your response here]
50
+ ```
51
+
52
+ Begin!
53
+
54
+ Previous conversation history:
55
+
56
+
57
+ New input: order a dessert.
58
+
59
+
60
+ """
61
+ #%%
62
+ class OrderList():
63
+ name = "Order"
64
+ description = "The list of orders."
65
+ list = pd.DataFrame(columns=["Item", "translation (Chinese)", "Price"])
66
+ def __init__(self):
67
+ super().__init__()
68
+
69
+ def add(self, item: str, translation: str, price: str):
70
+ self.list = pd.concat([
71
+ self.list ,
72
+ pd.DataFrame([{"Item":item, "translation (Chinese)":translation, "Price":price}])],
73
+ ignore_index=True,axis=0)
74
+
75
+ def delete(self, item):
76
+ self.list = self.list[self.list["Item"] != item]
77
+
78
+ def __str__(self) -> str:
79
+ return self.list.to_markdown(index=None)
80
+
81
+ def run(self, query: str):
82
+ print("\n########### call run:", query)
83
+ params = {}
84
+ for pair in query.split(","):
85
+ k,v = pair.split(":")
86
+ params[k.lower()] = v.strip()
87
+
88
+ item, translation, price = params.values()
89
+ self.add(item, translation, price)
90
+
91
+ return "You have ordered {} ({}), which costs {}.".format(item, translation, price)
92
+
93
+ def print(self, query: str):
94
+ print("\n########### call print:", query)
95
+
96
+ return "Your order has: \n" +str(self.list.to_markdown(index=None))
97
+
98
+ order_list = OrderList()
99
+ #%%
100
+
101
+ with open("data.txt") as f:
102
+ data = eval(f.read())
103
+ json_spec = JsonSpec(dict_=data, max_value_length=4000)
104
+ json_toolkit = JsonToolkit(spec=json_spec)
105
+
106
+
107
+ #%%
108
+
109
+ agent = create_json_agent(
110
+ llm=OpenAI(temperature=0),
111
+ toolkit=json_toolkit,
112
+ verbose=True
113
+ )
114
+
115
+
116
+ #%%
117
+
118
+
119
+ with gr.Blocks() as demo:
120
+ with gr.Row():
121
+ # inputs
122
+ with gr.Column():
123
+ prompt = gr.Textbox(agent.agent.llm_chain.prompt.template, lines=10, label="prompt",placeholder="Enter your prompt")
124
+ msg = gr.Textbox(lines=1, label="input",placeholder="Enter your message")
125
+
126
+ # outputs
127
+ with gr.Column():
128
+ chatbot = gr.Chatbot()
129
+ df = gr.DataFrame(headers=["Item", "translation (Chinese)", "Price"],
130
+ datatype=["str", "str", "number"],)
131
+
132
+ history = []
133
+ def respond( customer_input):
134
+
135
+ print("(Q:) ", customer_input)
136
+ response = agent.run(customer_input)
137
+ print("(A:) ", response)
138
+ history.append((customer_input, response))
139
+ return history, order_list.list
140
+
141
+ msg.submit(respond, [msg], [chatbot,df])
142
+
143
+ with gr.Row():
144
+ clear = gr.Button("Clear")
145
+ btn = gr.Button("导出", type="button", label="导出")
146
+ outputs = gr.JSON()
147
+
148
+ def export():
149
+ stats_history = {}
150
+ for i,item in enumerate(chatbot.value):
151
+ user,assistant = item
152
+ stats_history[str(i)] = {
153
+ "user": user,
154
+ "assistant": assistant
155
+ }
156
+ chatbot.value = []
157
+
158
+ return [], stats_history, None
159
+
160
+ btn.click(export, None, [ chatbot, outputs, df])
161
+
162
+ if __name__ == '__main__':
163
+ agent.run("get the first item")
164
+ demo.launch(show_error=True,share=False)
165
+
utils/__init__.py CHANGED
@@ -1,3 +1,2 @@
1
  from .asr import *
2
- from .tts import *
3
- from .llm import *
 
1
  from .asr import *
2
+ from .tts import *
 
utils/llm.py DELETED
@@ -1,39 +0,0 @@
1
- # Import necessary libraries
2
- import gradio as gr
3
- import openai
4
- import os
5
-
6
- # Set up OpenAI API key
7
- openai.api_key = os.environ['OPENAI_API_KEY']
8
- openai.proxy = os.getenv('HTTP_PROXY', "")
9
-
10
- # Define function to generate bot response
11
- # messages=[
12
- # {"role": "system", "content": "You are a helpful assistant."},
13
- # {"role": "user", "content": "Who won the world series in 2020?"},
14
- # {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
15
- # {"role": "user", "content": "Where was it played?"}
16
- # ]
17
-
18
- # Call OpenAI GPT-3 API to generate a response
19
- def generate_response(messages, model = "gpt-3.5-turbo"):
20
-
21
- response = openai.ChatCompletion.create(
22
- model=model,
23
- messages = messages
24
- )
25
-
26
- # Extract and return the generated response
27
- return response
28
-
29
- if __name__ == '__main__':
30
- messages=[
31
- {"role": "system", "content": "You are a helpful assistant."},
32
- {"role": "user", "content": "Who won the world series in 2020?"},
33
- {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
34
- {"role": "user", "content": "Where was it played?"}
35
- ]
36
- response = generate_response(messages)
37
- print(response)
38
- print(response['choices'][0]['message']['content'])
39
-