Spaces:
Sleeping
Sleeping
Commit ·
45c30cf
0
Parent(s):
git init
Browse files- .gitignore +9 -0
- __init__.py +0 -0
- app.py +32 -0
- bot/__init__.py +6 -0
- bot/chrono.py +245 -0
- bot/llm_client.py +35 -0
- test.py +44 -0
- tools/__init__.py +0 -0
- tools/tools.py +7 -0
.gitignore
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
|
| 4 |
+
env/
|
| 5 |
+
|
| 6 |
+
final_test/
|
| 7 |
+
model/
|
| 8 |
+
rvc/
|
| 9 |
+
routers/
|
__init__.py
ADDED
|
File without changes
|
app.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bot import llm
|
| 2 |
+
# from tools import model_llm_rag
|
| 3 |
+
from flask import Flask, request, jsonify
|
| 4 |
+
# from langchain_core.messages import HumanMessage, AIMessage
|
| 5 |
+
# from langchain.prompts import ChatPromptTemplate
|
| 6 |
+
# from deep_translator import GoogleTranslator
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
app = Flask(__name__)
|
| 10 |
+
|
| 11 |
+
def simulate_llm_query(user_input, username):
    """Forward one chat message to the shared bot and return its reply.

    Despite the historical name, this is no longer a simulation: it
    delegates directly to ``llm.query`` (the module-level ChronoBot
    imported from ``bot``).

    Args:
        user_input: Raw chat text sent by the client.
        username: Name of the user the bot should address.

    Returns:
        The bot's textual response.
    """
    return llm.query(user_input, username)
|
| 18 |
+
|
| 19 |
+
@app.route('/query', methods=['POST'])
def query_llm():
    """Handle ``POST /query``: run the user's input through the LLM.

    Expects a JSON body with an ``input`` field and an optional
    ``username`` field. Responds with ``{"response": <bot reply>}`` on
    success, or ``{"error": ...}`` with HTTP 400 when input is missing.
    """
    # silent=True returns None instead of raising when the body is
    # missing or not valid JSON, so malformed requests get our own
    # clean 400 below rather than Flask's default error page.
    data = request.get_json(silent=True) or {}
    user_input = data.get('input')
    username = data.get('username')

    if not user_input:
        return jsonify({"error": "No input provided"}), 400

    response = simulate_llm_query(user_input, username)
    return jsonify({"response": response})

if __name__ == '__main__':
    # Bind to all interfaces on the port HuggingFace Spaces expects.
    app.run(host='0.0.0.0', port=7860)
|
bot/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bot.chrono import ChronoBot
from langchain.memory import ConversationBufferMemory

# Conversation history shared across requests. The memory_key must match
# the "{chat_history}" placeholder used by the bot's prompt templates.
memory = ConversationBufferMemory(memory_key="chat_history")

# Singleton bot instance; the Flask app imports this as `from bot import llm`.
llm = ChronoBot(memory=memory)
|
bot/chrono.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# from tools.tools_llm import *
|
| 2 |
+
from langchain.agents import load_tools
|
| 3 |
+
from langchain.agents import initialize_agent
|
| 4 |
+
from langchain.agents import AgentType
|
| 5 |
+
from langchain.utilities import GoogleSearchAPIWrapper
|
| 6 |
+
from langchain.utilities import WikipediaAPIWrapper
|
| 7 |
+
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
|
| 8 |
+
from langchain.prompts import StringPromptTemplate
|
| 9 |
+
from langchain import OpenAI, SerpAPIWrapper, LLMChain
|
| 10 |
+
from typing import List, Union, Any, Optional, Type
|
| 11 |
+
from langchain.schema import AgentAction, AgentFinish
|
| 12 |
+
import re
|
| 13 |
+
from langchain import PromptTemplate
|
| 14 |
+
from langchain.tools import BaseTool
|
| 15 |
+
from langchain.callbacks.manager import AsyncCallbackManagerForToolRun, CallbackManagerForToolRun
|
| 16 |
+
from langchain.utilities import GoogleSerperAPIWrapper
|
| 17 |
+
|
| 18 |
+
from bot.llm_client import Mistral
|
| 19 |
+
from typing import Callable
|
| 20 |
+
import json
|
| 21 |
+
import requests
|
| 22 |
+
from langchain.tools import BaseTool, StructuredTool, tool
|
| 23 |
+
import random
|
| 24 |
+
from langchain.memory import ConversationBufferMemory
|
| 25 |
+
from tools.tools import search
|
| 26 |
+
|
| 27 |
+
import os
|
| 28 |
+
|
| 29 |
+
template = """You are a live streamer name Chrono created by "Zain". He is your creator and you must response to his queries.
|
| 30 |
+
|
| 31 |
+
Chrono is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations topics.
|
| 32 |
+
|
| 33 |
+
As a language model, Chrono is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
|
| 34 |
+
|
| 35 |
+
Chrono is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions.
|
| 36 |
+
|
| 37 |
+
Chrono has the capability to use a tool that provided. Chrono will use the tool to assist with the task at hand, and will follow every instruction on the tools description. Chrono will carefuly giving an answer to the user to prevent any misinformation.
|
| 38 |
+
|
| 39 |
+
You will respond to a chat from a user name '{username}'. You don't need to make a new conversation after the chat, just respond to the chat.
|
| 40 |
+
|
| 41 |
+
---
|
| 42 |
+
|
| 43 |
+
You must following the format below:
|
| 44 |
+
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
Thought: Do I need to use a tool? Yes
|
| 48 |
+
Action: the action to take, should be one of [{tool_names}]
|
| 49 |
+
Action Input: the input to the action
|
| 50 |
+
Observation: the result of the action
|
| 51 |
+
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
When you have a response to say to the user, or if you do not need to use a tool, you MUST use the format:
|
| 55 |
+
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
Thought: Do I need to use a tool? No
|
| 59 |
+
Final Answer: [Your response to the user]
|
| 60 |
+
|
| 61 |
+
```
|
| 62 |
+
---
|
| 63 |
+
|
| 64 |
+
Begin!
|
| 65 |
+
|
| 66 |
+
Previous conversation history:
|
| 67 |
+
|
| 68 |
+
{chat_history}
|
| 69 |
+
|
| 70 |
+
New input: {input}
|
| 71 |
+
|
| 72 |
+
{agent_scratchpad}
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
template_2 = """You are a virtual assistant name Chrono created by Zain. Your job is to assist and make a conversation.
|
| 76 |
+
|
| 77 |
+
Chrono is designed to be able to assist and make a conversation with the users from answering simple questions to providing in-depth explanations topics.
|
| 78 |
+
|
| 79 |
+
You will have an attitude of a human, you can be funny, sarcastic, or even serious. As a language model and a virtual assistant, you can generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
|
| 80 |
+
|
| 81 |
+
Chrono is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative.
|
| 82 |
+
|
| 83 |
+
Chrono has the capability to use a tool that provided. Chrono will use the tool to assist with the task at hand, and will follow every instruction on the tools description. Chrono will carefuly giving an answer to the user to prevent any misinformation.
|
| 84 |
+
|
| 85 |
+
You will respond to a chat from a user name '{username}'. You don't need to make a new conversation after the chat, just respond to the chat.
|
| 86 |
+
|
| 87 |
+
-----
|
| 88 |
+
|
| 89 |
+
You must ABSOLUTELY following the format below because you are still an AI assistant:
|
| 90 |
+
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
Thought: Do I need to use a tool? Yes
|
| 94 |
+
Action: the action to take, should be one of [{tool_names}]
|
| 95 |
+
Action Input: the input to the action
|
| 96 |
+
Observation: the result of the action
|
| 97 |
+
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
When you have a response to say to the viewer, or if you do not need to use a tool, you MUST use the format:
|
| 101 |
+
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
Thought: Do I need to use a tool? No
|
| 105 |
+
Final Answer: [Your response to the viewer]
|
| 106 |
+
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
-----
|
| 110 |
+
|
| 111 |
+
Begin!
|
| 112 |
+
|
| 113 |
+
New input: {input}
|
| 114 |
+
|
| 115 |
+
{agent_scratchpad}
|
| 116 |
+
|
| 117 |
+
"""
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class CustomPromptTemplate(StringPromptTemplate):
    """Prompt template that injects the agent scratchpad, tool metadata
    and the current username into a ReAct-style prompt.

    Example:
        .. code-block:: python

            from langchain import PromptTemplate
            prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}")
    """

    # Names of the variables the prompt template expects.
    input_variables: List[str]
    # The raw prompt template text.
    template: str
    # Format of the prompt template. Options are: 'f-string', 'jinja2'.
    template_format: str = "f-string"
    # Whether or not to try validating the template.
    validate_template: bool = False
    # Tools whose names/descriptions are rendered into the prompt.
    tools_getter: List[StructuredTool]
    # Name of the chatting user, substituted into {username}.
    username: str

    def format(self, **kwargs) -> str:
        """Render the template, filling in scratchpad/tools/username."""
        # Turn the intermediate (AgentAction, observation) pairs into the
        # running "thought" transcript the agent sees.
        steps = kwargs.pop("intermediate_steps")
        scratchpad = "".join(
            f"{action.log}\nObservation: {observation}"
            for action, observation in steps
        )
        kwargs["agent_scratchpad"] = scratchpad

        # Expose the available tools (descriptions and bare names) so the
        # model knows what it may call.
        available = self.tools_getter
        kwargs["tools"] = "\n".join(
            f"{t.name}: {t.description}" for t in available
        )
        kwargs["tool_names"] = ", ".join(t.name for t in available)

        kwargs["username"] = self.username

        return self.template.format(**kwargs)
|
| 167 |
+
|
| 168 |
+
class CustomOutputParser(AgentOutputParser):
    """Parse ReAct-formatted LLM output into an AgentAction or AgentFinish.

    The prompt instructs the model to emit either a final-answer block
    ("Final Answer: ..." — or occasionally just "Answer: ...") or a tool
    call ("Action: ... / Action Input: ...").
    """

    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        """Return AgentFinish for answers, AgentAction for tool calls.

        Raises:
            ValueError: if the output matches neither format.
        """
        # Finish when the model produced an answer. Both markers are
        # handled; splitting on the marker and taking the last segment
        # extracts the answer text exactly as the original two branches did.
        for marker in ("Final Answer:", "Answer:"):
            if marker in llm_output:
                return AgentFinish(
                    # Return values is generally always a dictionary with a
                    # single `output` key.
                    return_values={"output": llm_output.split(marker)[-1].strip()},
                    log=llm_output,
                )

        # Otherwise parse out the action and its input, tolerating optional
        # step numbers and stray whitespace around the labels.
        regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        match = re.search(regex, llm_output, re.DOTALL)

        if not match:
            raise ValueError(f"Could not parse LLM output: `{llm_output}`")

        # (The original re-tested `if match:` right after raising on
        # `not match` — that guard was dead code and is removed.)
        action = match.group(1).strip()    # e.g. "search"
        action_input = match.group(2)      # e.g. '"What is the weather today?"'
        return AgentAction(
            tool=action,
            tool_input=action_input.strip(" ").strip('"'),
            log=llm_output,
        )
|
| 199 |
+
|
| 200 |
+
class ChronoBot:
    """Conversational agent wrapping the custom Groq-backed LLM with a
    Google-search tool and a shared conversation memory."""

    def __init__(self, memory=None):
        """Set up memory, LLM, tools and the output parser.

        Args:
            memory: Optional ConversationBufferMemory shared with the
                caller so chat history survives across requests; a fresh
                buffer is created when omitted.
        """
        self.memory = memory if memory else ConversationBufferMemory(memory_key="chat_history")
        # LLM, tool list and parser used to (re)build the agent per query.
        self.llm = Mistral()
        self.tools_list = [search]
        self.tool_names = [i.name for i in self.tools_list]
        self.output_parser = CustomOutputParser()
        # SECURITY(review): a live Serper API key is committed in source.
        # Rotate this credential and inject it via environment/secret
        # configuration instead of hard-coding it here.
        os.environ["SERPER_API_KEY"] = 'f90fe84e78ef9d2d8e377ab5c6fe3a4a25f42ef0'

    def query(self, input_text: str, username: str) -> str:
        """Run one chat turn through the agent and return the final answer.

        Args:
            input_text: The user's message.
            username: Substituted into the prompt's {username} slot.

        Returns:
            The agent's final answer text.
        """
        # The prompt depends on the username, so chain/agent/executor are
        # rebuilt on every call. NOTE(review): only the prompt actually
        # varies; the rebuild is wasteful but preserved as-is.
        prompt = CustomPromptTemplate(
            input_variables=["input", "intermediate_steps", "chat_history"],
            template=template_2,
            validate_template=False,
            tools_getter=self.tools_list,
            username=username
        )

        self.llm_chains = LLMChain(llm=self.llm, prompt=prompt)

        self.agent = LLMSingleActionAgent(
            llm_chain=self.llm_chains,
            output_parser=self.output_parser,
            # Stop before the model hallucinates its own "Observation:".
            stop=["\nObservation:"],
            allowed_tools=self.tool_names,
        )
        self.agent_executor = AgentExecutor.from_agent_and_tools(
            agent=self.agent,
            tools=self.tools_list,
            verbose=True,
            memory=self.memory,
        )

        return self.agent_executor.run(input=input_text)

    def show_memory(self):
        """Return the buffered chat history.

        Bug fix: ConversationBufferMemory has no ``get_memory()`` method,
        so the original call always raised AttributeError;
        ``load_memory_variables`` is the supported accessor.
        """
        return self.memory.load_memory_variables({})
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
|
bot/llm_client.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.llms.base import LLM
from typing import Optional, List, Mapping, Any
from groq import Groq
import os
import requests

# HOST = '127.0.0.1:5000'
# Kept for reference; the Groq SDK client below manages its own endpoint.
# (Was an f-string with no placeholders — plain literal now.)
URI = 'https://api.groq.com/openai/v1/chat/completions'

# SECURITY(review): a live Groq API key was committed here. Rotate the
# credential and supply it via the GROQ_API_KEY environment variable;
# the literal remains only as a backward-compatible fallback.
client = Groq(
    api_key=os.environ.get(
        "GROQ_API_KEY",
        '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',
    ),
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Mistral(LLM):
    """LangChain LLM wrapper around the Groq chat-completions API.

    NOTE(review): named "Mistral" historically — the model actually
    requested below is ``llama3-70b-8192``.
    """

    @property
    def _llm_type(self) -> str:
        """LLM type identifier used by LangChain internals."""
        return "custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Send *prompt* as a single user message and return the reply text.

        Args:
            prompt: Fully rendered prompt text.
            stop: Optional stop sequences from the agent; the ReAct
                sentinels are appended to them.
        """
        if isinstance(stop, list):
            # Append the agent's sentinel stop sequences. The original
            # list contained "\nObservation:" twice; the duplicate is
            # dropped (the API sees an identical effective stop set).
            stop = stop + ["\n###", "\nObservation:"]

        response = client.chat.completions.create(
            stop=stop,
            temperature=0.0,  # deterministic output keeps the parser happy
            max_completion_tokens=256,
            messages=[{
                'role': 'user',
                'content': prompt
            }],
            model='llama3-70b-8192',
        )

        return response.choices[0].message.content

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {}
|
test.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import vosk
import pyaudio
import json
import requests

model_path = "model/vosk-model-en-us-0.42-gigaspeech/vosk-model-en-us-0.42-gigaspeech"
# Initialize the model with model-path
model = vosk.Model(model_path)
rec = vosk.KaldiRecognizer(model, 16000)

# Open the microphone once, outside the loop. The original created a new
# PyAudio instance and stream on every iteration and never closed either,
# leaking OS audio handles.
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=16000,
                input=True,
                frames_per_buffer=8192)

print("Ready to record")
try:
    while True:
        print("Ready")

        data = stream.read(8192)
        if rec.AcceptWaveform(data):  # accept waveform of input voice
            # Parse the JSON result and get the recognized text
            res = rec.Result()
            try:
                res = json.loads(res)  # turn the JSON string into a dict
            except json.JSONDecodeError:
                continue
            recognized_text = res['text']
            print(f"User: {recognized_text}")

            url = "http://127.0.0.1:5007/query"

            # Bug fix: this dict was originally assigned to a variable
            # named `json`, shadowing the json module and silently breaking
            # json.loads() on every subsequent iteration (the resulting
            # AttributeError was swallowed by a bare `except:`).
            # NOTE(review): app.py's /query expects an "input" key and
            # returns "response", and listens on 7860 — confirm which
            # server this script is meant to target.
            payload = {
                "input_text": recognized_text,
            }

            req = requests.post(url, json=payload)
            print(f"Bot: {req.json()['output']}")
            if "terminate" in recognized_text.lower():
                print("Termination keyword detected. Stopping...")
                break
finally:
    # Release the audio device even on Ctrl+C or an unexpected error.
    stream.stop_stream()
    stream.close()
    p.terminate()
|
tools/__init__.py
ADDED
|
File without changes
|
tools/tools.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.utilities import GoogleSerperAPIWrapper
|
| 2 |
+
from langchain.tools import BaseTool, StructuredTool, tool
|
| 3 |
+
|
| 4 |
+
@tool
def search(query: str) -> str:
    """Use this tool to look something up on Google. Only run this tool once per question."""
    # The docstring doubles as the tool description the agent reads when
    # choosing tools; the original was ungrammatical ("Function that it
    # use when you searching up something").
    return GoogleSerperAPIWrapper().run(query)
|