Updated Errors and Custom Tools
Browse files- __pycache__/chat.cpython-312.pyc +0 -0
- app.py +66 -25
- chat.py +96 -9
- requirements.txt +1 -0
__pycache__/chat.cpython-312.pyc
ADDED
|
Binary file (9.44 kB). View file
|
|
|
app.py
CHANGED
|
@@ -2,38 +2,79 @@ import mdtex2html
|
|
| 2 |
from flask import Flask, request, jsonify
|
| 3 |
from chat import converse
|
| 4 |
import json
|
|
|
|
| 5 |
|
| 6 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
@app.route("/", methods=['GET', 'POST'])
|
| 9 |
def home():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
try:
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
provider = par.get('provider')
|
| 17 |
-
model = par.get('model')
|
| 18 |
-
api = par.get('api')
|
| 19 |
-
|
| 20 |
-
if not all([conversation, provider, model, api]):
|
| 21 |
-
return jsonify({"error": "Missing required parameters"}), 400
|
| 22 |
-
|
| 23 |
-
load = json.loads(converse(conversation, provider, model, api))
|
| 24 |
-
load['DirectResult'] = load['content']
|
| 25 |
-
|
| 26 |
-
try:
|
| 27 |
-
load['content'] = mdtex2html.convert(load['content'])
|
| 28 |
-
except Exception as e:
|
| 29 |
-
return jsonify({"error": f"Error converting content: {str(e)}"}), 500
|
| 30 |
-
|
| 31 |
-
toreturn = json.dumps(load, indent=4)
|
| 32 |
-
# print(toreturn)
|
| 33 |
-
return toreturn, 200
|
| 34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
except Exception as e:
|
| 36 |
-
return jsonify({"error": str(e)}), 500
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
if __name__ == "__main__":
|
| 39 |
app.run(host='0.0.0.0', debug=True, port=1777)
|
|
|
|
| 2 |
from flask import Flask, request, jsonify
|
| 3 |
from chat import converse
|
| 4 |
import json
|
| 5 |
+
from flask_cors import CORS
|
| 6 |
|
| 7 |
+
# Registry of tool definitions (OpenAI function-calling schema) that clients
# may enable by name via the request's "other.tools" list.
# NOTE(review): "AVAIABLETOOLS" misspells "AVAILABLE"; name kept so external
# references keep working.
AVAIABLETOOLS = [
    {
        "name": "search",
        "description": "Search Internet For Related Query and Provide Uptodate query",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Search Query Follow the General Search Methods to get better result"
                }
            },
            "required": ["query"]
        }
    }
]

def ToolSelector(other):
    """Resolve requested tool names to their full definitions.

    Parameters:
        other: dict of request options; ``other['tools']`` must be a list of
            tool-name strings.

    Returns:
        list[dict]: matching entries from ``AVAIABLETOOLS``; unknown names
        are silently ignored (original behaviour preserved).

    Raises:
        KeyError: if ``other`` has no ``'tools'`` key (unchanged — the caller
            treats any failure here as "no tools requested").
        Exception: if ``other['tools']`` is not a list.
    """
    requested = other['tools']
    # isinstance() instead of type(...) == list, so list subclasses work too.
    if not isinstance(requested, list):
        raise Exception('tools is not provided in list format')
    # Previously a bare "except: pass" could silently swallow errors and
    # return a partial result; the comprehension needs no such guard.
    selected = [tool for name in requested
                for tool in AVAIABLETOOLS if tool['name'] == name]
    print(selected)  # debug trace, kept from the original
    return selected
|
| 41 |
+
# Flask application instance; CORS is enabled for all routes so browser
# front-ends served from other origins can call this API.
app = Flask(__name__)
CORS(app)
|
| 43 |
@app.route("/", methods=['GET', 'POST'])
def home():
    """Chat endpoint.

    Expects a JSON body with 'conversation', 'provider', 'model', 'api' and
    an optional 'other' dict (whose 'tools' list is resolved through
    ToolSelector).  Returns the provider response as pretty-printed JSON,
    with 'content' rendered via mdtex2html and the raw text preserved under
    'DirectResult'.
    """
    try:
        par = request.get_json()
        if not par:
            return jsonify({"error": "Invalid JSON input"}), 400

        conversation = par.get('conversation')
        provider = par.get('provider')
        model = par.get('model')
        api = par.get('api')

        # Optional extras; any problem resolving tools means "no tools",
        # matching the original fallback but without a bare except hiding
        # unrelated bugs.
        other = par.get('other')
        if isinstance(other, dict):
            try:
                other['tools'] = ToolSelector(other)
            except Exception:
                other = {}
        else:
            other = {}

        if not all([conversation, provider, model, api]):
            return jsonify({"error": "Missing required parameters"}), 400
        print(json.dumps(other, indent=4))  # debug trace, kept from original

        load = json.loads(converse(conversation, provider, model, api, other=other))
        # Keep the unrendered text so clients can choose raw vs HTML.
        load['DirectResult'] = load['content']

        try:
            load['content'] = mdtex2html.convert(load['content'])
        except Exception as e:
            return jsonify({"error": f"Error converting content: {str(e)}"}), 500

        return json.dumps(load, indent=4), 200
    except Exception as e:
        # Restored top-level handler (it had been commented out): clients
        # always get a JSON error instead of an HTML debug page.
        return jsonify({"error": str(e)}), 500
|
| 78 |
|
| 79 |
if __name__ == "__main__":
    # Development server only: debug=True enables the Werkzeug debugger
    # (arbitrary code execution) and must not be used in production.
    app.run(host='0.0.0.0', debug=True, port=1777)
|
chat.py
CHANGED
|
@@ -5,23 +5,100 @@ from langchain_core.prompts import ChatPromptTemplate
|
|
| 5 |
from langchain_groq import ChatGroq
|
| 6 |
from langchain.chains import ConversationChain
|
| 7 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 8 |
-
from
|
| 9 |
-
from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
from langchain_cohere import ChatCohere
|
|
|
|
| 12 |
def langchainConversation(conversation):
|
| 13 |
prompts = []
|
| 14 |
for message in conversation:
|
| 15 |
-
prompts.append((message['role'],message['context']))
|
| 16 |
chat_template = ChatPromptTemplate.from_messages(prompts)
|
| 17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
def segmind_input_parser(input):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
toreturn = []
|
| 20 |
for thisdict in input:
|
| 21 |
toreturn.append({'role':thisdict['role'],'content':thisdict['context']})
|
| 22 |
return toreturn
|
| 23 |
def segmind_output_parser(input):
|
| 24 |
-
return
|
| 25 |
def converse(conversation,provider,model,key,other:dict={}):
|
| 26 |
if(provider=='groq'):
|
| 27 |
chat = ChatGroq(temperature=0, groq_api_key=key, model_name=model)
|
|
@@ -47,10 +124,10 @@ def converse(conversation,provider,model,key,other:dict={}):
|
|
| 47 |
input = { "messages": inputs }
|
| 48 |
response = requests.post(f"{API_BASE_URL}{model}", headers=headers, json=input)
|
| 49 |
return response.json()
|
| 50 |
-
inputs =
|
| 51 |
output = run(model, inputs)
|
| 52 |
print(output)
|
| 53 |
-
return {'content':output['result']['response']}
|
| 54 |
elif(provider == 'openrouter'):
|
| 55 |
chat = ChatOpenAI(
|
| 56 |
base_url="https://openrouter.ai/api/v1",
|
|
@@ -68,7 +145,17 @@ def converse(conversation,provider,model,key,other:dict={}):
|
|
| 68 |
response = requests.post(url, json=data, headers={'x-api-key': key})
|
| 69 |
output = json.loads(response.text)
|
| 70 |
print(json.dumps(output,indent=4))
|
| 71 |
-
return segmind_output_parser(output)
|
| 72 |
else:
|
| 73 |
return json.dumps({'content':'unspported Provider'})
|
| 74 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
from langchain_groq import ChatGroq
|
| 6 |
from langchain.chains import ConversationChain
|
| 7 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 8 |
+
from langchain.tools import tool
|
| 9 |
+
from langchain.pydantic_v1 import BaseModel, Field
|
| 10 |
+
import urllib.parse
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def langchain_functions_parser(thisjson):
    """Normalize a serialized LangChain message in place.

    Moves a legacy ``additional_kwargs['function_call']`` entry into the
    newer ``additional_kwargs['tool_calls']`` list form.  Messages without
    a function call (or with an unexpected shape) are returned unchanged,
    matching the original broad-except behaviour.

    Parameters:
        thisjson: dict produced by ``json.loads(message.json())``.

    Returns:
        The same dict object, possibly mutated.
    """
    if not isinstance(thisjson, dict):
        return thisjson
    kwargs = thisjson.get('additional_kwargs')
    if not isinstance(kwargs, dict) or 'function_call' not in kwargs:
        # Nothing to convert.
        return thisjson
    # pop() both reads and removes the legacy key — the original did this
    # in two steps with a redundant inner try/except.
    function_call_data = kwargs.pop('function_call')
    kwargs['tool_calls'] = [
        {'id': 'No ID', 'function': function_call_data, 'type': 'function'}
    ]
    return thisjson
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
from typing import List, Dict, Any, Optional
|
| 31 |
+
from langchain.tools import BaseTool
|
| 32 |
+
from pydantic import BaseModel, Field
|
| 33 |
+
|
| 34 |
+
def convert_openai_tools_to_langchain(tools: List[Dict]) -> List[BaseTool]:
    """Convert OpenAI-style tool specs into LangChain BaseTool instances.

    Each spec is a dict with "name", "description" and a JSON-schema
    "parameters" object.  The args schema is built dynamically with
    ``pydantic.create_model`` — the previous approach of ``exec``-ing
    annotated assignments inside a class body did not reliably produce
    pydantic fields and crashed (``param_type.__name__``) for any
    non-"string" parameter type.
    """
    # Local import keeps the module-level import surface unchanged.
    from pydantic import create_model

    # JSON-schema type -> Python type for schema fields; unknown types
    # default to str (the only type the original handled).
    type_map = {
        "string": str,
        "integer": int,
        "number": float,
        "boolean": bool,
        "array": list,
        "object": dict,
    }

    class CustomTool(BaseTool):
        # Placeholder: real tool execution is not wired up yet.
        def _run(self, query: str) -> str:
            print(f"Running tool {self.name} with query: {query}")
            return f"Web search results for: {query}"

    langchain_tools = []
    for spec in tools:
        field_defs = {
            param_name: (
                type_map.get(param_info.get("type"), str),
                Field(..., description=param_info.get("description")),
            )
            for param_name, param_info in spec["parameters"]["properties"].items()
        }
        args_schema = create_model(f"{spec['name']}_ArgsSchema", **field_defs)
        langchain_tools.append(
            CustomTool(
                name=spec["name"],
                description=spec["description"],
                args_schema=args_schema,
            )
        )
    return langchain_tools
|
| 71 |
+
class SearchInput(BaseModel):
    """Argument schema for the 'search' tool.

    NOTE(review): the field description below looks truncated ("follo") —
    confirm intended wording; it is left untouched here because it is
    runtime prompt text.
    """
    query: str = Field(description="a search query follo")
|
| 73 |
|
| 74 |
from langchain_cohere import ChatCohere
|
| 75 |
+
|
| 76 |
def langchainConversation(conversation):
    """Build LangChain message objects from [{'role': ..., 'context': ...}] dicts.

    Message text is percent-encoded before being handed to
    ChatPromptTemplate and decoded again after formatting; presumably this
    protects literal '{'/'}' in user text from being treated as template
    variables — TODO confirm against ChatPromptTemplate's behaviour.
    """
    prompts = []
    for message in conversation:
        # Escape content so the template engine sees no special characters.
        prompts.append((message['role'],urllib.parse.quote((message['context']))))
    chat_template = ChatPromptTemplate.from_messages(prompts)
    toreturn = []

    for z in chat_template.format_messages():
        # Undo the percent-encoding on the rendered messages.
        z.content = urllib.parse.unquote(z.content)
        toreturn.append(z)
    return(toreturn)
|
| 87 |
def segmind_input_parser(input):
    """Map internal conversation dicts to the Segmind chat format.

    Renames the text key 'context' -> 'content' and translates the 'ai'
    role to 'assistant'; every other role passes through unchanged.
    """
    converted = []
    for message in input:
        role = 'assistant' if message['role'] == 'ai' else message['role']
        converted.append({'role': role, 'content': message['context']})
    return converted
|
| 95 |
+
def workers_input_parser(input):
    """Map internal conversation dicts to the workers provider's chat
    format, which expects the text under 'content' instead of 'context'."""
    return [{'role': entry['role'], 'content': entry['context']} for entry in input]
|
| 100 |
def segmind_output_parser(input):
    """Reshape a raw Segmind completion payload into the LangChain-style
    serialized AIMessage dict the rest of the app expects."""
    usage = input['usage']
    text = input['choices'][0]['message']['content']
    return {
        "content": text,
        "additional_kwargs": {},
        "response_metadata": {},
        "type": "ai",
        "name": None,
        "id": input['id'],
        "example": False,
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": {
            "input_tokens": usage['prompt_tokens'],
            "output_tokens": usage['completion_tokens'],
            "total_tokens": usage['total_tokens'],
        },
    }
|
| 102 |
def converse(conversation,provider,model,key,other:dict={}):
|
| 103 |
if(provider=='groq'):
|
| 104 |
chat = ChatGroq(temperature=0, groq_api_key=key, model_name=model)
|
|
|
|
| 124 |
input = { "messages": inputs }
|
| 125 |
response = requests.post(f"{API_BASE_URL}{model}", headers=headers, json=input)
|
| 126 |
return response.json()
|
| 127 |
+
inputs = workers_input_parser(conversation)
|
| 128 |
output = run(model, inputs)
|
| 129 |
print(output)
|
| 130 |
+
return json.dumps({'content':output['result']['response']},indent=4)
|
| 131 |
elif(provider == 'openrouter'):
|
| 132 |
chat = ChatOpenAI(
|
| 133 |
base_url="https://openrouter.ai/api/v1",
|
|
|
|
| 145 |
response = requests.post(url, json=data, headers={'x-api-key': key})
|
| 146 |
output = json.loads(response.text)
|
| 147 |
print(json.dumps(output,indent=4))
|
| 148 |
+
return json.dumps(segmind_output_parser(output),indent=4)
|
| 149 |
else:
|
| 150 |
return json.dumps({'content':'unspported Provider'})
|
| 151 |
+
try:
|
| 152 |
+
tools = other['tools']
|
| 153 |
+
except:
|
| 154 |
+
tools = []
|
| 155 |
+
|
| 156 |
+
if(provider not in ['cohere'] ):
|
| 157 |
+
return json.dumps(langchain_functions_parser(json.loads(chat.invoke(langchainConversation(conversation),functions=tools).json())),indent=4)
|
| 158 |
+
else:
|
| 159 |
+
|
| 160 |
+
chat = chat.bind_tools(convert_openai_tools_to_langchain(tools))
|
| 161 |
+
return json.dumps(langchain_functions_parser(json.loads(chat.invoke(langchainConversation(conversation)).json())),indent=4)
|
requirements.txt
CHANGED
|
@@ -9,3 +9,4 @@ langchain_cohere
|
|
| 9 |
langchain_openai
|
| 10 |
requests
|
| 11 |
jq
|
|
|
|
|
|
| 9 |
langchain_openai
|
| 10 |
requests
|
| 11 |
jq
|
| 12 |
+
flask_cors
|