Update app.py
app.py CHANGED
@@ -1,54 +1,39 @@
-
-import aiohttp
-import asyncio
-import json
-import os
-import re

-
-
-

-
-
-
-
-

-
-
-
-
-
-
-
-    tasks = []
-    async with aiohttp.ClientSession() as session:
-        for _ in range(int(number_of_chatbots)):  # Ensure number_of_chatbots is treated as an integer
-            data = {
-                "temperature": 0.1,
-                "messages": [{"role": role, "content": intent} for role, intent in zip(roles_of_bots.split(','), intent_of_bots.split(','))],
-                "model": "goliath-120b-16k-gptq",
-                "stream": False,
-                "max_tokens": int(length_of_responses)
-            }
-            task = asyncio.create_task(fetch_chatbot_response(session, url, headers, data))
-            tasks.append(task)
-
-    responses = await asyncio.gather(*tasks)
-    return " || ".join(responses)


-
-    return asyncio.run(chatbot_concurrent_interaction(input_text, number_of_chatbots, length_of_responses, roles_of_bots, intent_of_bots))

-
-
-
-
-
-    gr.inputs.Textbox(default="Hello,World", label="Intent of Bots (comma-separated)")
-]
-
-iface = gr.Interface(fn=chatbot_api, inputs=inputs, outputs="text", title="Concurrent Hive of AI Chatbots")
-iface.launch()
+from langchain.llms import OpenAI
+from langchain.chat_models import ChatOpenAI
+from concurrent.futures import ThreadPoolExecutor

+# Function to initialize a chat model and get a response
+def chatbot_response(api_key, base_url, model_name, query):
+    chat_model = ChatOpenAI(
+        openai_api_key=api_key,
+        openai_api_base=base_url,
+        model_name=model_name
+    )
+    return chat_model.predict(query)

+# Example usage with a hive of chatbots
+def hive_response(query):
+    # Configuration for each bot in the hive (could be different for each)
+    bots_config = [
+        {"api_key": "tune-adafd1fc-f66f-4242-aed1-c0ce3722718a1711930019", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
+        {"api_key": "tune-adafd1fc-f66f-4242-aed1-c0ce3722718a1711930019", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
+        {"api_key": "tune-adafd1fc-f66f-4242-aed1-c0ce3722718a1711930019", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"}
+    ]

+    # Use ThreadPoolExecutor to send queries in parallel to each bot
+    with ThreadPoolExecutor(max_workers=len(bots_config)) as executor:
+        futures = [
+            executor.submit(chatbot_response, bot['api_key'], bot['base_url'], bot['model_name'], query)
+            for bot in bots_config
+        ]

+    # Collect responses from all bots
+    responses = [future.result() for future in futures]

+    return responses

+# Example query to the hive
+query = "hi!"
+responses = hive_response(query)
+for idx, response in enumerate(responses, 1):
+    print(f"Bot {idx} response: {response}")
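The new code fans one query out to several blocking LangChain clients via concurrent.futures.ThreadPoolExecutor. A minimal standalone sketch of that fan-out pattern, with a stub standing in for the real chat_model.predict call (the stub, its timing, and the bot count are illustrative assumptions, not part of this commit):

from concurrent.futures import ThreadPoolExecutor
import time

def slow_bot(bot_id, query):
    # Stand-in for a blocking API call such as chat_model.predict(query)
    time.sleep(1)
    return f"bot {bot_id} echoes: {query}"

with ThreadPoolExecutor(max_workers=3) as executor:
    # submit() returns a Future immediately; the three calls run in
    # parallel worker threads, so total wall time is ~1s rather than ~3s
    futures = [executor.submit(slow_bot, i, "hi!") for i in range(3)]

# Exiting the with-block waits for all workers to finish,
# so every result() call below returns without blocking further
print([f.result() for f in futures])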