AEUPH committed on
Commit
443adc2
·
verified ·
1 Parent(s): 1553c06

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -48
app.py CHANGED
@@ -1,54 +1,39 @@
1
- import gradio as gr
2
- import asyncio
3
- import aiohttp
4
- import json
5
- import os
6
- import re
7
 
8
- def filter_non_alphanumeric(text):
9
- filter_pattern = os.environ.get('FILTER_PATTERN', r'[^a-zA-Z0-9\s]')
10
- return re.sub(filter_pattern, '', text)
 
 
 
 
 
11
 
12
- async def fetch_chatbot_response(session, url, headers, data):
13
- async with session.post(url, headers=headers, json=data) as response:
14
- response_json = await response.json()
15
- chat_content = response_json['choices'][0]['message']['content']
16
- return filter_non_alphanumeric(chat_content)
 
 
 
17
 
18
- async def chatbot_concurrent_interaction(input_text, number_of_chatbots, length_of_responses, roles_of_bots, intent_of_bots):
19
- url = "https://chat.tune.app/api/chat/completions"
20
- headers = {
21
- "Authorization": os.environ.get('API_KEY'),
22
- "Content-Type": "application/json"
23
- }
24
-
25
- tasks = []
26
- async with aiohttp.ClientSession() as session:
27
- for _ in range(int(number_of_chatbots)): # Ensure number_of_chatbots is treated as an integer
28
- data = {
29
- "temperature": 0.1,
30
- "messages": [{"role": role, "content": intent} for role, intent in zip(roles_of_bots.split(','), intent_of_bots.split(','))],
31
- "model": "goliath-120b-16k-gptq",
32
- "stream": False,
33
- "max_tokens": int(length_of_responses)
34
- }
35
- task = asyncio.create_task(fetch_chatbot_response(session, url, headers, data))
36
- tasks.append(task)
37
-
38
- responses = await asyncio.gather(*tasks)
39
- return " || ".join(responses)
40
 
 
 
41
 
42
- def chatbot_api(input_text, number_of_chatbots, length_of_responses, roles_of_bots, intent_of_bots):
43
- return asyncio.run(chatbot_concurrent_interaction(input_text, number_of_chatbots, length_of_responses, roles_of_bots, intent_of_bots))
44
 
45
- inputs = [
46
- gr.inputs.Textbox(lines=2, label="Initial Input Text"),
47
- gr.inputs.Number(label="Number of Chatbots", default=1),
48
- gr.inputs.Slider(minimum=50, maximum=500, default=256, label="Length of Responses"),
49
- gr.inputs.Textbox(default="user,system", label="Roles of Bots (comma-separated)"),
50
- gr.inputs.Textbox(default="Hello,World", label="Intent of Bots (comma-separated)")
51
- ]
52
-
53
- iface = gr.Interface(fn=chatbot_api, inputs=inputs, outputs="text", title="Concurrent Hive of AI Chatbots")
54
- iface.launch()
 
1
+ from langchain.llms import OpenAI
2
+ from langchain.chat_models import ChatOpenAI
3
+ from concurrent.futures import ThreadPoolExecutor
 
 
 
4
 
5
# Helper: ask a single chat model for a reply.
def chatbot_response(api_key, base_url, model_name, query):
    """Send *query* to one OpenAI-compatible chat endpoint and return its text reply.

    Args:
        api_key: Bearer token for the endpoint.
        base_url: Base URL of the OpenAI-compatible API.
        model_name: Identifier of the model to query.
        query: The prompt text to send.

    Returns:
        The model's response text.
    """
    client = ChatOpenAI(
        openai_api_key=api_key,
        openai_api_base=base_url,
        model_name=model_name,
    )
    return client.predict(query)
13
 
14
# Fan a query out to a "hive" of chatbots in parallel.
def hive_response(query):
    """Send *query* to every bot in the hive concurrently and collect replies.

    Each bot is an independent call to an OpenAI-compatible endpoint; the
    work is I/O-bound, so a thread pool overlaps the network waits.

    Args:
        query: The prompt text sent to every bot.

    Returns:
        list[str]: One response per configured bot, in configuration order.
    """
    import os

    # SECURITY FIX: the key was previously hard-coded (and therefore leaked)
    # in source. Read it from the environment instead — same `API_KEY`
    # convention the earlier revision of this app used.
    api_key = os.environ.get("API_KEY")
    base_url = "https://chat.tune.app/api/"
    model_name = "goliath-120b-16k-gptq"

    # One entry per bot. All three are identical today, but each entry
    # could point at a different key/endpoint/model.
    bots_config = [
        {"api_key": api_key, "base_url": base_url, "model_name": model_name}
        for _ in range(3)
    ]

    # Use ThreadPoolExecutor to send queries in parallel to each bot.
    with ThreadPoolExecutor(max_workers=len(bots_config)) as executor:
        futures = [
            executor.submit(
                chatbot_response,
                bot["api_key"],
                bot["base_url"],
                bot["model_name"],
                query,
            )
            for bot in bots_config
        ]
        # result() blocks until each reply arrives; order matches submission.
        responses = [future.result() for future in futures]

    return responses
 
34
 
35
# Example query to the hive — guarded so importing this module does not
# fire off network requests as a side effect (e.g. under a test runner
# or when another app reuses hive_response).
if __name__ == "__main__":
    query = "hi!"
    responses = hive_response(query)
    for idx, response in enumerate(responses, 1):
        print(f"Bot {idx} response: {response}")