AEUPH committed
Commit c53a77e · verified · Parent: 2849cd0

Update app.py

Files changed (1): app.py (+23 -14)
app.py CHANGED

@@ -1,6 +1,8 @@
+import gradio as gr
 from langchain.llms import OpenAI
 from langchain.chat_models import ChatOpenAI
 from concurrent.futures import ThreadPoolExecutor
+import json
 
 # Function to initialize a chat model and get a response
 def chatbot_response(api_key, base_url, model_name, query):
@@ -11,14 +13,10 @@ def chatbot_response(api_key, base_url, model_name, query):
     )
     return chat_model.predict(query)
 
-# Example usage with a hive of chatbots
-def hive_response(query):
-    # Configuration for each bot in the hive (could be different for each)
-    bots_config = [
-        {"api_key": "tune-adafd1fc-f66f-4242-aed1-c0ce3722718a1711930019", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
-        {"api_key": "tune-adafd1fc-f66f-4242-aed1-c0ce3722718a1711930019", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
-        {"api_key": "tune-adafd1fc-f66f-4242-aed1-c0ce3722718a1711930019", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"}
-    ]
+# Function to handle hive response with dynamic configuration
+def hive_response(query, bots_config_json):
+    # Parse the JSON input into a Python list of dictionaries
+    bots_config = json.loads(bots_config_json)
 
     # Use ThreadPoolExecutor to send queries in parallel to each bot
     with ThreadPoolExecutor(max_workers=len(bots_config)) as executor:
@@ -30,10 +28,21 @@ def hive_response(query):
     # Collect responses from all bots
     responses = [future.result() for future in futures]
 
-    return responses
+    return [f"Bot {idx} response: {response}" for idx, response in enumerate(responses, 1)]
 
-# Example query to the hive
-query = "hi!"
-responses = hive_response(query)
-for idx, response in enumerate(responses, 1):
-    print(f"Bot {idx} response: {response}")
+# Define the Gradio interface
+iface = gr.Interface(
+    fn=hive_response,
+    inputs=[
+        gr.Textbox(label="Query"),
+        gr.Textbox(label="Bots Configuration (JSON)", default=json.dumps([
+            {"api_key": "tune-adafd1fc-f66f-4242-aed1-c0ce3722718a1711930019", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
+            {"api_key": "tune-bbb2d24b-f913-48d3-b3fe-dcf2b8ed10371711926291", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
+            {"api_key": "tune-a67090f0-e25e-4769-a67e-86d71ba22e0f1696395693", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"}
+        ], indent=2))
+    ],
+    outputs=[gr.Text(label="Responses")],
+    description="Enter your query and bots configuration in JSON format to get responses from a hive of chatbots."
+)
+
+iface.launch()
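
For reference, the reworked hive_response can also be exercised without launching the Gradio UI. A minimal sketch, assuming hive_response and chatbot_response from this commit are in scope and chatbot_response returns a plain string; YOUR_TUNE_KEY is a placeholder, not a value from this commit, while the endpoint and model name are taken from the diff above:

import json

# Placeholder configuration for two bots sharing one endpoint; substitute
# real API keys before running (YOUR_TUNE_KEY is a stand-in).
bots_config = [
    {"api_key": "YOUR_TUNE_KEY", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
    {"api_key": "YOUR_TUNE_KEY", "base_url": "https://chat.tune.app/api/", "model_name": "goliath-120b-16k-gptq"},
]

# hive_response now expects the configuration as a JSON string, so serialize it first
responses = hive_response("hi!", json.dumps(bots_config))
for line in responses:
    print(line)  # e.g. "Bot 1 response: ..."

Because hive_response fans the query out through a ThreadPoolExecutor sized to the number of configured bots, the call returns once the slowest bot has answered, and the results come back already labeled per bot.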