Commit 9513cae (1 parent: 9825f32): Added multi model config
app.py CHANGED
@@ -1,26 +1,43 @@
 import os
+import json
 from typing import List, Tuple
+from collections import OrderedDict
 
 import gradio as gr
 from openai import OpenAI
 
 
+config = json.loads(os.environ['CONFIG'])
+
+
+model_names = list(config.keys())
+personas = list(OrderedDict.fromkeys(persona for name in config for persona in config[name]["personas"]))
 
-
-
-
-
+
+clients = {}
+for name in config:
+    client = OpenAI(
+        base_url=f"{os.environ[config[name]['api_url']]}/v1",
+        api_key=os.environ[config[name]['api_key']],
+    )
+    clients[name] = client
 
 
 
 def respond(
     message,
     history: List[Tuple[str, str]],
+    model,
+    persona,
     conversational,
     max_tokens,
 ):
     messages = []
 
+    system_prompt = config[model]["personas"][persona]
+    if system_prompt is not None:
+        messages.append({"role": "system", "content": system_prompt})
+
     if conversational:
         for val in history[-2:]:
             if val[0]:
@@ -30,7 +47,7 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    completion =
+    completion = clients[model].chat.completions.create(
         model="neongeckocom/NeonLLM",
         messages=messages,
         max_tokens=max_tokens,
@@ -48,6 +65,8 @@ def respond(
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
+        gr.Radio(choices=model_names, value="stable", label="model"),
+        gr.Radio(choices=personas, value="default", label="persona"),
         gr.Checkbox(value=True, label="conversational"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
     ],
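
The CONFIG document itself is not shown in the diff, but the lookups above pin down its shape: a JSON object keyed by model name, where api_url and api_key hold the names of further environment variables (not the credentials themselves), and personas maps persona labels to system prompts, with null meaning no system message. A minimal sketch of a compatible value, assuming a single model named "stable" (the Radio default above); the STABLE_* variable names and the prompt text are illustrative:

import json

# Hypothetical CONFIG value matching the lookups in app.py:
# - config[name]["api_url"] / config[name]["api_key"] name *other* env vars
# - config[name]["personas"] maps persona labels to system prompts
#   (null / None means "send no system message")
config = {
    "stable": {
        "api_url": "STABLE_API_URL",  # env var holding e.g. "https://llm.example.com"
        "api_key": "STABLE_API_KEY",  # env var holding the API token
        "personas": {
            "default": None,
            "pirate": "You are a pirate assistant.",
        },
    },
}
print(json.dumps(config))  # paste the output into the CONFIG secret

Note that the indirection keeps secrets out of CONFIG: the JSON only names which environment variables to read, so the key material can live in separate Space secrets.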
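
Because personas is collected with OrderedDict.fromkeys over every model's persona keys, a label shared by several models shows up once in the UI, in first-seen order. A quick illustration with a hypothetical two-model config:

from collections import OrderedDict

config = {
    "stable": {"personas": {"default": None, "pirate": "Talk like a pirate."}},
    "beta": {"personas": {"default": None, "scholar": "Answer formally."}},
}
personas = list(OrderedDict.fromkeys(p for name in config for p in config[name]["personas"]))
print(personas)  # ['default', 'pirate', 'scholar'] -- "default" appears once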
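
One wiring detail to keep in mind: gr.ChatInterface forwards each additional_inputs value positionally after message and history, so the two new Radio components must precede the existing Checkbox and Slider to line up with respond's new parameter order. Roughly how Gradio invokes the callback, with the widget defaults as illustrative values:

# respond(message, history, model, persona, conversational, max_tokens)
reply = respond(
    "Hello!",   # message (from the chat textbox)
    [],         # history (list of (user, assistant) tuples)
    "stable",   # model          <- gr.Radio "model"
    "default",  # persona        <- gr.Radio "persona"
    True,       # conversational <- gr.Checkbox
    512,        # max_tokens     <- gr.Slider
)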