hanzla javaid committed
Commit ff120ef · 1 Parent(s): 116a0d1
test
app.py CHANGED
@@ -7,55 +7,153 @@ import spaces

Before (old lines 7-61; the text of most removed lines was lost in this render and is shown as …):

loaded_models = {}

# List of available models (update with your preferred models)
models = [ … ]

@spaces.GPU
def …
    …

@spaces.GPU
def get_model_response(model_name, message):
    …
    inputs = tokenizer(message, return_tensors="pt").to(model.device)

    …

    response = tokenizer.decode(outputs[0])
    return response


def chat(message, …
    response1 = get_model_response(model1, message)
    response2 = get_model_response(model2, message)
    …


with gr.Blocks() as demo:
    gr.Markdown("# Hugging Face Model Comparison Chat")

    with gr.Row():
        model1_dropdown = gr.Dropdown(choices=models, label="Model 1", value=models[0])
        model2_dropdown = gr.Dropdown(choices=models, label="Model 2", value=models[1])

    …
    msg = gr.Textbox(label="Your message")
    clear = gr.Button("Clear")
    …
    with gr.Row():
        …

    …

if __name__ == "__main__":
    demo.launch()
After (new lines 7-159):

loaded_models = {}

# List of available models (update with your preferred models)
models = [
    "hanzla/gemma-2b-datascience-instruct-v5",
    "hanzla/gemma-2b-datascience-instruct-v4.5"
]


@spaces.GPU
def load_all_models():
    """
    Pre-loads all models and their tokenizers into memory.
    """
    for model_name in models:
        if model_name not in loaded_models:
            print(f"Loading model: {model_name}")
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = AutoModelForCausalLM.from_pretrained(model_name).to("cuda" if torch.cuda.is_available() else "cpu")
            loaded_models[model_name] = (model, tokenizer)
    return "All models loaded successfully."
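Keeping two Gemma-2B checkpoints resident in full precision can be tight on a single GPU. A minimal sketch of a lower-memory variant of the loading line, assuming an fp16-capable GPU and that accelerate is installed (neither is stated by the commit):

    # Hypothetical lower-memory load, not part of this commit.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,  # assumption: the GPU supports fp16
        device_map="auto",          # assumption: accelerate is available
    )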
@spaces.GPU
def get_model_response(model_name, message):
    """
    Generates a response from the specified model given a user message.
    """
    model, tokenizer = loaded_models[model_name]
    inputs = tokenizer(message, return_tensors="pt").to(model.device)

    # Generate response with appropriate parameters
    outputs = model.generate(
        **inputs,
        max_length=512,
        do_sample=True,
        top_p=0.95,
        top_k=50
    )

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
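Two caveats about the call above that follow from the transformers API: max_length counts the prompt tokens as well as the new ones, and decoding outputs[0] from a causal LM echoes the prompt back into the response. A sketch of the commonly used alternative (an illustration, not what this commit does):

    # Hypothetical variant: cap only new tokens and strip the echoed prompt.
    outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True, top_p=0.95, top_k=50)
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)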
def chat(message, history1, history2, model1, model2):
    """
    Handles the chat interaction by getting responses from both models
    and updating their respective histories.
    """
    response1 = get_model_response(model1, message)
    response2 = get_model_response(model2, message)

    history1 = history1 or []
    history2 = history2 or []

    # Update history for Model 1
    history1.append(("User", message))
    history1.append((model1.split("/")[-1], response1))

    # Update history for Model 2
    history2.append(("User", message))
    history2.append((model2.split("/")[-1], response2))

    return history1, history2
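Note that gr.Chatbot's tuple-style history treats each pair as (user_message, assistant_message), so the pairs appended above render the literal string "User" and the model name as message bubbles rather than as speaker labels. The conventional one-pair-per-turn form would be (a suggestion, not part of the commit):

    # Hypothetical fix: one (user, assistant) pair per exchange.
    history1.append((message, response1))
    history2.append((message, response2))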
# Initialize vote counts
vote_counts = {"model1": 0, "model2": 0}


def upvote_vote(model1, model2):
    """
    Increments the vote count for Model 1 and returns updated counts.
    """
    vote_counts["model1"] += 1
    return f"Votes - {model1.split('/')[-1]}: {vote_counts['model1']}, {model2.split('/')[-1]}: {vote_counts['model2']}"


def downvote_vote(model1, model2):
    """
    Increments the vote count for Model 2 and returns updated counts.
    """
    vote_counts["model2"] += 1
    return f"Votes - {model1.split('/')[-1]}: {vote_counts['model1']}, {model2.split('/')[-1]}: {vote_counts['model2']}"


def clear_chat():
    """
    Clears both chat histories and resets vote counts.
    """
    global vote_counts
    vote_counts = {"model1": 0, "model2": 0}
    return [], [], "Votes - 0, 0"
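Because vote_counts lives at module level, every visitor to the Space shares the same tallies. If per-session counts were intended, Gradio's gr.State is the usual mechanism; a minimal sketch under that assumption (hypothetical names, not in this commit):

    # Hypothetical per-session counter; gr.State gives each session its own copy.
    votes = gr.State({"model1": 0, "model2": 0})  # created inside the Blocks context

    def upvote_session(votes_dict):
        votes_dict["model1"] += 1
        return votes_dict, f"Votes - {votes_dict['model1']}, {votes_dict['model2']}"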
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Hugging Face Model Comparison Chat")

    # Dropdowns for selecting models
    with gr.Row():
        model1_dropdown = gr.Dropdown(choices=models, label="Model 1", value=models[0])
        model2_dropdown = gr.Dropdown(choices=models, label="Model 2", value=models[1])

    # Separate chatboxes for each model
    with gr.Row():
        with gr.Column():
            gr.Markdown("### 🧠 Model 1 Chat")
            chatbot1 = gr.Chatbot(label=f"{models[0].split('/')[-1]} Chat History")
        with gr.Column():
            gr.Markdown("### 🧠 Model 2 Chat")
            chatbot2 = gr.Chatbot(label=f"{models[1].split('/')[-1]} Chat History")

    # Input textbox for user message
    msg = gr.Textbox(label="💬 Your Message", placeholder="Type your message here...")

    # Buttons for upvote, downvote, and clearing the chat
    with gr.Row():
        upvote = gr.Button("👍 Upvote Model 1")
        downvote = gr.Button("👎 Downvote Model 2")
        clear = gr.Button("🧹 Clear Chat")

    # Textbox to display vote counts
    vote_text = gr.Textbox(label="🏆 Vote Counts", value="Votes - 0, 0", interactive=False)

    # Define interactions
    msg.submit(
        chat,
        inputs=[msg, chatbot1, chatbot2, model1_dropdown, model2_dropdown],
        outputs=[chatbot1, chatbot2]
    )

    upvote.click(
        upvote_vote,
        inputs=[model1_dropdown, model2_dropdown],
        outputs=vote_text
    )

    downvote.click(
        downvote_vote,
        inputs=[model1_dropdown, model2_dropdown],
        outputs=vote_text
    )

    clear.click(
        clear_chat,
        outputs=[chatbot1, chatbot2, vote_text]
    )

    # Pre-load all models when the space starts
    load_all_models()
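One usability gap in the wiring above: the message textbox keeps its text after submit. A common Gradio idiom, suggested here as a hypothetical addition rather than part of the commit, chains a second handler inside the same Blocks context:

    # Hypothetical addition (not in this commit): empty the textbox on submit.
    msg.submit(lambda: "", None, msg)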

if __name__ == "__main__":
    demo.launch()
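The hunk starts at file line 7, so the import block is outside the diff; the hunk header's context line confirms import spaces, and the other names used above imply a header roughly like this (inferred, the actual file may differ):

    # Presumed file header (lines 1-6, outside this hunk); inferred from usage.
    import gradio as gr
    import torch
    import spaces
    from transformers import AutoTokenizer, AutoModelForCausalLM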