Spaces: Runtime error

Update app.py
app.py CHANGED

@@ -1,3 +1,4 @@
+import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 model_id = "INTERX/Qwen2.5-GenX-14B"
@@ -5,17 +6,12 @@ model_id = "INTERX/Qwen2.5-GenX-14B"
 tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto", trust_remote_code=True)
 
-
-messages = [{"role": "user", "content": prompt}]
-
-tokenized_chat = tokenizer.apply_chat_template(
-    messages,
-    tokenize=True,
-    add_generation_prompt=True,
-    return_tensors='pt'
-).to(model.device)
-
-generated_ids = model.generate(tokenized_chat, max_new_tokens=512)
-
-response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-print(response)
+def chat(prompt):
+    messages = [{"role": "user", "content": prompt}]
+    tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors='pt').to(model.device)
+    generated_ids = model.generate(tokenized_chat, max_new_tokens=512)
+    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+    return response
+
+gr.Interface(fn=chat, inputs="text", outputs="text", title="Qwen2.5-GenX-14B Chat").launch()
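Note on the change: the previous app.py referenced `prompt` at module level before any such variable was defined, so the script raised a NameError as soon as the Space booted. Wrapping the generation code in `chat()` fixes that and gives Gradio a callable to serve. One remaining quirk: `model.generate()` returns the prompt tokens followed by the completion, so decoding the full `generated_ids` echoes the chat-template-formatted prompt ahead of the model's reply. A minimal sketch of a trimmed variant, not part of this commit, reusing the `tokenizer` and `model` objects defined in app.py:

def chat(prompt):
    messages = [{"role": "user", "content": prompt}]
    tokenized_chat = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors='pt'
    ).to(model.device)
    generated_ids = model.generate(tokenized_chat, max_new_tokens=512)
    # generate() returns prompt + completion; keep only the newly generated tokens
    new_tokens = generated_ids[:, tokenized_chat.shape[1]:]
    return tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0]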
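Since each call to `chat()` is a single independent turn, Gradio's chat-oriented wrapper would also fit here. A sketch assuming a recent Gradio release: `gr.ChatInterface` passes the running conversation history as a second argument, which this app does not use.

import gradio as gr

# Sketch only: ChatInterface expects fn(message, history) and renders a chat UI.
def respond(message, history):
    # History is ignored, so every turn is independent, matching the current app.
    return chat(message)

gr.ChatInterface(fn=respond, title="Qwen2.5-GenX-14B Chat").launch()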