Genie-AI-Lab committed on
Commit
d4e1b88
·
verified ·
1 Parent(s): a041cb0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -0
app.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import torch
4
+
5
# Load the tokenizer and model weights once at startup so every request
# reuses them. fp16 halves the memory footprint of the weights.
model_name = "Genie-AI-Lab/Omni-Genie"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
# Inference-only app: switch off dropout/batch-norm training behavior.
model.eval()
9
+
10
def chat(message, history=None):
    """Generate a reply to *message* with the loaded causal LM.

    Parameters
    ----------
    message : str
        The user's latest prompt.
    history : list | None
        Prior turns passed by ``gr.ChatInterface``. The model call here is
        stateless, so it is unused — but the parameter is required because
        ChatInterface invokes its fn as ``fn(message, history)``; the
        original one-argument signature raised TypeError on first use.

    Returns
    -------
    str
        The model's continuation only (the echoed prompt is stripped).
    """
    inputs = tokenizer(message, return_tensors="pt")
    # No gradients needed for generation — saves memory and time.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=50)
    # generate() returns prompt + continuation; decode only the new tokens
    # so the user's own message is not echoed back in the reply.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
15
if __name__ == "__main__":
    # Build the chat UI around the model and start the Gradio web server.
    gr.ChatInterface(chat).launch()