Prithmesh committed on
Commit
5241af1
·
verified ·
1 Parent(s): 5e9c61a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -0
app.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Hugging Face Hub identifier of the instruct model served by this app.
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.3"

# Load tokenizer and weights once at startup. device_map="auto" lets
# accelerate place layers on whatever devices are available; float16
# halves the memory footprint versus the default float32.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",
    torch_dtype=torch.float16,
)
9
+
10
def generate(prompt: str) -> str:
    """Generate a model continuation for *prompt* and return only the new text.

    Args:
        prompt: Raw user text typed into the Gradio textbox.

    Returns:
        The decoded continuation (up to 300 new tokens), with special
        tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=300)
    # Bug fix: generate() returns the prompt tokens followed by the
    # continuation. Decoding outputs[0] wholesale echoed the user's
    # prompt back in the "AI Response" box, so decode only the tokens
    # produced after the input sequence.
    prompt_len = inputs["input_ids"].shape[-1]
    new_tokens = outputs[0][prompt_len:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
16
+
17
# Wire the generation function into a simple one-box-in, one-box-out UI.
prompt_box = gr.Textbox(lines=5, placeholder="Enter your prompt here")
response_box = gr.Textbox(label="AI Response")

iface = gr.Interface(
    fn=generate,
    inputs=prompt_box,
    outputs=response_box,
)

# Bind to all interfaces on port 7860 (the conventional Spaces port).
iface.launch(server_name="0.0.0.0", server_port=7860)