NithinAI12 commited on
Commit
de4e92a
·
verified ·
1 Parent(s): d363673

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -0
app.py CHANGED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# NOTE(review): Mistral-7B is NOT a small model — fp16 weights alone need
# ~14 GB of RAM, which exceeds the Hugging Face free CPU tier, and the repo
# is gated (license acceptance / auth token required). Confirm the target
# tier before deploying; the original comment claiming "smaller model" was
# misleading.
model_name = "mistralai/Mistral-7B-Instruct-v0.1"  # instruct-tuned variant

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    # float16 halves memory, but many CPU kernels lack fp16 support — if
    # generation errors or crawls on CPU, switch to torch.float32.
    torch_dtype=torch.float16,
    device_map="cpu",  # change to "auto" to place layers on available GPUs
)
def nithin_ai(question):
    """Answer a free-form question with the loaded causal language model.

    Parameters
    ----------
    question : str
        The user's question, passed verbatim as the prompt.

    Returns
    -------
    str
        The decoded generation. Note: the full sequence is decoded, so the
        prompt text is echoed at the start of the response.
    """
    # Keep the full tokenizer output so the attention mask can be passed to
    # generate() — avoids the "attention mask not set" warning and incorrect
    # handling when pad == eos.
    encoded = tokenizer(question, return_tensors="pt")
    # Bug fix: the original used max_length=200, which counts PROMPT tokens
    # toward the limit — a long question silently starved or truncated the
    # answer. max_new_tokens bounds only the generated continuation.
    outputs = model.generate(
        encoded.input_ids,
        attention_mask=encoded.attention_mask,
        max_new_tokens=200,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Gradio front end: a single text box in, the model's text answer out,
# wired to the nithin_ai generation function.
app_title = "Nithin AI - Student Doubt Solver"
app_blurb = "Ask any question related to robotics, science, or math!"

iface = gr.Interface(
    fn=nithin_ai,
    inputs="text",
    outputs="text",
    title=app_title,
    description=app_blurb,
)

iface.launch()