NithinAI12 committed on
Commit
b4f9367
·
verified ·
1 Parent(s): de4e92a

Update app.py

Browse files

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# NOTE(review): falcon-7b-instruct is a 7-billion-parameter model — it is NOT
# small, and will likely exceed the memory of the Hugging Face free CPU tier;
# consider a genuinely small instruct model if startup/memory is a problem.
model_name = "tiiuae/falcon-7b-instruct"  # instruct-tuned causal LM

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    # float16 has poor/unsupported kernel coverage on CPU, which can make
    # generate() fail or run extremely slowly — use float32 when on CPU.
    torch_dtype=torch.float32,
    # device_map requires the `accelerate` package — presumably installed in
    # the Space; change to "auto" if a GPU is available.
    device_map="cpu",
)

def nithin_ai(question: str) -> str:
    """Generate an answer to *question* with the loaded causal LM.

    Returns only the newly generated text — the original implementation
    decoded the full sequence, which echoed the user's question back.
    """
    # Keep the attention mask so generate() doesn't have to guess padding.
    enc = tokenizer(question, return_tensors="pt")
    with torch.no_grad():  # inference only — skip gradient bookkeeping
        outputs = model.generate(
            enc.input_ids,
            attention_mask=enc.attention_mask,
            # max_new_tokens bounds the *answer* length; the original
            # max_length=200 counted prompt tokens too, so a long question
            # left little or no room for a reply.
            max_new_tokens=200,
            # Silence the "no pad token" warning on models without one.
            pad_token_id=tokenizer.eos_token_id,
        )
    # Strip the prompt tokens; decode only the completion.
    new_tokens = outputs[0][enc.input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# Wire the generation function into a simple text-in / text-out Gradio UI
# and start serving it.
demo = gr.Interface(
    fn=nithin_ai,
    inputs="text",
    outputs="text",
    title="Nithin AI - Student Doubt Solver",
    description="Ask any question related to robotics, science, or math!",
)

demo.launch()

Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
  # Load a smaller model that works on Hugging Face free tier
6
- model_name = "mistralai/Mistral-7B-Instruct-v0.1" # Use instruct-tuned model
7
 
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
9
  model = AutoModelForCausalLM.from_pretrained(
 
3
  import torch
4
 
5
  # Load a smaller model that works on Hugging Face free tier
6
+ model_name = "tiiuae/falcon-7b-instruct" # Use instruct-tuned model
7
 
8
  tokenizer = AutoTokenizer.from_pretrained(model_name)
9
  model = AutoModelForCausalLM.from_pretrained(