import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

print("Loading model...")

# 4-bit NF4 quantization so the 1B base model fits in modest GPU memory.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)

base_model_name = "unsloth/Llama-3.2-1B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

# Attach the fine-tuned LoRA adapter on top of the quantized base model.
model = PeftModel.from_pretrained(base_model, "AA65327/lora_model")
print("Model loaded!")


def classify_emotion(text):
    """Generate an emotion label for the given text."""
    prompt = f"Classify the emotion in this text: {text}\n\nEmotion:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=50,
        do_sample=True,  # sampling must be enabled for temperature to take effect
        temperature=0.3,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Take the first word after the final "Emotion:" marker; guard against
    # the model generating nothing after the marker.
    words = response.split("Emotion:")[-1].strip().split()
    return words[0] if words else "unknown"


demo = gr.Interface(
    fn=classify_emotion,
    inputs=gr.Textbox(label="Enter text to classify", placeholder="I am so happy today!"),
    outputs=gr.Textbox(label="Detected Emotion"),
    title="Emotion Classifier",
    description="Classify emotions in text using a fine-tuned Llama model",
)

demo.launch()
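
# --- Optional: querying the running app programmatically ---
# A minimal sketch (not part of the original script) showing how the
# Interface above could be called once the app is running, assuming the
# default local address http://127.0.0.1:7860 and the default "/predict"
# endpoint that gr.Interface exposes. Left commented out so it does not
# execute inside this process.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860")
# result = client.predict("I am so happy today!", api_name="/predict")
# print(result)  # the detected emotion label, e.g. "happy"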