HaryaniAnjali committed on
Commit
0875b04
·
verified ·
1 Parent(s): 7c46550

Update app.py

Files changed (1)
  1. app.py +75 -0
app.py CHANGED
@@ -11,3 +11,78 @@ def classify_text(text):
 # Create a Gradio interface
 interface = gr.Interface(fn=classify_text, inputs="text", outputs="json")
 interface.launch()
+
+
+import gradio as gr
+import torch
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+# Setup device
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Model paths on Hugging Face Hub
+model_paths = {
+    "LLaMA-3.2": "HaryaniAnjali/Llama_3.2_Trained_Emotion"
+}
+
+# Load tokenizers first with error handling
+tokenizers = {}
+for name, path in model_paths.items():
+    try:
+        print(f"🔄 Loading tokenizer for {name}...")
+        tokenizer = AutoTokenizer.from_pretrained(path)
+
+        # Ensure the tokenizer has a padding token
+        if tokenizer.pad_token is None:
+            tokenizer.pad_token = tokenizer.eos_token  # Use EOS as padding token if none exists
+
+        tokenizers[name] = tokenizer
+        print(f"✅ Tokenizer loaded for {name}")
+    except Exception as e:
+        print(f"❌ Error loading tokenizer for {name}: {e}")
+
+# Lazy loading of models to save memory
+models = {}
+
+def get_model(model_name):
+    if model_name not in models:
+        try:
+            print(f"🔄 Loading model: {model_name}...")
+            models[model_name] = AutoModelForSequenceClassification.from_pretrained(
+                model_paths[model_name], num_labels=7, ignore_mismatched_sizes=True, torch_dtype=torch.float16
+            ).to(device)
+            print(f"✅ Model {model_name} loaded successfully.")
+        except Exception as e:
+            print(f"❌ Error loading {model_name}: {e}")
+            return None
+    return models[model_name]
+
+# Emotion classification function
+def predict_emotion(text, model_name):
+    model = get_model(model_name)
+    if model is None:
+        return f"❌ Model {model_name} failed to load. Check logs."
+
+    tokenizer = tokenizers.get(model_name)
+    if tokenizer is None:
+        return f"❌ Tokenizer for {model_name} not available."
+
+    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=256).to(device)
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+        predicted_label = torch.argmax(outputs.logits, dim=1).item()
+
+    labels = ["anger", "disgust", "fear", "guilt", "joy", "sadness", "shame"]
+    return labels[predicted_label]
+
+# Gradio UI
+ui = gr.Interface(
+    fn=predict_emotion,
+    inputs=["text", gr.Radio(list(model_paths.keys()), label="Select Model")],
+    outputs="text",
+    title="Emotion Classifier",
+    description="Enter a text, select a model, and classify its emotion."
+)
+
+ui.queue().launch()
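
For anyone reviewing this change, the new gr.Interface exposes the classifier over Gradio's default "/predict" endpoint, so the running Space can be exercised programmatically with gradio_client. A minimal sketch follows; the Space id "HaryaniAnjali/Emotion-Classifier" and the sample sentence are assumptions for illustration, not values taken from this commit.

from gradio_client import Client

# Space id is a placeholder assumption -- substitute the real Space that hosts this app.py
client = Client("HaryaniAnjali/Emotion-Classifier")

result = client.predict(
    "I can't stop smiling today!",   # text box input
    "LLaMA-3.2",                     # Radio choice (a key of model_paths)
    api_name="/predict",             # default endpoint name for a gr.Interface
)
print(result)  # one of: anger, disgust, fear, guilt, joy, sadness, shame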