HaryaniAnjali committed · Commit 0152dd4 (verified) · 1 Parent(s): 16db478

Update app.py

Files changed (1)
  1. app.py +47 -30
app.py CHANGED
@@ -5,35 +5,52 @@ from transformers import AutoModelForSequenceClassification, AutoTokenizer
  # Setup device
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # Model path on Hugging Face Hub
- model_path = "HaryaniAnjali/Llama_3.2_Trained_Emotion"
-
- # Load the tokenizer with error handling
- try:
-     print("Loading tokenizer...")
-     tokenizer = AutoTokenizer.from_pretrained(model_path)
-
-     # Ensure the tokenizer has a padding token
-     if tokenizer.pad_token is None:
-         tokenizer.pad_token = tokenizer.eos_token  # Use EOS as padding token if none exists
-     print("Tokenizer loaded successfully.")
- except Exception as e:
-     print(f"Error loading tokenizer: {e}")
-
- # Load the model with error handling
- try:
-     print("Loading model...")
-     model = AutoModelForSequenceClassification.from_pretrained(
-         model_path, num_labels=7, ignore_mismatched_sizes=True, torch_dtype=torch.float16
-     ).to(device)
-     print("Model loaded successfully.")
- except Exception as e:
-     print(f"Error loading model: {e}")
+ # Model paths on Hugging Face Hub
+ model_paths = {
+     "LLaMA-3.2": "HaryaniAnjali/Llama_3.2_Trained_Emotion"
+ }
+
+ # Load tokenizers first with error handling
+ tokenizers = {}
+ for name, path in model_paths.items():
+     try:
+         print(f"🔄 Loading tokenizer for {name}...")
+         tokenizer = AutoTokenizer.from_pretrained(path)
+
+         # Ensure the tokenizer has a padding token
+         if tokenizer.pad_token is None:
+             tokenizer.pad_token = tokenizer.eos_token  # Use EOS as padding token if none exists
+
+         tokenizers[name] = tokenizer
+         print(f"Tokenizer loaded for {name}")
+     except Exception as e:
+         print(f"Error loading tokenizer for {name}: {e}")
+
+ # Lazy loading of models to save memory
+ models = {}
+
+ def get_model(model_name):
+     if model_name not in models:
+         try:
+             print(f"Loading model: {model_name}...")
+             models[model_name] = AutoModelForSequenceClassification.from_pretrained(
+                 model_paths[model_name], num_labels=7, ignore_mismatched_sizes=True, torch_dtype=torch.float16
+             ).to(device)
+             print(f"Model {model_name} loaded successfully.")
+         except Exception as e:
+             print(f"Error loading {model_name}: {e}")
+             return None
+     return models[model_name]

  # Emotion classification function
- def predict_emotion(text):
-     if model is None or tokenizer is None:
-         return "Model or tokenizer failed to load."
+ def predict_emotion(text, model_name):
+     model = get_model(model_name)
+     if model is None:
+         return f"Model {model_name} failed to load. Check logs."
+
+     tokenizer = tokenizers.get(model_name)
+     if tokenizer is None:
+         return f"Tokenizer for {model_name} not available."

      inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=256).to(device)

@@ -47,10 +64,10 @@ def predict_emotion(text):
  # Gradio UI
  ui = gr.Interface(
      fn=predict_emotion,
-     inputs="text",
+     inputs=["text", gr.Radio(list(model_paths.keys()), label="Select Model")],
      outputs="text",
      title="Emotion Classifier",
-     description="Enter a text and classify its emotion."
+     description="Enter a text, select a model, and classify its emotion."
  )

- ui.launch()
+ ui.queue().launch()
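
The gap between the two hunks (old lines 39-46 / new lines 56-63) hides the rest of predict_emotion, so this commit does not show how the model's logits are turned into an emotion string. As a rough, illustrative sketch only of how a 7-label sequence-classification head is usually read out; the label names below are placeholders and would really come from the model's id2label config or from the unshown part of app.py:

import torch

# Placeholder label order -- illustrative only, not taken from this repository.
EMOTIONS = ["anger", "disgust", "fear", "joy", "neutral", "sadness", "surprise"]

def logits_to_emotion(logits: torch.Tensor) -> str:
    # Softmax turns the 7 raw logits into probabilities; argmax picks the top class.
    probs = torch.softmax(logits, dim=-1)
    return EMOTIONS[int(probs.argmax(dim=-1))]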
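
Because the tokenizer loop, get_model, and the Radio choices are all keyed off model_paths, supporting an extra checkpoint should only take one more dictionary entry; the UI and the lazy loader pick it up automatically. A minimal sketch, assuming a hypothetical second repo id that is made up for illustration:

model_paths = {
    "LLaMA-3.2": "HaryaniAnjali/Llama_3.2_Trained_Emotion",
    # Hypothetical entry for illustration -- this repo id does not exist on the Hub.
    "DistilBERT": "your-username/distilbert_emotion_checkpoint",
}

Each key becomes a Radio option, its tokenizer is loaded at startup, and the model weights are only fetched the first time that option is selected. The switch from ui.launch() to ui.queue().launch() fits the same pattern: the first request for a model triggers the from_pretrained download, and routing requests through Gradio's queue helps keep that long first call from timing out.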