humy65 commited on
Commit
f8fab2a
·
1 Parent(s): 80fe507

Add training data display to debug tab

Browse files
Files changed (1) hide show
  1. app.py +199 -52
app.py CHANGED
@@ -6,82 +6,87 @@ import gradio as gr
6
  import sys
7
  import traceback
8
 
 
9
  def test_model_loading():
10
  """Test if model can be loaded"""
11
  try:
12
  print("๐Ÿ”„ Testing model loading...")
13
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
14
-
15
  model_name = "humy65/hebrew-intent-classifier"
16
  print(f"๐Ÿ“ก Attempting to load: {model_name}")
17
-
18
  tokenizer = AutoTokenizer.from_pretrained(model_name)
19
  print("โœ… Tokenizer loaded")
20
-
21
  model = AutoModelForSequenceClassification.from_pretrained(model_name)
22
  print("โœ… Model loaded")
23
-
24
  print(f"๐Ÿ“‹ Labels: {model.config.id2label}")
25
  return True, "Model loaded successfully!", model, tokenizer
26
-
27
  except Exception as e:
28
  error_msg = f"โŒ Error: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
29
  print(error_msg)
30
  return False, error_msg, None, None
31
 
 
32
  def classify_text(text):
33
  """Classification function with lazy loading"""
34
  if not text or not text.strip():
35
  return "โš ๏ธ Please enter Hebrew text", {}
36
-
37
  try:
38
  # Try to load model on demand
39
  success, message, model, tokenizer = test_model_loading()
40
-
41
  if not success:
42
  return f"Model Loading Failed:\n{message}", {}
43
-
44
  # Perform classification
45
  import torch
46
-
47
- inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
48
-
 
49
  with torch.no_grad():
50
  outputs = model(**inputs)
51
  logits = outputs.logits
52
  probabilities = torch.softmax(logits, dim=-1)
53
-
54
  # Get results
55
  predicted_id = torch.argmax(logits, dim=-1).item()
56
  predicted_label = model.config.id2label[predicted_id]
57
  confidence = probabilities[0][predicted_id].item()
58
-
59
  # Create confidence scores for all labels
60
  all_scores = {}
61
  for i, prob in enumerate(probabilities[0]):
62
  intent_name = model.config.id2label[i]
63
  all_scores[intent_name] = float(prob)
64
-
65
  result = f"""
66
  ๐ŸŽฏ Predicted Intent: {predicted_label}
67
  ๐ŸŽฒ Confidence: {confidence:.1%}
68
 
69
  ๐Ÿ“Š All Predictions:
70
  """
71
-
72
  # Sort and display
73
- sorted_scores = sorted(all_scores.items(), key=lambda x: x[1], reverse=True)
 
74
  for intent, score in sorted_scores:
75
  bar = "โ–ˆ" * max(1, int(score * 20))
76
  result += f"\n{intent}: {score:.1%} {bar}"
77
-
78
  return result, all_scores
79
-
80
  except Exception as e:
81
  error_msg = f"Classification Error: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
82
  print(error_msg)
83
  return error_msg, {}
84
 
 
85
  def test_connection():
86
  """Test Hugging Face connection"""
87
  try:
@@ -92,11 +97,138 @@ def test_connection():
92
  except Exception as e:
93
  return f"โŒ Repository access failed: {str(e)}"
94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  # Create interface
96
  with gr.Blocks(title="Hebrew Intent Classification - Debug") as demo:
97
-
98
  gr.Markdown("# ๐Ÿ‡ฎ๐Ÿ‡ฑ Hebrew Intent Classification - Debug Version")
99
-
100
  with gr.Tab("Classification"):
101
  with gr.Row():
102
  with gr.Column():
@@ -106,67 +238,82 @@ with gr.Blocks(title="Hebrew Intent Classification - Debug") as demo:
106
  lines=3
107
  )
108
  classify_btn = gr.Button("Classify", variant="primary")
109
-
110
  # Quick examples
111
  gr.Markdown("### Examples:")
112
  examples = [
113
  "ืฉื›ื—ืชื™ ืืช ื”ืกื™ืกืžื” ืฉืœื™",
114
- "ืจื•ืฆื” ืœื‘ื˜ืœ ืืช ื”ืžื ื•ื™",
115
  "ื›ืžื” ืขื•ืœื” ื”ื—ื‘ื™ืœื”",
116
  "ื”ืืชืจ ืœื ืขื•ื‘ื“"
117
  ]
118
-
119
  for example in examples:
120
  gr.Button(example, size="sm").click(
121
  lambda x=example: x, outputs=text_input
122
  )
123
-
124
  with gr.Column():
125
  result_output = gr.Textbox(
126
  label="Result:",
127
  lines=12,
128
  interactive=False
129
  )
130
-
131
  confidence_output = gr.Label(
132
  label="Confidence Scores",
133
  num_top_classes=4
134
  )
135
-
136
  with gr.Tab("Debug"):
137
  gr.Markdown("### Debug Information")
138
-
139
- test_btn = gr.Button("Test Model Loading")
140
- debug_output = gr.Textbox(
141
- label="Debug Output:",
142
- lines=15,
143
- interactive=False
144
- )
145
-
146
- test_btn.click(
147
- lambda: test_model_loading()[1],
148
- outputs=debug_output
149
- )
150
-
151
- conn_btn = gr.Button("Test Repository Connection")
152
- conn_output = gr.Textbox(
153
- label="Connection Test:",
154
- lines=5,
155
- interactive=False
156
- )
157
-
158
- conn_btn.click(
159
- test_connection,
160
- outputs=conn_output
161
- )
162
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
  # Connect classification
164
  classify_btn.click(
165
  classify_text,
166
  inputs=[text_input],
167
  outputs=[result_output, confidence_output]
168
  )
169
-
170
  text_input.submit(
171
  classify_text,
172
  inputs=[text_input],
 
6
  import sys
7
  import traceback
8
 
9
+
10
def test_model_loading():
    """Attempt to load the Hebrew intent classifier from the Hugging Face Hub.

    Returns:
        tuple: (success, message, model, tokenizer). On any failure the
        model and tokenizer slots are None and the message carries the
        full formatted traceback.
    """
    try:
        print("๐Ÿ”„ Testing model loading...")
        # Imported lazily so the UI can start even when transformers is unavailable.
        from transformers import AutoTokenizer, AutoModelForSequenceClassification

        repo_id = "humy65/hebrew-intent-classifier"
        print(f"๐Ÿ“ก Attempting to load: {repo_id}")

        tokenizer = AutoTokenizer.from_pretrained(repo_id)
        print("โœ… Tokenizer loaded")

        model = AutoModelForSequenceClassification.from_pretrained(repo_id)
        print("โœ… Model loaded")

        print(f"๐Ÿ“‹ Labels: {model.config.id2label}")
        return True, "Model loaded successfully!", model, tokenizer

    except Exception as exc:
        failure = f"โŒ Error: {str(exc)}\n\nTraceback:\n{traceback.format_exc()}"
        print(failure)
        return False, failure, None, None
32
 
33
+
34
def classify_text(text):
    """Classify a Hebrew sentence into one of the trained intent labels.

    The model is fetched on demand through test_model_loading(), so any
    loading problem is surfaced in the returned message instead of raising.

    Args:
        text: Hebrew input sentence (may be None or blank).

    Returns:
        tuple: (human-readable result string, {intent: probability}).
    """
    # Guard clause: nothing to classify.
    if not text or not text.strip():
        return "โš ๏ธ Please enter Hebrew text", {}

    try:
        success, message, model, tokenizer = test_model_loading()
        if not success:
            return f"Model Loading Failed:\n{message}", {}

        import torch  # deferred so the app can start without torch installed

        encoded = tokenizer(text, return_tensors="pt",
                            padding=True, truncation=True, max_length=128)

        with torch.no_grad():
            logits = model(**encoded).logits
        probs = torch.softmax(logits, dim=-1)

        # Winning class and its probability.
        top_id = torch.argmax(logits, dim=-1).item()
        top_label = model.config.id2label[top_id]
        top_conf = probs[0][top_id].item()

        # Probability for every label, keyed by intent name.
        scores = {model.config.id2label[i]: float(p)
                  for i, p in enumerate(probs[0])}

        report = (
            f"\n๐ŸŽฏ Predicted Intent: {top_label}"
            f"\n๐ŸŽฒ Confidence: {top_conf:.1%}"
            "\n"
            "\n๐Ÿ“Š All Predictions:"
            "\n"
        )

        # Highest-probability intents first, with a simple text bar chart.
        for intent, prob in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
            bar = "โ–ˆ" * max(1, int(prob * 20))
            report += f"\n{intent}: {prob:.1%} {bar}"

        return report, scores

    except Exception as exc:
        failure = f"Classification Error: {str(exc)}\n\nTraceback:\n{traceback.format_exc()}"
        print(failure)
        return failure, {}
88
 
89
+
90
  def test_connection():
91
  """Test Hugging Face connection"""
92
  try:
 
97
  except Exception as e:
98
  return f"โŒ Repository access failed: {str(e)}"
99
 
100
+
101
def get_training_data():
    """Render a human-readable report of the model's training data.

    Builds a single display string containing the total example count,
    the per-category distribution (count and percentage), and a fixed
    set of sample utterances per intent, for the Debug tab.

    Returns:
        str: the formatted report.
    """
    # (utterance, intent label) pairs the classifier was trained on.
    training_data = [
        ("ืฉื›ื—ืชื™ ืืช ื”ืกื™ืกืžื” ืฉืœื™", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืื™ืš ืื ื™ ืžื‘ื˜ืœ ืืช ื”ืžื ื•ื™?", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืžื” ื”ืžื—ื™ืจ ืฉืœ ื”ืชื•ื›ื ื™ืช?", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ื”ืืชืจ ืœื ืขื•ื‘ื“ ืœื™", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ืื ื™ ืœื ืžืฆืœื™ื— ืœื”ืชื—ื‘ืจ", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ืื™ืš ืื ื™ ืžืฉื ื” ืืช ื›ืชื•ื‘ืช ื”ืื™ืžื™ื™ืœ?", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืื ื™ ืจื•ืฆื” ืœืฉื“ืจื’ ืืช ื”ืชื•ื›ื ื™ืช ืฉืœื™", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ื”ื—ืฉื‘ื•ืŸ ืฉืœื™ ื ื ืขืœ", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ืื ื™ ืœื ืžืงื‘ืœ ืžื™ื™ืœื™ื", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ืื™ืš ืื ื™ ืจื•ืื” ืืช ื”ื—ืฉื‘ื•ื ื™ืช ืฉืœื™?", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืื ื™ ืจื•ืฆื” ืœื‘ื˜ืœ ืืช ื”ืฉื™ืจื•ืช", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืฉื›ื—ืชื™ ืืช ืคืจื˜ื™ ื”ื”ืชื—ื‘ืจื•ืช", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืื™ื‘ื“ืชื™ ืืช ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืœื ื–ื•ื›ืจ ืืช ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ื”ืกื™ืกืžื” ืœื ืขื•ื‘ื“ืช", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืœื ืžืฆืœื™ื— ืœื”ื™ื›ื ืก ืขื ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืฆืจื™ืš ืœืืคืก ืืช ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ื‘ืขื™ื” ืขื ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ื”ืกื™ืกืžื” ืฉืœื™ ืœื ื ื›ื•ื ื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืฉื›ื—ืชื™ ืžื” ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืื™ืš ืื ื™ ืžืฉื—ื–ืจ ืืช ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืจื•ืฆื” ืœืฉื ื•ืช ืืช ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ื”ืกื™ืกืžื” ืœื ืžืชืงื‘ืœืช", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ื‘ืขื™ื™ืช ื”ืชื—ื‘ืจื•ืช - ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืฆืจื™ืš ืขื–ืจื” ืขื ื”ืกื™ืกืžื”", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืœื ื™ื•ื“ืข ืžื” ื”ืกื™ืกืžื” ืฉืœื™", "ืฉื›ื—ืช ืกื™ืกืžื”"),
        ("ืจื•ืฆื” ืœื‘ื˜ืœ ืืช ื”ืฉื™ืจื•ืช", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืื™ืš ืžืคืกื™ืงื™ื ืืช ื”ืžื ื•ื™", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืจื•ืฆื” ืœื”ืคืกื™ืง ืืช ื”ืชืฉืœื•ื", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืื™ืš ื™ื•ืฆืื™ื ืžื”ืžื ื•ื™", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ื‘ืงืฉื” ืœื‘ื™ื˜ื•ืœ ืžื ื•ื™", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืœื ืจื•ืฆื” ื™ื•ืชืจ ืืช ื”ืฉื™ืจื•ืช", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืื™ืš ืžื‘ื˜ืœื™ื ืืช ื”ื—ืฉื‘ื•ืŸ", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืจื•ืฆื” ืœืกื’ื•ืจ ืืช ื”ื—ืฉื‘ื•ืŸ", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืขื–ืจื” ื‘ื‘ื™ื˜ื•ืœ ืžื ื•ื™", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ื”ืœื™ืš ื‘ื™ื˜ื•ืœ ื”ืžื ื•ื™", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืžืขื•ื ื™ื™ืŸ ืœื‘ื˜ืœ", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืื™ืš ืžืคืกื™ืงื™ื ืืช ื”ืฉื™ืจื•ืช", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืจื•ืฆื” ืœื”ืคืกื™ืง ืืช ื”ื”ืจืฉืžื”", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ื‘ืงืฉื” ืœื”ืคืกืงืช ืฉื™ืจื•ืช", "ื‘ื™ื˜ื•ืœ ืžื ื•ื™"),
        ("ืžื” ื›ื•ืœืœ ื”ืฉื™ืจื•ืช", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืื™ืœื• ืชื•ื›ื ื™ื•ืช ื™ืฉ ืœื›ื", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ื›ืžื” ืขื•ืœื” ื”ื—ื‘ื™ืœื”", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืžื” ื”ื”ื‘ื“ืœ ื‘ื™ืŸ ื”ืชื•ื›ื ื™ื•ืช", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืื™ืš ืื ื™ ืžืฉื ื” ืืช ื”ืคืจื˜ื™ื ืฉืœื™", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืื™ืš ืืคืฉืจ ืœืฉื“ืจื’", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืžื” ื”ืืคืฉืจื•ื™ื•ืช ืฉืœื›ื", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืื ื™ ืจื•ืฆื” ืœืขื“ื›ืŸ ืคืจื˜ื™ื", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ืื™ืš ืจื•ืื™ื ืืช ื”ื”ื™ืกื˜ื•ืจื™ื”", "ืฉืืœื” ื›ืœืœื™ืช"),
        ("ื”ืืคืœื™ืงืฆื™ื” ืงื•ืจืกืช", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื™ืฉ ื‘ืื’ ื‘ืืชืจ", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื”ื“ืฃ ืœื ื ื˜ืขืŸ", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ืฉื’ื™ืื” ื‘ืžืขืจื›ืช", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื”ื˜ื•ืขืŸ ืœื ืขื•ื‘ื“", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื‘ืขื™ื” ื˜ื›ื ื™ืช", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื”ืžืขืจื›ืช ืœื ืžื’ื™ื‘ื”", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ืฉื’ื™ืืช ื—ื™ื‘ื•ืจ", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื”ื›ืคืชื•ืจ ืœื ืขื•ื‘ื“", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื”ืชืžื•ื ื•ืช ืœื ื ื˜ืขื ื•ืช", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ื”ื•ื•ื™ื“ืื• ืœื ืžืชื ื’ืŸ", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช"),
        ("ืื™ื˜ื™ื•ืช ื‘ืืชืจ", "ืชืžื™ื›ื” ื˜ื›ื ื™ืช")
    ]

    total = len(training_data)

    # Tally how many examples each intent label has.
    tally = {}
    for _, label in training_data:
        tally[label] = tally.get(label, 0) + 1

    report = f"""
๐Ÿ“Š **Training Data Summary**
Total Examples: {total}

๐Ÿ“ˆ **Examples per Category:**
"""

    # One bullet per category, sorted by label for a stable order.
    for label in sorted(tally):
        count = tally[label]
        share = (count / total) * 100
        report += f"\nโ€ข {label}: {count} examples ({share:.1f}%)"

    report += f"""

๐Ÿ“ **Sample Training Examples:**

๐Ÿ” **ืฉื›ื—ืช ืกื™ืกืžื” (Password Reset):**
โ€ข ืฉื›ื—ืชื™ ืืช ื”ืกื™ืกืžื” ืฉืœื™
โ€ข ืœื ื–ื•ื›ืจ ืืช ื”ืกื™ืกืžื”
โ€ข ื”ืกื™ืกืžื” ืœื ืขื•ื‘ื“ืช
โ€ข ืฆืจื™ืš ืœืืคืก ืืช ื”ืกื™ืกืžื”
โ€ข ืื™ืš ืื ื™ ืžืฉื—ื–ืจ ืืช ื”ืกื™ืกืžื”

โŒ **ื‘ื™ื˜ื•ืœ ืžื ื•ื™ (Cancel Subscription):**
โ€ข ืื™ืš ืื ื™ ืžื‘ื˜ืœ ืืช ื”ืžื ื•ื™?
โ€ข ืจื•ืฆื” ืœื”ืคืกื™ืง ืืช ื”ืชืฉืœื•ื
โ€ข ืœื ืจื•ืฆื” ื™ื•ืชืจ ืืช ื”ืฉื™ืจื•ืช
โ€ข ืื™ืš ืžื‘ื˜ืœื™ื ืืช ื”ื—ืฉื‘ื•ืŸ
โ€ข ื‘ืงืฉื” ืœื‘ื™ื˜ื•ืœ ืžื ื•ื™

โ“ **ืฉืืœื” ื›ืœืœื™ืช (General Question):**
โ€ข ืžื” ื”ืžื—ื™ืจ ืฉืœ ื”ืชื•ื›ื ื™ืช?
โ€ข ื›ืžื” ืขื•ืœื” ื”ื—ื‘ื™ืœื”
โ€ข ืื™ืœื• ืชื•ื›ื ื™ื•ืช ื™ืฉ ืœื›ื
โ€ข ืื™ืš ืื ื™ ืžืฉื ื” ืืช ื”ืคืจื˜ื™ื ืฉืœื™
โ€ข ืžื” ื›ื•ืœืœ ื”ืฉื™ืจื•ืช

๐Ÿ”ง **ืชืžื™ื›ื” ื˜ื›ื ื™ืช (Technical Support):**
โ€ข ื”ืืชืจ ืœื ืขื•ื‘ื“ ืœื™
โ€ข ื”ืืคืœื™ืงืฆื™ื” ืงื•ืจืกืช
โ€ข ื™ืฉ ื‘ืื’ ื‘ืืชืจ
โ€ข ื”ื“ืฃ ืœื ื ื˜ืขืŸ
โ€ข ืฉื’ื™ืื” ื‘ืžืขืจื›ืช

---
๐Ÿ’ก **Model was trained with data augmentation techniques:**
โ€ข Synonym replacement
โ€ข Paraphrasing
โ€ข Context variation
โ€ข Original 12 examples โ†’ Enhanced to {total} examples
"""

    return report
225
+
226
+
227
  # Create interface
228
  with gr.Blocks(title="Hebrew Intent Classification - Debug") as demo:
229
+
230
  gr.Markdown("# ๐Ÿ‡ฎ๐Ÿ‡ฑ Hebrew Intent Classification - Debug Version")
231
+
232
  with gr.Tab("Classification"):
233
  with gr.Row():
234
  with gr.Column():
 
238
  lines=3
239
  )
240
  classify_btn = gr.Button("Classify", variant="primary")
241
+
242
  # Quick examples
243
  gr.Markdown("### Examples:")
244
  examples = [
245
  "ืฉื›ื—ืชื™ ืืช ื”ืกื™ืกืžื” ืฉืœื™",
246
+ "ืจื•ืฆื” ืœื‘ื˜ืœ ืืช ื”ืžื ื•ื™",
247
  "ื›ืžื” ืขื•ืœื” ื”ื—ื‘ื™ืœื”",
248
  "ื”ืืชืจ ืœื ืขื•ื‘ื“"
249
  ]
250
+
251
  for example in examples:
252
  gr.Button(example, size="sm").click(
253
  lambda x=example: x, outputs=text_input
254
  )
255
+
256
  with gr.Column():
257
  result_output = gr.Textbox(
258
  label="Result:",
259
  lines=12,
260
  interactive=False
261
  )
262
+
263
  confidence_output = gr.Label(
264
  label="Confidence Scores",
265
  num_top_classes=4
266
  )
267
+
268
  with gr.Tab("Debug"):
269
  gr.Markdown("### Debug Information")
270
+
271
+ with gr.Row():
272
+ with gr.Column():
273
+ test_btn = gr.Button("Test Model Loading")
274
+ debug_output = gr.Textbox(
275
+ label="Debug Output:",
276
+ lines=15,
277
+ interactive=False
278
+ )
279
+
280
+ test_btn.click(
281
+ lambda: test_model_loading()[1],
282
+ outputs=debug_output
283
+ )
284
+
285
+ conn_btn = gr.Button("Test Repository Connection")
286
+ conn_output = gr.Textbox(
287
+ label="Connection Test:",
288
+ lines=5,
289
+ interactive=False
290
+ )
291
+
292
+ conn_btn.click(
293
+ test_connection,
294
+ outputs=conn_output
295
+ )
296
+
297
+ with gr.Column():
298
+ data_btn = gr.Button("Show Training Data")
299
+ training_output = gr.Textbox(
300
+ label="Training Data:",
301
+ lines=20,
302
+ interactive=False
303
+ )
304
+
305
+ data_btn.click(
306
+ get_training_data,
307
+ outputs=training_output
308
+ )
309
+
310
  # Connect classification
311
  classify_btn.click(
312
  classify_text,
313
  inputs=[text_input],
314
  outputs=[result_output, confidence_output]
315
  )
316
+
317
  text_input.submit(
318
  classify_text,
319
  inputs=[text_input],