tenet committed on
Commit
d576613
·
verified ·
1 Parent(s): ffa47f5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -11
app.py CHANGED
@@ -3,27 +3,45 @@ from transformers import pipeline
3
 
4
# Pre-load every model once at import time so switching in the UI is instant.
# Each spec is (dropdown label, pipeline task, Hub checkpoint).
_MODEL_SPECS = [
    ("TinyBERT (NLU)", "fill-mask", "prajjwal1/bert-tiny"),
    ("DistilBERT (NLU)", "fill-mask", "distilbert-base-uncased"),
    ("ALBERT (NLU)", "fill-mask", "albert-base-v2"),
    ("MobileBERT (NLU)", "fill-mask", "google/mobilebert-uncased"),
    ("GPT-2 (text gen)", "text-generation", "gpt2"),
]
models = {label: pipeline(task, model=ckpt) for label, task, ckpt in _MODEL_SPECS}
12
 
13
def run_model(model_name, text):
    """Run *text* through the pipeline selected by *model_name*.

    A GPT-2 entry produces sampled free-form text; every other entry is
    treated as a fill-mask pipeline whose raw prediction list is
    stringified for display.
    """
    selected = models[model_name]
    # Non-generative (fill-mask) path first: just show the raw predictions.
    if "GPT-2" not in model_name:
        return str(selected(text))
    # Generative path: sample up to 50 tokens and return the text only.
    generated = selected(text, max_length=50, do_sample=True)
    return generated[0]["generated_text"]
 
 
 
 
 
 
 
 
 
 
 
19
 
20
# UI layout: heading, model picker, prompt box, output box, run button.
with gr.Blocks() as demo:
    gr.Markdown("# 🔥 Tiny LLM Playground\nChoose a small model and test it!")

    # Widgets render in declaration order.
    model_choice = gr.Dropdown(list(models), label="Choose Model")
    text_input = gr.Textbox(label="Enter text or prompt")
    output = gr.Textbox(label="Output")

    run_btn = gr.Button("Run")
    run_btn.click(fn=run_model, inputs=[model_choice, text_input], outputs=output)
29
 
 
3
 
4
# Pre-load every pipeline once at import time, keyed by the label the
# dropdown shows.  Values of the spec map are (task, Hub checkpoint).
_PIPELINE_SPECS = {
    "TinyBERT (Fill Mask)": ("fill-mask", "prajjwal1/bert-tiny"),
    "DistilBERT (Fill Mask)": ("fill-mask", "distilbert-base-uncased"),
    "ALBERT (Fill Mask)": ("fill-mask", "albert-base-v2"),
    "MobileBERT (Fill Mask)": ("fill-mask", "google/mobilebert-uncased"),
    "GPT-2 (Text Generation)": ("text-generation", "gpt2"),
}
models = {
    label: pipeline(task, model=checkpoint)
    for label, (task, checkpoint) in _PIPELINE_SPECS.items()
}
12
 
13
def run_model(model_name, text):
    """Run *text* through the pipeline registered under *model_name*.

    GPT-2 entries do sampled free-form generation; every other entry is a
    fill-mask pipeline.  If the user's text lacks a mask token, one is
    appended automatically before prediction.

    Returns a display string for the Gradio output textbox.
    """
    pipe = models[model_name]

    # GPT-2 → freeform text generation.
    if "GPT-2" in model_name:
        # max_new_tokens (not max_length) so the generation budget is not
        # consumed — or exceeded — by a long prompt.
        output = pipe(text, max_new_tokens=50, do_sample=True, top_k=50, temperature=0.7)
        return output[0]["generated_text"]

    # Fill-mask models → require the tokenizer's mask token.  Not hard-coded
    # to "[MASK]": some checkpoints (e.g. RoBERTa) use "<mask>".
    text = _ensure_mask(text, pipe.tokenizer.mask_token)

    preds = pipe(text, top_k=5)  # top 5 predictions
    formatted = "\n".join(
        f"{p['token_str']} (prob={p['score']:.4f})" for p in preds
    )
    return f"Input: {text}\n\nPredictions:\n{formatted}"


def _ensure_mask(text, mask_token):
    """Return *text* guaranteed to contain *mask_token*.

    If the token is missing, it is inserted just before a trailing period
    (adding the period when absent), matching the UI promise of
    "type normally and I'll add one for you".
    """
    if mask_token in text:
        return text
    base = text.strip()
    if not base.endswith("."):
        base += "."
    # lstrip() so empty input yields "<mask>." rather than " <mask>.".
    return (base[:-1] + f" {mask_token}.").lstrip()
35
 
36
# Landing copy shown above the controls; rendered as Markdown.
_INTRO = (
    "# 🔥 Tiny LLM Playground\nChoose a small model and test it!\n\n"
    "💡 For BERT-style models, you can add `[MASK]` in your text, "
    "or just type normally and I'll add one for you."
)

with gr.Blocks() as demo:
    gr.Markdown(_INTRO)

    # Controls render in declaration order.
    model_choice = gr.Dropdown(list(models), label="Choose Model")
    text_input = gr.Textbox(label="Enter text or prompt")
    output = gr.Textbox(label="Output", lines=8)

    run_btn = gr.Button("Run")
    run_btn.click(fn=run_model, inputs=[model_choice, text_input], outputs=output)
47