Tralalabs committed on
Commit
9cb422a
·
verified Β·
1 Parent(s): a4767c3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -15
app.py CHANGED
# Model setup — module-level side effect: downloads/loads the checkpoint
# the first time the app starts. GPT-Neo 125M is small enough for CPU.
model_id = "EleutherAI/gpt-neo-125M"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
def generate_text(prompt, max_new_tokens=80):
    """Sample a continuation of *prompt* and return it as green HTML.

    Args:
        prompt: Text for the model to continue.
        max_new_tokens: Upper bound on the number of generated tokens.

    Returns:
        An HTML snippet containing only the newly generated text, wrapped
        in a green ``<span>``.
    """
    import html  # stdlib; escape model output before embedding in HTML

    inputs = tokenizer(prompt, return_tensors="pt")

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),
            do_sample=True,
            temperature=0.9,
            top_p=0.95,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id,
        )

    # BUG FIX: slicing the decoded string by len(prompt) is unreliable —
    # decode(tokenize(prompt)) does not always round-trip byte-for-byte
    # (whitespace/Unicode normalization can shift offsets). Decode only
    # the token ids that were actually generated.
    prompt_len = inputs["input_ids"].shape[1]
    generated = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Escape so sampled text cannot inject markup into the HTML output pane.
    return f"<span style='color:#00ff88'>{html.escape(generated)}</span>"
 
 
 
 
 
 
 
 
 
28
 
29
# --- Gradio front-end -------------------------------------------------
# One row: prompt textbox, HTML completion pane, and a generate button.
with gr.Blocks(css="textarea {font-family: monospace;}") as demo:
    gr.Markdown("# 🧠 TextPlayground")

    with gr.Row():
        prompt_box = gr.Textbox(
            label="Input",
            placeholder="Type something like: Once upon a time...",
            lines=5,
        )
        completion_pane = gr.HTML(label="Completion")
        run_button = gr.Button("Generate ⚡")

    # Clicking the button feeds the prompt through generate_text and
    # renders the returned HTML snippet in the completion pane.
    run_button.click(fn=generate_text, inputs=[prompt_box], outputs=[completion_pane])

demo.launch()
 
# Post-change file context (lines 7-9): the tokenizer and model are
# loaded once at import time and shared by generate_text below.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
def generate_text(prompt, temperature, max_new_tokens):
    """Sample a continuation of *prompt* and return prompt + continuation as HTML.

    Args:
        prompt: Text for the model to continue.
        temperature: Sampling temperature (slider value from the UI).
        max_new_tokens: Upper bound on generated tokens (slider value; may
            arrive as a float, hence the int() cast).

    Returns:
        An HTML block showing the original prompt followed by the generated
        continuation highlighted in green.
    """
    import html  # stdlib; escape user/model text before embedding in HTML

    inputs = tokenizer(prompt, return_tensors="pt")

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),
            do_sample=True,
            temperature=temperature,
            top_p=0.95,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id,
        )

    # BUG FIX: full_text[len(prompt):] assumes decoding reproduces the prompt
    # byte-for-byte, which tokenizers do not guarantee. Decode only the newly
    # generated token ids instead.
    prompt_len = inputs["input_ids"].shape[1]
    generated = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Escape BOTH the user prompt and the model output: either could contain
    # markup that would otherwise be injected into the HTML pane.
    return f"""
<div style="font-family:monospace; white-space:pre-wrap;">
{html.escape(prompt)}<span style="color:#00ff88;">{html.escape(generated)}</span>
</div>
"""
33
+
34
# --- Gradio front-end -------------------------------------------------
# Two-column layout: wide prompt/output column on the left, sampling
# controls (temperature, max length) on the right.
with gr.Blocks(css="""
textarea {font-family: monospace;}
""") as demo:
    gr.Markdown("# 🧠 TextPlayground")

    with gr.Row():
        with gr.Column(scale=3):
            prompt_box = gr.Textbox(
                label="Prompt",
                placeholder="Type something like: Three reasons to start a succulent garden",
                lines=10,
            )
            output_pane = gr.HTML(label="Output")
            submit_button = gr.Button("Submit ⚡")

        with gr.Column(scale=1):
            temperature_slider = gr.Slider(0.1, 1.5, value=0.7, label="Temperature")
            max_tokens_slider = gr.Slider(10, 256, value=120, label="Max length")

    # Route prompt + both slider values into generate_text; the returned
    # HTML (prompt + green continuation) lands in the output pane.
    submit_button.click(
        fn=generate_text,
        inputs=[prompt_box, temperature_slider, max_tokens_slider],
        outputs=output_pane,
    )

demo.launch()