pcasale commited on
Commit
a90147a
·
verified ·
1 Parent(s): 7650f35

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -0
app.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Import Gradio (web UI) and the Hugging Face Transformers pipeline helper.
import gradio as gr
from transformers import pipeline

# Load a text-generation pipeline with the DistilGPT-2 model.
# DistilGPT-2 is a distilled (compressed) version of GPT-2, so it is faster
# and more lightweight than the full GPT-2 model.
# NOTE: this downloads the model weights on first run if they are not
# already cached locally, so module import may take a while the first time.
generator = pipeline("text-generation", model="distilgpt2")
9
+ # Define a function that uses the generator to produce text based on the input prompt.
10
+ def generate_text(prompt):
11
+ # Use the text-generation pipeline to continue the prompt.
12
+ # We set a max_length to limit the output length for practicality.
13
+ result = generator(prompt, max_length=100, num_return_sequences=1)[0]["generated_text"]
14
+ return result
15
+
16
+ # Set up the Gradio interface:
17
+ # - Input: a textbox for the prompt (single-line or a short prompt, so we use lines=2).
18
+ # - Output: a textbox for the generated text.
19
+ # - We also add a title and description for user guidance.
20
+ input_prompt = gr.Textbox(lines=2, label="Prompt", placeholder="Enter a text prompt...")
21
+ output_text = gr.Textbox(label="Generated Text")
22
+
23
+ demo = gr.Interface(
24
+ fn=generate_text,
25
+ inputs=input_prompt,
26
+ outputs=output_text,
27
+ title="🤖 DistilGPT-2 Text Generator",
28
+ description="**Description:** Enter a prompt, and the DistilGPT-2 language model will continue the text. "
29
+ "This demonstrates basic text generation using a small pre-trained GPT-2 model."
30
+ )
31
+
32
+ # Launch the app (in a Hugging Face Space, this will run automatically).
33
+ demo.launch()