ArceusInception committed on
Commit
04fee53
·
verified ·
1 Parent(s): e0bbdeb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -17
app.py CHANGED
@@ -1,25 +1,27 @@
import gradio as gr
from transformers import pipeline

# Build a local text-generation pipeline backed by the GPT-2 checkpoint.
gpt2 = pipeline(task="text-generation", model="gpt2")
def chat_with_gpt2(user_input):
    """Generate a single GPT-2 completion for *user_input* and return its text."""
    # Ask the pipeline for exactly one completion, capped at 100 tokens;
    # unpack the one-element result list directly.
    [response] = gpt2(user_input, max_length=100, num_return_sequences=1)
    return response['generated_text']
# Build the Gradio UI for the chatbot.
# FIX: gr.inputs.Textbox is the deprecated pre-3.x component namespace and was
# removed in Gradio 3.0; gr.Textbox is the supported replacement with the same
# constructor arguments.
iface = gr.Interface(
    fn=chat_with_gpt2,
    # Two-line text box so users can type a short multi-line message.
    inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
    outputs="text",
    title="Chat with GPT-2",
    description="This is a simple chatbot powered by GPT-2. Type your message and get a response."
)

# Launch the web app only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()
"""Gradio app that generates text with GPT-2 via the Hugging Face Inference API."""
# FIX: group all imports at the top of the module (PEP 8) instead of importing
# `os` in the middle of the setup code.
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read the API token from the environment; os.getenv returns None when the
# variable is unset, in which case the client makes unauthenticated calls.
token = os.getenv("HF_TOKEN")
# Remote inference client for the hosted gpt2 model — no local model download.
client = InferenceClient(model="gpt2", token=token)
def generate_text(prompt):
    """Return GPT-2's completion of *prompt* as plain text."""
    # text_generation() already yields a plain string (up to 140 new tokens),
    # so the result can be returned directly with no unpacking.
    return client.text_generation(prompt, max_new_tokens=140)
# Build the Gradio UI around generate_text.
iface = gr.Interface(
    fn=generate_text,
    # Two-line text box for short multi-line prompts.
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="GPT-2 Text Generator",
    description="This Gradio app generates text using the GPT-2 model. Enter a prompt and see how GPT-2 completes it."
)

# FIX: guard the launch so importing this module does not start the server.
# The previous revision of this file had this guard; the update dropped it.
if __name__ == "__main__":
    iface.launch()