# Hugging Face Space app (previous deploy status: Runtime error — see fix below)
import gradio as gr
from huggingface_hub import InferenceClient
from transformers import pipeline

"""
For more information on `huggingface_hub` Inference API support, please check the docs:
https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""

# Model id used both for the (optional) Inference API client and the local pipeline.
MODEL_ID = "isitcoding/gpt2_120_finetuned"

# Inference API client (kept for API-based usage; not required by the local pipeline).
client = InferenceClient(model=MODEL_ID)

# BUG FIX: `pipeline` expects a model id string (or a PreTrainedModel), not an
# InferenceClient instance — passing `client` raised at startup ("Runtime error").
generator = pipeline("text-generation", model=MODEL_ID)
def respond(message, history: list[tuple[str, str]] | None = None):
    """Generate a reply to `message`, threading the conversation through `history`.

    Args:
        message: The user's new input text.
        history: Conversation so far as (speaker, text) tuples. May be None on
            the first call, because `gr.State()` initializes to None.

    Returns:
        A (response, history) tuple: the model's reply string and the updated
        history including both the new user turn and the bot turn.
    """
    # BUG FIX: gr.State() starts as None, so guard before appending.
    if history is None:
        history = []

    # Record the new user turn.
    history.append(("User", message))

    # Flatten the conversation into a single prompt string.
    prompt = " ".join(text for _, text in history)

    # Generate one continuation (max_length counts prompt + new tokens).
    output = generator(prompt, max_length=500, num_return_sequences=1)
    generated = output[0]["generated_text"]

    # BUG FIX: text-generation pipelines echo the prompt at the start of
    # `generated_text`; strip it so the reply is only the new text.
    if generated.startswith(prompt):
        response = generated[len(prompt):].strip()
    else:
        response = generated.strip()

    # Record the bot turn and return both the reply and the updated state.
    history.append(("Bot", response))
    return response, history
# Build the Gradio UI: a message box plus hidden conversation state in,
# the model's reply plus updated state out; `live=True` runs on each change.
message_box = gr.Textbox(label="Enter your message", placeholder="Type here...")
conversation_state = gr.State()
reply_box = gr.Textbox(label="Response")

iface = gr.Interface(
    fn=respond,
    inputs=[message_box, conversation_state],
    outputs=[reply_box, gr.State()],
    live=True,
)

# Start the web app.
iface.launch()