Spaces: Runtime error (app failed at startup — see fixes below)
import os
import gradio as gr
import random
from transformers import pipeline
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face auth token: prefer the conventional HF_TOKEN variable,
# falling back to the legacy "gpt2_token" name this Space originally used.
# (The original code read only "gpt2_token" while the error message told
# the user to set HF_TOKEN — the two now agree.)
hf_token = os.getenv("HF_TOKEN") or os.getenv("gpt2_token")
if not hf_token:
    raise ValueError(
        "Hugging Face token not found. Please set HF_TOKEN (or gpt2_token) as an environment variable."
    )
# Load the fine-tuned model and tokenizer from the Hub.
# NOTE(review): the original passed config="adapter_config.json" and
# state_dict="adapter_model.safetensors" as strings — `state_dict` must be an
# actual dict of tensors, not a filename, so from_pretrained raised at startup
# (the Space's runtime error). The adapter_* filenames suggest these are PEFT
# adapter weights; if so, the repo should be loaded via the `peft` library
# (PeftModel.from_pretrained) — TODO confirm against the repo contents.
model = AutoModelForCausalLM.from_pretrained(
    "isitcoding/gpt2_120_finetuned",
    token=hf_token,  # `use_auth_token` is deprecated; auth belongs here
)
tokenizer = AutoTokenizer.from_pretrained(
    "isitcoding/gpt2_120_finetuned",
    token=hf_token,
)
# Text-generation pipeline built from the already-loaded objects, so no
# further authentication is needed at pipeline construction time.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Function to generate responses using the text generation model
def respond(message, chat_history):
    """Generate a bot reply and append the exchange to the chat history.

    Args:
        message: The user's input text.
        chat_history: List of (speaker, text) tuples maintained by the
            Chatbot component; may be None before the first turn.

    Returns:
        The updated chat history list (consumed by gr.Chatbot).
    """
    # Defensive: some Gradio versions pass None for an empty history.
    chat_history = chat_history or []
    # Only the first sequence was ever used, so request exactly one:
    # num_return_sequences>1 also raises under greedy decoding in many
    # transformers versions, which would crash every submit.
    result = generator(message, max_length=1028, num_return_sequences=1)
    # NOTE(review): generated_text includes the prompt echoed back —
    # presumably acceptable here; trim with removeprefix(message) if not.
    response = result[0]["generated_text"]
    chat_history.append(("User", message))
    chat_history.append(("Bot", response))
    return chat_history
# Build the chat UI with Gradio Blocks.
with gr.Blocks() as demo:
    # Transcript display for the conversation.
    chatbot = gr.Chatbot()
    # Free-text input; pressing Enter routes through respond().
    msg = gr.Textbox(label="Enter your message")
    # Button that wipes the transcript.
    clear = gr.Button("Clear")

    def _reset_history():
        # Returning an empty list empties the Chatbot component.
        return []

    # Submitting the textbox feeds (message, history) to respond()
    # and writes the returned history back into the chatbot.
    msg.submit(respond, [msg, chatbot], chatbot)
    # Clearing takes no inputs and replaces the transcript with [].
    clear.click(_reset_history, None, chatbot)

# Start the Gradio server.
demo.launch()