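A simple rule-based version of the study-assistance chatbot, built with Gradio: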
```python
import gradio as gr

def chatbot_response(user_input):
    # Handle basic greetings
    if user_input.lower() in ["hello", "hi"]:
        return "Hello! How can I assist you today?"
    # Add more conditions for different queries here
    elif "supervised learning" in user_input.lower():
        return "Supervised learning is a machine learning approach where models are trained using labeled data."
    # If no predefined match, ask the user to clarify
    else:
        return "I'm here to assist with academic questions. Please specify what you'd like help with."

with gr.Blocks() as demo:
    gr.Markdown("### Study Assistance Chatbot")
    gr.Markdown("Welcome! Ask me anything related to your academic studies.")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="Enter your question here:")
            submit_button = gr.Button("Submit")
        with gr.Column():
            chatbot_output = gr.Textbox(label="Chatbot Response", interactive=False)
    submit_button.click(chatbot_response, inputs=user_input, outputs=chatbot_output)

demo.launch()
```
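The hard-coded branches above only cover a handful of queries. To supply real study material, a question-answering dataset can be pulled from the Hugging Face Hub with the `datasets` library: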
```python
from datasets import load_dataset

# Load a sample dataset from Hugging Face
dataset = load_dataset("squad")  # replace "squad" with any dataset you're using

# Print the first entry to verify that the dataset loaded
print(dataset["train"][0])  # prints the first example from the training set
```
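Each SQuAD entry is a dictionary with `id`, `title`, `context`, `question`, and `answers` fields. A quick sketch of pulling out the pieces most relevant to a study chatbot:

```python
example = dataset["train"][0]
print(example["question"])            # the question text
print(example["context"][:200])       # the supporting passage (truncated here)
print(example["answers"]["text"][0])  # the first reference answer
```

To generate free-form answers instead of looking them up, the next snippet swaps the rule-based function for a pre-trained language model.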
```python
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load a pre-trained GPT-2 model and tokenizer from Hugging Face
model_name = "gpt2"  # you can use other models such as "distilgpt2" for faster responses

# Initialize the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Create a pipeline for text generation
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chatbot_response(user_input):
    # Generate a response using the model
    response = generator(user_input, max_length=100, num_return_sequences=1)
    # Extract and return the generated text
    return response[0]["generated_text"]
```
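Because this generative `chatbot_response` has the same signature as the rule-based version, it can be plugged straight into the earlier Gradio interface. A minimal sketch, reusing the layout from the first snippet:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("### Study Assistance Chatbot")
    user_input = gr.Textbox(label="Enter your question here:")
    submit_button = gr.Button("Submit")
    chatbot_output = gr.Textbox(label="Chatbot Response", interactive=False)
    # Route the question through the GPT-2 pipeline defined above
    submit_button.click(chatbot_response, inputs=user_input, outputs=chatbot_output)

demo.launch()
```

Note that base GPT-2 continues the prompt rather than answering it, so responses may wander; prompt formatting or a fine-tuned model would tighten the output.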