import gradio as gr
from datasets import load_dataset
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

def chatbot_response(user_input):
    # Handle basic greetings
    if user_input.lower() in ["hello", "hi"]:
        return "Hello! How can I assist you today?"

    # Add more conditions for other queries here
    elif "supervised learning" in user_input.lower():
        return "Supervised learning is a machine learning approach where models are trained on labeled data."

    # If nothing matches, ask the user to clarify
    else:
        return "I'm here to assist with academic questions. Please specify what you'd like help with."

with gr.Blocks() as demo:
    gr.Markdown("### Study Assistance Chatbot")
    gr.Markdown("Welcome! Ask me anything related to your academic studies.")
    
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="Enter your question here:")
            submit_button = gr.Button("Submit")
        
        with gr.Column():
            chatbot_output = gr.Textbox(label="Chatbot Response", interactive=False)
    
    submit_button.click(chatbot_response, inputs=user_input, outputs=chatbot_output)
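
    # Optional (an addition, not in the original): let pressing Enter in the
    # textbox trigger the same handler as the Submit button.
    user_input.submit(chatbot_response, inputs=user_input, outputs=chatbot_output)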

# The demo is launched at the end of the file, after the dataset and model
# setup below has run (demo.launch() blocks until the server is stopped).

# Load a sample dataset from Hugging Face
dataset = load_dataset("squad")  # you can replace "squad" with any dataset you're using

# Print the first training example to verify that the dataset loaded
print(dataset["train"][0])
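
# A minimal sketch (hypothetical, not in the original) of turning a slice of
# the dataset into lookup pairs a rule-based bot could draw on; it assumes the
# standard SQuAD fields "question" and "answers".
qa_pairs = {
    example["question"].lower(): example["answers"]["text"][0]
    for example in dataset["train"].select(range(100))
    if example["answers"]["text"]
}
print(f"Loaded {len(qa_pairs)} question-answer pairs")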

# Load a pre-trained GPT-2 model and tokenizer from Hugging Face
model_name = "gpt2"  # smaller variants such as "distilgpt2" respond faster

# Initialize tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Create a pipeline for text generation
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chatbot_response(user_input):
    # Note: this shadows the rule-based chatbot_response above for direct
    # calls; the Gradio demo keeps the function it was originally wired with.
    # pad_token_id silences the warning GPT-2 emits (it has no padding token).
    response = generator(user_input, max_length=100, num_return_sequences=1,
                         pad_token_id=tokenizer.eos_token_id)

    # Extract and return the generated text
    return response[0]["generated_text"]
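
# A quick direct call (a usage example, not in the original) to verify the
# generation pipeline works before the UI starts:
print(chatbot_response("Explain supervised learning in one sentence."))

# Launch the UI last so that all of the setup above has already run.
demo.launch()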