File size: 1,138 Bytes
04ebad3
c1b80cc
 
2b4a1c2
 
c1b80cc
2b4a1c2
c1b80cc
2b4a1c2
c1b80cc
2b4a1c2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c1b80cc
 
04ebad3
2b4a1c2
04ebad3
c1b80cc
2b4a1c2
c1b80cc
04ebad3
2b4a1c2
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr
from transformers import pipeline

# Load your StudyBuddyAI model
# NOTE(review): this runs at import time, so merely importing the module
# triggers model download/load (network + disk I/O). device=-1 pins the
# pipeline to CPU; pass 0 (or a CUDA index) to use a GPU instead.
chat = pipeline("text-generation", model="redmint/studybuddy-ai", device=-1)  # use -1 for CPU, 0 for GPU

# Core response function
def respond(message, history, generator=None):
    """Generate a StudyBuddyAI reply for *message* given the chat *history*.

    Parameters
    ----------
    message : str
        The latest user question.
    history : list[tuple[str, str]]
        Prior (user, bot) exchanges; mutated in place with the new turn.
    generator : callable, optional
        Text-generation callable with the pipeline interface
        (``generator(prompt, **kwargs) -> [{"generated_text": ...}]``).
        Defaults to the module-level ``chat`` pipeline; injectable for tests.

    Returns
    -------
    tuple[list, list]
        ``(history, history)`` — the same list twice, matching the two
        Gradio output slots wired up in the UI.
    """
    if generator is None:
        generator = chat  # fall back to the module-level pipeline

    # Rebuild the conversation as a simple Q&A transcript.
    context = "".join(
        f"Question: {user}\nAnswer: {bot}\n" for user, bot in history
    )

    # Add the latest user question as an open-ended prompt.
    prompt = context + f"Question: {message}\nAnswer:"

    # Generate a continuation of the prompt.
    output = generator(
        prompt,
        max_new_tokens=200,
        temperature=0.4,
        top_p=0.9,
        do_sample=True,
    )[0]["generated_text"]

    # Keep only the text after the final "Answer:" marker (the pipeline
    # echoes the whole prompt in generated_text).
    if "Answer:" in output:
        response = output.split("Answer:")[-1]
    else:
        response = output
    # Fix: the model often keeps generating and invents the *next*
    # "Question:" turn — truncate at the first such marker so the
    # hallucinated follow-up is never shown to the user.
    response = response.split("\nQuestion:")[0].strip()

    history.append((message, response))
    return history, history

# Gradio interface
with gr.Blocks() as demo:
    # Chat display; respond() builds history as (user, bot) tuple pairs.
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Ask StudyBuddyAI")
    # NOTE(review): `chatbot` is listed twice as an output because respond()
    # returns (history, history). The textbox is never cleared on submit —
    # consider having respond() also return "" and wiring [msg, chatbot].
    msg.submit(respond, [msg, chatbot], [chatbot, chatbot])

if __name__ == "__main__":
    # Start the local Gradio server (blocking call).
    demo.launch()