import gradio as gr
import faiss
import pickle
from sentence_transformers import SentenceTransformer
import numpy as np
from huggingface_hub import InferenceClient

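# Load the prebuilt FAISS index of chunk embeddings from disk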
index = faiss.read_index("alzheimers_index.faiss")

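# Load the text chunks; each entry corresponds to a row in the FAISS index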
with open("chunks.pkl", "rb") as f:
    chunks = pickle.load(f)

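# Embedding model for encoding queries; should match the model used to embed the chunks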
model = SentenceTransformer("all-MiniLM-L6-v2")

def retrieve_rag_context(query, k=3):
    """Return top-k relevant chunks for a query."""
    query_embedding = model.encode([query])
    distances, indices = index.search(np.array(query_embedding), k)
    results = "\n\n---\n\n".join([chunks[i]["text"] for i in indices[0]])
    return results

def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
   """Respond using GPT-OSS-20B with RAG context"""
    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

    # Retrieve RAG context
    rag_context = retrieve_rag_context(message)
    
    # Combine system message with RAG context
    full_system_message = f"{system_message}\n\nRelevant info from knowledge base:\n{rag_context}"

    # Prepare messages
    messages = [{"role": "system", "content": full_system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""

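    # Stream the chat completion, yielding the accumulated response as tokens arrive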
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = chunk.choices
        token = ""
        if len(choices) and choices[0].delta.content:
            token = choices[0].delta.content

        response += token
        yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a helpful AI assistant for Alzheimer's patients and caregivers.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

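# The sidebar login button supplies the Hugging Face OAuth token that Gradio injects
# into respond() via its gr.OAuthToken parameter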
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()


if __name__ == "__main__":
    demo.launch()