import gradio as gr
import os
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import faiss
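
# Minimal retrieval-augmented generation (RAG) chat app: the local
# "verilog.txt" knowledge base is chunked and embedded at startup, indexed
# with FAISS, and the chunks nearest each user message are prepended to the
# system prompt before querying the hosted model through the Inference API.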

# Load the project knowledge base from the stored text file
with open("verilog.txt", "r", encoding="utf-8") as f:
    data = f.read()

# Simple chunking: split on blank lines and drop empty fragments
chunks = [chunk for chunk in data.split("\n\n") if chunk.strip()]

# Embed every chunk once at startup with a compact sentence-embedding model
embedder = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = embedder.encode(chunks)

# Build an exact (brute-force) L2 FAISS index over the chunk embeddings
dimension = embeddings.shape[1]
index = faiss.IndexFlatL2(dimension)
index.add(embeddings)

def rag_retrieve(query, top_k=3):
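    """Embed the query and return the top_k nearest chunks as one context string."""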
    query_emb = embedder.encode([query])
    distances, indices = index.search(query_emb, top_k)
    retrieved_chunks = [chunks[i] for i in indices[0]]
    return "\n".join(retrieved_chunks)

def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
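    """Stream a RAG-augmented chat completion for the user's message.

    Requires a logged-in user: `hf_token` is supplied by the gr.LoginButton
    OAuth flow and authenticates the Inference API call.
    """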

    # Retrieve the most relevant knowledge-base chunks for this message
    retrieved_context = rag_retrieve(message)

    # Prepend the retrieved context to the system prompt so the model can
    # ground its answer in the project documentation
    rag_augmented_system = (
        f"{system_message}\n\n"
        "Relevant technical specifications and Verilog implementation details:\n"
        f"{retrieved_context}\n\n"
        "Use this information while responding clearly and politely."
    )

    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

    # Build the chat transcript: system prompt, prior turns, then the new message
    messages = [{"role": "system", "content": rag_augmented_system}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""

    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = message.choices
        token = ""
        if len(choices) and choices[0].delta.content:
            token = choices[0].delta.content

        response += token
        yield response


chatbot = gr.ChatInterface(
    respond,
    # Pass history as a list of {"role", "content"} dicts, matching the
    # type annotation on respond() and the format chat_completion expects
    type="messages",
    additional_inputs=[
        gr.Textbox(
            value=(
                "You are an AI-based knowledge base for the ICT project: a "
                "16-bit RISC processor built in Verilog by Hashir Ehtisham, "
                "Abdullah Ikram and Hadi Khan Lodhi."
            ),
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

# Wrap the chat interface in a Blocks app with a sidebar login button, which
# provides the gr.OAuthToken consumed by respond()
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()


if __name__ == "__main__":
    demo.launch()