# NOTE(review): removed non-code residue from a web blob viewer (file size,
# commit hash, and a duplicated line-number gutter) that preceded the module.

import os
from generate_embeddings import connect_tovector_store
from generate_embeddings import query_knowledge_base
#from Evaluation import evaluation
from llama_index.llms.openai import OpenAI

# NOTE(review): this client is created at import time, *before* the user has
# entered an OpenAI key on the setup page — presumably it relies on an
# OPENAI_API_KEY already present in the environment; confirm. `llm` is not
# referenced anywhere else in this file.
llm = OpenAI(model="gpt-4o-mini")
# Module-level placeholders; they are shadowed by the gr.Textbox components of
# the same names inside launch_ui() and by the parameters of save_api_keys().
perplexity_key = ""
cohere_key = ""

import gradio as gr
from llama_index.core.memory import ChatSummaryMemoryBuffer

def save_api_keys(openai_key, perplexity_key, cohere_key):
    """Validate and store the three API keys entered on the setup page.

    Args:
        openai_key: OpenAI API key from the setup textbox.
        perplexity_key: Perplexity API key from the setup textbox.
        cohere_key: Cohere API key from the setup textbox.

    Returns:
        A 3-tuple consumed by the ``submit_keys.click`` outputs:
        a dict of the stripped keys (stored in ``api_keys_state``),
        a ``gr.update`` hiding the setup page, and a ``gr.update``
        revealing the chat page.

    Raises:
        gr.Error: if any of the three keys is missing or empty.
    """
    if not openai_key or not perplexity_key or not cohere_key:
        # Fixed typo in the user-facing message (" ALl API keys are required").
        raise gr.Error("All API keys are required")

    return {
        "openai": openai_key.strip(),
        "perplexity": perplexity_key.strip(),
        "cohere": cohere_key.strip(),
    }, gr.update(visible=False), gr.update(visible=True)


def generate_completion(query, history, memory, api_keys):
    """Answer one chat turn by querying the knowledge base.

    Args:
        query: The user's latest message.
        history: Chat history supplied by gr.ChatInterface (unused here).
        memory: ChatSummaryMemoryBuffer session state (currently unused;
            kept to match the ``additional_inputs`` wiring in launch_ui).
        api_keys: Dict produced by save_api_keys with "openai",
            "perplexity", and "cohere" entries.

    Returns:
        The response text to display in the chatbot.
    """
    # llama_index's OpenAI LLM reads the key from the environment.
    os.environ["OPENAI_API_KEY"] = api_keys["openai"]
    perplexity_key = api_keys["perplexity"]
    cohere_key = api_keys["cohere"]

    index = connect_tovector_store()
    response = query_knowledge_base(index, query, perplexity_key, cohere_key)

    # Unwrap whichever text attribute the response object exposes.
    # (Removed the dead `return response` that followed — every branch of
    # the original if/elif/else already returned, so it was unreachable.)
    if hasattr(response, "response_text"):
        return response.response_text
    if hasattr(response, "response"):
        return response.response
    return str(response)


def launch_ui():
    """Build and launch the two-page Gradio app: an API-key setup page
    followed by the chat interface. Blocks until the server stops
    (``demo.launch`` is called with ``debug=True``)."""
    with gr.Blocks(
            fill_height=True,
            title="AI Tutor 🤖",
            analytics_enabled=True,
    ) as demo:
        # ---------- GLOBAL STATE ----------
        # Per-session state: api_keys_state holds the dict returned by
        # save_api_keys; memory_state holds the chat summary buffer.
        api_keys_state = gr.State({})
        # NOTE(review): a callable is passed as the State value — presumably
        # Gradio treats it as a per-session default factory; confirm that this
        # gradio version does not store the lambda itself as the value.
        memory_state = gr.State(
            lambda: ChatSummaryMemoryBuffer.from_defaults(
                token_limit=120000,
            )
        )

        # ---------- PAGE 1: API KEY SETUP ----------
        # Visible first; hidden by save_api_keys once valid keys are entered.
        with gr.Column(visible=True) as setup_page:
            gr.Markdown("## 🔐 API Key Setup")
            gr.Markdown(
                "Enter your API keys to start the AI Tutor.\n"
                "Keys are stored only in memory for this session."
            )

            # All three textboxes use type="password" so keys are masked.
            openai_key = gr.Textbox(
                label="OpenAI API Key",
                placeholder="",
                type="password"
            )
            perplexity_key = gr.Textbox(
                label="Perplexity API Key",
                placeholder="",
                type="password"
            )
            cohere_key = gr.Textbox(
                label="Cohere API Key",
                placeholder="",
                type="password"
            )

            submit_keys = gr.Button("Continue →")

        # ---------- PAGE 2: CHAT UI ----------
        # Hidden until save_api_keys flips visibility.
        with gr.Column(visible=False) as chat_page:
            chatbot = gr.Chatbot(
                scale=1,
                placeholder="<strong>AI Tutor 🤖: Ask anything about AI, LLMs, RAG, architectures</strong><br>",
                show_label=False,
                #show_copy_button=True,
            )

            # generate_completion receives (query, history) from the chat UI
            # plus the two State objects as additional inputs.
            gr.ChatInterface(
                fn=generate_completion,
                chatbot=chatbot,
                additional_inputs=[memory_state, api_keys_state],
                type="messages",
            )

        # ---------- ACTION ----------
        # On click: validate keys, store them, swap page visibility.
        submit_keys.click(
            fn=save_api_keys,
            inputs=[openai_key, perplexity_key, cohere_key],
            outputs=[api_keys_state, setup_page, chat_page],
        )

        # Serialize request handling; share=True exposes a public URL.
        demo.queue(default_concurrency_limit=1)
        demo.launch(debug=True, share=True)


if __name__ == "__main__":
    # Script entry point: build and launch the Gradio app.
    launch_ui()