# (Removed page-scrape artifact: Hugging Face Spaces status banner "Spaces: Sleeping")
# --- Imports: stdlib, third-party, local (PEP 8 grouping) -----------------
import os

import gradio as gr
from llama_index.core.memory import ChatSummaryMemoryBuffer
from llama_index.llms.openai import OpenAI

from generate_embeddings import connect_tovector_store, query_knowledge_base

# Module-level LLM handle with a fixed model. NOTE(review): `llm` is never
# referenced in this file — presumably consumed implicitly by llama_index's
# global settings or left over from an earlier revision; confirm before removal.
llm = OpenAI(model="gpt-4o-mini")

# Placeholder globals. NOTE(review): these are shadowed by the gr.Textbox
# locals in launch_ui() and by the parameters of generate_completion(); the
# real keys arrive at runtime via the setup page and gr.State.
perplexity_key = ""
cohere_key = ""
def save_api_keys(openai_key, perplexity_key, cohere_key):
    """Validate the three API keys from the setup page and switch to chat.

    Parameters
    ----------
    openai_key, perplexity_key, cohere_key : str
        Raw textbox values; surrounding whitespace is stripped.

    Returns
    -------
    tuple
        ``(keys_dict, hide_setup, show_chat)`` — the dict feeds
        ``api_keys_state`` and the two ``gr.update`` values flip the
        visibility of the setup and chat pages.

    Raises
    ------
    gr.Error
        If any key is missing or whitespace-only.
    """
    # Strip BEFORE validating so a whitespace-only entry is rejected instead
    # of being accepted and then silently reduced to "".
    openai_key = (openai_key or "").strip()
    perplexity_key = (perplexity_key or "").strip()
    cohere_key = (cohere_key or "").strip()
    if not (openai_key and perplexity_key and cohere_key):
        # Fixed typo in the original message (" ALl API keys are required").
        raise gr.Error("All API keys are required")
    return (
        {
            "openai": openai_key,
            "perplexity": perplexity_key,
            "cohere": cohere_key,
        },
        gr.update(visible=False),  # hide the key-setup page
        gr.update(visible=True),   # reveal the chat page
    )
def generate_completion(query, history, memory, api_keys):
    """Answer one chat turn by querying the RAG knowledge base.

    Parameters
    ----------
    query : str
        The user's question.
    history : list
        Chat history supplied by gr.ChatInterface (currently unused here;
        kept for the ChatInterface calling convention).
    memory : ChatSummaryMemoryBuffer
        Per-session memory state (currently unused here).
    api_keys : dict
        ``{"openai": ..., "perplexity": ..., "cohere": ...}`` produced by
        :func:`save_api_keys`.

    Returns
    -------
    str
        The answer text extracted from the query response.
    """
    # The downstream OpenAI client reads the key from the environment.
    os.environ["OPENAI_API_KEY"] = api_keys["openai"]

    index = connect_tovector_store()
    response = query_knowledge_base(
        index, query, api_keys["perplexity"], api_keys["cohere"]
    )

    # Normalize across the possible shapes of the response object.
    # (The original had an unreachable trailing `return response`; removed.)
    if hasattr(response, "response_text"):
        return response.response_text
    if hasattr(response, "response"):
        return response.response
    return str(response)
def launch_ui():
    """Assemble the two-page Gradio app and launch it (blocking).

    Page 1 (``setup_page``) collects three API keys; submitting runs
    :func:`save_api_keys`, which stores them in ``api_keys_state`` and flips
    page visibility. Page 2 (``chat_page``) hosts a ChatInterface driven by
    :func:`generate_completion`, which receives the memory and key state as
    additional inputs.
    """
    with gr.Blocks(
        fill_height=True,
        title="AI Tutor 🤖",
        analytics_enabled=True,
    ) as demo:
        # ---------- GLOBAL STATE ----------
        api_keys_state = gr.State({})
        # Deferred factory (lambda) so each session gets its own fresh
        # memory buffer instead of sharing one module-level object.
        memory_state = gr.State(
            lambda: ChatSummaryMemoryBuffer.from_defaults(token_limit=120000)
        )

        # ---------- PAGE 1: API KEY SETUP ----------
        with gr.Column(visible=True) as setup_page:
            gr.Markdown("## 🔑 API Key Setup")
            gr.Markdown(
                "Enter your API keys to start the AI Tutor.\n"
                "Keys are stored only in memory for this session."
            )
            openai_key = gr.Textbox(
                label="OpenAI API Key", placeholder="", type="password"
            )
            perplexity_key = gr.Textbox(
                label="Perplexity API Key", placeholder="", type="password"
            )
            cohere_key = gr.Textbox(
                label="Cohere API Key", placeholder="", type="password"
            )
            submit_keys = gr.Button("Continue →")

        # ---------- PAGE 2: CHAT UI ----------
        with gr.Column(visible=False) as chat_page:
            chatbot = gr.Chatbot(
                scale=1,
                placeholder=(
                    "<strong>AI Tutor 🤖: Ask anything about AI, LLMs, "
                    "RAG, architectures</strong><br>"
                ),
                show_label=False,
            )
            gr.ChatInterface(
                fn=generate_completion,
                chatbot=chatbot,
                additional_inputs=[memory_state, api_keys_state],
                type="messages",
            )

        # ---------- ACTION: wire the setup form to the page switch ----------
        submit_keys.click(
            fn=save_api_keys,
            inputs=[openai_key, perplexity_key, cohere_key],
            outputs=[api_keys_state, setup_page, chat_page],
        )

    # Serialize request handling (one concurrent worker), then launch with a
    # public share link and debug logging, as in the original.
    demo.queue(default_concurrency_limit=1)
    demo.launch(debug=True, share=True)
# Script entry point: build the Gradio UI and block on the running server.
if __name__ == "__main__":
    launch_ui()