Hugging Face Space — runtime status: Sleeping
import gradio as gr
import openai
from getpass import getpass
import os
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings

# OpenRouter API: OpenRouter exposes an OpenAI-compatible endpoint, so the
# stock openai client is simply pointed at it. The key is read from the
# environment (api_key will be None if OPENROUTER_API_KEY is unset, and
# requests will then fail with an auth error at call time).
openai.api_key = os.environ.get("OPENROUTER_API_KEY")
openai.api_base = "https://openrouter.ai/api/v1"

# Load vector DB: a prebuilt FAISS index of Bhagavad Gita passages, embedded
# with the MiniLM sentence-transformer (the same model must have been used to
# build the index for retrieval to be meaningful).
# NOTE(review): allow_dangerous_deserialization=True unpickles the saved index;
# this is only acceptable because "gita_vector_index" is a local, trusted
# artifact shipped with the Space — never enable it for untrusted files.
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
db = FAISS.load_local("gita_vector_index", embedding_model, allow_dangerous_deserialization=True)
retriever = db.as_retriever()
# Chat function
def Talk2Gita(query, history):
    """Answer *query* using passages retrieved from the Bhagavad Gita index.

    Parameters
    ----------
    query : str
        The user's current question.
    history : list
        Prior turns supplied by gr.ChatInterface — either [user, assistant]
        pairs (legacy Gradio) or {"role": ..., "content": ...} dicts (newer
        "messages" format); both shapes are handled below.

    Returns
    -------
    str
        The model's answer, or a human-readable error string (returning a
        string instead of raising keeps the chat UI responsive on failure).
    """
    try:
        # Retrieve relevant passages; keep only the top two to stay within
        # the model's context budget.
        docs = retriever.get_relevant_documents(query)
        context = "\n\n".join(doc.page_content for doc in docs[:2])

        messages = [
            {"role": "system", "content": "You are a helpful assistant answering questions from the Bhagavad Gita."},
        ]

        # Fix: the original ignored `history`, so the bot had no memory of
        # the conversation. Replay prior turns before the new question.
        for turn in history or []:
            if isinstance(turn, dict):
                # Newer Gradio "messages" history format.
                if turn.get("role") in ("user", "assistant") and turn.get("content"):
                    messages.append({"role": turn["role"], "content": turn["content"]})
            else:
                # Legacy (user_message, bot_message) pair format.
                user_msg, bot_msg = turn[0], turn[1]
                if user_msg:
                    messages.append({"role": "user", "content": user_msg})
                if bot_msg:
                    messages.append({"role": "assistant", "content": bot_msg})

        messages.append({"role": "user", "content": f"""Context:
{context}
Question:
{query}"""})

        response = openai.ChatCompletion.create(
            model="mistralai/mistral-7b-instruct",
            messages=messages,
            max_tokens=300,
        )
        return response["choices"][0]["message"]["content"]
    except Exception as e:
        # Broad catch is deliberate at this UI boundary: any failure (network,
        # auth, retrieval) is surfaced in the chat instead of crashing the app.
        return f" Error: {str(e)}"
# Launch Gradio app: share=True additionally creates a temporary public
# *.gradio.live tunnel URL alongside the local server.
gr.ChatInterface(Talk2Gita, title="Talk to the Bhagavad Gita").launch(share=True)