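# ------------------------------------------------------------------
# Previous version (DeepSeek API backend), kept commented out for
# reference. The active implementation starts below it.
# ------------------------------------------------------------------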
# import os
# import sys
# import requests
# from langchain.chains import ConversationalRetrievalChain
# from langchain.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
# from langchain_text_splitters import CharacterTextSplitter
# from langchain.vectorstores import Chroma
# from langchain.embeddings import HuggingFaceEmbeddings
# from langchain.llms.base import LLM
# import gradio as gr
#
# # workaround for sqlite in HF spaces
# __import__('pysqlite3')
# sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
#
# # Load documents
# docs = []
# for f in os.listdir("multiple_docs"):
#     if f.endswith(".pdf"):
#         loader = PyPDFLoader(os.path.join("multiple_docs", f))
#         docs.extend(loader.load())
#     elif f.endswith(".docx") or f.endswith(".doc"):
#         loader = Docx2txtLoader(os.path.join("multiple_docs", f))
#         docs.extend(loader.load())
#     elif f.endswith(".txt"):
#         loader = TextLoader(os.path.join("multiple_docs", f))
#         docs.extend(loader.load())
#
# # Split into chunks
# splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
# docs = splitter.split_documents(docs)
# texts = [doc.page_content for doc in docs]
# metadatas = [{"id": i} for i in range(len(texts))]
#
# # Embeddings
# embedding_function = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
#
# # Vectorstore
# vectorstore = Chroma(
#     persist_directory="./db",
#     embedding_function=embedding_function
# )
# vectorstore.add_texts(texts=texts, metadatas=metadatas)
# vectorstore.persist()
#
# # Get DeepSeek API key from env
# DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
# if DEEPSEEK_API_KEY is None:
#     raise ValueError("DEEPSEEK_API_KEY environment variable is not set.")
#
# # DeepSeek API endpoint
# DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"
#
# # Wrap the DeepSeek API in a LangChain LLM
# class DeepSeekLLM(LLM):
#     """LLM that queries DeepSeek's API."""
#     api_key: str = DEEPSEEK_API_KEY
#
#     def _call(self, prompt, stop=None, run_manager=None, **kwargs):
#         headers = {
#             "Authorization": f"Bearer {self.api_key}",
#             "Content-Type": "application/json"
#         }
#         payload = {
#             "model": "deepseek-chat",  # adjust if you have a specific model name
#             "messages": [
#                 {"role": "system", "content": "You are a helpful assistant."},
#                 {"role": "user", "content": prompt}
#             ],
#             "temperature": 0.7,
#             "max_tokens": 512
#         }
#         response = requests.post(DEEPSEEK_API_URL, headers=headers, json=payload)
#         response.raise_for_status()
#         data = response.json()
#         return data["choices"][0]["message"]["content"].strip()
#
#     @property
#     def _llm_type(self) -> str:
#         return "deepseek_api"
#
# llm = DeepSeekLLM()
#
# # Conversational chain
# chain = ConversationalRetrievalChain.from_llm(
#     llm,
#     retriever=vectorstore.as_retriever(search_kwargs={'k': 6}),
#     return_source_documents=True,
#     verbose=False
# )
#
# # Gradio UI
# chat_history = []
# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot(
#         [("", "Hello, I'm Thierry Decae's chatbot. You can ask me recruitment-related questions (my experience, where I'm eligible to work, skills, etc.), and you can chat with me in multiple languages.")],
#         avatar_images=["./multiple_docs/Guest.jpg", "./multiple_docs/Thierry Picture.jpg"]
#     )
#     msg = gr.Textbox(placeholder="Type your question here...")
#     clear = gr.Button("Clear")
#
#     def user(query, chat_history):
#         chat_history_tuples = [(m[0], m[1]) for m in chat_history]
#         result = chain({"question": query, "chat_history": chat_history_tuples})
#         chat_history.append((query, result["answer"]))
#         return gr.update(value=""), chat_history
#
#     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
#     clear.click(lambda: None, None, chatbot, queue=False)
#
# demo.launch(debug=True)  # remove share=True if running in HF Spaces
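# ------------------------------------------------------------------
# Current version: HuggingFace Router (OpenAI-compatible) backend with
# a manually assembled ConversationalRetrievalChain.
# ------------------------------------------------------------------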
import os
import sys
from langchain_classic.chains import ConversationalRetrievalChain, LLMChain
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_classic.chains.question_answering import load_qa_chain
import gradio as gr
# workaround for sqlite in HF spaces (only needed on HuggingFace Spaces)
try:
    __import__('pysqlite3')
    sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
except ImportError:
    # pysqlite3 not available, use system sqlite3 (fine for local macOS/Linux)
    pass
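# Note: chromadb needs a newer sqlite3 (>= 3.35) than some base images ship;
# the pysqlite3-binary wheel supplies one, hence the module swap above.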
# Load documents
docs = []
for f in os.listdir("multiple_docs"):
    if f.endswith(".pdf"):
        loader = PyPDFLoader(os.path.join("multiple_docs", f))
        docs.extend(loader.load())
    elif f.endswith(".docx") or f.endswith(".doc"):
        loader = Docx2txtLoader(os.path.join("multiple_docs", f))
        docs.extend(loader.load())
    elif f.endswith(".txt"):
        loader = TextLoader(os.path.join("multiple_docs", f))
        docs.extend(loader.load())
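# Files in multiple_docs/ with any other extension are silently skipped.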
# Split into chunks
splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
docs = splitter.split_documents(docs)
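# chunk_size/chunk_overlap are measured in characters. CharacterTextSplitter
# splits on its separator (blank lines by default), so a single paragraph
# longer than 1000 characters can still produce an oversized chunk.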
# Embeddings
embedding_function = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
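# all-MiniLM-L6-v2 runs locally (CPU is fine) and produces 384-dimensional
# sentence embeddings; no API key is needed for this step.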
# Vectorstore - use from_documents, which is the more modern API
vectorstore = Chroma.from_documents(
    documents=docs,
    embedding=embedding_function,
    persist_directory="./db"
)
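# from_documents embeds every chunk and writes the index under ./db. With
# recent chromadb releases (>= 0.4) persistence is automatic, so no explicit
# .persist() call is needed here.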
# Use the HuggingFace Router API (OpenAI-compatible; works hosted in Spaces)
# HF token (must be set via the HF_TOKEN environment variable)
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable is not set. Please set it before running the application.")

# You can change MODEL_NAME to use different models from HuggingFace.
# Popular options:
# - "microsoft/DialoGPT-large" (conversational model)
# - "HuggingFaceH4/zephyr-7b-beta" (high quality chat model)
# - "mistralai/Mistral-7B-Instruct-v0.2" (excellent quality)
# - "meta-llama/Llama-2-7b-chat-hf" (if you have access)
# - "google/flan-t5-large" (good for Q&A)
MODEL_NAME = os.getenv("HF_MODEL_NAME", "meta-llama/Llama-3.1-8B-Instruct:novita")
print(f"Using HuggingFace Router API with model: {MODEL_NAME}")

# Initialize ChatOpenAI against the HuggingFace Router, which exposes an
# OpenAI-compatible endpoint and allows provider suffixes such as :novita.
llm = ChatOpenAI(
    model=MODEL_NAME,
    base_url="https://router.huggingface.co/v1",
    api_key=HF_TOKEN,
    temperature=0.7,
    max_tokens=512,
)
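# Optional sanity check of the backend (uncomment to try):
# print(llm.invoke("Reply with the word 'ok'.").content)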
# Custom prompt template
template = """
You are an AI trading advisor for the Quantum Financial Network trading universe.

IMPORTANT: You are providing DIRECT TRADING RECOMMENDATIONS. Give clear, actionable advice based on:
- Current market scenarios described in the question
- Risk tolerance level specified (0 = conservative, 10 = aggressive)
- Company information and market context provided
- Specific stock symbols and prices mentioned

When asked about a specific trade (e.g., "Should I buy X shares of Y?"), you MUST:
1. Give a CLEAR RECOMMENDATION: "Yes, buy...", "No, don't buy...", or "Consider buying/selling..."
2. Explain WHY based on the current scenario and company information
3. Reference specific companies, market events, or data from the context
4. Adjust your certainty based on the risk tolerance level mentioned
5. Be DIRECT and ACTIONABLE - no vague "I'd need more information" responses

Use the following context to answer the user's question. Always give a direct recommendation.

Context:
{context}

Question: {question}

Answer:
"""
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=template,
)
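# input_variables must match the placeholders the combine-docs chain fills in:
# "context" receives the retrieved chunks, "question" the (condensed) query.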
# QA chain with custom prompt
qa_chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
# Question rephraser chain for follow-up questions → standalone
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(
    """
Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question.

Chat History:
{chat_history}

Follow Up Input: {question}

Standalone question:
"""
)
question_generator = LLMChain(
    llm=llm,
    prompt=CONDENSE_QUESTION_PROMPT
)
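# Condensing matters for retrieval: a follow-up like "what about its debt?"
# only matches relevant chunks once the company name is spelled out.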
# Finally: build the ConversationalRetrievalChain manually
chain = ConversationalRetrievalChain(
    retriever=vectorstore.as_retriever(search_kwargs={'k': 6}),
    question_generator=question_generator,
    combine_docs_chain=qa_chain,
    return_source_documents=True,
    verbose=False
)
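# Request flow: (chat_history, question) -> question_generator condenses to a
# standalone query -> retriever fetches the top k=6 chunks -> qa_chain stuffs
# them into {context} and asks the LLM -> {"answer", "source_documents"}.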
# Gradio UI
chat_history = []
with gr.Blocks() as demo:
    # type="messages" is required when seeding the Chatbot with role/content dicts
    chatbot = gr.Chatbot(
        value=[{"role": "assistant", "content": "Welcome to the Quantum Financial Network! I'm your trading universe assistant. You can ask me about companies (VCG, CSI, STDY, AUBIO, NLN), market events, trading strategies, currency systems, trading hours, market history, and anything about our financial markets. What would you like to know?"}],
        type="messages"
    )
    msg = gr.Textbox(placeholder="Type your question here...")
    clear = gr.Button("Clear")
    def user(query, chat_history):
        # Convert Gradio "messages" format (role/content dicts) to the
        # [(human, ai), ...] tuple format the LangChain chain expects.
        chat_history_tuples = []
        current_human = None
        for message in chat_history:  # don't shadow the `msg` Textbox above
            if isinstance(message, dict):
                role = message.get("role", "")
                content = str(message.get("content", "") or "")
                if role == "user":
                    # Previous user message never got a reply: pair it with ""
                    if current_human is not None:
                        chat_history_tuples.append((current_human, ""))
                    current_human = content
                elif role == "assistant":
                    chat_history_tuples.append((current_human or "", content))
                    current_human = None
            elif isinstance(message, (list, tuple)) and len(message) >= 2:
                # Fallback for the old tuple format
                chat_history_tuples.append((str(message[0]), str(message[1])))
        # Trailing user message without an assistant reply
        if current_human is not None:
            chat_history_tuples.append((current_human, ""))

        # Get the response from the chain (invoke is the non-deprecated call)
        result = chain.invoke({"question": query, "chat_history": chat_history_tuples})

        # Append both the user query and the assistant response in messages format
        chat_history.append({"role": "user", "content": query})
        chat_history.append({"role": "assistant", "content": result["answer"]})
        return gr.update(value=""), chat_history
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(lambda: None, None, chatbot, queue=False)
# Only launch the Gradio demo if running this file directly (not when imported)
if __name__ == "__main__":
    demo.launch(debug=True)  # no share=True needed; HF Spaces serves the app itself