import os
import torch
from torch import cuda
from transformers import AutoTokenizer, StoppingCriteria
from huggingface_hub import InferenceClient
from langchain.llms import HuggingFaceHub
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
import gradio as gr

# Load the Hugging Face token from the environment
HF_TOKEN = os.environ.get("HF_TOKEN", None)

# Load the Mistral model client and its tokenizer
model_id = 'mistralai/Mistral-7B-Instruct-v0.3'
client = InferenceClient(model_id, token=HF_TOKEN)
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)

# Define stopping criteria
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids, scores, **kwargs):
        for stop_ids in stop_token_ids:
            if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                return True
        return False

# Define stopping criteria list (no special tokens, so suffix matching stays aligned)
stop_list = ['\nHuman:', '\n```\n']
stop_token_ids = [tokenizer(x, add_special_tokens=False)['input_ids'] for x in stop_list]
stop_token_ids = [torch.LongTensor(x).to(cuda.current_device() if cuda.is_available() else 'cpu') for x in stop_token_ids]
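# Note: StopOnTokens follows the transformers StoppingCriteria interface and would only take
# effect if passed to a local model.generate()/pipeline call; InferenceClient.text_generation
# below does not consume it.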

# Streaming text generation helper backed by the Inference API
def generate(prompt, history, system_prompt=None, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = format_prompt(prompt, history, system_prompt)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
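# Illustrative usage (not executed here): each value yielded by generate() is the partial
# completion so far, which suits streaming chat UIs, e.g.
#     for partial in generate("Can BQ25896 support I2C interface?", history=[]):
#         print(partial)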

# Wrap the hosted model as a LangChain LLM. HuggingFacePipeline expects a transformers
# pipeline object rather than a plain function, so the Inference API wrapper is used instead.
llm = HuggingFaceHub(repo_id=model_id, huggingfacehub_api_token=HF_TOKEN,
                     model_kwargs={"temperature": 0.2, "max_new_tokens": 1024})

# Load the stored FAISS index
# (recent LangChain releases also require allow_dangerous_deserialization=True here)
try:
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2",
                                       model_kwargs={"device": "cuda" if cuda.is_available() else "cpu"})
    vectorstore = FAISS.load_local('faiss_index', embeddings)
    print("Loaded embedding successfully")
except ImportError as e:
    print("FAISS could not be imported. Make sure FAISS is installed correctly.")
    raise e

# Set up the Conversational Retrieval Chain
chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)
chat_history = []
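# Note: chat_history stays empty because nothing is ever appended to it, so each query is
# answered independently. To carry context across turns, append (question, answer) tuples
# to it after every call.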

def format_prompt(query, history=None, system_prompt=None):
    # history and system_prompt are accepted for compatibility with generate() but are not
    # folded into the template yet
    prompt = f"""
You are a knowledgeable assistant with access to a comprehensive database.
I need you to answer my question and provide related information in a specific format.
Here's what I need:
1. A brief, general response to my question based on related answers retrieved.
2. A JSON-formatted output containing:
   - "question": The original question.
   - "answer": The detailed answer.
   - "related_questions": A list of related questions and their answers, each as a dictionary with the keys:
     - "question": The related question.
     - "answer": The related answer.
Here's my question:
{query}
Include a brief final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
"""
    return prompt
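# For illustration only (not guaranteed model output), the JSON block requested by the
# prompt is expected to look roughly like:
#     {
#       "question": "Can BQ25896 support I2C interface?",
#       "answer": "...",
#       "related_questions": [
#         {"question": "...", "answer": "..."}
#       ]
#     }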

def qa_infer(query):
    formatted_prompt = format_prompt(query)
    result = chain({"question": formatted_prompt, "chat_history": chat_history})
    # Log the retrieved source documents for debugging
    for doc in result['source_documents']:
        print("-" * 50)
        print("Retrieved Document:", doc.page_content)
    print("#" * 100)
    print(result['answer'])
    return result['answer']

EXAMPLES = ["How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM",
            "Can BQ25896 support I2C interface?",
            "Does TDA2 vout support bt656 8-bit mode?"]

demo = gr.Interface(fn=qa_infer, inputs="text", outputs="text", examples=EXAMPLES,
                    cache_examples=False, allow_flagging='never')
demo.launch()
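
The app assumes a prebuilt faiss_index directory; it is not created anywhere in this file. A minimal sketch of how such an index could be built with the same embedding model is shown below (the TextLoader and the qa_dump.txt path are placeholders, not part of this Space):

# build_index.py - one-off helper that produces the 'faiss_index' folder loaded above
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

docs = TextLoader("qa_dump.txt").load()  # placeholder corpus; replace with the real Q&A data
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
FAISS.from_documents(chunks, embeddings).save_local("faiss_index")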