import transformers
from torch import cuda, bfloat16
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.document_loaders import HuggingFaceDatasetLoader
from langchain.vectorstores import Chroma
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA
from langchain.schema import AIMessage, HumanMessage
import gradio as gr

# embedding model used to vectorize the policy documents
embed_model_id = 'sentence-transformers/all-MiniLM-L6-v2'
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'

embed_model = HuggingFaceEmbeddings(
    model_name=embed_model_id,
    model_kwargs={'device': device},
    encode_kwargs={'device': device, 'batch_size': 4}
)

# load the pre-chunked company-policies dataset from the Hugging Face Hub
dataset_name = "beinghasnain16/company-policies"
page_content_column = "chunk"
hf_auth = '<YOUR_HF_TOKEN>'  # Hugging Face access token; never hard-code a real token in source
loader = HuggingFaceDatasetLoader(dataset_name, page_content_column, use_auth_token=hf_auth)
data = loader.load()

# build the Chroma vector store from the loaded documents
vectordb = Chroma.from_documents(data, embed_model)

model_id = 'meta-llama/Llama-2-13b-chat-hf'
# model_id = 'microsoft/phi-1_5'
# model_id = 'meta-llama/Llama-2-7b-chat-hf'

device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'

# set quantization configuration to load large model with less GPU memory
# this requires the `bitsandbytes` library
bnb_config = transformers.BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=bfloat16
)

# begin initializing HF items, need auth token for these
model_config = transformers.AutoConfig.from_pretrained(
    model_id,
    use_auth_token=hf_auth
)

model = transformers.AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    config=model_config,
    quantization_config=bnb_config,
    device_map='auto',
    use_auth_token=hf_auth
)
model.eval()
print(f"Model loaded on {device}")

tokenizer = transformers.AutoTokenizer.from_pretrained(
    model_id,
    use_auth_token=hf_auth
)

generate_text = transformers.pipeline(
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,  # langchain expects the full text
    task='text-generation',
    # we pass model parameters here too
    temperature=0.0,  # 'randomness' of outputs, 0.0 is the min and 1.0 the max
    max_new_tokens=512,  # max number of tokens to generate in the output
    repetition_penalty=1.1  # without this, output begins repeating
)

# quick smoke test of the raw generation pipeline
res = generate_text("Explain to me the difference between nuclear fission and fusion.")
print(res[0]["generated_text"])

# wrap the pipeline so LangChain can use it as an LLM
llm = HuggingFacePipeline(pipeline=generate_text)

rag_pipeline = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type='stuff',
    retriever=vectordb.as_retriever()
)

def predict(message, history):
    # Gradio passes the chat history as (human, ai) pairs; it is converted to
    # LangChain messages here, but note that the RetrievalQA chain is stateless
    # and only answers from the latest message plus retrieved documents.
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    llm_response = rag_pipeline(message)
    return llm_response['result']

gr.ChatInterface(predict).launch()
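
# Optional: exercise the RAG chain directly outside the Gradio UI (a sketch, not
# part of the original script; the question below is only an illustrative example
# and assumes the dataset contains leave-policy text). Because launch() blocks in
# a plain script, move this above the launch() call if you want it to run first.
print(rag_pipeline("Summarize the company's leave policy.")['result'])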