VDNT11 committed
Commit c51d070 · verified · 1 parent: 1316bcd

Update app.py

Files changed (1): app.py (+16 -8)
app.py CHANGED
@@ -12,15 +12,23 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.docstore.document import Document
 import PyPDF2
 import tempfile
+from huggingface_hub import login
+
+# Authenticate with Hugging Face token
+if os.getenv("HF_TOKEN"):
+    login(token=os.getenv("HF_TOKEN"))
+else:
+    raise ValueError("HF_TOKEN environment variable not set. Please set it in Hugging Face Spaces settings.")
 
 # Initialize BLIP for image captioning
 blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
-# Initialize Gemma-2B-Instruct for conversational tasks
-gemma_model_name = "google/gemma-2b-it"
-gemma_tokenizer = AutoTokenizer.from_pretrained(gemma_model_name)
-gemma_model = AutoModelForCausalLM.from_pretrained(gemma_model_name)
+# Initialize Mixtral-8x7B-Instruct for conversational tasks
+mixtral_model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+mixtral_tokenizer = AutoTokenizer.from_pretrained(mixtral_model_name)
+mixtral_model = AutoModelForCausalLM.from_pretrained(mixtral_model_name)
+mixtral_model = torch.quantization.quantize_dynamic(mixtral_model, {torch.nn.Linear}, dtype=torch.qint8)
 
 # Initialize vector store and embeddings for RAG
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
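A note on the quantization line added above: torch.quantization.quantize_dynamic swaps every nn.Linear in the model for an int8 dynamically quantized version. It runs after the full-precision weights are already in memory, so it speeds up CPU inference but does not reduce peak load memory. A minimal, runnable sketch of the same call on a small stand-in module (the toy model is illustrative, not the commit's code):

import torch
import torch.nn as nn

# Stand-in model; the commit applies the same call to the Mixtral model object.
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))

# Replace every nn.Linear with a dynamically quantized int8 version.
quantized = torch.quantization.quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)

print(quantized)  # Linear layers now appear as DynamicQuantizedLinear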
 
@@ -83,11 +91,11 @@ def chat_with_llm(message, history):
     if vector_store:
         docs = vector_store.similarity_search(message, k=2)
         context = "\n".join([doc.page_content for doc in docs])
-        prompt = f"<start_of_turn>user\nYou are a helpful assistant. Use the following context to answer the question accurately:\n\n{context}\n\nQuestion: {message}\n<end_of_turn>\n<start_of_turn>assistant"
-        inputs = gemma_tokenizer(prompt, return_tensors="pt")
+        prompt = f"[INST] You are a helpful assistant. Use the following context to answer the question accurately:\n\n{context}\n\nQuestion: {message} [/INST]"
+        inputs = mixtral_tokenizer(prompt, return_tensors="pt")
         with torch.no_grad():
-            outputs = gemma_model.generate(**inputs, max_length=500, num_return_sequences=1, temperature=0.7)
-        response = gemma_tokenizer.decode(outputs[0], skip_special_tokens=True)
+            outputs = mixtral_model.generate(**inputs, max_length=500, num_return_sequences=1, temperature=0.7)
+        response = mixtral_tokenizer.decode(outputs[0], skip_special_tokens=True)
         return response.replace(prompt, "").strip()
 
 def image_tab(image, target_languages):
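The hand-written [INST] ... [/INST] wrapper above is Mixtral-Instruct's expected prompt format. A minimal sketch of an equivalent approach, not the commit's code: transformers' apply_chat_template renders the same wrapper from a message list, and decoding only the tokens after the prompt removes the need for response.replace(prompt, ""). The context and message values here are illustrative stand-ins:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

context = "...retrieved passages..."       # illustrative stand-in
message = "What does the document say?"    # illustrative stand-in

# apply_chat_template renders Mixtral's [INST] ... [/INST] format from a message list.
messages = [{
    "role": "user",
    "content": (
        "You are a helpful assistant. Use the following context to answer "
        f"the question accurately:\n\n{context}\n\nQuestion: {message}"
    ),
}]
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt")

with torch.no_grad():
    # max_new_tokens bounds the reply length; temperature takes effect only with do_sample=True.
    output = model.generate(input_ids, max_new_tokens=500, do_sample=True, temperature=0.7)

# Decode only the newly generated tokens, skipping the echoed prompt.
response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)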