Update app.py
Browse files
app.py
CHANGED
|
@@ -1,11 +1,6 @@
|
|
| 1 |
|
| 2 |
from huggingface_hub import hf_hub_download
|
| 3 |
|
| 4 |
-
model_name_or_path = "hlhr202/llama-7B-ggml-int4"
|
| 5 |
-
model_basename = "ggml-model-q4_0.bin" # the model is in bin format
|
| 6 |
-
|
| 7 |
-
model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
|
| 8 |
-
|
| 9 |
n_gpu_layers = 40 # Change this value based on your model and your GPU VRAM pool.
|
| 10 |
n_batch = 256
|
| 11 |
|
|
@@ -24,12 +19,19 @@ from langchain_g4f import G4FLLM
|
|
| 24 |
# Make sure the model path is correct for your system!
|
| 25 |
llm = LLM = G4FLLM(
|
| 26 |
model=models.gpt_35_turbo,
|
| 27 |
-
provider=Provider.
|
| 28 |
)
|
| 29 |
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
docs = Docs(llm=llm, embeddings=embeddings)
|
| 34 |
|
| 35 |
keyword_search = 'bispecific antibody manufacture'
|
|
|
|
| 1 |
|
| 2 |
from huggingface_hub import hf_hub_download
|
| 3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
# Runtime knobs for the local ggml model — tune these to your hardware.
n_gpu_layers = 40  # Change this value based on your model and your GPU VRAM pool.
n_batch = 256  # prompt-processing batch size
|
| 6 |
|
|
|
|
| 19 |
# Make sure the model path is correct for your system!
# Build the gpt4free-backed chat model used by the rest of the script.
# NOTE(review): `llm = LLM = ...` also rebinds the (presumably imported) name
# `LLM` to this instance — looks like a typo for the annotated form
# `llm: LLM = G4FLLM(...)`; confirm against the imports (not visible in this
# hunk) before changing it.
llm = LLM = G4FLLM(model=models.gpt_35_turbo, provider=Provider.Aichat)
|
| 24 |
|
| 25 |
+
# Embedding backend: sentence-transformers MPNet served through LangChain.
from langchain.embeddings import HuggingFaceEmbeddings

model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}  # run the encoder on CPU
encode_kwargs = {'normalize_embeddings': False}  # keep raw (unnormalized) vectors
embeddings = HuggingFaceEmbeddings(
    model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
)

# Wire the chat model and embedder into the document-QA pipeline.
# NOTE(review): `Docs` is defined by an import outside this hunk — presumably
# paper-qa; verify before refactoring.
docs = Docs(llm=llm, embeddings=embeddings)
|
| 36 |
|
| 37 |
# Literature-search query fed to the docs pipeline downstream.
keyword_search = 'bispecific antibody manufacture'
|