add device 'cpu' to embed model
app.py CHANGED

@@ -17,7 +17,7 @@ st.title("Aplikacja z LlamaIndex")
 db = chromadb.PersistentClient(path="./abc")
 chroma_collection = db.get_or_create_collection("pomoc_ukrainie")
 vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
-embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5", device="cpu")
 
 # Utwórz pipeline do przetwarzania dokumentów
 pipeline = IngestionPipeline(
@@ -32,18 +32,13 @@ pipeline = IngestionPipeline(
 index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
 
 # Utwórz silnik zapytań
-# huggingface
 from transformers import AutoTokenizer
 
-# Settings.tokenizer = AutoTokenizer.from_pretrained(
-# "Qwen/Qwen2-7B-Instruct"
-# )
-
 # Load the correct tokenizer for Qwen/Qwen2-7B-Instruct
 tokeni = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B")
 
 llm = HuggingFaceLLM(model_name="Qwen/Qwen2-0.5B", tokenizer=tokeni)
-
+
 query_engine = index.as_query_engine(
     llm=llm,
     response_mode='compact')
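For context, here is a minimal sketch of how the changed embedding line slots into the rest of app.py, assuming the post-0.10 llama-index namespaced packages (the import paths are not part of this diff, so the actual file may use different ones):

# Sketch only: import paths are an assumption, not taken from the diff.
import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Persistent Chroma collection backing the vector store (path and collection name from the diff).
db = chromadb.PersistentClient(path="./abc")
chroma_collection = db.get_or_create_collection("pomoc_ukrainie")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# The commit's change: pin the embedding model to the CPU instead of letting
# device auto-detection decide, so embedding runs on CPU-only hardware.
embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5",
    device="cpu",
)

# The same embed_model is reused when reopening the index from the store;
# the diff then builds index.as_query_engine(llm=llm, response_mode='compact') on top of it.
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)

Note that the device argument only affects where the embedding model runs; the Qwen2-0.5B model loaded further down via HuggingFaceLLM still uses whatever device that class selects on its own.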