Update app.py
Browse files
app.py
CHANGED
|
@@ -7,7 +7,7 @@ import chromadb
|
|
| 7 |
from llama_index.vector_stores.chroma import ChromaVectorStore
|
| 8 |
from llama_index.llms.ollama import Ollama
|
| 9 |
|
| 10 |
-
|
| 11 |
|
| 12 |
from llama_index.core import Settings
|
| 13 |
|
|
@@ -32,7 +32,18 @@ pipeline = IngestionPipeline(
|
|
| 32 |
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
|
| 33 |
|
| 34 |
# Utwórz silnik zapytań
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
query_engine = index.as_query_engine(
|
| 37 |
llm=llm,
|
| 38 |
response_mode='compact')
|
|
@@ -67,6 +78,4 @@ if st.session_state.messages[-1]["role"] != "assistant":
|
|
| 67 |
st.write(content) # Wyświetl całą treść w Streamlit
|
| 68 |
|
| 69 |
message = {"role": "assistant", "content": content} # Zapisz całą treść w wiadomości
|
| 70 |
-
st.session_state.messages.append(message)
|
| 71 |
-
|
| 72 |
-
|
|
|
|
| 7 |
from llama_index.vector_stores.chroma import ChromaVectorStore
|
| 8 |
from llama_index.llms.ollama import Ollama
|
| 9 |
|
| 10 |
+
from llama_index.llms.huggingface import HuggingFaceLLM
|
| 11 |
|
| 12 |
from llama_index.core import Settings
|
| 13 |
|
|
|
|
| 32 |
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
|
| 33 |
|
| 34 |
# Utwórz silnik zapytań
|
| 35 |
+
# huggingface
|
| 36 |
+
from transformers import AutoTokenizer
|
| 37 |
+
|
| 38 |
+
# Settings.tokenizer = AutoTokenizer.from_pretrained(
|
| 39 |
+
# "Qwen/Qwen2-7B-Instruct"
|
| 40 |
+
# )
|
| 41 |
+
|
| 42 |
+
# Load the correct tokenizer for Qwen/Qwen2-7B-Instruct
|
| 43 |
+
tokeni = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B")
|
| 44 |
+
|
| 45 |
+
llm = HuggingFaceLLM(model_name="Qwen/Qwen2-0.5B", tokenizer=tokeni)
|
| 46 |
+
# print(llm._tokenizer)
|
| 47 |
query_engine = index.as_query_engine(
|
| 48 |
llm=llm,
|
| 49 |
response_mode='compact')
|
|
|
|
| 78 |
st.write(content) # Wyświetl całą treść w Streamlit
|
| 79 |
|
| 80 |
message = {"role": "assistant", "content": content} # Zapisz całą treść w wiadomości
|
| 81 |
+
st.session_state.messages.append(message)
|
|
|
|
|
|