Update app.py
Browse files — "Use Embed and LLM in Hugging Face"
app.py
CHANGED
|
@@ -103,7 +103,9 @@ def main() -> None:
|
|
| 103 |
#chunks = text_splitter.split_documents(docs)
|
| 104 |
chunks = text_splitter.split_documents(raw_text)
|
| 105 |
|
| 106 |
-
|
|
|
|
|
|
|
| 107 |
|
| 108 |
single_vector = embeddings.embed_query("this is some text data")
|
| 109 |
|
|
@@ -136,7 +138,8 @@ def main() -> None:
|
|
| 136 |
|
| 137 |
prompt = ChatPromptTemplate.from_template(prompt)
|
| 138 |
|
| 139 |
-
|
|
|
|
| 140 |
|
| 141 |
rag_chain = (
|
| 142 |
{"context": retriever|format_docs, "question": RunnablePassthrough()}
|
|
|
|
| 103 |
#chunks = text_splitter.split_documents(docs)
|
| 104 |
chunks = text_splitter.split_documents(raw_text)
|
| 105 |
|
| 106 |
+
## Changed embedding model from nomic-embed-text to nomic-ai/nomic-embed-text-v1.5
|
| 107 |
+
#embeddings = OllamaEmbeddings(model='nomic-ai/nomic-embed-text-v1.5', base_url="http://localhost:11434")
|
| 108 |
+
embeddings = OllamaEmbeddings(model='nomic-ai/nomic-embed-text-v1.5')
|
| 109 |
|
| 110 |
single_vector = embeddings.embed_query("this is some text data")
|
| 111 |
|
|
|
|
| 138 |
|
| 139 |
prompt = ChatPromptTemplate.from_template(prompt)
|
| 140 |
|
| 141 |
+
## Changed chat model from llama3.2:latest to unsloth/Llama-3.2-3B
|
| 142 |
+
model = ChatOllama(model="unsloth/Llama-3.2-3B")
|
| 143 |
|
| 144 |
rag_chain = (
|
| 145 |
{"context": retriever|format_docs, "question": RunnablePassthrough()}
|