Spaces:
Sleeping
Sleeping
new model
Browse files
app.py
CHANGED
|
@@ -179,18 +179,12 @@ qa_prompt = ChatPromptTemplate.from_template(
|
|
| 179 |
# =============================
|
| 180 |
# 4) LLM — local, token-free
|
| 181 |
# =============================
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
model=mdl,
|
| 189 |
-
tokenizer=tok,
|
| 190 |
-
do_sample=False, # deterministic; helps JSON adherence
|
| 191 |
-
)
|
| 192 |
-
llm = HuggingFacePipeline(pipeline=gen)
|
| 193 |
-
|
| 194 |
|
| 195 |
# ===========================================
|
| 196 |
# 5) Chain (memory + robust JSON extraction)
|
|
|
|
# =============================
# 4) LLM — hosted Hugging Face Inference API
# =============================
# NOTE(review): this replaced a local HuggingFacePipeline(pipeline=gen)
# setup. HuggingFaceHub calls the hosted Inference API, so it requires
# HUGGINGFACEHUB_API_TOKEN to be set in the environment — the previous
# "local, token-free" description no longer applies; confirm the Space
# has the token configured as a secret.
llm = HuggingFaceHub(
    repo_id="mistralai/Mixtral-8x7B-v0.1",
    task="text-generation",
    model_kwargs={
        # deterministic-ish, low-temperature decoding; long answers allowed
        "max_new_tokens": 2000,
        "top_k": 30,
        "temperature": 0.1,
        # mild anti-repetition nudge
        "repetition_penalty": 1.03,
    },
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 188 |
|
| 189 |
# ===========================================
|
| 190 |
# 5) Chain (memory + robust JSON extraction)
|