Update Modules/Deep_Research.py
Browse files — Modules/Deep_Research.py (+2 −2)
Modules/Deep_Research.py
CHANGED
|
@@ -422,7 +422,7 @@ def Deep_Research(
|
|
| 422 |
def _invoke_chat(messages, provider: str, max_tokens: int, temp: float, top_p: float):
|
| 423 |
client = InferenceClient(provider=provider, api_key=HF_TEXTGEN_TOKEN)
|
| 424 |
return client.chat.completions.create(
|
| 425 |
-
model="zai-org/GLM-4.
|
| 426 |
messages=messages,
|
| 427 |
max_tokens=max_tokens,
|
| 428 |
temperature=temp,
|
|
@@ -496,7 +496,7 @@ def Deep_Research(
|
|
| 496 |
except Exception:
|
| 497 |
prompt_chars = -1
|
| 498 |
print(f"[PIPELINE] Fetch complete: pages={len(pages)}, unique_urls={len(pages.keys())}, prompt_chars={prompt_chars}", flush=True)
|
| 499 |
-
print("[PIPELINE] Starting inference (provider=cerebras, model=zai-org/GLM-4.
|
| 500 |
|
| 501 |
try:
|
| 502 |
print("[LLM] Attempt 1: provider=cerebras, max_tokens=32768", flush=True)
|
|
|
|
| 422 |
def _invoke_chat(messages, provider: str, max_tokens: int, temp: float, top_p: float):
|
| 423 |
client = InferenceClient(provider=provider, api_key=HF_TEXTGEN_TOKEN)
|
| 424 |
return client.chat.completions.create(
|
| 425 |
+
model="zai-org/GLM-4.7",
|
| 426 |
messages=messages,
|
| 427 |
max_tokens=max_tokens,
|
| 428 |
temperature=temp,
|
|
|
|
| 496 |
except Exception:
|
| 497 |
prompt_chars = -1
|
| 498 |
print(f"[PIPELINE] Fetch complete: pages={len(pages)}, unique_urls={len(pages.keys())}, prompt_chars={prompt_chars}", flush=True)
|
| 499 |
+
print("[PIPELINE] Starting inference (provider=cerebras, model=zai-org/GLM-4.7)", flush=True)
|
| 500 |
|
| 501 |
try:
|
| 502 |
print("[LLM] Attempt 1: provider=cerebras, max_tokens=32768", flush=True)
|