Spaces: Sleeping
Commit: Update app.py (Browse files)
File changed: app.py — CHANGED
@@ -11,7 +11,8 @@ HF_TOKEN = os.getenv("HF")
 11
 12  client = InferenceClient(
 13      model="Qwen/Qwen2.5-7B-Instruct",
 14 -    token=HF_TOKEN
 15  )
 16
 17  # ===============================
@@ -51,7 +52,7 @@ User question:
 51  {user_question}
 52  """
 53      response = ""
 54 -    for chunk in client.          (line truncated in page extraction)
 55          messages=[{"role": "user", "content": prompt}],
 56          max_tokens=150,
 57          temperature=0,
@@ -92,7 +93,7 @@ User question:
 92  planner_prompt += f"\nPrevious error:\n{last_error}\nFix the code."
 93
 94  code = ""
 95 -    for chunk in client.          (line truncated in page extraction)
 96          messages=[{"role": "user", "content": planner_prompt}],
 97          max_tokens=400,
 98          temperature=0.2,
@@ -174,7 +175,7 @@ Focus on meaning and implications.
 174  if show_code:
 175      response += f"🧾 Generated Python code:\n\n```python\n{code}\n```\n\n"
 176
 177 -    for chunk in client.          (line truncated in page extraction)
 178          messages=[{"role": "user", "content": insight_prompt}],
 179          max_tokens=350,
 180          temperature=0.4,
@@ -208,7 +209,7 @@ Do not generate code.
 208  """
 209
 210  response = ""
 211 -    for chunk in client.          (line truncated in page extraction)
 212          messages=[{"role": "user", "content": insight_prompt}],
 213          max_tokens=400,
 214          temperature=0.4,
 11
 12  client = InferenceClient(
 13      model="Qwen/Qwen2.5-7B-Instruct",
 14 +    token=HF_TOKEN,
 15 +    provider="hf-inference"
 16  )
 17
 18  # ===============================
 52  {user_question}
 53  """
 54      response = ""
 55 +    for chunk in client.chat(
 56          messages=[{"role": "user", "content": prompt}],
 57          max_tokens=150,
 58          temperature=0,
 93  planner_prompt += f"\nPrevious error:\n{last_error}\nFix the code."
 94
 95  code = ""
 96 +    for chunk in client.chat(
 97          messages=[{"role": "user", "content": planner_prompt}],
 98          max_tokens=400,
 99          temperature=0.2,
 175  if show_code:
 176      response += f"🧾 Generated Python code:\n\n```python\n{code}\n```\n\n"
 177
 178 +    for chunk in client.chat(
 179          messages=[{"role": "user", "content": insight_prompt}],
 180          max_tokens=350,
 181          temperature=0.4,
 209  """
 210
 211  response = ""
 212 +    for chunk in client.chat(
 213          messages=[{"role": "user", "content": insight_prompt}],
 214          max_tokens=400,
 215          temperature=0.4,