Spaces:
Sleeping
Sleeping
Update planner.py
Browse files- planner.py +36 -22
planner.py
CHANGED
|
@@ -1,34 +1,48 @@
|
|
| 1 |
# planner.py
|
|
|
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
-
import re
|
| 4 |
|
| 5 |
-
# Get HF token from environment (set as Secret in HF Spaces)
|
| 6 |
-
import os
|
| 7 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 8 |
|
|
|
|
| 9 |
client = InferenceClient(
|
| 10 |
-
"Qwen/Qwen2.5-Coder-7B-Instruct",
|
| 11 |
token=HF_TOKEN
|
| 12 |
)
|
| 13 |
|
| 14 |
def generate_task_plan(goal: str) -> str:
|
| 15 |
-
prompt = f"""
|
| 16 |
-
Break down the following goal into actionable tasks with:
|
| 17 |
-
- Clear task descriptions
|
| 18 |
-
- Realistic deadlines (relative to today)
|
| 19 |
-
- Dependencies between tasks (if any)
|
| 20 |
-
|
| 21 |
-
Format your response as a numbered list with this structure:
|
| 22 |
-
1. [Task name] - Due: [Day X] - Depends on: [Task # or "None"]
|
| 23 |
-
Description: [Brief explanation]
|
| 24 |
-
|
| 25 |
-
Goal: "{goal}"
|
| 26 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# planner.py
import os

from huggingface_hub import InferenceClient

# Get HF token from environment (set as Secret in HF Spaces)
HF_TOKEN = os.getenv("HF_TOKEN")

# Use the SAME model as your working chatbot
client = InferenceClient(
    model="Qwen/Qwen2.5-Coder-7B-Instruct",
    token=HF_TOKEN,
)
|
| 14 |
def generate_task_plan(goal: str) -> str:
    """Break down *goal* into a numbered, time-bound task plan.

    Uses Qwen2.5-Coder (instruct model) via the chat-completion API on the
    module-level ``client``.

    Args:
        goal: Free-form description of what the user wants to achieve.

    Returns:
        The model's plan as plain text: numbered tasks with 'Due: Day X'
        deadlines, 'Depends on:' markers, and short descriptions.

    Raises:
        RuntimeError: If the inference call fails for any reason; the
            original exception is attached as ``__cause__``.
    """
    messages = [
        {
            "role": "user",
            "content": (
                "You are a smart project planner. Break down the following goal into a clear, actionable task list.\n\n"
                "Requirements:\n"
                "- Number each task (1., 2., 3., ...)\n"
                "- Include a realistic deadline as 'Due: Day X' (start from Day 1)\n"
                "- Specify dependencies as 'Depends on: Task N' or 'None'\n"
                "- Add a short description for each task\n"
                "- Keep it practical and time-bound\n\n"
                "Example format:\n"
                "1. Research market needs - Due: Day 1 - Depends on: None\n"
                "   Description: Interview 5 potential users about pain points.\n\n"
                f"Goal: \"{goal}\""
            ),
        }
    ]

    try:
        # `model=` repeats the client default; keep the two in sync if changed.
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-7B-Instruct",
            messages=messages,
            max_tokens=600,
            temperature=0.4,  # low-ish for consistent, structured output
            top_p=0.95,
            stream=False,
        )
        # `content` may be None in OpenAI-style responses (e.g. tool calls);
        # guard so .strip() cannot raise AttributeError.
        return (response.choices[0].message.content or "").strip()
    except Exception as e:
        # Chain the cause so debugging keeps the HTTP/library traceback.
        # RuntimeError is a subclass of Exception, so existing callers that
        # catch Exception still work.
        raise RuntimeError(f"LLM inference failed: {e}") from e
|