viswanani commited on
Commit
1e6549c
·
verified ·
1 Parent(s): d2cd020

Upload 8 files

Browse files
Files changed (8) hide show
  1. .gitignore +5 -0
  2. LICENSE +1 -0
  3. README.md +28 -20
  4. ai.py +62 -0
  5. app.py +83 -35
  6. db.py +73 -0
  7. env.example +3 -0
  8. requirements.txt +3 -6
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ tasks.db
4
+ *.csv
5
+ .env
LICENSE ADDED
@@ -0,0 +1 @@
 
 
1
+ MIT
README.md CHANGED
@@ -1,26 +1,34 @@
 
1
 
2
- ---
3
- title: "🏏 FullTrack AI – Fixed Calibrated MVP"
4
- emoji: 🏏
5
- colorFrom: blue
6
- colorTo: indigo
7
- sdk: gradio
8
- sdk_version: "4.31.0"
9
- app_file: app.py
10
- pinned: true
11
- ---
12
 
13
- # FullTrack AI – Fixed Calibrated MVP (Hugging Face Space)
 
 
 
 
 
14
 
15
- This package fixes the earlier runtime error by removing usage of non-installed libraries (e.g., ultralytics) and adding a robust click-to-select calibration flow that works with modern Gradio versions.
 
 
 
 
16
 
17
- Files:
18
- - app.py (main Gradio app)
19
- - requirements.txt
20
- - README.md
 
 
21
 
22
- How to deploy:
23
- 1. Create a new Space (Gradio) on Hugging Face.
24
- 2. Upload these files and run the Space.
25
- 3. Use the "Calibration" tab to click 4 points on a frame, then go to Analysis and run on a video.
 
26
 
 
 
 
 
 
1
+ # FullTrack AI — Hugging Face Space (MVP)
2
 
3
+ This repository contains a lightweight FullTrack AI MVP packaged as a **Hugging Face Space** using **Gradio**.
 
 
 
 
 
 
 
 
 
4
 
5
+ ## Features
6
+ - AI chat interface (uses OpenAI by default)
7
+ - AI-parsed actions (create_task, create_project)
8
+ - SQLite-backed task storage
9
+ - Dashboard pie chart
10
+ - Export tasks to CSV
11
 
12
+ ## Files
13
+ - `app.py` — Gradio app entrypoint
14
+ - `ai.py` — OpenAI wrapper + JSON action parser
15
+ - `db.py` — SQLite helper functions
16
+ - `requirements.txt` — Python dependencies
17
 
18
+ ## Deploy to Hugging Face Spaces
19
+ 1. Create a new Space on Hugging Face: choose **Gradio** and **Python**.
20
+ 2. Upload all files from this repo.
21
+ 3. In the Space settings, add a secret named `OPENAI_API_KEY` with your OpenAI API key.
22
+ 4. (Optional) set `OPENAI_MODEL` to your preferred model (default: `gpt-4o-mini`).
23
+ 5. Click **Deploy**.
24
 
25
+ ## Local Run (for testing)
26
+ 1. Create and activate a venv: `python -m venv .venv && source .venv/bin/activate`
27
+ 2. Install: `pip install -r requirements.txt`
28
+ 3. Set `OPENAI_API_KEY` env var.
29
+ 4. Run: `python app.py`
30
 
31
+ ## Notes & Limitations
32
+ - This is an MVP intended for prototyping inside Hugging Face Spaces.
33
+ - The login is simulated (uses a static demo user).
34
+ - Be cautious with OpenAI API usage and costs.
ai.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""OpenAI wrapper and JSON action parser for the FullTrack AI Space."""
import json
import os
import re

import openai

# Legacy-style module-level key assignment; openai>=1.0 clients read the
# same OPENAI_API_KEY environment variable on their own.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Instructs the model to append a machine-readable ```json``` block with an
# "actions" array that parse_actions() can extract.
SYSTEM_PROMPT = """You are FullTrack AI, an assistant for project and task tracking.
Be concise. When the user issues commands that create or update tasks or projects, respond with:
1) A human-friendly reply.
2) A JSON code block labeled ```json``` containing an object with an "actions" array.
Each action must be an object: {"type": <string>, "confidence": <0-1 float>, "payload": {...}}.
Supported action types for this Space: create_task, create_project.
Use low temperature and deterministic outputs for parsing.
"""
15
def ask_ai(message, history):
    """Send *message* plus recent chat context to the OpenAI chat API.

    Parameters:
        message: the new user message (str).
        history: list of (user, assistant) tuples, or None; only the last
            six completed turns are forwarded as context.

    Returns:
        The assistant's reply text, or an "[Error ...]" string on API
        failure (callers treat the reply as plain text either way).
    """
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    if history:
        # Forward up to the last 6 turns.  Skip pairs whose assistant half
        # is empty: the app appends the in-flight (message, "") pair to
        # history before calling us, and forwarding it would duplicate the
        # current user message appended below.
        for u, a in history[-6:]:
            if not a:
                continue
            messages.append({"role": "user", "content": u})
            messages.append({"role": "assistant", "content": a})
    messages.append({"role": "user", "content": message})

    model = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
    try:
        if hasattr(openai, "OpenAI"):
            # openai>=1.0: module-level ChatCompletion was removed; use the
            # client object (it reads OPENAI_API_KEY from the environment).
            client = openai.OpenAI()
            resp = client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=0.1,
                max_tokens=600,
            )
            # message is an object here, not a dict.
            return resp.choices[0].message.content or ""
        # Legacy openai<1.0 API: message behaves like a dict.
        resp = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=0.1,
            max_tokens=600,
        )
        return resp.choices[0].message.get("content", "")
    except Exception as e:
        # Surface API failures as chat text rather than crashing the UI.
        return f"[Error calling OpenAI API: {e}]"
37
def parse_actions(reply_text):
    """Extract and sanitize the "actions" array from an AI reply.

    The assistant is prompted to emit a fenced ```json``` block shaped like
    {"actions": [{"type": ..., "confidence": ..., "payload": {...}}]}.

    Returns:
        A list of {"type", "confidence", "payload"} dicts.  Any parse
        failure yields an empty list (actions are optional in a reply).
    """
    data = None

    # Preferred path: a fenced ```json ... ``` block.
    m = re.search(r"```json\s*(\{.*?\})\s*```", reply_text, re.S)
    if m:
        try:
            data = json.loads(m.group(1))
        except ValueError:
            data = None

    if not isinstance(data, dict):
        # Fallback: scan for the first balanced JSON object that contains
        # an "actions" key.  raw_decode parses a complete object starting
        # at a given position, so nested arrays/objects inside payloads
        # cannot truncate the match (the previous regex fallback stopped
        # at the first ']', producing invalid JSON for nested payloads).
        decoder = json.JSONDecoder()
        pos = reply_text.find("{")
        while pos != -1:
            try:
                candidate, _ = decoder.raw_decode(reply_text, pos)
            except ValueError:
                candidate = None
            if isinstance(candidate, dict) and "actions" in candidate:
                data = candidate
                break
            pos = reply_text.find("{", pos + 1)

    if not isinstance(data, dict):
        return []
    actions = data.get("actions", [])
    if not isinstance(actions, list):
        return []

    cleaned = []
    for a in actions:
        if not isinstance(a, dict):
            continue
        try:
            conf = float(a.get("confidence", 0.0))
        except (TypeError, ValueError):
            # One malformed confidence no longer discards every action.
            conf = 0.0
        cleaned.append({
            "type": a.get("type"),
            "confidence": conf,
            "payload": a.get("payload", {}),
        })
    return cleaned
app.py CHANGED
@@ -1,51 +1,99 @@
1
  import gradio as gr
2
- import cv2
3
- import tempfile
4
- from ultralytics import YOLO
5
 
6
- # Load YOLOv8 model
7
- model = YOLO("yolov8n.pt")
8
 
9
- # Function for object detection + tracking
10
- def detect_and_track(video_file):
11
- if video_file is None:
12
- return None
13
 
14
- cap = cv2.VideoCapture(video_file.name)
15
- output_path = tempfile.mktemp(suffix=".mp4")
 
 
16
 
17
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
18
- fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
19
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
20
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
21
 
22
- out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- while True:
25
- ret, frame = cap.read()
26
- if not ret:
27
- break
 
 
 
 
 
28
 
29
- results = model.track(frame, persist=True, verbose=False)
30
- annotated_frame = results[0].plot()
31
- out.write(annotated_frame)
32
 
33
- cap.release()
34
- out.release()
35
- return output_path # must return path for gr.Video
36
 
37
- # Gradio UI
38
- with gr.Blocks() as demo:
39
- gr.Markdown("# 🎯 FullTrack AI - Object Tracking\nUpload a video to run YOLOv8 tracking with trajectory predictions.")
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
  with gr.Row():
42
- with gr.Column():
43
- video_in = gr.File(label="Upload a Video", file_types=[".mp4", ".avi", ".mov"])
44
- btn = gr.Button("Submit", variant="primary")
45
- with gr.Column():
46
- video_out = gr.Video(label="Tracked Output")
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
- btn.click(detect_and_track, inputs=video_in, outputs=video_out)
 
 
 
49
 
50
  if __name__ == "__main__":
51
  demo.launch()
 
1
"""Gradio entrypoint for the FullTrack AI Space."""
import os

import gradio as gr

from ai import ask_ai, parse_actions
from db import init_db, add_task, get_dashboard_fig, list_tasks, export_tasks_csv

# Ensure the SQLite schema exists before the UI serves any request.
init_db()

# Login is simulated in this MVP: every request acts as this single user.
DEFAULT_USER = "demo_user"
10
def chat_and_handle(message, history, user=DEFAULT_USER):
    """Handle one chat turn: query the AI, then apply or suggest parsed actions.

    Parameters:
        message: the user's new chat message.
        history: chatbot history as (user, assistant) tuples, or None.
        user: owner of created tasks (defaults to the demo user).

    Returns:
        (history, final_reply, dashboard_figure, task_rows) so the UI can
        refresh the chatbot, reply box, pie chart and task table at once.
    """
    history = history or []
    # Reserve a slot for this turn; it is filled with the final reply below.
    history.append((message, ""))

    # Context excludes the just-appended placeholder pair so the current
    # message is not sent to the model twice.
    reply = ask_ai(message, history[:-1])
    actions = parse_actions(reply)

    applied = []      # labels of auto-applied actions
    suggestions = []  # low-confidence / unsupported actions for manual review
    for act in actions:
        t = act.get("type")
        conf = act.get("confidence", 0)
        payload = act.get("payload", {})
        if t == "create_task" and payload.get("title"):
            # For safety, only auto-apply high-confidence task creations;
            # anything below the threshold is surfaced as a suggestion.
            if conf >= 0.9:
                add_task(user, payload)
                applied.append(payload.get("title"))
            else:
                suggestions.append(payload)
        elif t == "create_project":
            # Projects are modeled as tasks tagged 'project' in this MVP.
            if payload.get("name"):
                add_task(user, {"title": f"Project: {payload.get('name')}", "tags": ["project"]})
                applied.append(f"Project: {payload.get('name')}")
        else:
            # Unsupported action types are returned as suggestions.
            suggestions.append(act)

    final_reply_lines = [reply.strip()]
    if applied:
        final_reply_lines.append("\n\nApplied actions (auto):")
        for a in applied:
            final_reply_lines.append("- " + a)
    if suggestions:
        final_reply_lines.append("\n\nSuggested actions (please confirm manually):")
        for s in suggestions:
            # A suggestion is either a task payload (has "title") or a raw
            # unsupported action dict (has "type" but no "title").  Fall
            # back through type/str so a missing title can no longer raise
            # TypeError ("- " + None).
            if isinstance(s, dict):
                label = s.get("title") or s.get("type") or str(s)
            else:
                label = str(s)
            final_reply_lines.append("- " + label)
    final_reply = "\n".join(final_reply_lines)

    # Replace the placeholder with the completed turn.
    history[-1] = (message, final_reply)

    # Refresh dashboard data for the UI.
    fig = get_dashboard_fig(user)
    tasks = list_tasks(user)
    return history, final_reply, fig, tasks
62
def export_csv(user=DEFAULT_USER):
    """Write the user's tasks to a CSV file and return its filesystem path."""
    return export_tasks_csv(user)
66
with gr.Blocks(title="FullTrack AI — Hugging Face Space (MVP)") as demo:
    gr.Markdown("""# 🚀 FullTrack AI — Hugging Face Space (MVP)
This Space demonstrates an AI-driven task/project tracker MVP.
- Type instructions in the chat (e.g. "Create a project Website Redesign and 3 tasks due next week assigned to Raj.")
- The assistant will reply and propose actions in JSON. High-confidence task creations are auto-applied.
""")

    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot([], elem_id="chatbot")
            msg = gr.Textbox(placeholder="Type a message and press Enter", lines=2)
            send_btn = gr.Button("Send")
            clear_btn = gr.Button("Clear chat")
            # Single, labeled target for the assistant's latest reply.
            # (Previously each event binding instantiated a fresh inline
            # gr.Textbox() in its outputs list, creating stray unlabeled
            # components.)
            reply_box = gr.Textbox(label="Last reply", interactive=False)
        with gr.Column(scale=1):
            gr.Markdown("### Dashboard")
            dash_plot = gr.Plot()
            gr.Markdown("### Tasks (latest)")
            tasks_table = gr.Dataframe(headers=["id","title","status","created_at"], interactive=False)
            export_button = gr.Button("Export CSV")
            export_path = gr.Textbox(label="Export path", interactive=False)

    def on_send(message, history):
        # Delegate to the app logic, then clear the input box ("" -> msg).
        new_history, reply, fig, tasks = chat_and_handle(message, history)
        return new_history, reply, fig, tasks, ""

    def on_clear():
        # Reset chat, reply, dashboard, task table and input box.
        return [], "", None, [], ""

    # One shared outputs list keeps the three bindings consistent.
    outputs = [chatbot, reply_box, dash_plot, tasks_table, msg]
    msg.submit(on_send, inputs=[msg, chatbot], outputs=outputs)
    send_btn.click(on_send, inputs=[msg, chatbot], outputs=outputs)
    clear_btn.click(on_clear, outputs=outputs)
    export_button.click(export_csv, outputs=[export_path])

if __name__ == "__main__":
    demo.launch()
db.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""SQLite persistence helpers and dashboard plotting for FullTrack AI."""
import csv
import datetime
import os
import sqlite3

import matplotlib.pyplot as plt

# The database file lives next to this module.
DB_PATH = os.path.join(os.path.dirname(__file__), "tasks.db")
6
def init_db():
    """Create the tasks table on first run (no-op if it already exists)."""
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.execute("""CREATE TABLE IF NOT EXISTS tasks (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            user TEXT,
            title TEXT,
            description TEXT,
            status TEXT,
            tags TEXT,
            created_at TEXT
        )""")
        conn.commit()
    finally:
        conn.close()
21
def add_task(user, payload):
    """Insert one task row for *user*.

    Parameters:
        user: owner string stored in the `user` column.
        payload: dict with optional keys title/name, description, status,
            tags (list or comma-separated string).  A missing title falls
            back to name, then "Untitled".
    """
    title = payload.get("title") or payload.get("name") or "Untitled"
    desc = payload.get("description", "")
    status = payload.get("status", "todo")
    raw_tags = payload.get("tags")
    # Tags are stored as a single comma-separated TEXT column.
    tags = ",".join(raw_tags) if isinstance(raw_tags, list) else (raw_tags or "")
    # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated and
    # produced a naive datetime.
    created_at = datetime.datetime.now(datetime.timezone.utc).isoformat()
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.execute(
            "INSERT INTO tasks (user, title, description, status, tags, created_at) VALUES (?,?,?,?,?,?)",
            (user, title, desc, status, tags, created_at),
        )
        conn.commit()
    finally:
        # Release the connection even if the INSERT raises (the original
        # leaked it on error).
        conn.close()
34
def list_tasks(user, limit=20):
    """Return up to *limit* most recent (id, title, status, created_at) rows."""
    conn = sqlite3.connect(DB_PATH)
    try:
        cur = conn.execute(
            "SELECT id, title, status, created_at FROM tasks WHERE user=? ORDER BY id DESC LIMIT ?",
            (user, limit),
        )
        return cur.fetchall()
    finally:
        conn.close()
42
def get_dashboard_fig(user):
    """Build a matplotlib pie chart of the user's tasks grouped by status."""
    conn = sqlite3.connect(DB_PATH)
    try:
        cur = conn.execute("SELECT status, COUNT(*) FROM tasks WHERE user=? GROUP BY status", (user,))
        rows = cur.fetchall()
    finally:
        conn.close()

    fig, ax = plt.subplots()
    if not rows:
        # Placeholder figure when the user has no tasks yet.
        ax.text(0.5, 0.5, 'No tasks yet', horizontalalignment='center', verticalalignment='center', fontsize=12)
        ax.axis('off')
        return fig

    statuses = [status for status, _ in rows]
    counts = [count for _, count in rows]
    ax.pie(counts, labels=statuses, autopct='%1.1f%%')
    ax.set_title('Tasks by Status')
    return fig
61
def export_tasks_csv(user):
    """Dump all of the user's tasks to a CSV file next to this module.

    Returns the path of the written file.
    """
    path = os.path.join(os.path.dirname(__file__), f"tasks_export_{user}.csv")
    conn = sqlite3.connect(DB_PATH)
    try:
        cur = conn.execute(
            "SELECT id, title, description, status, tags, created_at FROM tasks WHERE user=? ORDER BY id DESC",
            (user,),
        )
        rows = cur.fetchall()
    finally:
        conn.close()
    with open(path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["id","title","description","status","tags","created_at"])
        writer.writerows(rows)
    return path
env.example ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Copy to .env and fill
2
+ OPENAI_API_KEY=sk-...
3
+ OPENAI_MODEL=gpt-4o-mini
requirements.txt CHANGED
@@ -1,6 +1,3 @@
1
-
2
- ultralytics
3
- opencv-python-headless
4
- gradio
5
- pandas>=2.2.2
6
- matplotlib>=3.8.4
 
1
+ gradio>=3.0
2
+ openai
3
+ matplotlib