renzoide committed on
Commit
3bf6c5c
·
1 Parent(s): d8dbb01

Refactor code structure for improved readability and maintainability

Browse files
Files changed (8) hide show
  1. .gitignore +4 -0
  2. .python-version +1 -0
  3. app.py +132 -24
  4. helper.py +220 -0
  5. main.py +6 -0
  6. pyproject.toml +12 -0
  7. tools.py +50 -0
  8. uv.lock +0 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ data/*
2
+ plan.md
3
+ __pycache__/
4
+ *.log
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.11
app.py CHANGED
@@ -1,5 +1,77 @@
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
 
5
  def respond(
@@ -12,32 +84,18 @@ def respond(
12
  hf_token: gr.OAuthToken,
13
  ):
14
  """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
 
16
  """
17
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
-
19
- messages = [{"role": "system", "content": system_message}]
20
-
21
- messages.extend(history)
22
 
23
- messages.append({"role": "user", "content": message})
24
 
25
- response = ""
26
-
27
- for message in client.chat_completion(
28
- messages,
29
- max_tokens=max_tokens,
30
- stream=True,
31
- temperature=temperature,
32
- top_p=top_p,
33
- ):
34
- choices = message.choices
35
- token = ""
36
- if len(choices) and choices[0].delta.content:
37
- token = choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
 
42
 
43
  """
@@ -65,6 +123,56 @@ with gr.Blocks() as demo:
65
  gr.LoginButton()
66
  chatbot.render()
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  if __name__ == "__main__":
70
  demo.launch()
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Dict, List, Tuple
4
+
5
  import gradio as gr
6
+
7
+ from smolagents import CodeAgent, OpenAIModel
8
+
9
+ from tools import create_task
10
+ from helper import (
11
+ add_row_to_csv,
12
+ generate_unique_id,
13
+ get_current_timestamp,
14
+ get_csv_headers,
15
+ get_csv_path,
16
+ read_csv_as_dicts
17
+ )
18
+
19
+
20
+
21
+
22
def append_conversation_entry(role: str, content: str, tags: str = "") -> None:
    """
    Persist one chat message to conversations.csv.

    The stored row has the columns: id, timestamp, role, content, tags.

    Args:
        role: Message author (the callers pass "user" or "assistant").
        content: Raw message text.
        tags: Optional comma-separated labels for the message.
    """
    row = {
        "id": generate_unique_id("msg"),
        "timestamp": get_current_timestamp(),
        "role": role,
        "content": content,
        "tags": tags,
    }
    # Helper creates the file with headers on first write.
    add_row_to_csv(get_csv_path("conversations"), row)
42
+
43
+
44
+
45
+
46
# LLM backend: Qwen3 Coder served through Nebius' OpenAI-compatible API.
# NOTE(review): assumes NEBIUS_API_KEY is set — os.environ.get silently
# returns None otherwise and requests will fail only at call time.
model = OpenAIModel(
    model_id="Qwen/Qwen3-Coder-480B-A35B-Instruct",
    api_base="https://api.tokenfactory.nebius.com/v1/",
    api_key=os.environ.get("NEBIUS_API_KEY"),
)

# CSV schemas are rendered once at import time and baked into the agent
# instructions below.
csv_schema_info = get_csv_headers()

# Code-executing agent that can run pandas/numpy against the data/ CSVs.
agent = CodeAgent(
    # FIX: the instructions below tell the model it can call `create_task`,
    # but the tool was never registered (tools=[]), so those calls failed.
    tools=[create_task],
    model=model,
    stream_outputs=True,
    instructions=(
        "You are a friendly chatbot with access to structured data.\n\n"
        "Available CSV files:\n"
        f"{csv_schema_info}\n\n"
        "You can use pandas to analyze this data. The CSV files are located in the 'data/' directory.\n"
        "Example operations you can perform:\n"
        "- pd.read_csv('data/conversations.csv').query(\"role == 'user'\")\n"
        "- pd.read_csv('data/tasks.csv').query(\"status == 'pending'\")\n"
        "- pd.read_csv('data/conversations.csv')['content'].tolist()\n\n"
        "You can also create tasks by calling the `create_task` tool when the user "
        "asks you to remember something or schedule a reminder.\n"
        "Confirm task details in natural language after calling the tool."
    ),
    additional_authorized_imports=["pandas", "numpy"],
    max_steps=10,
)
75
 
76
 
77
  def respond(
 
84
  hf_token: gr.OAuthToken,
85
  ):
86
  """
87
+ Uses smolagents CodeAgent to process the user message.
88
+ The agent can chat normally and, when appropriate, call the
89
+ `create_task` tool to append a new row to tasks.csv.
90
  """
91
+ append_conversation_entry(role="user", content=message)
 
 
 
 
92
 
93
+ prompt = f"{system_message}\n\nUser: {message}"
94
 
95
+ result = agent.run(prompt)
96
+ response = str(result) if result is not None else ""
97
+ append_conversation_entry(role="assistant", content=response)
98
+ yield response
 
 
 
 
 
 
 
 
 
 
 
 
99
 
100
 
101
  """
 
123
  gr.LoginButton()
124
  chatbot.render()
125
 
126
+ refresh_btn = gr.Button("Refresh CSV views")
127
+
128
+ with gr.Tab("Conversations CSV"):
129
+ conversations_df = gr.Dataframe(
130
+ label="conversations.csv",
131
+ interactive=False,
132
+ wrap=False,
133
+ )
134
+ with gr.Tab("Tasks CSV"):
135
+ tasks_df = gr.Dataframe(
136
+ label="tasks.csv",
137
+ interactive=False,
138
+ wrap=False,
139
+ )
140
+ with gr.Tab("Memories CSV"):
141
+ memories_df = gr.Dataframe(
142
+ label="memories.csv",
143
+ interactive=False,
144
+ wrap=False,
145
+ )
146
+
147
def load_csv_paths() -> Tuple[List[Dict], List[Dict], List[Dict]]:
    """Read the conversations, tasks and memories CSVs for the dataframe views."""
    conversations, tasks, memories = (
        read_csv_as_dicts(get_csv_path(name))
        for name in ("conversations", "tasks", "memories")
    )
    return conversations, tasks, memories
163
+
164
+ refresh_btn.click(
165
+ fn=load_csv_paths,
166
+ inputs=None,
167
+ outputs=[conversations_df, tasks_df, memories_df],
168
+ )
169
+
170
+ demo.load(
171
+ fn=load_csv_paths,
172
+ inputs=None,
173
+ outputs=[conversations_df, tasks_df, memories_df],
174
+ )
175
+
176
 
177
  if __name__ == "__main__":
178
  demo.launch()
helper.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ from datetime import datetime
3
+ from pathlib import Path
4
+ from typing import Any, Dict, List, Optional, Union
5
+
6
+ import pandas as pd
7
+
8
+
9
+ DATA_DIR = Path(__file__).parent / "data"
10
+
11
+
12
def get_csv_path(csv_name: str) -> Path:
    """
    Resolve a CSV file name to its full path inside the data directory.

    Args:
        csv_name: File name, with or without the ".csv" extension.

    Returns:
        Path pointing at DATA_DIR / "<csv_name>.csv".
    """
    filename = csv_name if csv_name.endswith(".csv") else f"{csv_name}.csv"
    return DATA_DIR / filename
26
+
27
+
28
+ def ensure_csv_exists(
29
+ csv_path: Path,
30
+ fieldnames: List[str],
31
+ overwrite: bool = False
32
+ ) -> bool:
33
+ """
34
+ Ensure a CSV file exists with the specified fieldnames.
35
+
36
+ Args:
37
+ csv_path: Path to the CSV file
38
+ fieldnames: List of column headers for the CSV
39
+ overwrite: If True, will overwrite existing file with headers only
40
+
41
+ Returns:
42
+ True if file was created or headers were written, False if file already exists
43
+ """
44
+ # Ensure data directory exists
45
+ csv_path.parent.mkdir(parents=True, exist_ok=True)
46
+
47
+ # Check if file exists
48
+ file_exists = csv_path.exists()
49
+
50
+ # If file doesn't exist or we want to overwrite, create/write headers
51
+ if not file_exists or overwrite:
52
+ with csv_path.open("w", newline="", encoding="utf-8") as f:
53
+ writer = csv.DictWriter(f, fieldnames=fieldnames)
54
+ writer.writeheader()
55
+ return True
56
+
57
+ return False
58
+
59
+
60
def add_row_to_csv(
    csv_path: Path,
    row_data: Dict[str, Any],
    fieldnames: Optional[List[str]] = None
) -> None:
    """
    Append one row to a CSV file, creating it (with headers) when missing.

    Args:
        csv_path: Location of the CSV file.
        row_data: Column-name -> value mapping for the new row.
        fieldnames: Explicit column order; defaults to row_data's keys.
    """
    columns = list(row_data) if fieldnames is None else fieldnames

    # Creates the file with a header row on first use.
    ensure_csv_exists(csv_path, columns)

    with csv_path.open("a", newline="", encoding="utf-8") as handle:
        csv.DictWriter(handle, fieldnames=columns).writerow(row_data)
85
+
86
+
87
def read_csv_as_dicts(csv_path: Path) -> List[Dict[str, Any]]:
    """
    Load every data row of a CSV file as a dictionary.

    Args:
        csv_path: Location of the CSV file.

    Returns:
        One dict per row, keyed by the header row; an empty list when the
        file does not exist.
    """
    if not csv_path.exists():
        return []

    with csv_path.open("r", encoding="utf-8") as handle:
        return [dict(row) for row in csv.DictReader(handle)]
103
+
104
+
105
def find_row_in_csv(
    csv_path: Path,
    key_field: str,
    value: Any
) -> Optional[Dict[str, Any]]:
    """
    Return the first row whose key_field column equals str(value).

    Args:
        csv_path: Location of the CSV file.
        key_field: Column to match against.
        value: Value to look for. Compared as a string, because CSV cells
            always read back as strings.

    Returns:
        The matching row as a dictionary, or None when no row matches.
    """
    target = str(value)
    matches = (
        row for row in read_csv_as_dicts(csv_path)
        if row.get(key_field) == target
    )
    return next(matches, None)
128
+
129
+
130
def update_row_in_csv(
    csv_path: Path,
    key_field: str,
    key_value: Any,
    update_data: Dict[str, Any]
) -> bool:
    """
    Update the first row whose key_field column equals str(key_value).

    Args:
        csv_path: Path to the CSV file
        key_field: The column to identify the row
        key_value: The value to identify the row
        update_data: Dictionary of fields to update (may introduce new columns)

    Returns:
        True if a row was updated, False if not found
    """
    rows = read_csv_as_dicts(csv_path)
    updated = False

    # Find and update the first matching row. CSV cells read back as
    # strings, so the key is compared as str.
    for row in rows:
        if row.get(key_field) == str(key_value):
            row.update(update_data)
            updated = True
            break

    if updated:
        # Rewrite the whole file. The header must be the union of the
        # original columns and any new keys from update_data — the previous
        # version used only rows[0].keys(), which made DictWriter raise
        # ValueError whenever update_data added a column (or the matched
        # row was not row 0). `updated` implies rows is non-empty.
        fieldnames = list(rows[0].keys())
        for key in update_data:
            if key not in fieldnames:
                fieldnames.append(key)
        with csv_path.open("w", newline="", encoding="utf-8") as f:
            # restval="" fills the new column for rows that lack it.
            writer = csv.DictWriter(f, fieldnames=fieldnames, restval="")
            writer.writeheader()
            writer.writerows(rows)

    return updated
168
+
169
+
170
def generate_unique_id(prefix: str = "row") -> str:
    """
    Generate a unique ID from a prefix and the current time.

    The ID has the form "<prefix>-<milliseconds since the Unix epoch>".
    Two calls within the same millisecond yield the same ID, so uniqueness
    is best-effort only.

    Args:
        prefix: Prefix for the ID

    Returns:
        A unique ID string
    """
    # FIX: the previous datetime.utcnow().timestamp() is deprecated since
    # Python 3.12 AND wrong on non-UTC machines — .timestamp() interprets a
    # naive datetime as local time. An aware UTC datetime gives the correct
    # epoch value everywhere.
    millis = int(datetime.now(timezone.utc).timestamp() * 1000)
    return f"{prefix}-{millis}"
182
+
183
+
184
def get_current_timestamp() -> str:
    """
    Get the current UTC timestamp in ISO 8601 format.

    Returns:
        Current UTC time as an ISO string with an explicit +00:00 offset.
    """
    # FIX: datetime.utcnow() is deprecated since Python 3.12 and returns a
    # naive datetime; an aware timestamp is unambiguous for downstream
    # readers of the CSV files.
    return datetime.now(timezone.utc).isoformat()
192
+
193
+
194
def get_csv_headers() -> str:
    """
    Get column headers from all known CSV files, formatted for the agent prompt.

    Returns:
        One "- <name>.csv: columns [...]" line per file, joined by newlines.
    """
    # Documented fallback schemas, used when a file does not exist yet (or
    # is completely empty) so the agent instructions stay stable on first run.
    default_columns = {
        "conversations": ["id", "timestamp", "role", "content", "tags"],
        "tasks": ["task_id", "created_at", "description", "status",
                  "schedule_type", "scheduled_at", "recurrence_rule"],
        "memories": ["memory_id", "created_at", "source_message_ids",
                     "content", "tags"],
    }

    schemas = []
    for csv_name, fallback in default_columns.items():
        try:
            # Read only the header row to discover the column names.
            csv_path = get_csv_path(csv_name)
            columns = pd.read_csv(csv_path, nrows=0).columns.tolist()
        except (FileNotFoundError, pd.errors.EmptyDataError):
            # FIX: a zero-byte file raises pandas.errors.EmptyDataError,
            # which the previous version did not catch and crashed on.
            columns = fallback
        schemas.append(f"- {csv_name}.csv: columns {columns}")

    return "\n".join(schemas)
main.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
def main() -> None:
    """Entry point: print a greeting for the rowmind project."""
    greeting = "Hello from rowmind!"
    print(greeting)


if __name__ == "__main__":
    main()
pyproject.toml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "rowmind"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.11"
7
+ dependencies = [
8
+ "gradio[oauth]>=5.49.1",
9
+ "huggingface-hub>=1.1.4",
10
+ "openai>=2.8.1",
11
+ "smolagents[toolkit]>=1.22.0",
12
+ ]
tools.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict
2
+
3
+ from smolagents import tool
4
+
5
+ from helper import (
6
+ add_row_to_csv,
7
+ generate_unique_id,
8
+ get_current_timestamp,
9
+ get_csv_path
10
+ )
11
+
12
+
13
@tool
def create_task(
    description: str,
    schedule_type: str = "one_time",
    scheduled_at: str | None = None,
    recurrence_rule: str | None = None,
) -> str:
    """
    Creates a new task in the tasks CSV file.

    Args:
        description: Short human-friendly description of the task.
        schedule_type: Either "one_time" or "recurring".
        scheduled_at: ISO 8601 datetime string for the next run (optional).
        recurrence_rule: Optional human-readable recurrence rule
            (for example: "daily" or "weekly:mon,wed").
    """
    # Get the CSV path using helper
    csv_path = get_csv_path("tasks")

    # Generate task data; None scheduling fields are stored as empty cells.
    task_data: Dict[str, str] = {
        "task_id": generate_unique_id("task"),
        "created_at": get_current_timestamp(),
        "description": description,
        "status": "pending",  # every new task starts out pending
        "schedule_type": schedule_type,
        "scheduled_at": scheduled_at or "",
        "recurrence_rule": recurrence_rule or "",
    }

    # Add row to CSV using helper (creates tasks.csv with headers if missing)
    add_row_to_csv(csv_path, task_data)

    # Confirmation echoed back to the agent. FIX: the message previously
    # said "scheduled_type", which matches neither the parameter nor the
    # stored field name "schedule_type".
    return (
        f"Created task {task_data['task_id']!r} schedule_type={schedule_type!r} "
        f"scheduled_at={scheduled_at!r} recurrence_rule={recurrence_rule!r}"
    )
uv.lock ADDED
The diff for this file is too large to render. See raw diff