tensorus commited on
Commit
817b9cc
·
verified ·
1 Parent(s): 9ac6a1e

Upload 8 files

Browse files
app.py ADDED
@@ -0,0 +1,406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ """
3
+ Streamlit frontend application for the Tensorus platform.
4
+ Interacts with the FastAPI backend (api.py).
5
+ """
6
+
7
+ import streamlit as st
8
+ import json
9
+ import time
10
+ import requests # Needed for ui_utils functions if integrated
11
+ import logging # Needed for ui_utils functions if integrated
12
+ import torch # Needed for integrated tensor utils
13
+ from typing import List, Dict, Any, Optional, Union, Tuple # Needed for integrated tensor utils
14
+
15
# --- Page Configuration ---
# Must be the first Streamlit call in the script; configures browser tab
# title/icon and opens the sidebar by default.
st.set_page_config(
    page_title="Tensorus Platform",
    page_icon="🧊",
    layout="wide",
    initial_sidebar_state="expanded"
)

# --- Configure Logging (Optional but good practice) ---
# Module-level logger used by the tensor-conversion helpers below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
26
+
27
+ # --- Integrated Tensor Utilities ---
28
+
29
def _validate_tensor_data(data: List[Any], shape: List[int]):
    """
    Recursively verify that the nested-list structure of ``data`` matches
    ``shape``. Returns True on success and raises ValueError on the first
    mismatch. (Optional validation.)
    """
    # Empty shape denotes a scalar tensor: data must be a bare number.
    if not shape:
        if not isinstance(data, (int, float)):
            raise ValueError("Scalar tensor data must be a single number.")
        return True

    if not isinstance(data, list):
        raise ValueError(f"Data for shape {shape} must be a list.")

    expected_len = shape[0]
    if len(data) != expected_len:
        raise ValueError(f"Dimension 0 mismatch: Expected {expected_len}, got {len(data)} for shape {shape}.")

    if len(shape) > 1:
        # Recurse into each sub-list against the remaining dimensions.
        for element in data:
            _validate_tensor_data(element, shape[1:])
    elif len(shape) == 1:
        # Innermost level: every entry must be a plain number.
        if not all(isinstance(value, (int, float)) for value in data):
            raise ValueError("Innermost list elements must be numbers.")
    return True
45
+
46
def list_to_tensor(shape: List[int], dtype_str: str, data: Union[List[Any], int, float]) -> torch.Tensor:
    """
    Build a PyTorch tensor of the requested ``shape`` and dtype from a
    (possibly nested) Python list or a scalar.

    Raises ValueError when the dtype string is unknown, the data cannot be
    coerced into a tensor, or the result cannot be reshaped to ``shape``.
    """
    try:
        # Map the textual dtype names accepted by the API onto torch dtypes.
        name_to_dtype = {
            'float32': torch.float32, 'float': torch.float,
            'float64': torch.float64, 'double': torch.double,
            'int32': torch.int32, 'int': torch.int,
            'int64': torch.int64, 'long': torch.long,
            'bool': torch.bool,
        }
        torch_dtype = name_to_dtype.get(dtype_str.lower())
        if torch_dtype is None:
            raise ValueError(f"Unsupported dtype string: {dtype_str}")

        tensor = torch.tensor(data, dtype=torch_dtype)

        # torch.tensor infers shape from nesting; if it disagrees with the
        # requested shape, fall back to an explicit reshape.
        if list(tensor.shape) != shape:
            logger.debug(f"Initial tensor shape {list(tensor.shape)} differs from target {shape}. Attempting reshape.")
            try:
                tensor = tensor.reshape(shape)
            except RuntimeError as reshape_err:
                raise ValueError(f"Created tensor shape {list(tensor.shape)} != requested {shape} and reshape failed: {reshape_err}") from reshape_err

        return tensor
    except (TypeError, ValueError) as e:
        logger.error(f"Error converting list to tensor: {e}. Shape: {shape}, Dtype: {dtype_str}")
        raise ValueError(f"Failed tensor conversion: {e}") from e
    except Exception as e:
        logger.exception(f"Unexpected error during list_to_tensor: {e}")
        raise ValueError(f"Unexpected tensor conversion error: {e}") from e
78
+
79
def tensor_to_list(tensor: torch.Tensor) -> Tuple[List[int], str, List[Any]]:
    """
    Decompose a PyTorch tensor into ``(shape, dtype string, nested list)``,
    the inverse of ``list_to_tensor``.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input must be a torch.Tensor")
    # str(torch.float32) -> "torch.float32"; keep only the part after the dot.
    dtype_name = str(tensor.dtype).split('.')[-1]
    return list(tensor.shape), dtype_name, tensor.tolist()
89
+
90
# --- Integrated UI Utilities (from former ui_utils.py) ---

# Define the base URL of your FastAPI backend
API_BASE_URL = "http://127.0.0.1:8000"  # Make sure this matches where api.py runs
94
+
95
def get_api_status():
    """Ping the backend root endpoint; return ``(ok, payload_dict)``."""
    try:
        resp = requests.get(f"{API_BASE_URL}/", timeout=2)
        resp.raise_for_status()  # 4xx/5xx -> RequestException
        return True, resp.json()
    except requests.exceptions.RequestException as e:
        logger.error(f"API connection error: {e}")
        return False, {"error": str(e)}
104
+
105
def get_agent_status():
    """Return the status mapping for all agents, or None on connection failure."""
    try:
        resp = requests.get(f"{API_BASE_URL}/agents/status", timeout=5)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        # Surface the failure in the UI rather than raising.
        st.error(f"Connection Error fetching agent status: {e}")
        return None
114
+
115
def start_agent(agent_id: str):
    """Ask the backend to start ``agent_id``; return the API response dict."""
    try:
        resp = requests.post(f"{API_BASE_URL}/agents/{agent_id}/start", timeout=5)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        # Report in the UI and return a failure payload with the same shape
        # the API would use.
        st.error(f"Connection Error starting agent {agent_id}: {e}")
        return {"success": False, "message": str(e)}
124
+
125
def stop_agent(agent_id: str):
    """Ask the backend to stop ``agent_id``; return the API response dict."""
    try:
        resp = requests.post(f"{API_BASE_URL}/agents/{agent_id}/stop", timeout=5)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        # Mirror start_agent's failure shape so callers can treat both alike.
        st.error(f"Connection Error stopping agent {agent_id}: {e}")
        return {"success": False, "message": str(e)}
134
+
135
def configure_agent(agent_id: str, config: dict):
    """Push a new configuration dict to ``agent_id``; return the API response."""
    endpoint = f"{API_BASE_URL}/agents/{agent_id}/configure"
    try:
        resp = requests.post(endpoint, json={"config": config}, timeout=5)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error configuring agent {agent_id}: {e}")
        return {"success": False, "message": str(e)}
148
+
149
def post_nql_query(query: str):
    """Submit a natural-language query to the backend; return its response dict."""
    try:
        resp = requests.post(
            f"{API_BASE_URL}/chat/query",
            json={"query": query},
            timeout=15,  # Allow more time for potentially complex queries
        )
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error posting NQL query: {e}")
        # Fall back to an error payload in the same shape the UI expects.
        return {"query": query, "response_text": "Error connecting to backend.", "error": str(e)}
162
+
163
def get_datasets():
    """Return the list of dataset names known to the backend ([] on error)."""
    try:
        resp = requests.get(f"{API_BASE_URL}/explorer/datasets", timeout=5)
        resp.raise_for_status()
        return resp.json().get("datasets", [])
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error fetching datasets: {e}")
        return []  # Empty list keeps the selectbox code path simple
173
+
174
def get_dataset_preview(dataset_name: str, limit: int = 5):
    """Fetch up to ``limit`` preview records for a dataset (None on error)."""
    endpoint = f"{API_BASE_URL}/explorer/dataset/{dataset_name}/preview?limit={limit}"
    try:
        resp = requests.get(endpoint, timeout=10)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error fetching preview for {dataset_name}: {e}")
        return None
183
+
184
def operate_explorer(dataset: str, operation: str, index: int, params: dict):
    """Run a tensor operation on ``dataset[index]`` via the explorer endpoint.

    Returns the backend's result dict, or a failure-shaped dict on
    connection error.
    """
    request_body = {
        "dataset": dataset,
        "operation": operation,
        "tensor_index": index,
        "params": params,
    }
    try:
        resp = requests.post(
            f"{API_BASE_URL}/explorer/operate",
            json=request_body,
            timeout=15,  # Allow time for computation
        )
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error performing operation '{operation}' on {dataset}: {e}")
        return {"success": False, "metadata": {"error": str(e)}, "result_data": None}
203
+
204
+
205
# --- Initialize Session State ---
# Seed every key read below so first-run accesses never KeyError.
if 'agent_status' not in st.session_state:
    st.session_state.agent_status = None
if 'datasets' not in st.session_state:
    st.session_state.datasets = []
if 'selected_dataset' not in st.session_state:
    st.session_state.selected_dataset = None
if 'dataset_preview' not in st.session_state:
    st.session_state.dataset_preview = None
if 'explorer_result' not in st.session_state:
    st.session_state.explorer_result = None
if 'nql_response' not in st.session_state:
    st.session_state.nql_response = None
218
+
219
+
220
# --- Sidebar ---
with st.sidebar:
    st.title("Tensorus Control")
    st.markdown("---")

    # API Status Check
    st.subheader("API Status")
    api_ok, api_info = get_api_status()  # Use integrated function
    if api_ok:
        st.success(f"Connected to API v{api_info.get('version', 'N/A')}")
    else:
        st.error(f"API Connection Failed: {api_info.get('error', 'Unknown error')}")
        st.warning("Ensure the backend (`uvicorn api:app ...`) is running.")
        # Halt the whole app when the backend is unreachable.
        st.stop()

    st.markdown("---")
    # Navigation
    app_mode = st.radio(
        "Select Feature",
        ("Dashboard", "Agent Control", "NQL Chat", "Data Explorer")
    )
    st.markdown("---")
242
+
243
+
244
# --- Main Page Content ---

if app_mode == "Dashboard":
    st.title("📊 Operations Dashboard")
    st.warning("Live WebSocket dashboard view is best accessed directly via the backend's `/dashboard` HTML page or a dedicated JS frontend. This is a simplified view.")
    # NOTE(review): f-string below has no placeholder and no visible link
    # markup — the markdown link appears to have been lost; confirm intended URL.
    st.markdown(f"Access the basic live dashboard here.", unsafe_allow_html=True)  # Link to backend dashboard
    st.info("This Streamlit view doesn't currently support live WebSocket updates.")
251
+
252
+
253
elif app_mode == "Agent Control":
    st.title("🤖 Agent Control Panel")

    # Fetch the agent map on demand; stored in session state across reruns.
    if st.button("Refresh Agent Status"):
        st.session_state.agent_status = get_agent_status()  # Use integrated function

    if st.session_state.agent_status:
        agents = st.session_state.agent_status
        agent_ids = list(agents.keys())

        if not agent_ids:
            st.warning("No agents reported by the backend.")
        else:
            selected_agent_id = st.selectbox("Select Agent", agent_ids)

            if selected_agent_id:
                agent_info = agents[selected_agent_id]
                st.subheader(f"Agent: {agent_info.get('name', selected_agent_id)}")

                # Left column: status + config; right column: recent logs.
                col1, col2 = st.columns(2)
                with col1:
                    st.metric("Status", "Running" if agent_info.get('running') else "Stopped")
                    st.write("**Configuration:**")
                    st.json(agent_info.get('config', {}))
                with col2:
                    st.write("**Recent Logs:**")
                    st.code('\n'.join(agent_info.get('logs', [])), language='log')

                st.write("**Actions:**")
                # btn_col3 is currently unused (reserved spacing column).
                btn_col1, btn_col2, btn_col3 = st.columns(3)
                with btn_col1:
                    # Start is disabled while the agent is already running.
                    if st.button("Start Agent", key=f"start_{selected_agent_id}", disabled=agent_info.get('running')):
                        result = start_agent(selected_agent_id)  # Use integrated function
                        st.toast(result.get("message", "Request sent."))
                        st.session_state.agent_status = get_agent_status()  # Refresh status
                        st.rerun()
                with btn_col2:
                    # Stop is disabled while the agent is not running.
                    if st.button("Stop Agent", key=f"stop_{selected_agent_id}", disabled=not agent_info.get('running')):
                        result = stop_agent(selected_agent_id)  # Use integrated function
                        st.toast(result.get("message", "Request sent."))
                        st.session_state.agent_status = get_agent_status()  # Refresh status
                        st.rerun()

    else:
        st.info("Click 'Refresh Agent Status' to load agent information.")
298
+
299
+
300
elif app_mode == "NQL Chat":
    st.title("💬 Natural Language Query (NQL)")
    st.info("Ask questions about the data stored in Tensorus (e.g., 'show me tensors from rl_experiences', 'count records in sample_data').")

    user_query = st.text_input("Enter your query:", key="nql_query_input")

    if st.button("Submit Query", key="nql_submit"):
        if user_query:
            with st.spinner("Processing query..."):
                st.session_state.nql_response = post_nql_query(user_query)  # Use integrated function
        else:
            st.warning("Please enter a query.")

    # Render the most recent response (stored across the rerun that the
    # button click triggers).
    if st.session_state.nql_response:
        resp = st.session_state.nql_response
        st.markdown("---")
        st.write(f"**Query:** {resp.get('query')}")
        if resp.get("error"):
            st.error(f"Error: {resp.get('error')}")
        else:
            st.success(f"**Response:** {resp.get('response_text')}")
            if resp.get("results"):
                st.write("**Results Preview:**")
                st.json(resp.get("results"))
        # Clear after rendering so the response is shown once.
        st.session_state.nql_response = None
325
+
326
+
327
elif app_mode == "Data Explorer":
    st.title("🔍 Data Explorer")

    # Fetch dataset names on first visit or explicit refresh.
    if not st.session_state.datasets or st.button("Refresh Datasets"):
        st.session_state.datasets = get_datasets()  # Use integrated function

    if not st.session_state.datasets:
        st.warning("No datasets found or failed to fetch from backend.")
    else:
        # Keep the previous selection when it is still present in the list.
        st.session_state.selected_dataset = st.selectbox(
            "Select Dataset",
            st.session_state.datasets,
            index=st.session_state.datasets.index(st.session_state.selected_dataset) if st.session_state.selected_dataset in st.session_state.datasets else 0
        )

        if st.session_state.selected_dataset:
            if st.button("Show Preview", key="preview_btn"):
                with st.spinner(f"Fetching preview for {st.session_state.selected_dataset}..."):
                    st.session_state.dataset_preview = get_dataset_preview(st.session_state.selected_dataset)  # Use integrated function

            if st.session_state.dataset_preview:
                st.subheader(f"Preview: {st.session_state.dataset_preview.get('dataset')}")
                st.write(f"Total Records: {st.session_state.dataset_preview.get('record_count')}")
                st.dataframe(st.session_state.dataset_preview.get('preview', []))
                st.markdown("---")

            st.subheader("Perform Operation")
            # Bound the index selector by the previewed record count (1 if unknown).
            record_count = st.session_state.dataset_preview.get('record_count', 1) if st.session_state.dataset_preview else 1
            tensor_index = st.number_input("Select Tensor Index", min_value=0, max_value=max(0, record_count - 1), value=0, step=1)

            operations = ["info", "head", "slice", "sum", "mean", "reshape", "transpose"]
            selected_op = st.selectbox("Select Operation", operations)

            params = {}
            # Dynamic parameter inputs
            if selected_op == "head":
                params['count'] = st.number_input("Count", min_value=1, value=5, step=1)
            elif selected_op == "slice":
                params['dim'] = st.number_input("Dimension (dim)", value=0, step=1)
                params['start'] = st.number_input("Start Index", value=0, step=1)
                params['end'] = st.number_input("End Index (optional)", value=None, step=1, format="%d")
                params['step'] = st.number_input("Step (optional)", value=None, step=1, format="%d")
            elif selected_op in ["sum", "mean"]:
                dim_input = st.text_input("Dimension(s) (optional, e.g., 0 or 0,1)")
                if dim_input:
                    # "0,1" -> [0, 1]; "0" -> 0
                    try: params['dim'] = [int(x.strip()) for x in dim_input.split(',')] if ',' in dim_input else int(dim_input)
                    except ValueError: st.warning("Invalid dimension format.")
                params['keepdim'] = st.checkbox("Keep Dimensions (keepdim)", value=False)
            elif selected_op == "reshape":
                shape_input = st.text_input("Target Shape (comma-separated, e.g., 2,3,5)")
                if shape_input:
                    try: params['shape'] = [int(x.strip()) for x in shape_input.split(',')]
                    except ValueError: st.warning("Invalid shape format.")
            elif selected_op == "transpose":
                params['dim0'] = st.number_input("Dimension 0", value=0, step=1)
                params['dim1'] = st.number_input("Dimension 1", value=1, step=1)

            if st.button("Run Operation", key="run_op_btn"):
                valid_request = True
                # reshape is the only op with a mandatory parameter.
                if selected_op == "reshape" and not params.get('shape'):
                    st.error("Target Shape is required for reshape.")
                    valid_request = False

                if valid_request:
                    with st.spinner(f"Running {selected_op} on {st.session_state.selected_dataset}[{tensor_index}]..."):
                        st.session_state.explorer_result = operate_explorer(  # Use integrated function
                            st.session_state.selected_dataset,
                            selected_op,
                            tensor_index,
                            params
                        )

            if st.session_state.explorer_result:
                st.markdown("---")
                st.subheader("Operation Result")
                st.write("**Metadata:**")
                st.json(st.session_state.explorer_result.get("metadata", {}))
                st.write("**Result Data:**")
                st.json(st.session_state.explorer_result.get("result_data", "No data returned."))
406
+
pages/1_๐Ÿ“Š_Dashboard.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# pages/1_📊_Dashboard.py (Modifications for Step 3)

import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
import time
# Updated imports to use API-backed functions
# NOTE(review): app.py says ui_utils was integrated into it — confirm a
# ui_utils module actually ships alongside these pages.
from ui_utils import get_dashboard_metrics, list_all_agents, get_agent_status

st.set_page_config(page_title="Tensorus Dashboard", layout="wide")

st.title("📊 Operations Dashboard")
st.caption("Overview of Tensorus datasets and agent activity from API.")

# --- Fetch Data ---
# Use st.cache_data for API calls that don't need constant updates
# or manage refresh manually. For simplicity, call directly on rerun/button click.
metrics_data = None
agent_list = None  # Fetch full agent list for detailed status display

# Button to force refresh
if st.button("🔄 Refresh Dashboard Data"):
    # Clear previous cache if any or just proceed to refetch
    metrics_data = get_dashboard_metrics()
    agent_list = list_all_agents()
    st.session_state['dashboard_metrics'] = metrics_data  # Store in session state
    st.session_state['dashboard_agents'] = agent_list
    st.rerun()  # Rerun the script to reflect fetched data
else:
    # Try to load from session state or fetch if not present
    if 'dashboard_metrics' not in st.session_state:
        st.session_state['dashboard_metrics'] = get_dashboard_metrics()
    if 'dashboard_agents' not in st.session_state:
        st.session_state['dashboard_agents'] = list_all_agents()

    metrics_data = st.session_state['dashboard_metrics']
    agent_list = st.session_state['dashboard_agents']


# --- Display Metrics ---
st.subheader("System Metrics")
if metrics_data:
    col1, col2, col3 = st.columns(3)
    col1.metric("Total Datasets", metrics_data.get('dataset_count', 'N/A'))
    col2.metric("Total Records (Est.)", f"{metrics_data.get('total_records_est', 0):,}")
    # Agent status summary from metrics
    agent_summary = metrics_data.get('agent_status_summary', {})
    running_agents = agent_summary.get('running', 0) + agent_summary.get('starting', 0)
    col3.metric("Running Agents", running_agents)

    st.divider()

    # --- Performance Metrics Row ---
    st.subheader("Performance Indicators (Simulated)")
    pcol1, pcol2, pcol3, pcol4 = st.columns(4)
    pcol1.metric("Ingestion Rate (rec/s)", f"{metrics_data.get('data_ingestion_rate', 0.0):.1f}")
    pcol2.metric("Avg Query Latency (ms)", f"{metrics_data.get('avg_query_latency_ms', 0.0):.1f}")
    pcol3.metric("Latest RL Reward", f"{metrics_data.get('rl_latest_reward', 'N/A')}")
    pcol4.metric("Best AutoML Score", f"{metrics_data.get('automl_best_score', 'N/A')}")

else:
    st.warning("Could not fetch dashboard metrics from the API.")

st.divider()

# --- Agent Status Details ---
st.subheader("Agent Status")
if agent_list:
    num_agents = len(agent_list)
    cols = st.columns(max(1, num_agents))  # Create columns for agents

    for i, agent_info in enumerate(agent_list):
        agent_id = agent_info.get('id')
        with cols[i % len(cols)]:  # Distribute agents into columns
            with st.container(border=True):
                st.markdown(f"**{agent_info.get('name', 'Unknown Agent')}** (`{agent_id}`)")
                # Fetch detailed status for more info if needed, or use basic status from list
                # status_details = get_agent_status(agent_id) # Can make page slower
                status = agent_info.get('status', 'unknown')
                status_color = "green" if status in ["running", "starting"] else ("orange" if status in ["stopping"] else ("red" if status in ["error"] else "grey"))
                st.markdown(f"Status: :{status_color}[**{status.upper()}**]")

                # Display config from the list info
                with st.expander("Config"):
                    st.json(agent_info.get('config', {}), expanded=False)
else:
    st.warning("Could not fetch agent list from the API.")

st.divider()

# --- Performance Monitoring Chart (Using simulated data from metrics for now) ---
st.subheader("Performance Monitoring (Placeholder Graph)")
if metrics_data:
    # Create some fake historical data for plotting based on current metrics
    history_len = 20
    # Use session state to persist some history for smoother simulation
    if 'sim_history' not in st.session_state:
        st.session_state['sim_history'] = pd.DataFrame({
            'Ingestion Rate': np.random.rand(history_len) * metrics_data.get('data_ingestion_rate', 10),
            'Query Latency': np.random.rand(history_len) * metrics_data.get('avg_query_latency_ms', 100),
            'RL Reward': np.random.randn(history_len) * 5 + (metrics_data.get('rl_latest_reward', 0) or 0)
        })

    # Update history with latest point (drop the oldest row, append the newest).
    latest_data = pd.DataFrame({
        'Ingestion Rate': [metrics_data.get('data_ingestion_rate', 0.0)],
        'Query Latency': [metrics_data.get('avg_query_latency_ms', 0.0)],
        'RL Reward': [metrics_data.get('rl_latest_reward', 0) or 0]  # Handle None
    })
    st.session_state['sim_history'] = pd.concat([st.session_state['sim_history'].iloc[1:], latest_data], ignore_index=True)


    # Use Plotly for better interactivity
    try:
        fig = px.line(st.session_state['sim_history'], title="Simulated Performance Metrics Over Time")
        fig.update_layout(legend_title_text='Metrics')
        st.plotly_chart(fig, use_container_width=True)
    except Exception as e:
        st.warning(f"Could not display performance chart: {e}")

else:
    st.info("Performance metrics unavailable.")
125
# Footer: show when the dashboard data was produced, falling back to "now"
# when metrics are unavailable. Fix: time.strftime requires a struct_time as
# its second argument, so the fallback must also pass through
# time.localtime() — the original passed a raw float when metrics_data was
# falsy, which raised TypeError.
st.caption(f"Dashboard data timestamp: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(metrics_data.get('timestamp', time.time()) if metrics_data else time.time()))}")
pages/2_๐Ÿ•น๏ธ_Control_Panel.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# pages/2_🕹️_Control_Panel.py (Modifications for Step 3)

import streamlit as st
import time
# Use the updated API-backed functions
# NOTE(review): app.py says ui_utils was integrated into it — confirm a
# ui_utils module actually ships alongside these pages.
from ui_utils import list_all_agents, get_agent_status, get_agent_logs, start_agent, stop_agent

st.set_page_config(page_title="Agent Control Panel", layout="wide")

st.title("🕹️ Multi-Agent Control Panel")
st.caption("Manage and monitor Tensorus agents via API.")

# Fetch agent list from API
agent_list = list_all_agents()

if not agent_list:
    st.error("Could not fetch agent list from API. Please ensure the backend is running and reachable.")
    st.stop()

# Create a mapping from name to ID for easier selection
# Handle potential duplicate names if necessary, though IDs should be unique
agent_options = {agent['name']: agent['id'] for agent in agent_list}
# Add ID to name if names aren't unique (optional robustness)
# agent_options = {f"{agent['name']} ({agent['id']})": agent['id'] for agent in agent_list}


selected_agent_name = st.selectbox("Select Agent:", options=agent_options.keys())

if selected_agent_name:
    selected_agent_id = agent_options[selected_agent_name]
    st.divider()
    st.subheader(f"Control: {selected_agent_name} (`{selected_agent_id}`)")

    # Use session state to store fetched status and logs for the selected agent
    # This avoids refetching constantly unless a refresh is triggered
    agent_state_key = f"agent_status_{selected_agent_id}"
    agent_logs_key = f"agent_logs_{selected_agent_id}"

    # Button to force refresh status and logs
    if st.button(f"🔄 Refresh Status & Logs##{selected_agent_id}"):  # Unique key per agent
        st.session_state[agent_state_key] = get_agent_status(selected_agent_id)
        st.session_state[agent_logs_key] = get_agent_logs(selected_agent_id)
        st.rerun()  # Rerun to display refreshed data

    # Fetch status if not in session state or refresh button wasn't just clicked
    if agent_state_key not in st.session_state:
        st.session_state[agent_state_key] = get_agent_status(selected_agent_id)

    status_info = st.session_state[agent_state_key]

    if status_info:
        status = status_info.get('status', 'unknown')
        status_color = "green" if status in ["running", "starting"] else ("orange" if status in ["stopping"] else ("red" if status in ["error"] else "grey"))
        st.markdown(f"Current Status: :{status_color}[**{status.upper()}**]")
        last_log_ts = status_info.get('last_log_timestamp')
        if last_log_ts:
            st.caption(f"Last Log Entry (approx.): {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(last_log_ts))}")
    else:
        st.error(f"Could not retrieve status for agent '{selected_agent_name}'.")

    # Control Buttons (Now call API functions)
    col1, col2, col3 = st.columns([1, 1, 5])
    is_running = status_info and status_info.get('status') == 'running'
    is_stopped = status_info and status_info.get('status') == 'stopped'

    with col1:
        start_disabled = not is_stopped  # Disable if not stopped
        if st.button("▶️ Start", key=f"start_{selected_agent_id}", disabled=start_disabled):
            if start_agent(selected_agent_id):  # API call returns success/fail
                # Trigger refresh after short delay to allow backend state change (optimistic)
                time.sleep(1.0)
                # Clear state cache and rerun
                if agent_state_key in st.session_state: del st.session_state[agent_state_key]
                if agent_logs_key in st.session_state: del st.session_state[agent_logs_key]
                st.rerun()
    with col2:
        stop_disabled = not is_running  # Disable if not running
        if st.button("⏹️ Stop", key=f"stop_{selected_agent_id}", disabled=stop_disabled):
            if stop_agent(selected_agent_id):  # API call returns success/fail
                time.sleep(1.0)
                if agent_state_key in st.session_state: del st.session_state[agent_state_key]
                if agent_logs_key in st.session_state: del st.session_state[agent_logs_key]
                st.rerun()

    st.divider()

    # Configuration & Logs
    tab1, tab2 = st.tabs(["Configuration", "Logs"])

    with tab1:
        if status_info and 'config' in status_info:
            st.write("Current configuration:")
            st.json(status_info['config'])
            # TODO: Implement configuration editing via API
            st.button("✏️ Edit Configuration (Placeholder)", disabled=True)
        else:
            st.warning("Configuration not available.")

    with tab2:
        st.write("Recent logs (fetched from API):")
        # Fetch logs if not in session state
        if agent_logs_key not in st.session_state:
            st.session_state[agent_logs_key] = get_agent_logs(selected_agent_id)

        logs = st.session_state[agent_logs_key]
        if logs is not None:
            st.code("\n".join(logs), language="log")
        else:
            st.error("Could not retrieve logs.")

else:
    st.info("Select an agent from the dropdown above.")
pages/3_๐Ÿ’ฌ_NQL_Chatbot.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pages/3_๐Ÿ’ฌ_NQL_Chatbot.py
2
+
3
import pandas as pd
import streamlit as st

from ui_utils import execute_nql_query
5
+
6
st.set_page_config(page_title="NQL Chatbot", layout="wide")

st.title("💬 Natural Query Language (NQL) Chatbot")
st.caption("Query Tensorus datasets using natural language.")
st.info("Backend uses Regex-based NQL Agent. LLM integration is future work.")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "results" in message and message["results"]:
            st.dataframe(message["results"], use_container_width=True)  # Display results as dataframe
        elif "error" in message:
            st.error(message["error"])


# React to user input
if prompt := st.chat_input("Enter your query (e.g., 'get all data from my_dataset')"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Get assistant response from NQL Agent API
    with st.spinner("Processing query..."):
        nql_response = execute_nql_query(prompt)

    response_content = ""
    results_df = None
    error_msg = None

    if nql_response:
        response_content = nql_response.get("message", "Error processing response.")
        if nql_response.get("success"):
            results_list = nql_response.get("results")
            if results_list:
                # Convert results list (containing dicts with 'metadata', 'shape', etc.) to DataFrame
                # Extract relevant fields for display
                display_data = []
                for res in results_list:
                    row = {
                        "record_id": res["metadata"].get("record_id"),
                        "shape": str(res.get("shape")),  # Convert shape list to string
                        "dtype": res.get("dtype"),
                        **res["metadata"]  # Flatten metadata into columns
                    }
                    # Remove potentially large 'tensor' data from direct display
                    row.pop('tensor', None)
                    # Avoid duplicate metadata keys if also present at top level
                    # NOTE(review): these pops also delete the record_id/shape/dtype
                    # columns added explicitly above — confirm that is intended.
                    row.pop('shape', None)
                    row.pop('dtype', None)
                    row.pop('record_id', None)
                    display_data.append(row)

                if display_data:
                    # Requires pandas (import pandas as pd at the top of this file).
                    results_df = pd.DataFrame(display_data)

                # Augment message if results found
                count = nql_response.get("count")
                if count is not None:
                    response_content += f" Found {count} record(s)."

        else:
            # NQL agent indicated failure (parsing or execution)
            error_msg = response_content  # Use the message as the error

    else:
        # API call itself failed (connection error, etc.)
        response_content = "Failed to get response from the NQL agent."
        error_msg = response_content

    # Display assistant response in chat message container
    message_data = {"role": "assistant", "content": response_content}
    with st.chat_message("assistant"):
        st.markdown(response_content)
        if results_df is not None:
            st.dataframe(results_df, use_container_width=True)
            message_data["results"] = results_df  # Store for history display if needed (might be large)
        elif error_msg:
            st.error(error_msg)
            message_data["error"] = error_msg

    # Add assistant response to chat history
    st.session_state.messages.append(message_data)
pages/4_๐Ÿ”_Data_Explorer.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# pages/4_🔍_Data_Explorer.py
"""Interactive Data Explorer page.

Lets the user pick a Tensorus dataset, filter its records by metadata
(numeric range / categorical / free-text filters built dynamically from the
metadata columns), and preview an individual record's tensor with a few
shape-appropriate visualizations (line chart, heatmap, image).
"""

import streamlit as st
import pandas as pd
import plotly.express as px
from ui_utils import list_datasets, fetch_dataset_data
import torch # Needed if we want to recreate tensors for inspection/plotting

st.set_page_config(page_title="Data Explorer", layout="wide")

st.title("๐Ÿ” Interactive Data Explorer")
st.caption("Browse, filter, and visualize Tensorus datasets.")

# --- Dataset Selection ---
# list_datasets() returns None on API failure and [] when no datasets exist;
# both are falsy, so one check covers both situations.
datasets = list_datasets()
if not datasets:
    st.warning("No datasets found or API connection failed. Cannot explore data.")
    st.stop() # Stop execution if no datasets

selected_dataset = st.selectbox("Select Dataset:", datasets)

# --- Data Fetching & Filtering ---
if selected_dataset:
    st.subheader(f"Exploring: {selected_dataset}")

    # Fetch data (limited records for UI)
    # TODO: Implement server-side sampling/pagination via API for large datasets
    MAX_RECORDS_DISPLAY = 100
    records = fetch_dataset_data(selected_dataset, max_records=MAX_RECORDS_DISPLAY)

    # None means the fetch itself failed; [] means the dataset is empty.
    if records is None:
        st.error("Failed to fetch data for the selected dataset.")
        st.stop()
    elif not records:
        st.info("Selected dataset is empty.")
        st.stop()

    st.info(f"Displaying first {len(records)} records out of potentially more.")

    # Create DataFrame from metadata for filtering/display.
    # NOTE(review): assumes every record carries a 'metadata' dict — confirm
    # against the /datasets/{name}/fetch response schema.
    metadata_list = [r['metadata'] for r in records]
    df_meta = pd.DataFrame(metadata_list)

    # --- Metadata Filtering UI ---
    # Filters are applied sequentially (AND semantics): each selected column
    # narrows `filtered_df` further.
    st.sidebar.header("Filter by Metadata")
    filter_cols = st.sidebar.multiselect("Select metadata columns to filter:", options=df_meta.columns.tolist())

    filtered_df = df_meta.copy()
    for col in filter_cols:
        unique_values = filtered_df[col].dropna().unique().tolist()
        if pd.api.types.is_numeric_dtype(filtered_df[col]):
            # Numeric filter (slider) — only meaningful when there is a range.
            min_val, max_val = float(filtered_df[col].min()), float(filtered_df[col].max())
            if min_val < max_val:
                selected_range = st.sidebar.slider(f"Filter {col}:", min_val, max_val, (min_val, max_val))
                filtered_df = filtered_df[filtered_df[col].between(selected_range[0], selected_range[1])]
            else:
                st.sidebar.caption(f"{col}: Single numeric value ({min_val}), no range filter.")

        elif len(unique_values) > 0 and len(unique_values) <= 20: # Limit dropdown options
            # Categorical filter (multiselect); all values selected by default.
            selected_values = st.sidebar.multiselect(f"Filter {col}:", options=unique_values, default=unique_values)
            if selected_values: # Only filter if some values are selected
                filtered_df = filtered_df[filtered_df[col].isin(selected_values)]
            else: # If user deselects everything, show nothing
                filtered_df = filtered_df[filtered_df[col].isnull()] # Hack to get empty DF matching columns

        else:
            # Free-text substring filter; the widget value is read back from
            # session_state via the key rather than the return value.
            st.sidebar.text_input(f"Filter {col} (Text contains):", key=f"text_{col}")
            search_term = st.session_state.get(f"text_{col}", "").lower()
            if search_term:
                # Ensure column is string type before using .str.contains
                filtered_df = filtered_df[filtered_df[col].astype(str).str.lower().str.contains(search_term, na=False)]


    st.divider()
    st.subheader("Filtered Data View")
    st.write(f"{len(filtered_df)} records matching filters.")
    st.dataframe(filtered_df, use_container_width=True)

    # --- Tensor Preview & Visualization ---
    st.divider()
    st.subheader("Tensor Preview")

    if not filtered_df.empty:
        # Allow selecting a record ID from the filtered results.
        # NOTE(review): assumes 'record_id' is always a metadata column — a
        # KeyError here would mean records without it; confirm upstream.
        record_ids = filtered_df['record_id'].tolist()
        selected_record_id = st.selectbox("Select Record ID to Preview Tensor:", record_ids)

        if selected_record_id:
            # Find the full record data corresponding to the selected ID
            selected_record = next((r for r in records if r['metadata'].get('record_id') == selected_record_id), None)

            if selected_record:
                st.write("Metadata:")
                st.json(selected_record['metadata'])

                shape = selected_record.get("shape")
                dtype = selected_record.get("dtype")
                data_list = selected_record.get("data")

                st.write(f"Tensor Info: Shape={shape}, Dtype={dtype}")

                try:
                    # Recreate tensor for potential plotting/display
                    # Be careful with large tensors in Streamlit UI!
                    # We might only want to show info or small slices.
                    if shape and dtype and data_list is not None:
                        tensor = torch.tensor(data_list, dtype=getattr(torch, dtype, torch.float32)) # Use getattr for dtype
                        st.write("Tensor Data (first few elements):")
                        st.code(f"{tensor.flatten()[:10].numpy()}...") # Show flattened start

                        # --- Simple Visualizations (dispatch on dimensionality) ---
                        if tensor.ndim == 1 and tensor.numel() > 1:
                            st.line_chart(tensor.numpy())
                        elif tensor.ndim == 2 and tensor.shape[0] > 1 and tensor.shape[1] > 1 :
                            # Simple heatmap using plotly (requires plotly)
                            try:
                                fig = px.imshow(tensor.numpy(), title="Tensor Heatmap", aspect="auto")
                                st.plotly_chart(fig, use_container_width=True)
                            except Exception as plot_err:
                                st.warning(f"Could not generate heatmap: {plot_err}")
                        elif tensor.ndim == 3 and tensor.shape[0] in [1, 3]: # Basic image check (C, H, W) or (1, H, W)
                            try:
                                # Permute if needed (e.g., C, H, W -> H, W, C for display)
                                # (the inner check is redundant with the elif guard above)
                                if tensor.shape[0] in [1, 3]:
                                    display_tensor = tensor.permute(1, 2, 0).squeeze() # H, W, C or H, W
                                    # Clamp/normalize data to display range [0, 1] or [0, 255] - basic attempt;
                                    # the +1e-6 avoids division by zero for constant tensors.
                                    display_tensor = (display_tensor - display_tensor.min()) / (display_tensor.max() - display_tensor.min() + 1e-6)
                                    st.image(display_tensor.numpy(), caption="Tensor as Image (Attempted)", use_column_width=True)
                            except ImportError:
                                st.warning("Pillow needed for image display (`pip install Pillow`)")
                            except Exception as img_err:
                                st.warning(f"Could not display tensor as image: {img_err}")
                        else:
                            st.info("No specific visualization available for this tensor shape/dimension.")

                    else:
                        st.warning("Tensor data, shape, or dtype missing in the record.")

                except Exception as tensor_err:
                    st.error(f"Error processing tensor data for preview: {tensor_err}")
            else:
                st.warning("Selected record details not found (this shouldn't happen).")
        else:
            st.info("Select a record ID above to preview its tensor.")
    else:
        st.info("No records match the current filters.")
pages/5_๐Ÿš€_API_Playground.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# pages/5_🚀_API_Playground.py
"""Embed the FastAPI interactive docs (Swagger UI / ReDoc) inside Streamlit."""

import streamlit as st
import streamlit.components.v1 as components
from ui_utils import TENSORUS_API_URL, get_api_status  # base URL + health check

st.set_page_config(page_title="API Playground", layout="wide")

st.title("๐Ÿš€ API Playground & Documentation Hub")
st.caption("Explore and interact with the Tensorus REST API.")

# Probe the backend once; the rest of the page is useless without it.
api_running = get_api_status()

if not api_running:
    st.error(
        f"The Tensorus API backend does not seem to be running at {TENSORUS_API_URL}. "
        "Please start the backend (`uvicorn api:app --reload`) to use the API Playground."
    )
    st.stop()  # Nothing below can work without the backend.
else:
    st.success(f"Connected to API backend at {TENSORUS_API_URL}")

st.markdown(
    f"""
    This section provides live, interactive documentation for the Tensorus API,
    powered by FastAPI's OpenAPI integration. You can explore endpoints,
    view schemas, and even try out API calls directly in your browser.

    * **Swagger UI:** A graphical interface for exploring and testing API endpoints.
    * **ReDoc:** Alternative documentation format, often preferred for reading.

    Select a view below:
    """
)

# One tab per documentation flavour.
swagger_tab, redoc_tab = st.tabs(["Swagger UI", "ReDoc"])

# FastAPI serves its docs at fixed paths under the API base URL.
swagger_url = f"{TENSORUS_API_URL}/docs"
redoc_url = f"{TENSORUS_API_URL}/redoc"

with swagger_tab:
    st.subheader("Swagger UI")
    st.markdown(f"Explore the API interactively. [Open in new tab]({swagger_url})")
    # Embedded via iframe so calls run against the live backend.
    components.iframe(swagger_url, height=800, scrolling=True)

with redoc_tab:
    st.subheader("ReDoc")
    st.markdown(f"View the API documentation. [Open in new tab]({redoc_url})")
    components.iframe(redoc_url, height=800, scrolling=True)

st.divider()
st.caption("Note: Ensure the Tensorus API backend is running to interact with the playground.")
requirements.txt ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # requirements.txt
2
+ # Updated: 2025-04-08
3
+
4
+ # --- Core Tensor and Numerics ---
5
+ torch>=1.13.0
6
+ numpy>=1.21.0
7
+
8
+ # --- Agent Specific Dependencies ---
9
+ # For Ingestion Agent image processing example
10
+ Pillow>=9.0.0
11
+
12
+ # --- API Layer Dependencies ---
13
+ fastapi>=0.90.0
14
+ # Lock Pydantic < 2.0 for broad FastAPI compatibility, adjust if using newer FastAPI explicitly with Pydantic v2
15
+ pydantic>=1.10.0,<2.0.0
16
+ # ASGI Server (standard includes extras like watchfiles for reload)
17
+ uvicorn[standard]>=0.20.0
18
+ # Optional: Needed if using FastAPI file uploads via forms
19
+ # python-multipart>=0.0.5
20
+
21
+ # --- Streamlit UI Dependencies ---
22
+ streamlit>=1.25.0
23
+ # For calling the FastAPI backend from the Streamlit UI
24
+ requests>=2.28.0
25
+ # For plotting in the Streamlit UI (Dashboard, Data Explorer)
26
+ plotly>=5.10.0
27
+ # Optional: For plotting example in rl_agent.py
28
+ matplotlib>=3.5.0
ui_utils.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ui_utils.py (Modifications for Step 3)
2
+ """Utility functions for the Tensorus Streamlit UI, now using API calls."""
3
+
4
+ import requests
5
+ import streamlit as st
6
+ import logging
7
+ from typing import List, Dict, Any, Optional
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+ # --- Configuration ---
12
+ TENSORUS_API_URL = "http://127.0.0.1:8000" # Ensure FastAPI runs here
13
+
14
+ # --- API Interaction Functions ---
15
+
16
def get_api_status() -> bool:
    """Check whether the Tensorus API backend is reachable.

    Returns:
        True if ``GET /`` on the API base URL answers with HTTP 200;
        False on any connection problem, timeout, or other failure.
    """
    try:
        response = requests.get(f"{TENSORUS_API_URL}/", timeout=2)
        return response.status_code == 200
    except requests.exceptions.RequestException:
        # An unreachable or slow backend is an expected condition here, not
        # an error: RequestException covers ConnectionError, Timeout, etc.,
        # so a timeout no longer falls through to the error log below.
        return False
    except Exception as e:
        logger.error(f"Error checking API status: {e}")
        return False
26
+
27
def list_datasets() -> Optional[List[str]]:
    """Fetch the list of dataset names from the API.

    Returns:
        A (possibly empty) list of dataset names on success, or None on
        any API or connection error (the error is surfaced via st.error).
    """
    try:
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.get(f"{TENSORUS_API_URL}/datasets", timeout=10)
        response.raise_for_status()
        data = response.json()
        if data.get("success"):
            return data.get("data", [])
        else:
            st.error(f"API Error listing datasets: {data.get('message')}")
            return None
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error listing datasets: {e}")
        return None
    except Exception as e:
        st.error(f"Unexpected error listing datasets: {e}")
        logger.exception("Unexpected error in list_datasets")
        return None
45
+
46
def fetch_dataset_data(dataset_name: str, max_records: int = 50) -> Optional[List[Dict[str, Any]]]:
    """Fetch records from a dataset via the API, truncated client-side.

    Args:
        dataset_name: Name of the dataset to fetch.
        max_records: Maximum number of records to return (clamped to >= 0).

    Returns:
        Up to ``max_records`` record dicts on success, or None on any API
        or connection error (the error is surfaced via st.error).
    """
    try:
        # Generous timeout (payload may be large), but still bounded so an
        # unresponsive backend cannot hang the UI forever.
        response = requests.get(f"{TENSORUS_API_URL}/datasets/{dataset_name}/fetch", timeout=30)
        response.raise_for_status()
        data = response.json()
        if data.get("success"):
            all_records = data.get("data", [])
            # Clamp: a negative max_records would otherwise become a negative
            # slice and silently drop records from the *end* of the list.
            return all_records[:max(0, max_records)]  # Limit client-side for now
        else:
            st.error(f"API Error fetching '{dataset_name}': {data.get('message')}")
            return None
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error fetching '{dataset_name}': {e}")
        return None
    except Exception as e:
        st.error(f"Unexpected error fetching '{dataset_name}': {e}")
        logger.exception(f"Unexpected error in fetch_dataset_data for {dataset_name}")
        return None
65
+
66
def execute_nql_query(query: str) -> Optional[Dict[str, Any]]:
    """Send an NQL (natural query language) query to the API.

    Args:
        query: The NQL query string.

    Returns:
        The full NQLResponse-shaped dict from the API on success, or a
        synthesized failure dict (``success: False`` plus a message) on
        any NQL, connection, or unexpected error.
    """
    try:
        payload = {"query": query}
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.post(f"{TENSORUS_API_URL}/query", json=payload, timeout=30)
        # Handle specific NQL errors (400) vs other errors
        if response.status_code == 400:
            try:
                error_detail = response.json().get("detail", "Unknown NQL processing error")
            except ValueError:
                # 400 body was not JSON; keep the generic message instead of
                # letting the JSON decode error escape as "unexpected".
                error_detail = "Unknown NQL processing error"
            return {"success": False, "message": error_detail, "results": None, "count": None}
        response.raise_for_status()  # Raise for 5xx etc.
        return response.json()  # Return the full NQLResponse structure
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error executing NQL query: {e}")
        return {"success": False, "message": f"Connection Error: {e}", "results": None, "count": None}
    except Exception as e:
        st.error(f"Unexpected error executing NQL query: {e}")
        logger.exception("Unexpected error in execute_nql_query")
        return {"success": False, "message": f"Unexpected Error: {e}", "results": None, "count": None}
84
+
85
+ # --- NEW/UPDATED Agent and Metrics Functions ---
86
+
87
def list_all_agents() -> Optional[List[Dict[str, Any]]]:
    """Fetch the list of all registered agents from the API.

    Returns:
        A list of AgentInfo-shaped dicts on success, or None on any
        connection or unexpected error (surfaced via st.error).
    """
    try:
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.get(f"{TENSORUS_API_URL}/agents", timeout=10)
        response.raise_for_status()
        # The response body is directly the list of AgentInfo objects.
        return response.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error listing agents: {e}")
        return None
    except Exception as e:
        st.error(f"Unexpected error listing agents: {e}")
        logger.exception("Unexpected error in list_all_agents")
        return None
101
+
102
def get_agent_status(agent_id: str) -> Optional[Dict[str, Any]]:
    """Fetch status for a specific agent from the API.

    Args:
        agent_id: Identifier of the agent.

    Returns:
        An AgentStatus-shaped dict on success; None if the agent is not
        found or any error occurred (surfaced via st.error).
    """
    try:
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.get(f"{TENSORUS_API_URL}/agents/{agent_id}/status", timeout=10)
        if response.status_code == 404:
            st.error(f"Agent '{agent_id}' not found via API.")
            return None
        response.raise_for_status()
        # Returns AgentStatus model dict
        return response.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error getting status for agent '{agent_id}': {e}")
        return None
    except Exception as e:
        st.error(f"Unexpected error getting status for agent '{agent_id}': {e}")
        logger.exception(f"Unexpected error in get_agent_status for {agent_id}")
        return None
119
+
120
def get_agent_logs(agent_id: str, lines: int = 20) -> Optional[List[str]]:
    """Fetch recent log lines for a specific agent from the API.

    Args:
        agent_id: Identifier of the agent.
        lines: Number of recent log lines to request.

    Returns:
        A list of log lines (possibly empty) on success; None if the agent
        is not found or any error occurred (surfaced via st.error).
    """
    try:
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.get(
            f"{TENSORUS_API_URL}/agents/{agent_id}/logs",
            params={"lines": lines},
            timeout=10,
        )
        if response.status_code == 404:
            st.error(f"Agent '{agent_id}' not found via API for logs.")
            return None
        response.raise_for_status()
        data = response.json()
        # Returns AgentLogResponse model dict; default to [] if 'logs' missing.
        return data.get("logs", [])
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error getting logs for agent '{agent_id}': {e}")
        return None
    except Exception as e:
        st.error(f"Unexpected error getting logs for agent '{agent_id}': {e}")
        logger.exception(f"Unexpected error in get_agent_logs for {agent_id}")
        return None
138
+
139
def start_agent(agent_id: str) -> bool:
    """Send a start signal to an agent via the API.

    Args:
        agent_id: Identifier of the agent to start.

    Returns:
        True if the API confirmed the start signal was accepted; False on
        any error or if the API reported a logical failure (e.g. the agent
        was already running). Outcomes are surfaced via st.* messages.
    """
    try:
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.post(f"{TENSORUS_API_URL}/agents/{agent_id}/start", timeout=10)
        if response.status_code == 404:
            st.error(f"Agent '{agent_id}' not found via API.")
            return False
        # 202 Accepted is success; other 2xx might be okay too (e.g. already
        # running if handled gracefully). 4xx/5xx indicate failure.
        if 200 <= response.status_code < 300:
            api_response = response.json()
            if api_response.get("success"):
                st.success(f"API: {api_response.get('message', 'Start signal sent.')}")
                return True
            else:
                # API indicated logical failure (e.g., already running)
                st.warning(f"API: {api_response.get('message', 'Agent might already be running.')}")
                return False
        else:
            # Handle other potential errors reported by API
            error_detail = "Unknown error"
            try:
                error_detail = response.json().get("detail", error_detail)
            except ValueError:
                # Body was not JSON; keep the generic message. (Was a bare
                # except, which also swallowed KeyboardInterrupt/SystemExit.)
                pass
            st.error(f"API Error starting agent '{agent_id}': {error_detail} (Status: {response.status_code})")
            return False
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error starting agent '{agent_id}': {e}")
        return False
    except Exception as e:
        st.error(f"Unexpected error starting agent '{agent_id}': {e}")
        logger.exception(f"Unexpected error in start_agent for {agent_id}")
        return False
171
+
172
def stop_agent(agent_id: str) -> bool:
    """Send a stop signal to an agent via the API.

    Args:
        agent_id: Identifier of the agent to stop.

    Returns:
        True if the API confirmed the stop signal was accepted; False on
        any error or if the API reported a logical failure (e.g. the agent
        was already stopped). Outcomes are surfaced via st.* messages.
    """
    try:
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.post(f"{TENSORUS_API_URL}/agents/{agent_id}/stop", timeout=10)
        if response.status_code == 404:
            st.error(f"Agent '{agent_id}' not found via API.")
            return False
        if 200 <= response.status_code < 300:
            api_response = response.json()
            if api_response.get("success"):
                st.success(f"API: {api_response.get('message', 'Stop signal sent.')}")
                return True
            else:
                # API indicated logical failure (e.g., already stopped)
                st.warning(f"API: {api_response.get('message', 'Agent might already be stopped.')}")
                return False
        else:
            error_detail = "Unknown error"
            try:
                error_detail = response.json().get("detail", error_detail)
            except ValueError:
                # Body was not JSON; keep the generic message. (Was a bare
                # except, which also swallowed KeyboardInterrupt/SystemExit.)
                pass
            st.error(f"API Error stopping agent '{agent_id}': {error_detail} (Status: {response.status_code})")
            return False
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error stopping agent '{agent_id}': {e}")
        return False
    except Exception as e:
        st.error(f"Unexpected error stopping agent '{agent_id}': {e}")
        logger.exception(f"Unexpected error in stop_agent for {agent_id}")
        return False
200
+
201
def get_dashboard_metrics() -> Optional[Dict[str, Any]]:
    """Fetch dashboard metrics from the API.

    Returns:
        A DashboardMetrics-shaped dict on success, or None on any
        connection or unexpected error (surfaced via st.error).
    """
    try:
        # Explicit timeout so an unresponsive backend cannot hang the UI.
        response = requests.get(f"{TENSORUS_API_URL}/metrics/dashboard", timeout=10)
        response.raise_for_status()
        # Returns DashboardMetrics model dict
        return response.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Connection Error fetching dashboard metrics: {e}")
        return None
    except Exception as e:
        st.error(f"Unexpected error fetching dashboard metrics: {e}")
        logger.exception("Unexpected error in get_dashboard_metrics")
        return None