Spaces:
Sleeping
Sleeping
fix: sanitize numpy/pandas types in submit_solution JSON serialization
Resolves "float object cannot be interpreted as integer" error when
submit_solution tries to serialize pd.Timestamp (from cast_datetime),
numpy.int64 (from df.shape), and numpy.float64 values. Now uses
pandas to_json for cleaned_preview and explicit int/float/bool casts.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
- server/cleaning_environment.py +15 -10
server/cleaning_environment.py
CHANGED

@@ -338,9 +338,9 @@ class FSDSCleaningEnvironment(MCPEnvironment):
             )
             return {
                 "message": message,
-                "reward": round(step_reward, 4),
-                "quality_score": round(after_score, 4),
-                "shape": episode.working_df.shape,
+                "reward": round(float(step_reward), 4),
+                "quality_score": round(float(after_score), 4),
+                "shape": [int(x) for x in episode.working_df.shape],
             }
 
         @mcp.tool
@@ -358,6 +358,7 @@ class FSDSCleaningEnvironment(MCPEnvironment):
 
         @mcp.tool
         def submit_solution() -> dict[str, Any]:
+            import json as _json
             episode = self._require_episode()
             report = self._evaluate_quality_gates(episode)
             match_score = self._required_operations_score(episode)
@@ -373,14 +374,18 @@ class FSDSCleaningEnvironment(MCPEnvironment):
             )
             episode.total_reward += final_reward
             episode.submitted = True
+            # Use pandas to_json to safely serialize numpy/pandas types (Timestamps, int64, float64).
+            cleaned_preview = _json.loads(
+                episode.working_df.head(5).to_json(orient="records", date_format="iso")
+            )
             return {
                 "done": True,
-                "passed": report["passed"],
+                "passed": bool(report["passed"]),
                 "final_reward": final_reward,
-                "cumulative_reward": round(episode.total_reward, 4),
+                "cumulative_reward": round(float(episode.total_reward), 4),
                 "quality_report": report,
-                "required_operation_coverage": round(match_score, 4),
-                "cleaned_preview": …,
+                "required_operation_coverage": round(float(match_score), 4),
+                "cleaned_preview": cleaned_preview,
             }
 
         super().__init__(mcp)
@@ -566,10 +571,10 @@ class FSDSCleaningEnvironment(MCPEnvironment):
             }
             passed = all(test["passed"] for test in tests.values())
             return {
-                "passed": passed,
+                "passed": bool(passed),
                 "tests": tests,
-                "shape": df.shape,
-                "retention_ratio": round(retention_ratio, 4),
+                "shape": [int(x) for x in df.shape],
+                "retention_ratio": round(float(retention_ratio), 4),
             }
 
         def _quality_score(self, episode: EpisodeData) -> float:

[NOTE: diff reconstructed from a garbled UI capture. The pre-change values of the two truncated deletion lines were inferred where possible (`"shape"` old values follow from the commit message's reference to numpy.int64 from `df.shape`); the old `"cleaned_preview"` value was cut off in the capture and is marked with `…` — confirm against repository history.]