example_name
stringlengths 10
28
| python_file
stringlengths 9
32
| python_code
stringlengths 490
18.2k
| rust_code
stringlengths 0
434
| has_rust
bool 2
classes | category
stringlengths 2
20
| python_lines
int32 13
586
| rust_lines
int32 0
6
| blocking_features
listlengths 0
7
| suspiciousness
float32 0
0.95
| error
stringlengths 33
500
⌀ |
|---|---|---|---|---|---|---|---|---|---|---|
example_pytorch_mseloss
|
mseloss_flat.py
|
#!/usr/bin/env python3
"""MSE Loss CLI - flat structure for depyler compatibility.
Mean Squared Error: L = (1/n) * sum((y_pred - y_true)^2)
"""
import argparse
def main():
    """CLI entry point: print the MSE loss or its gradient for 4 fixed slots.

    Reads four predictions and four targets as individual flags (flat
    structure kept for transpiler compatibility) and prints either
    ``mse=<value>`` or ``g0=.. g1=.. g2=.. g3=..``.
    """
    parser = argparse.ArgumentParser(description="MSE Loss CLI")
    parser.add_argument("--mode", type=str, required=True, choices=["loss", "grad"], help="Mode")
    # Predictions (4 values)
    parser.add_argument("--p0", type=float, default=0.0, help="Prediction 0")
    parser.add_argument("--p1", type=float, default=0.0, help="Prediction 1")
    parser.add_argument("--p2", type=float, default=0.0, help="Prediction 2")
    parser.add_argument("--p3", type=float, default=0.0, help="Prediction 3")
    # Targets (4 values)
    parser.add_argument("--t0", type=float, default=1.0, help="Target 0")
    parser.add_argument("--t1", type=float, default=1.0, help="Target 1")
    parser.add_argument("--t2", type=float, default=1.0, help="Target 2")
    parser.add_argument("--t3", type=float, default=1.0, help="Target 3")
    args = parser.parse_args()
    # Residuals r_i = p_i - t_i feed both the loss and the gradient.
    r0 = args.p0 - args.t0
    r1 = args.p1 - args.t1
    r2 = args.p2 - args.t2
    r3 = args.p3 - args.t3
    if args.mode == "loss":
        # MSE = (1/n) * sum(r_i^2) with n = 4
        mse = (r0 * r0 + r1 * r1 + r2 * r2 + r3 * r3) / 4.0
        print(f"mse={mse}")
    elif args.mode == "grad":
        # d(MSE)/d(p_i) = 2 * r_i / n = r_i / 2 for n = 4
        g0 = r0 / 2.0
        g1 = r1 / 2.0
        g2 = r2 / 2.0
        g3 = r3 / 2.0
        print(f"g0={g0} g1={g1} g2={g2} g3={g3}")


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/mseloss_flat.py (1724 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/mseloss_flat.rs (2737 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/Cargo.toml (1 dependencies)
⏱️ Parse time: 52ms
📊 Throughput: 32.3 KB/s
⏱️ Total time: 52ms
| true
|
pytorch_mseloss
| 56
| 6
|
[] | 0
| null |
example_pytorch_mseloss
|
mseloss_tool.py
|
#!/usr/bin/env python3
"""Loss functions CLI tool.
A CLI for PyTorch-style loss functions.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Paszke et al. (2019) PyTorch [2]
Usage:
echo '{"pred": [1, 2], "target": [2, 3]}' | python mseloss_tool.py forward
"""
import argparse
import json
import math
import sys
def mse_loss(pred: list[float], target: list[float], reduction: str = "mean") -> float:
    """Mean squared error loss over paired predictions and targets."""
    if len(pred) != len(target):
        raise ValueError("Shape mismatch")
    squared = [(p - t) ** 2 for p, t in zip(pred, target)]
    if reduction == "mean":
        return sum(squared) / len(squared)
    if reduction == "sum":
        return sum(squared)
    raise ValueError(f"Unknown reduction: {reduction}")


def mse_backward(pred: list[float], target: list[float]) -> list[float]:
    """Gradient of MSE loss with respect to pred: 2*(pred - target)/n."""
    n = len(pred)
    grad = []
    for i in range(n):
        grad.append(2 * (pred[i] - target[i]) / n)
    return grad


def l1_loss(pred: list[float], target: list[float], reduction: str = "mean") -> float:
    """L1 loss (mean absolute error)."""
    if len(pred) != len(target):
        raise ValueError("Shape mismatch")
    absolute = [abs(p - t) for p, t in zip(pred, target)]
    if reduction == "mean":
        return sum(absolute) / len(absolute)
    if reduction == "sum":
        return sum(absolute)
    raise ValueError(f"Unknown reduction: {reduction}")


def cross_entropy_loss(logits: list[float], target: int) -> float:
    """Cross-entropy loss of a softmax over logits against a class index."""
    # Subtract the max logit before exponentiating for numerical stability.
    peak = max(logits)
    exps = [math.exp(v - peak) for v in logits]
    total = sum(exps)
    # Negative log likelihood; epsilon guards against log(0).
    return -math.log(exps[target] / total + 1e-10)
def cmd_forward(args: argparse.Namespace) -> None:
    """Handle forward subcommand: read JSON from stdin, print MSE loss."""
    # Malformed input exits with code 1 so shell pipelines can detect failure.
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "pred" not in data or "target" not in data:
        print("Error: Missing 'pred' or 'target'", file=sys.stderr)
        sys.exit(1)
    # Optional "reduction" key defaults to PyTorch's default, "mean".
    reduction = data.get("reduction", "mean")
    try:
        loss = mse_loss(data["pred"], data["target"], reduction)
        print(json.dumps({"loss": loss}))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def cmd_backward(args: argparse.Namespace) -> None:
    """Handle backward subcommand: print gradient of MSE w.r.t. pred."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "pred" not in data or "target" not in data:
        print("Error: Missing 'pred' or 'target'", file=sys.stderr)
        sys.exit(1)
    # NOTE(review): mse_backward performs no shape check, so a shorter
    # 'target' surfaces as an uncaught IndexError traceback rather than the
    # clean "Error: ..." message used by the other handlers — confirm intended.
    grad = mse_backward(data["pred"], data["target"])
    print(json.dumps({"grad": grad}))


def cmd_l1_forward(args: argparse.Namespace) -> None:
    """Handle l1-forward subcommand: read JSON from stdin, print L1 loss."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "pred" not in data or "target" not in data:
        print("Error: Missing 'pred' or 'target'", file=sys.stderr)
        sys.exit(1)
    reduction = data.get("reduction", "mean")
    try:
        loss = l1_loss(data["pred"], data["target"], reduction)
        print(json.dumps({"loss": loss}))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def cmd_cross_entropy(args: argparse.Namespace) -> None:
    """Handle cross-entropy subcommand: logits + class index -> loss."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "logits" not in data or "target" not in data:
        print("Error: Missing 'logits' or 'target'", file=sys.stderr)
        sys.exit(1)
    loss = cross_entropy_loss(data["logits"], data["target"])
    print(json.dumps({"loss": loss}))


def main() -> None:
    """Main entry point: dispatch subcommands via set_defaults(func=...)."""
    parser = argparse.ArgumentParser(
        description="Loss CLI - MSE, L1, CrossEntropy (PyTorch-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    subparsers.add_parser("forward", help="MSE loss forward").set_defaults(func=cmd_forward)
    subparsers.add_parser("backward", help="MSE loss backward").set_defaults(func=cmd_backward)
    subparsers.add_parser("l1-forward", help="L1 loss forward").set_defaults(func=cmd_l1_forward)
    subparsers.add_parser("cross-entropy", help="Cross-entropy loss").set_defaults(
        func=cmd_cross_entropy
    )
    args = parser.parse_args()
    # No subcommand: show help and exit successfully (not an error).
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/mseloss_tool.py (5058 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/mseloss_tool.rs (11405 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/Cargo.toml (3 dependencies)
⏱️ Parse time: 51ms
📊 Throughput: 96.7 KB/s
⏱️ Total time: 51ms
| true
|
pytorch_mseloss
| 165
| 6
|
[
"context_manager",
"exception_handling",
"stdin_usage"
] | 0.652
| null |
example_pytorch_mseloss
|
test_mseloss_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for PyTorch MSELoss CLI.
Academic Reference: Paszke et al. (2019) PyTorch [2]
Tests mean squared error loss function.
"""
import json
import subprocess
from pathlib import Path
# Resolve the tool under test relative to this file so tests run from any CWD.
SCRIPT = Path(__file__).parent / "mseloss_tool.py"


def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    result = subprocess.run(
        ["python3", str(SCRIPT)] + args,
        capture_output=True,
        text=True,
        input=input_data,
    )
    return result.stdout, result.stderr, result.returncode


class TestMSEForward:
    """Test MSE loss forward pass."""

    def test_mse_zero_loss(self):
        """Test MSE = 0 when pred == target."""
        data = json.dumps({
            "pred": [1.0, 2.0, 3.0],
            "target": [1.0, 2.0, 3.0]
        })
        stdout, stderr, code = run(["forward"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["loss"] == 0.0

    def test_mse_nonzero_loss(self):
        """Test MSE calculation."""
        # MSE = ((1-2)^2 + (2-3)^2) / 2 = 2/2 = 1
        data = json.dumps({
            "pred": [1.0, 2.0],
            "target": [2.0, 3.0]
        })
        stdout, stderr, code = run(["forward"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["loss"] - 1.0) < 0.01

    def test_mse_sum_reduction(self):
        """Test MSE with sum reduction."""
        # Sum = (1-2)^2 + (2-3)^2 = 2
        data = json.dumps({
            "pred": [1.0, 2.0],
            "target": [2.0, 3.0],
            "reduction": "sum"
        })
        stdout, stderr, code = run(["forward"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["loss"] - 2.0) < 0.01


class TestMSEBackward:
    """Test MSE loss backward pass."""

    def test_mse_gradient(self):
        """Test MSE gradient computation."""
        # d/dpred MSE = 2(pred - target) / n
        data = json.dumps({
            "pred": [2.0, 4.0],
            "target": [1.0, 2.0]
        })
        stdout, stderr, code = run(["backward"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "grad" in result
        # grad = 2 * (pred - target) / n = [2*(2-1)/2, 2*(4-2)/2] = [1, 2]
        assert abs(result["grad"][0] - 1.0) < 0.01
        assert abs(result["grad"][1] - 2.0) < 0.01


class TestL1Loss:
    """Test L1 loss (MAE)."""

    def test_l1_forward(self):
        """Test L1 loss forward."""
        # MAE = (|1-2| + |2-4|) / 2 = 3/2 = 1.5
        data = json.dumps({
            "pred": [1.0, 2.0],
            "target": [2.0, 4.0]
        })
        stdout, stderr, code = run(["l1-forward"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["loss"] - 1.5) < 0.01


class TestCrossEntropy:
    """Test cross-entropy loss."""

    def test_cross_entropy(self):
        """Test cross-entropy loss."""
        data = json.dumps({
            "logits": [2.0, 1.0, 0.1],  # Raw scores
            "target": 0  # Class index
        })
        stdout, stderr, code = run(["cross-entropy"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "loss" in result
        # Loss is strictly positive because softmax probabilities are < 1.
        assert result["loss"] > 0


class TestHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "MSE" in stdout or "loss" in stdout.lower()


class TestEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        stdout, stderr, code = run(["forward"], "not json")
        assert code == 1

    def test_shape_mismatch_fails(self):
        """Test shape mismatch fails."""
        data = json.dumps({
            "pred": [1.0, 2.0],
            "target": [1.0]
        })
        stdout, stderr, code = run(["forward"], data)
        assert code == 1
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/test_mseloss_tool.py (4021 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/test_mseloss_tool.rs (6210 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_mseloss/Cargo.toml (2 dependencies)
⏱️ Parse time: 52ms
📊 Throughput: 74.6 KB/s
⏱️ Total time: 52ms
| true
|
pytorch_mseloss
| 142
| 6
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_pytorch_relu
|
relu_flat.py
|
#!/usr/bin/env python3
"""Activation functions CLI - flat structure for depyler compatibility.
ReLU, Sigmoid, Tanh implementations.
"""
import argparse
def main():
    """CLI entry point: evaluate ReLU, Sigmoid, or Tanh at a single point.

    Flat structure (no helpers, argparse only) is kept deliberately for
    depyler transpilation compatibility.

    Fix: the previous revision approximated e^(-x) with a fixed 10-term
    alternating Taylor series, which diverges badly from the true value
    once |x| > ~2 (e.g. sigmoid(5) evaluated to ~0.54 instead of 0.9933).
    We now sum the all-positive-term series for e^|x| to float64
    convergence (no cancellation), and use the reciprocal identity to
    handle negative exponents.
    """
    parser = argparse.ArgumentParser(description="Activation functions CLI")
    parser.add_argument(
        "--mode", type=str, required=True, choices=["relu", "sigmoid", "tanh"], help="Mode"
    )
    parser.add_argument("--x", type=float, default=0.0, help="Input value")
    args = parser.parse_args()
    x = args.x
    if args.mode == "relu":
        # ReLU: max(0, x)
        if x > 0.0:
            y = x
        else:
            y = 0.0
        print(f"y={y}")
    elif args.mode == "sigmoid":
        # Sigmoid: 1 / (1 + e^(-x)); saturates outside [-20, 20]
        neg_bound = 0.0 - 20.0
        if x > 20.0:
            y = 1.0
        elif x < neg_bound:
            y = 0.0
        else:
            # e^|x| by Taylor series: all terms positive, so no cancellation.
            ax = x
            if ax < 0.0:
                ax = 0.0 - ax
            exp_ax = 1.0
            term = 1.0
            i = 1.0
            while i <= 200.0:
                term = term * ax / i
                exp_ax = exp_ax + term
                if term < 1e-17 * exp_ax:
                    break
                i = i + 1.0
            # sigmoid(x) = 1/(1 + e^-x) = 1/(1 + 1/e^x) for x >= 0,
            # and 1/(1 + e^|x|) for x < 0 (e^-x = e^|x| there).
            if x >= 0.0:
                y = 1.0 / (1.0 + 1.0 / exp_ax)
            else:
                y = 1.0 / (1.0 + exp_ax)
        print(f"y={y}")
    elif args.mode == "tanh":
        # Tanh: (e^x - e^-x) / (e^x + e^-x); saturates outside [-10, 10]
        neg_bound = 0.0 - 10.0
        if x > 10.0:
            y = 1.0
        elif x < neg_bound:
            y = 0.0 - 1.0
        else:
            # Same stable e^|x| expansion as the sigmoid branch.
            ax = x
            if ax < 0.0:
                ax = 0.0 - ax
            exp_ax = 1.0
            term = 1.0
            i = 1.0
            while i <= 200.0:
                term = term * ax / i
                exp_ax = exp_ax + term
                if term < 1e-17 * exp_ax:
                    break
                i = i + 1.0
            # tanh(|x|) = (e^(2|x|) - 1) / (e^(2|x|) + 1); tanh is odd,
            # so restore the sign afterwards. Denominator is always >= 2.
            e2 = exp_ax * exp_ax
            t = (e2 - 1.0) / (e2 + 1.0)
            if x >= 0.0:
                y = t
            else:
                y = 0.0 - t
        print(f"y={y}")


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/relu_flat.py (2080 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/relu_flat.rs (3687 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/Cargo.toml (1 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 42.4 KB/s
⏱️ Total time: 48ms
| true
|
pytorch_relu
| 78
| 6
|
[] | 0
| null |
example_pytorch_relu
|
relu_tool.py
|
#!/usr/bin/env python3
"""Activation functions CLI tool.
A CLI for PyTorch-style activation functions.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: He et al. (2015) Deep Rectifiers [5]
Usage:
echo '{"x": [-1, 0, 1]}' | python relu_tool.py relu
"""
import argparse
import json
import math
import sys
def relu(x: list[float]) -> list[float]:
    """ReLU activation: max(0, x), element-wise."""
    return [max(0.0, value) for value in x]


def sigmoid(x: list[float]) -> list[float]:
    """Sigmoid activation: 1 / (1 + exp(-x)), element-wise.

    Uses the numerically stable form on each sign branch so exp() never
    overflows for large-magnitude inputs.
    """
    out: list[float] = []
    for value in x:
        if value < 0:
            e = math.exp(value)
            out.append(e / (1.0 + e))
        else:
            out.append(1.0 / (1.0 + math.exp(-value)))
    return out


def tanh(x: list[float]) -> list[float]:
    """Tanh activation, element-wise."""
    return [math.tanh(value) for value in x]


def softmax(x: list[float]) -> list[float]:
    """Softmax activation over the whole vector."""
    # Shift by the max entry for numerical stability before exponentiating.
    peak = max(x)
    exps = [math.exp(value - peak) for value in x]
    total = sum(exps)
    return [e / total for e in exps]
def cmd_relu(args: argparse.Namespace) -> None:
    """Handle relu subcommand: JSON {"x": [...]} on stdin -> {"output": [...]}."""
    # Malformed input exits with code 1 so shell pipelines can detect failure.
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "x" not in data:
        print("Error: Missing 'x'", file=sys.stderr)
        sys.exit(1)
    output = relu(data["x"])
    print(json.dumps({"output": output}))


def cmd_sigmoid(args: argparse.Namespace) -> None:
    """Handle sigmoid subcommand."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "x" not in data:
        print("Error: Missing 'x'", file=sys.stderr)
        sys.exit(1)
    output = sigmoid(data["x"])
    print(json.dumps({"output": output}))


def cmd_tanh(args: argparse.Namespace) -> None:
    """Handle tanh subcommand."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "x" not in data:
        print("Error: Missing 'x'", file=sys.stderr)
        sys.exit(1)
    output = tanh(data["x"])
    print(json.dumps({"output": output}))


def cmd_softmax(args: argparse.Namespace) -> None:
    """Handle softmax subcommand."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "x" not in data:
        print("Error: Missing 'x'", file=sys.stderr)
        sys.exit(1)
    output = softmax(data["x"])
    print(json.dumps({"output": output}))


def main() -> None:
    """Main entry point: dispatch subcommands via set_defaults(func=...)."""
    parser = argparse.ArgumentParser(
        description="Activation CLI - ReLU, Sigmoid, Tanh, Softmax (PyTorch-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    subparsers.add_parser("relu", help="ReLU activation").set_defaults(func=cmd_relu)
    subparsers.add_parser("sigmoid", help="Sigmoid activation").set_defaults(func=cmd_sigmoid)
    subparsers.add_parser("tanh", help="Tanh activation").set_defaults(func=cmd_tanh)
    subparsers.add_parser("softmax", help="Softmax activation").set_defaults(func=cmd_softmax)
    args = parser.parse_args()
    # No subcommand: show help and exit successfully (not an error).
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/relu_tool.py (3628 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/relu_tool.rs (7019 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/Cargo.toml (3 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 71.8 KB/s
⏱️ Total time: 49ms
| true
|
pytorch_relu
| 134
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_pytorch_relu
|
test_relu_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for PyTorch activation functions CLI.
Academic Reference: He et al. (2015) Deep Rectifiers [5]
Tests ReLU, Sigmoid, Tanh activation functions.
"""
import json
import subprocess
from pathlib import Path
# Resolve the tool under test relative to this file so tests run from any CWD.
SCRIPT = Path(__file__).parent / "relu_tool.py"


def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    result = subprocess.run(
        ["python3", str(SCRIPT)] + args,
        capture_output=True,
        text=True,
        input=input_data,
    )
    return result.stdout, result.stderr, result.returncode


class TestRelu:
    """Test ReLU activation."""

    def test_relu_positive(self):
        """Test ReLU on positive values."""
        data = json.dumps({"x": [1.0, 2.0, 3.0]})
        stdout, stderr, code = run(["relu"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["output"] == [1.0, 2.0, 3.0]

    def test_relu_negative(self):
        """Test ReLU on negative values."""
        data = json.dumps({"x": [-1.0, -2.0, -3.0]})
        stdout, stderr, code = run(["relu"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["output"] == [0.0, 0.0, 0.0]

    def test_relu_mixed(self):
        """Test ReLU on mixed values."""
        data = json.dumps({"x": [-1.0, 0.0, 1.0]})
        stdout, stderr, code = run(["relu"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["output"] == [0.0, 0.0, 1.0]


class TestSigmoid:
    """Test Sigmoid activation."""

    def test_sigmoid_zero(self):
        """Test sigmoid(0) = 0.5."""
        data = json.dumps({"x": [0.0]})
        stdout, stderr, code = run(["sigmoid"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["output"][0] - 0.5) < 0.01

    def test_sigmoid_bounds(self):
        """Test sigmoid output is in (0, 1)."""
        data = json.dumps({"x": [-10.0, 0.0, 10.0]})
        stdout, stderr, code = run(["sigmoid"], data)
        assert code == 0
        result = json.loads(stdout)
        for val in result["output"]:
            assert 0 < val < 1


class TestTanh:
    """Test Tanh activation."""

    def test_tanh_zero(self):
        """Test tanh(0) = 0."""
        data = json.dumps({"x": [0.0]})
        stdout, stderr, code = run(["tanh"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["output"][0]) < 0.01

    def test_tanh_bounds(self):
        """Test tanh output is in (-1, 1)."""
        data = json.dumps({"x": [-10.0, 0.0, 10.0]})
        stdout, stderr, code = run(["tanh"], data)
        assert code == 0
        result = json.loads(stdout)
        for val in result["output"]:
            assert -1 < val < 1


class TestSoftmax:
    """Test Softmax activation."""

    def test_softmax_sums_to_one(self):
        """Test softmax sums to 1."""
        data = json.dumps({"x": [1.0, 2.0, 3.0]})
        stdout, stderr, code = run(["softmax"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(sum(result["output"]) - 1.0) < 0.01

    def test_softmax_all_positive(self):
        """Test softmax outputs are all positive."""
        data = json.dumps({"x": [-1.0, 0.0, 1.0]})
        stdout, stderr, code = run(["softmax"], data)
        assert code == 0
        result = json.loads(stdout)
        for val in result["output"]:
            assert val > 0


class TestHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "relu" in stdout.lower()


class TestEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        stdout, stderr, code = run(["relu"], "not json")
        assert code == 1
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/test_relu_tool.py (3917 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/test_relu_tool.rs (6641 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_relu/Cargo.toml (2 dependencies)
⏱️ Parse time: 52ms
📊 Throughput: 72.4 KB/s
⏱️ Total time: 53ms
| true
|
pytorch_relu
| 133
| 6
|
[
"class_definition"
] | 0.612
| null |
example_pytorch_sequential
|
sequential_tool.py
|
#!/usr/bin/env python3
"""Sequential model CLI tool.
A CLI for PyTorch-style nn.Sequential layer chaining.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Paszke et al. (2019) PyTorch [2]
Usage:
echo '{"x": [1, -1], "layers": [{"type": "linear", "weight": [[1,0],[0,1]], "bias": [0,0]}, {"type": "relu"}]}' | python sequential_tool.py forward
"""
import argparse
import json
import math
import random
import sys
from typing import Any
def linear_forward(x: list[float], weight: list[list[float]], bias: list[float]) -> list[float]:
"""Linear layer forward."""
out = []
for i in range(len(weight)):
val = bias[i]
for j in range(len(x)):
val += weight[i][j] * x[j]
out.append(val)
return out
def relu_forward(x: list[float]) -> list[float]:
"""ReLU activation."""
return [max(0.0, xi) for xi in x]
def sigmoid_forward(x: list[float]) -> list[float]:
"""Sigmoid activation."""
return [
1.0 / (1.0 + math.exp(-xi)) if xi >= 0 else math.exp(xi) / (1.0 + math.exp(xi)) for xi in x
]
def tanh_forward(x: list[float]) -> list[float]:
"""Tanh activation."""
return [math.tanh(xi) for xi in x]
def forward(x: list[float], layers: list[dict[str, Any]]) -> list[float]:
"""Forward pass through sequential layers."""
output = x
for layer in layers:
layer_type = layer["type"]
if layer_type == "linear":
output = linear_forward(output, layer["weight"], layer["bias"])
elif layer_type == "relu":
output = relu_forward(output)
elif layer_type == "sigmoid":
output = sigmoid_forward(output)
elif layer_type == "tanh":
output = tanh_forward(output)
else:
raise ValueError(f"Unknown layer type: {layer_type}")
return output
def init_linear(in_features: int, out_features: int) -> dict[str, Any]:
"""Initialize linear layer."""
std = math.sqrt(2.0 / in_features)
bound = math.sqrt(3.0) * std
weight = [
[random.uniform(-bound, bound) for _ in range(in_features)] for _ in range(out_features)
]
bias_bound = 1.0 / math.sqrt(in_features)
bias = [random.uniform(-bias_bound, bias_bound) for _ in range(out_features)]
return {"type": "linear", "weight": weight, "bias": bias}
def build(architecture: list[dict[str, Any]], random_state: int = None) -> list[dict[str, Any]]:
"""Build sequential model from architecture spec."""
if random_state is not None:
random.seed(random_state)
layers = []
for spec in architecture:
if spec["type"] == "linear":
layers.append(init_linear(spec["in"], spec["out"]))
else:
layers.append({"type": spec["type"]})
return layers
def cmd_forward(args: argparse.Namespace) -> None:
    """Handle forward subcommand: JSON {"x", "layers"} on stdin -> output."""
    # Malformed input exits with code 1 so shell pipelines can detect failure.
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "x" not in data or "layers" not in data:
        print("Error: Missing 'x' or 'layers'", file=sys.stderr)
        sys.exit(1)
    # KeyError is caught too: a linear layer spec may lack "weight"/"bias".
    try:
        output = forward(data["x"], data["layers"])
        print(json.dumps({"output": output}))
    except (ValueError, KeyError) as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def cmd_build(args: argparse.Namespace) -> None:
    """Handle build subcommand: architecture spec -> initialized layers."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "architecture" not in data:
        print("Error: Missing 'architecture'", file=sys.stderr)
        sys.exit(1)
    # Optional seed makes weight initialization reproducible.
    random_state = data.get("random_state", None)
    layers = build(data["architecture"], random_state)
    print(json.dumps({"layers": layers}))


def main() -> None:
    """Main entry point: dispatch subcommands via set_defaults(func=...)."""
    parser = argparse.ArgumentParser(
        description="Sequential CLI - nn.Sequential layer chaining (PyTorch-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    subparsers.add_parser("forward", help="Forward pass").set_defaults(func=cmd_forward)
    subparsers.add_parser("build", help="Build model from architecture").set_defaults(
        func=cmd_build
    )
    args = parser.parse_args()
    # No subcommand: show help and exit successfully (not an error).
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_sequential/sequential_tool.py (4575 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_sequential/sequential_tool.rs (10405 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_sequential/Cargo.toml (4 dependencies)
⏱️ Parse time: 52ms
📊 Throughput: 84.7 KB/s
⏱️ Total time: 53ms
| true
|
pytorch_sequential
| 150
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_pytorch_sequential
|
test_sequential_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for PyTorch nn.Sequential CLI.
Academic Reference: Paszke et al. (2019) PyTorch [2]
Tests sequential layer chaining.
"""
import json
import subprocess
from pathlib import Path
# Resolve the tool under test relative to this file so tests run from any CWD.
SCRIPT = Path(__file__).parent / "sequential_tool.py"


def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    result = subprocess.run(
        ["python3", str(SCRIPT)] + args,
        capture_output=True,
        text=True,
        input=input_data,
    )
    return result.stdout, result.stderr, result.returncode


class TestSequentialForward:
    """Test sequential forward pass."""

    def test_forward_linear_relu(self):
        """Test Linear -> ReLU chain."""
        data = json.dumps({
            "x": [1.0, -1.0],
            "layers": [
                {"type": "linear", "weight": [[1.0, 0.0], [0.0, 1.0]], "bias": [0.0, 0.0]},
                {"type": "relu"}
            ]
        })
        stdout, stderr, code = run(["forward"], data)
        assert code == 0
        result = json.loads(stdout)
        # Linear: [1, -1], ReLU: [1, 0]
        assert result["output"] == [1.0, 0.0]

    def test_forward_mlp(self):
        """Test MLP: Linear -> ReLU -> Linear."""
        data = json.dumps({
            "x": [1.0, 1.0],
            "layers": [
                {"type": "linear", "weight": [[1.0, 1.0]], "bias": [0.0]},  # 2->1
                {"type": "relu"},
                {"type": "linear", "weight": [[2.0]], "bias": [1.0]}  # 1->1
            ]
        })
        stdout, stderr, code = run(["forward"], data)
        assert code == 0
        result = json.loads(stdout)
        # Linear: [2], ReLU: [2], Linear: [2*2+1] = [5]
        assert result["output"] == [5.0]


class TestSequentialBuild:
    """Test building sequential model."""

    def test_build_mlp(self):
        """Test building MLP architecture."""
        data = json.dumps({
            "architecture": [
                {"type": "linear", "in": 4, "out": 2},
                {"type": "relu"},
                {"type": "linear", "in": 2, "out": 1}
            ],
            "random_state": 42
        })
        stdout, stderr, code = run(["build"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "layers" in result
        assert len(result["layers"]) == 3


class TestHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "Sequential" in stdout or "sequential" in stdout.lower()


class TestEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        stdout, stderr, code = run(["forward"], "not json")
        assert code == 1

    def test_empty_layers(self):
        """Test empty layers list."""
        # An empty chain is the identity: input passes through unchanged.
        data = json.dumps({"x": [1.0], "layers": []})
        stdout, stderr, code = run(["forward"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["output"] == [1.0]
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_sequential/test_sequential_tool.py (3106 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_sequential/test_sequential_tool.rs (6907 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_sequential/Cargo.toml (2 dependencies)
⏱️ Parse time: 52ms
📊 Throughput: 57.7 KB/s
⏱️ Total time: 52ms
| true
|
pytorch_sequential
| 105
| 6
|
[
"class_definition"
] | 0.612
| null |
example_pytorch_tensor
|
tensor_flat.py
|
#!/usr/bin/env python3
"""Tensor ops CLI - flat structure for depyler compatibility.
Basic tensor operations (2x2 matrix).
"""
import argparse
def main():
    """CLI entry point: element-wise add/mul or matmul of two 2x2 matrices.

    The matrices are passed entry-by-entry as flags (flat structure kept
    for transpiler compatibility) and the result is printed as
    ``c00=.. c01=.. c10=.. c11=..``.
    """
    parser = argparse.ArgumentParser(description="Tensor CLI")
    parser.add_argument(
        "--mode", type=str, required=True, choices=["add", "mul", "matmul"], help="Mode"
    )
    # 2x2 matrix A (defaults form the identity matrix)
    parser.add_argument("--a00", type=float, default=1.0, help="A[0,0]")
    parser.add_argument("--a01", type=float, default=0.0, help="A[0,1]")
    parser.add_argument("--a10", type=float, default=0.0, help="A[1,0]")
    parser.add_argument("--a11", type=float, default=1.0, help="A[1,1]")
    # 2x2 matrix B (defaults form the identity matrix)
    parser.add_argument("--b00", type=float, default=1.0, help="B[0,0]")
    parser.add_argument("--b01", type=float, default=0.0, help="B[0,1]")
    parser.add_argument("--b10", type=float, default=0.0, help="B[1,0]")
    parser.add_argument("--b11", type=float, default=1.0, help="B[1,1]")
    args = parser.parse_args()
    if args.mode == "add":
        # Element-wise add
        r00 = args.a00 + args.b00
        r01 = args.a01 + args.b01
        r10 = args.a10 + args.b10
        r11 = args.a11 + args.b11
        print(f"c00={r00} c01={r01} c10={r10} c11={r11}")
    elif args.mode == "mul":
        # Element-wise (Hadamard) multiply
        r00 = args.a00 * args.b00
        r01 = args.a01 * args.b01
        r10 = args.a10 * args.b10
        r11 = args.a11 * args.b11
        print(f"c00={r00} c01={r01} c10={r10} c11={r11}")
    elif args.mode == "matmul":
        # Matrix multiplication: C[i][j] = sum_k A[i][k] * B[k][j]
        r00 = args.a00 * args.b00 + args.a01 * args.b10
        r01 = args.a00 * args.b01 + args.a01 * args.b11
        r10 = args.a10 * args.b00 + args.a11 * args.b10
        r11 = args.a10 * args.b01 + args.a11 * args.b11
        print(f"c00={r00} c01={r01} c10={r10} c11={r11}")


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/tensor_flat.py (1907 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/tensor_flat.rs (2953 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 38.5 KB/s
⏱️ Total time: 48ms
| true
|
pytorch_tensor
| 62
| 6
|
[] | 0
| null |
example_pytorch_tensor
|
tensor_tool.py
|
#!/usr/bin/env python3
"""Tensor operations CLI tool.
A CLI for PyTorch-style tensor operations.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Paszke et al. (2019) PyTorch [2]
Usage:
echo '{"data": [1, 2, 3]}' | python tensor_tool.py create
echo '{"a": [1, 2], "b": [3, 4]}' | python tensor_tool.py add
"""
import argparse
import json
import sys
Tensor = list[float] | list[list[float]]
def get_shape(tensor: Tensor) -> list[int]:
"""Get shape of tensor."""
if not tensor:
return [0]
if isinstance(tensor[0], list):
return [len(tensor), len(tensor[0])]
return [len(tensor)]
def flatten(tensor: Tensor) -> list[float]:
"""Flatten tensor to 1D."""
if not tensor:
return []
if isinstance(tensor[0], list):
return [x for row in tensor for x in row]
return tensor
def zeros(shape: list[int]) -> Tensor:
"""Create zeros tensor."""
if len(shape) == 1:
return [0.0] * shape[0]
return [[0.0] * shape[1] for _ in range(shape[0])]
def ones(shape: list[int]) -> Tensor:
"""Create ones tensor."""
if len(shape) == 1:
return [1.0] * shape[0]
return [[1.0] * shape[1] for _ in range(shape[0])]
def add(a: Tensor, b: Tensor) -> Tensor:
"""Element-wise addition."""
if isinstance(a[0], list):
if len(a) != len(b) or len(a[0]) != len(b[0]):
raise ValueError("Shape mismatch")
return [[a[i][j] + b[i][j] for j in range(len(a[0]))] for i in range(len(a))]
if len(a) != len(b):
raise ValueError("Shape mismatch")
return [a[i] + b[i] for i in range(len(a))]
def mul(a: Tensor, b: Tensor) -> Tensor:
"""Element-wise multiplication."""
if isinstance(a[0], list):
if len(a) != len(b) or len(a[0]) != len(b[0]):
raise ValueError("Shape mismatch")
return [[a[i][j] * b[i][j] for j in range(len(a[0]))] for i in range(len(a))]
if len(a) != len(b):
raise ValueError("Shape mismatch")
return [a[i] * b[i] for i in range(len(a))]
def matmul(a: list[list[float]], b: list[list[float]]) -> list[list[float]]:
    """Matrix multiplication: (m x k) @ (k x n) -> (m x n)."""
    if len(a[0]) != len(b):
        raise ValueError(f"Shape mismatch: {len(a[0])} != {len(b)}")
    n = len(b[0])
    # Dot products start from 0.0 so every result entry is a float,
    # matching the accumulator-based implementation.
    return [
        [sum((row[p] * b[p][j] for p in range(len(b))), 0.0) for j in range(n)]
        for row in a
    ]
def tensor_sum(tensor: Tensor) -> float:
"""Sum all elements."""
flat = flatten(tensor)
return sum(flat)
def tensor_mean(tensor: Tensor) -> float:
"""Mean of all elements."""
flat = flatten(tensor)
return sum(flat) / len(flat) if flat else 0.0
def cmd_create(args: argparse.Namespace) -> None:
    """Read {"data": ...} from stdin; echo the tensor with its shape."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "data" not in data:
        print("Error: Missing 'data'", file=sys.stderr)
        sys.exit(1)
    tensor = data["data"]
    shape = get_shape(tensor)
    print(json.dumps({"tensor": tensor, "shape": shape}))
def cmd_zeros(args: argparse.Namespace) -> None:
    """Read {"shape": [...]} from stdin; emit a zero-filled tensor."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    # Missing shape defaults to a single-element vector.
    shape = data.get("shape", [1])
    tensor = zeros(shape)
    print(json.dumps({"tensor": tensor, "shape": shape}))
def cmd_ones(args: argparse.Namespace) -> None:
    """Read {"shape": [...]} from stdin; emit a one-filled tensor."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    # Missing shape defaults to a single-element vector.
    shape = data.get("shape", [1])
    tensor = ones(shape)
    print(json.dumps({"tensor": tensor, "shape": shape}))
def cmd_add(args: argparse.Namespace) -> None:
    """Read {"a": ..., "b": ...} from stdin; print their element-wise sum."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    try:
        # KeyError covers missing "a"/"b"; ValueError covers shape mismatch.
        result = add(data["a"], data["b"])
        print(json.dumps({"tensor": result}))
    except (ValueError, KeyError) as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
def cmd_mul(args: argparse.Namespace) -> None:
    """Read {"a": ..., "b": ...} from stdin; print their element-wise product."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    try:
        # KeyError covers missing "a"/"b"; ValueError covers shape mismatch.
        result = mul(data["a"], data["b"])
        print(json.dumps({"tensor": result}))
    except (ValueError, KeyError) as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
def cmd_matmul(args: argparse.Namespace) -> None:
    """Read {"a": ..., "b": ...} matrices from stdin; print their product."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    try:
        # KeyError covers missing "a"/"b"; ValueError covers shape mismatch.
        result = matmul(data["a"], data["b"])
        print(json.dumps({"tensor": result}))
    except (ValueError, KeyError) as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
def cmd_sum(args: argparse.Namespace) -> None:
    """Read {"tensor": ...} from stdin; print the sum of its elements."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    # NOTE(review): a missing "tensor" key raises an unhandled KeyError here,
    # unlike the add/mul/matmul handlers — confirm whether that is intended.
    result = tensor_sum(data["tensor"])
    print(json.dumps({"value": result}))
def cmd_mean(args: argparse.Namespace) -> None:
    """Read {"tensor": ...} from stdin; print the mean of its elements."""
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    # NOTE(review): a missing "tensor" key raises an unhandled KeyError here,
    # unlike the add/mul/matmul handlers — confirm whether that is intended.
    result = tensor_mean(data["tensor"])
    print(json.dumps({"value": result}))
def main() -> None:
    """Build the argparse CLI and dispatch to the selected subcommand."""
    parser = argparse.ArgumentParser(
        description="Tensor CLI - PyTorch-style tensor operations (aprender-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    subparsers.add_parser("create", help="Create tensor from data").set_defaults(func=cmd_create)
    subparsers.add_parser("zeros", help="Create zeros tensor").set_defaults(func=cmd_zeros)
    subparsers.add_parser("ones", help="Create ones tensor").set_defaults(func=cmd_ones)
    subparsers.add_parser("add", help="Element-wise addition").set_defaults(func=cmd_add)
    subparsers.add_parser("mul", help="Element-wise multiplication").set_defaults(func=cmd_mul)
    subparsers.add_parser("matmul", help="Matrix multiplication").set_defaults(func=cmd_matmul)
    subparsers.add_parser("sum", help="Sum reduction").set_defaults(func=cmd_sum)
    subparsers.add_parser("mean", help="Mean reduction").set_defaults(func=cmd_mean)
    args = parser.parse_args()
    # No subcommand given: show help and exit cleanly.
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/tensor_tool.py (6876 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/tensor_tool.rs (18019 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/Cargo.toml (3 dependencies)
⏱️ Parse time: 56ms
📊 Throughput: 117.9 KB/s
⏱️ Total time: 57ms
| true
|
pytorch_tensor
| 231
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_pytorch_tensor
|
test_tensor_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for PyTorch tensor operations CLI.
Academic Reference: Paszke et al. (2019) PyTorch [2]
Tests tensor creation and basic operations.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "tensor_tool.py"
def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    proc = subprocess.run(
        ["python3", str(SCRIPT), *args],
        capture_output=True,
        text=True,
        input=input_data,
    )
    return proc.stdout, proc.stderr, proc.returncode
class TestTensorCreate:
    """Test tensor creation."""

    def test_create_from_list(self):
        """Test creating tensor from list."""
        data = json.dumps({"data": [1.0, 2.0, 3.0]})
        stdout, stderr, code = run(["create"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "tensor" in result
        assert result["tensor"] == [1.0, 2.0, 3.0]
        assert result["shape"] == [3]

    def test_create_2d(self):
        """Test creating 2D tensor."""
        # Integer payloads are echoed back unchanged; only shape is asserted.
        data = json.dumps({"data": [[1, 2], [3, 4]]})
        stdout, stderr, code = run(["create"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["shape"] == [2, 2]

    def test_zeros(self):
        """Test creating zeros tensor."""
        data = json.dumps({"shape": [2, 3]})
        stdout, stderr, code = run(["zeros"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["tensor"] == [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]

    def test_ones(self):
        """Test creating ones tensor."""
        data = json.dumps({"shape": [2, 2]})
        stdout, stderr, code = run(["ones"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["tensor"] == [[1.0, 1.0], [1.0, 1.0]]
class TestTensorOps:
    """Test tensor operations."""

    def test_add(self):
        """Test element-wise addition."""
        data = json.dumps({
            "a": [1.0, 2.0, 3.0],
            "b": [4.0, 5.0, 6.0]
        })
        stdout, stderr, code = run(["add"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["tensor"] == [5.0, 7.0, 9.0]

    def test_mul(self):
        """Test element-wise multiplication."""
        data = json.dumps({
            "a": [1.0, 2.0, 3.0],
            "b": [2.0, 3.0, 4.0]
        })
        stdout, stderr, code = run(["mul"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["tensor"] == [2.0, 6.0, 12.0]

    def test_matmul(self):
        """Test matrix multiplication."""
        data = json.dumps({
            "a": [[1, 2], [3, 4]],
            "b": [[5, 6], [7, 8]]
        })
        stdout, stderr, code = run(["matmul"], data)
        assert code == 0
        result = json.loads(stdout)
        # [[1*5+2*7, 1*6+2*8], [3*5+4*7, 3*6+4*8]]
        assert result["tensor"] == [[19, 22], [43, 50]]
class TestTensorReduce:
    """Test reduction operations."""

    def test_sum(self):
        """Test sum reduction."""
        data = json.dumps({"tensor": [1.0, 2.0, 3.0, 4.0]})
        stdout, stderr, code = run(["sum"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["value"] == 10.0

    def test_mean(self):
        """Test mean reduction."""
        data = json.dumps({"tensor": [1.0, 2.0, 3.0, 4.0]})
        stdout, stderr, code = run(["mean"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["value"] == 2.5
class TestTensorHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "tensor" in stdout.lower()

    def test_subcommand_help(self):
        """Test subcommand help."""
        # Only the exit code matters; argparse generates the text.
        stdout, stderr, code = run(["add", "--help"])
        assert code == 0
class TestTensorEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        stdout, stderr, code = run(["create"], "not json")
        assert code == 1

    def test_shape_mismatch_fails(self):
        """Test shape mismatch in operations fails."""
        data = json.dumps({
            "a": [1.0, 2.0, 3.0],
            "b": [1.0, 2.0]
        })
        stdout, stderr, code = run(["add"], data)
        assert code == 1
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/test_tensor_tool.py (4493 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/test_tensor_tool.rs (7280 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_pytorch_tensor/Cargo.toml (2 dependencies)
⏱️ Parse time: 50ms
📊 Throughput: 87.1 KB/s
⏱️ Total time: 50ms
| true
|
pytorch_tensor
| 152
| 6
|
[
"class_definition"
] | 0.612
| null |
example_queue
|
queue_tool.py
|
#!/usr/bin/env python3
"""Queue Example - Queue-like operations CLI (simplified)."""
import argparse
def main():
    """CLI exposing 'first' and 'last' item selection over positional args."""
    parser = argparse.ArgumentParser(description="Queue operations tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    for name in ("first", "last"):
        sub = subs.add_parser(name)
        sub.add_argument("items", nargs="+")
    args = parser.parse_args()
    if args.cmd == "first":
        print(args.items[0])
    elif args.cmd == "last":
        print(args.items[-1])
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_queue/queue_tool.py (602 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_queue/queue_tool.rs (1607 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_queue/Cargo.toml (1 dependencies)
⏱️ Parse time: 46ms
📊 Throughput: 12.7 KB/s
⏱️ Total time: 46ms
| true
|
queue
| 25
| 6
|
[] | 0
| null |
example_queue
|
test_queue_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for queue CLI."""
import subprocess
SCRIPT = "queue_tool.py"
def run(args):
    """Run queue_tool.py from its own directory and capture output."""
    script_dir = __file__.rsplit("/", 1)[0]
    return subprocess.run(
        ["python3", SCRIPT] + args,
        capture_output=True,
        text=True,
        cwd=script_dir,
    )
class TestFirst:
    """The 'first' subcommand prints the leading item."""

    def test_first(self):
        result = run(["first", "a", "b", "c"])
        assert result.returncode == 0
        assert "a" in result.stdout
class TestLast:
    """The 'last' subcommand prints the trailing item."""

    def test_last(self):
        result = run(["last", "a", "b", "c"])
        assert result.returncode == 0
        assert "c" in result.stdout
class TestHelp:
    """--help exits successfully."""

    def test_help(self):
        assert run(["--help"]).returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_queue/test_queue_tool.py (562 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_queue/test_queue_tool.rs (1999 bytes)
⏱️ Parse time: 47ms
📊 Throughput: 11.6 KB/s
⏱️ Total time: 47ms
| true
|
queue
| 15
| 5
|
[
"class_definition"
] | 0.612
| null |
example_random
|
random_tool.py
|
#!/usr/bin/env python3
"""Random Example - Random number operations CLI."""
import argparse
import random
def main():
    """CLI for random integer, random choice, and echo-count operations."""
    parser = argparse.ArgumentParser(description="Random operations tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    ri = subs.add_parser("randint")
    ri.add_argument("low", type=int)
    ri.add_argument("high", type=int)
    ch = subs.add_parser("choice")
    for name in ("a", "b", "c"):
        ch.add_argument(name)
    ct = subs.add_parser("count")
    ct.add_argument("n", type=int)
    args = parser.parse_args()
    if args.cmd == "randint":
        print(random.randint(args.low, args.high))
    elif args.cmd == "choice":
        print(random.choice([args.a, args.b, args.c]))
    elif args.cmd == "count":
        # "count" simply echoes its integer argument.
        print(args.n)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_random/random_tool.py (860 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_random/random_tool.rs (1194 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_random/Cargo.toml (2 dependencies)
⏱️ Parse time: 50ms
📊 Throughput: 16.6 KB/s
⏱️ Total time: 50ms
| true
|
random
| 33
| 6
|
[] | 0
| null |
example_random
|
test_random_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for random CLI."""
import subprocess
SCRIPT = "random_tool.py"
def run(args):
    """Run random_tool.py from its own directory and capture output."""
    script_dir = __file__.rsplit("/", 1)[0]
    return subprocess.run(
        ["python3", SCRIPT] + args,
        capture_output=True,
        text=True,
        cwd=script_dir,
    )
class TestRandom:
    """Randomness subcommands produce well-formed output."""

    def test_randint(self):
        result = run(["randint", "1", "10"])
        assert result.returncode == 0
        assert result.stdout.strip().isdigit()

    def test_choice(self):
        result = run(["choice", "a", "b", "c"])
        assert result.returncode == 0
        assert result.stdout.strip() in ["a", "b", "c"]

    def test_shuffle_count(self):
        result = run(["count", "5"])
        assert result.returncode == 0
class TestHelp:
    """--help exits successfully."""

    def test_help(self):
        assert run(["--help"]).returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_random/test_random_tool.py (667 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_random/test_random_tool.rs (2286 bytes)
⏱️ Parse time: 47ms
📊 Throughput: 13.8 KB/s
⏱️ Total time: 47ms
| true
|
random
| 14
| 5
|
[
"class_definition"
] | 0.612
| null |
example_range
|
range_tool.py
|
#!/usr/bin/env python3
"""Range Example - Range operations CLI."""
import argparse
def main():
    """CLI printing space-separated integer ranges.

    Subcommands:
      upto N        -> 0 .. N-1
      between A B   -> A .. B-1
      step A B S    -> A, A+S, ... while < B

    Fix: the original hand-rolled while loops hung forever when
    'step' was given a non-positive step value; range() now raises
    ValueError for a zero step and yields nothing for a negative one.
    Output for all previously-terminating inputs is unchanged.
    """
    parser = argparse.ArgumentParser(description="Range tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    u = subs.add_parser("upto")
    u.add_argument("n", type=int)
    b = subs.add_parser("between")
    b.add_argument("start", type=int)
    b.add_argument("end", type=int)
    st = subs.add_parser("step")
    st.add_argument("start", type=int)
    st.add_argument("end", type=int)
    st.add_argument("step", type=int)
    args = parser.parse_args()
    if args.cmd == "upto":
        values = range(args.n)
    elif args.cmd == "between":
        values = range(args.start, args.end)
    else:  # step (subparsers are required, so no other value is possible)
        values = range(args.start, args.end, args.step)
    print(" ".join(str(i) for i in values))
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_range/range_tool.py (1397 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_range/range_tool.rs (2114 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_range/Cargo.toml (1 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 28.8 KB/s
⏱️ Total time: 47ms
| true
|
range
| 54
| 6
|
[] | 0
| null |
example_range
|
test_range_tool.py
|
"""Tests for range_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "range_tool.py"
def run(cmd):
    """Invoke range_tool.py with a space-separated argument string."""
    argv = ["python3", str(SCRIPT)] + cmd.split()
    return subprocess.run(argv, capture_output=True, text=True)
def test_upto():
    # upto N prints 0..N-1 separated by single spaces.
    r = run("upto 5")
    assert r.returncode == 0
    assert r.stdout.strip() == "0 1 2 3 4"
def test_between():
    # between A B prints A..B-1 (end-exclusive).
    r = run("between 2 7")
    assert r.returncode == 0
    assert r.stdout.strip() == "2 3 4 5 6"
def test_step():
    # step A B S prints A, A+S, ... while strictly below B.
    r = run("step 0 10 2")
    assert r.returncode == 0
    assert r.stdout.strip() == "0 2 4 6 8"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_range/test_range_tool.py (633 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_range/test_range_tool.rs (1841 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_range/Cargo.toml (2 dependencies)
⏱️ Parse time: 46ms
📊 Throughput: 13.4 KB/s
⏱️ Total time: 46ms
| true
|
range
| 32
| 6
|
[] | 0
| null |
example_rate_limiter
|
ratelimit_cli.py
|
#!/usr/bin/env python3
"""Rate limiter CLI.
Token bucket and sliding window rate limiting.
"""
import argparse
import json
import sys
import time
from dataclasses import dataclass
@dataclass
class TokenBucket:
    """Token bucket rate limiter.

    NOTE(review): the hand-written __init__ below takes precedence over
    the @dataclass-generated one (dataclass does not overwrite methods
    already defined in the class body); the field annotations still feed
    the generated __repr__/__eq__.
    """

    capacity: int
    refill_rate: float  # tokens per second
    tokens: float
    last_refill: float

    def __init__(self, capacity: int, refill_rate: float):
        # The bucket starts full; tokens accrue lazily on each access.
        self.capacity = capacity
        self.refill_rate = refill_rate
        self.tokens = float(capacity)
        self.last_refill = time.time()

    def refill(self) -> None:
        """Refill tokens based on elapsed time."""
        now = time.time()
        elapsed = now - self.last_refill
        # Accrue elapsed * rate tokens, clamped at capacity.
        self.tokens = min(self.capacity, self.tokens + elapsed * self.refill_rate)
        self.last_refill = now

    def try_acquire(self, tokens: int = 1) -> bool:
        """Try to acquire tokens. Returns True if successful."""
        self.refill()
        if self.tokens >= tokens:
            self.tokens -= tokens
            return True
        return False

    def wait_time(self, tokens: int = 1) -> float:
        """Calculate wait time until tokens available."""
        self.refill()
        if self.tokens >= tokens:
            return 0.0
        needed = tokens - self.tokens
        return needed / self.refill_rate

    def to_dict(self) -> dict:
        """Serialize bucket state for persistence between CLI runs."""
        return {
            "capacity": self.capacity,
            "refill_rate": self.refill_rate,
            "tokens": self.tokens,
            "last_refill": self.last_refill,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "TokenBucket":
        """Restore a bucket from to_dict() output."""
        bucket = cls(data["capacity"], data["refill_rate"])
        bucket.tokens = data["tokens"]
        bucket.last_refill = data["last_refill"]
        return bucket
@dataclass
class SlidingWindow:
    """Sliding window rate limiter.

    NOTE(review): as with TokenBucket, the explicit __init__ below
    takes precedence over the @dataclass-generated one.
    """

    window_size: float  # seconds
    max_requests: int
    requests: list[float]  # timestamps

    def __init__(self, window_size: float, max_requests: int):
        self.window_size = window_size
        self.max_requests = max_requests
        self.requests = []

    def cleanup(self) -> None:
        """Remove expired timestamps."""
        cutoff = time.time() - self.window_size
        self.requests = [t for t in self.requests if t > cutoff]

    def try_acquire(self) -> bool:
        """Try to make a request. Returns True if allowed."""
        self.cleanup()
        if len(self.requests) < self.max_requests:
            self.requests.append(time.time())
            return True
        return False

    def remaining(self) -> int:
        """Get remaining requests in current window."""
        self.cleanup()
        return max(0, self.max_requests - len(self.requests))

    def reset_time(self) -> float:
        """Get time until next slot available."""
        self.cleanup()
        if len(self.requests) < self.max_requests:
            return 0.0
        # The oldest timestamp is the next to age out of the window.
        oldest = min(self.requests)
        return max(0.0, oldest + self.window_size - time.time())

    def to_dict(self) -> dict:
        """Serialize window state for persistence between CLI runs."""
        return {
            "window_size": self.window_size,
            "max_requests": self.max_requests,
            "requests": self.requests,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "SlidingWindow":
        """Restore a window from to_dict() output."""
        window = cls(data["window_size"], data["max_requests"])
        window.requests = data.get("requests", [])
        return window
def create_rate_limiter(
    limiter_type: str,
    capacity: int,
    rate: float,
) -> TokenBucket | SlidingWindow:
    """Build a rate limiter.

    For "token", rate is the refill rate in tokens/sec; for "window",
    rate is the window size in seconds and capacity the max requests.
    Raises ValueError for any other type string.
    """
    if limiter_type == "token":
        return TokenBucket(capacity, rate)
    if limiter_type == "window":
        return SlidingWindow(rate, capacity)
    raise ValueError(f"Unknown limiter type: {limiter_type}")
def check_rate_limit(limiter: TokenBucket | SlidingWindow) -> dict:
    """Check rate limit status without acquiring.

    Returns a status dict whose keys differ per limiter type:
    token buckets report tokens/capacity/wait_time, sliding windows
    report remaining/max_requests/reset_time.
    """
    if isinstance(limiter, TokenBucket):
        limiter.refill()
        return {
            "type": "token_bucket",
            "allowed": limiter.tokens >= 1,
            "tokens": limiter.tokens,
            "capacity": limiter.capacity,
            "wait_time": limiter.wait_time(),
        }
    else:
        # SlidingWindow: drop expired timestamps before reporting.
        limiter.cleanup()
        return {
            "type": "sliding_window",
            "allowed": limiter.remaining() > 0,
            "remaining": limiter.remaining(),
            "max_requests": limiter.max_requests,
            "reset_time": limiter.reset_time(),
        }
def acquire_rate_limit(limiter: TokenBucket | SlidingWindow) -> dict:
    """Attempt one acquisition and return the post-attempt status dict."""
    acquired = limiter.try_acquire()
    result = check_rate_limit(limiter)
    result["acquired"] = acquired
    return result
def main() -> int:
    """Entry point: load limiter state from a JSON file, run one command,
    persist the new state.

    Exit codes: 0 on success or successful acquire, 1 when rate limited.
    """
    parser = argparse.ArgumentParser(description="Rate limiting tool")
    parser.add_argument(
        "--type", choices=["token", "window"], default="token", help="Rate limiter type"
    )
    parser.add_argument("--capacity", type=int, default=10, help="Bucket capacity or max requests")
    parser.add_argument(
        "--rate", type=float, default=1.0, help="Refill rate (tokens/sec) or window size (seconds)"
    )
    parser.add_argument("-f", "--file", default="ratelimit.json", help="State file")
    parser.add_argument("--check", action="store_true", help="Check status without acquiring")
    parser.add_argument("--acquire", action="store_true", help="Try to acquire")
    parser.add_argument("--reset", action="store_true", help="Reset rate limiter")
    parser.add_argument("--wait", action="store_true", help="Wait until available (with --acquire)")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    args = parser.parse_args()
    # Load or create limiter
    try:
        with open(args.file) as f:
            data = json.load(f)
            if data.get("type") == "token_bucket":
                limiter = TokenBucket.from_dict(data)
            else:
                limiter = SlidingWindow.from_dict(data)
    except FileNotFoundError:
        # First run (or missing state file): build fresh from CLI args.
        limiter = create_rate_limiter(args.type, args.capacity, args.rate)
    def save():
        # Persist limiter state plus a type tag so the next run can rebuild it.
        with open(args.file, "w") as f:
            d = limiter.to_dict()
            d["type"] = "token_bucket" if isinstance(limiter, TokenBucket) else "sliding_window"
            json.dump(d, f, indent=2)
    # Commands
    if args.reset:
        limiter = create_rate_limiter(args.type, args.capacity, args.rate)
        save()
        print("Rate limiter reset")
        return 0
    if args.acquire:
        if args.wait:
            # Poll-and-sleep until a slot frees up; 0.1s floor avoids a busy spin.
            while not limiter.try_acquire():
                status = check_rate_limit(limiter)
                wait = status.get("wait_time") or status.get("reset_time") or 0.1
                time.sleep(wait)
            save()
            if args.json:
                print(json.dumps({"acquired": True}))
            else:
                print("Acquired")
            return 0
        else:
            result = acquire_rate_limit(limiter)
            save()
            if args.json:
                print(json.dumps(result, indent=2))
            else:
                if result["acquired"]:
                    print("Acquired")
                else:
                    wait = result.get("wait_time") or result.get("reset_time") or 0
                    print(f"Rate limited. Wait {wait:.2f}s")
                    return 1
            return 0 if result["acquired"] else 1
    # Default: check status
    status = check_rate_limit(limiter)
    if args.json:
        print(json.dumps(status, indent=2))
    else:
        print(f"Type: {status['type']}")
        print(f"Allowed: {status['allowed']}")
        if "tokens" in status:
            print(f"Tokens: {status['tokens']:.2f}/{status['capacity']}")
        else:
            print(f"Remaining: {status['remaining']}/{status['max_requests']}")
    return 0
if __name__ == "__main__":
sys.exit(main())
| false
|
rate_limiter
| 248
| 0
|
[
"context_manager",
"class_definition",
"exception_handling",
"decorator"
] | 0.652
|
Error: Unsupported type annotation: Constant(ExprConstant { range: 1627..1640, value: Str("TokenBucket"), kind: None })
|
|
example_rate_limiter
|
test_ratelimit_cli.py
|
"""Tests for ratelimit_cli.py"""
import time
import pytest
from ratelimit_cli import (
SlidingWindow,
TokenBucket,
acquire_rate_limit,
check_rate_limit,
create_rate_limiter,
)
class TestTokenBucket:
    """Unit tests for the TokenBucket limiter (time-sensitive, with tolerances)."""

    def test_init(self):
        bucket = TokenBucket(10, 1.0)
        assert bucket.capacity == 10
        assert bucket.tokens == 10.0
        assert bucket.refill_rate == 1.0

    def test_try_acquire(self):
        bucket = TokenBucket(10, 1.0)
        assert bucket.try_acquire() is True
        assert bucket.tokens == 9.0

    def test_try_acquire_multiple(self):
        bucket = TokenBucket(10, 1.0)
        assert bucket.try_acquire(5) is True
        assert bucket.tokens == 5.0

    def test_try_acquire_insufficient(self):
        bucket = TokenBucket(10, 1.0)
        bucket.tokens = 0.5
        bucket.last_refill = time.time()  # Reset refill time
        assert bucket.try_acquire() is False
        assert bucket.tokens < 1.0  # Not consumed, but may have tiny refill

    def test_refill(self):
        bucket = TokenBucket(10, 10.0)  # 10 tokens/sec
        bucket.tokens = 0
        bucket.last_refill = time.time() - 0.5  # 0.5 seconds ago
        bucket.refill()
        assert bucket.tokens >= 4.5  # At least 5 tokens added

    def test_refill_capped(self):
        bucket = TokenBucket(10, 100.0)  # Very fast refill
        bucket.tokens = 5
        bucket.last_refill = time.time() - 1  # 1 second ago
        bucket.refill()
        assert bucket.tokens == 10.0  # Capped at capacity

    def test_wait_time(self):
        bucket = TokenBucket(10, 2.0)  # 2 tokens/sec
        bucket.tokens = 0
        bucket.last_refill = time.time()
        wait = bucket.wait_time(1)
        assert 0.4 <= wait <= 0.6  # About 0.5 seconds

    def test_wait_time_zero(self):
        bucket = TokenBucket(10, 1.0)
        assert bucket.wait_time() == 0.0

    def test_to_from_dict(self):
        bucket = TokenBucket(10, 2.0)
        bucket.try_acquire(3)
        d = bucket.to_dict()
        restored = TokenBucket.from_dict(d)
        assert restored.capacity == 10
        assert restored.refill_rate == 2.0
        assert restored.tokens == bucket.tokens
class TestSlidingWindow:
    """Unit tests for the SlidingWindow limiter (time-sensitive, with tolerances)."""

    def test_init(self):
        window = SlidingWindow(60.0, 10)  # 10 requests per 60 seconds
        assert window.window_size == 60.0
        assert window.max_requests == 10
        assert len(window.requests) == 0

    def test_try_acquire(self):
        window = SlidingWindow(60.0, 10)
        assert window.try_acquire() is True
        assert len(window.requests) == 1

    def test_try_acquire_limit(self):
        window = SlidingWindow(60.0, 3)
        assert window.try_acquire() is True
        assert window.try_acquire() is True
        assert window.try_acquire() is True
        assert window.try_acquire() is False

    def test_remaining(self):
        window = SlidingWindow(60.0, 10)
        assert window.remaining() == 10
        window.try_acquire()
        window.try_acquire()
        assert window.remaining() == 8

    def test_cleanup(self):
        window = SlidingWindow(1.0, 10)  # 1 second window
        # Add old request
        window.requests.append(time.time() - 2)  # 2 seconds ago
        window.requests.append(time.time())  # Now
        window.cleanup()
        assert len(window.requests) == 1

    def test_reset_time(self):
        window = SlidingWindow(1.0, 1)  # 1 request per second
        window.try_acquire()
        reset = window.reset_time()
        assert 0.9 <= reset <= 1.1

    def test_reset_time_available(self):
        window = SlidingWindow(1.0, 10)
        assert window.reset_time() == 0.0

    def test_to_from_dict(self):
        window = SlidingWindow(60.0, 10)
        window.try_acquire()
        window.try_acquire()
        d = window.to_dict()
        restored = SlidingWindow.from_dict(d)
        assert restored.window_size == 60.0
        assert restored.max_requests == 10
        assert len(restored.requests) == 2
class TestCreateRateLimiter:
    """Factory function dispatch and error handling."""

    def test_create_token(self):
        limiter = create_rate_limiter("token", 10, 2.0)
        assert isinstance(limiter, TokenBucket)
        assert limiter.capacity == 10
        assert limiter.refill_rate == 2.0

    def test_create_window(self):
        # Note the argument swap: rate becomes the window size.
        limiter = create_rate_limiter("window", 10, 60.0)
        assert isinstance(limiter, SlidingWindow)
        assert limiter.max_requests == 10
        assert limiter.window_size == 60.0

    def test_create_invalid(self):
        with pytest.raises(ValueError):
            create_rate_limiter("invalid", 10, 1.0)
class TestCheckRateLimit:
    """Status dict shape for each limiter type."""

    def test_token_bucket(self):
        bucket = TokenBucket(10, 1.0)
        status = check_rate_limit(bucket)
        assert status["type"] == "token_bucket"
        assert status["allowed"] is True
        assert status["tokens"] == 10.0
        assert status["capacity"] == 10

    def test_sliding_window(self):
        window = SlidingWindow(60.0, 10)
        status = check_rate_limit(window)
        assert status["type"] == "sliding_window"
        assert status["allowed"] is True
        assert status["remaining"] == 10
class TestAcquireRateLimit:
    """Acquisition decrements the available budget and reports it."""

    def test_token_bucket_success(self):
        bucket = TokenBucket(10, 1.0)
        result = acquire_rate_limit(bucket)
        assert result["acquired"] is True
        assert 8.9 <= result["tokens"] <= 9.1  # Allow small variance

    def test_sliding_window_success(self):
        window = SlidingWindow(60.0, 10)
        result = acquire_rate_limit(window)
        assert result["acquired"] is True
        assert result["remaining"] == 9
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_rate_limiter/test_ratelimit_cli.py (5680 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_rate_limiter/test_ratelimit_cli.rs (8358 bytes)
⏱️ Parse time: 48ms
📊 Throughput: 114.3 KB/s
⏱️ Total time: 48ms
| true
|
rate_limiter
| 191
| 5
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_re
|
re_tool.py
|
#!/usr/bin/env python3
"""Regex Example - Simple pattern matching CLI."""
import argparse
def main():
    """Substring matching and character counting CLI."""
    parser = argparse.ArgumentParser(description="Pattern matching tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    m = subs.add_parser("match")
    m.add_argument("pattern")
    m.add_argument("text")
    c = subs.add_parser("count")
    c.add_argument("char")
    c.add_argument("text")
    args = parser.parse_args()
    if args.cmd == "match":
        # Plain substring containment, not a regex.
        print("yes" if args.pattern in args.text else "no")
    elif args.cmd == "count":
        print(args.text.count(args.char))
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_re/re_tool.py (690 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_re/re_tool.rs (1092 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_re/Cargo.toml (1 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 13.5 KB/s
⏱️ Total time: 50ms
| true
|
re
| 29
| 6
|
[] | 0
| null |
example_re
|
test_re_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for regex CLI."""
import subprocess
SCRIPT = "re_tool.py"
def run(args):
    """Run re_tool.py from its own directory and capture output."""
    script_dir = __file__.rsplit("/", 1)[0]
    return subprocess.run(
        ["python3", SCRIPT] + args,
        capture_output=True,
        text=True,
        cwd=script_dir,
    )
class TestRe:
    """Substring matching and counting behavior."""

    def test_match_yes(self):
        result = run(["match", "hello", "hello world"])
        assert result.returncode == 0
        assert "yes" in result.stdout.lower()

    def test_match_no(self):
        result = run(["match", "xyz", "hello world"])
        assert result.returncode == 0
        assert "no" in result.stdout.lower()

    def test_count(self):
        result = run(["count", "l", "hello"])
        assert result.returncode == 0
        assert "2" in result.stdout
class TestHelp:
    """--help exits successfully."""

    def test_help(self):
        assert run(["--help"]).returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_re/test_re_tool.py (688 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_re/test_re_tool.rs (2080 bytes)
⏱️ Parse time: 48ms
📊 Throughput: 13.8 KB/s
⏱️ Total time: 49ms
| true
|
re
| 14
| 5
|
[
"class_definition"
] | 0.612
| null |
example_redis_protocol
|
redis_cli.py
|
#!/usr/bin/env python3
"""Redis Protocol CLI.
Parse and encode RESP (Redis Serialization Protocol) messages.
"""
import argparse
import sys
from dataclasses import dataclass
from enum import Enum
class RESPType(Enum):
    """RESP data types.

    Each member's value is the single-byte type prefix used on the wire.
    """

    SIMPLE_STRING = "+"
    ERROR = "-"
    INTEGER = ":"
    BULK_STRING = "$"
    ARRAY = "*"
    NULL = "_"
    BOOLEAN = "#"
    DOUBLE = ","
    BIG_NUMBER = "("
    BULK_ERROR = "!"
    VERBATIM_STRING = "="
    MAP = "%"
    SET = "~"
    PUSH = ">"
@dataclass
class RESPValue:
    """RESP value container pairing a wire type tag with its payload."""

    # The RESP wire type of the wrapped value.
    type: RESPType
    # NOTE(review): `any` here is the builtin function used as an annotation
    # placeholder; `typing.Any` was presumably intended — confirm and fix
    # alongside an import change.
    value: any
def encode_simple_string(s: str) -> bytes:
    """Encode simple string (+OK\r\n)."""
    return b"+" + s.encode() + b"\r\n"
def encode_error(s: str) -> bytes:
    """Encode error message (-ERR message\r\n)."""
    return b"-" + s.encode() + b"\r\n"
def encode_integer(n: int) -> bytes:
    """Encode integer (:<number>\r\n)."""
    return b":" + str(n).encode() + b"\r\n"
def encode_bulk_string(s: str | None) -> bytes:
"""Encode bulk string ($<len>\r\n<data>\r\n or $-1\r\n for null)."""
if s is None:
return b"$-1\r\n"
data = s.encode("utf-8")
return f"${len(data)}\r\n".encode() + data + b"\r\n"
def encode_array(items: list | None) -> bytes:
    """Encode array (*<count>\r\n<items> or *-1\r\n for null)."""
    if items is None:
        return b"*-1\r\n"
    header = f"*{len(items)}\r\n".encode()
    return header + b"".join(encode_value(item) for item in items)
def encode_value(value: any) -> bytes:
    """Encode any Python value to RESP wire format.

    Dispatch order matters: bool must be tested before int because
    bool is an int subclass. None -> null bulk string; list/tuple ->
    array; dict -> RESP3 map; set -> RESP3 set; any other type is
    stringified into a bulk string.

    Fix: removed a redundant branch that special-cased strings
    containing CR/LF but returned the identical bulk-string encoding
    as the general string path.
    """
    if value is None:
        return encode_bulk_string(None)
    if isinstance(value, bool):
        return f"#{('t' if value else 'f')}\r\n".encode()
    if isinstance(value, int):
        return encode_integer(value)
    if isinstance(value, float):
        return f",{value}\r\n".encode()
    if isinstance(value, str):
        return encode_bulk_string(value)
    if isinstance(value, (list, tuple)):
        return encode_array(list(value))
    if isinstance(value, dict):
        # Encode as RESP3 map: alternating key/value payloads.
        result = f"%{len(value)}\r\n".encode()
        for k, v in value.items():
            result += encode_value(k)
            result += encode_value(v)
        return result
    if isinstance(value, set):
        # Encode as RESP3 set.
        result = f"~{len(value)}\r\n".encode()
        for item in value:
            result += encode_value(item)
        return result
    return encode_bulk_string(str(value))
def parse_simple_string(data: bytes, pos: int) -> tuple[str, int]:
    """Parse simple string; return (text, position past the CRLF)."""
    terminator = data.find(b"\r\n", pos)
    if terminator < 0:
        raise ValueError("Incomplete simple string")
    text = data[pos:terminator].decode("utf-8")
    return text, terminator + 2
def parse_error(data: bytes, pos: int) -> tuple[str, int]:
    """Parse error message; return (text, position past the CRLF)."""
    terminator = data.find(b"\r\n", pos)
    if terminator < 0:
        raise ValueError("Incomplete error")
    message = data[pos:terminator].decode("utf-8")
    return message, terminator + 2
def parse_integer(data: bytes, pos: int) -> tuple[int, int]:
    """Parse an integer reply starting at *pos*; return (value, next_pos)."""
    terminator = data.find(b"\r\n", pos)
    if terminator < 0:
        raise ValueError("Incomplete integer")
    return int(data[pos:terminator].decode("utf-8")), terminator + 2
def parse_bulk_string(data: bytes, pos: int) -> tuple[str | None, int]:
"""Parse bulk string."""
end = data.find(b"\r\n", pos)
if end == -1:
raise ValueError("Incomplete bulk string length")
length = int(data[pos:end].decode("utf-8"))
if length == -1:
return None, end + 2
start = end + 2
string_end = start + length
if len(data) < string_end + 2:
raise ValueError("Incomplete bulk string data")
return data[start:string_end].decode("utf-8"), string_end + 2
def parse_array(data: bytes, pos: int) -> tuple[list | None, int]:
"""Parse array."""
end = data.find(b"\r\n", pos)
if end == -1:
raise ValueError("Incomplete array length")
count = int(data[pos:end].decode("utf-8"))
if count == -1:
return None, end + 2
items = []
current_pos = end + 2
for _ in range(count):
value, current_pos = parse_value(data, current_pos)
items.append(value)
return items, current_pos
def parse_value(data: bytes, pos: int = 0) -> tuple[object, int]:
    """Parse any RESP value at *pos*; return (value, next_pos).

    Protocol errors ('-' replies) are returned wrapped in
    RESPValue(RESPType.ERROR, ...) rather than raised, so callers can
    distinguish server errors from parse failures (which raise
    ValueError).  Fix: the '#', ',' and '_' branches now detect a
    missing CRLF terminator and raise ValueError, consistent with every
    other branch (previously a truncated input silently decoded up to
    end-of-buffer and returned a bogus position).
    """
    if pos >= len(data):
        raise ValueError("Unexpected end of data")
    type_byte = chr(data[pos])
    pos += 1
    if type_byte == "+":
        return parse_simple_string(data, pos)
    if type_byte == "-":
        error, new_pos = parse_error(data, pos)
        return RESPValue(RESPType.ERROR, error), new_pos
    if type_byte == ":":
        return parse_integer(data, pos)
    if type_byte == "$":
        return parse_bulk_string(data, pos)
    if type_byte == "*":
        return parse_array(data, pos)
    if type_byte == "#":
        # RESP3 boolean: #t / #f
        end = data.find(b"\r\n", pos)
        if end == -1:
            raise ValueError("Incomplete boolean")
        value = data[pos:end].decode("utf-8") == "t"
        return value, end + 2
    if type_byte == ",":
        # RESP3 double
        end = data.find(b"\r\n", pos)
        if end == -1:
            raise ValueError("Incomplete double")
        value = float(data[pos:end].decode("utf-8"))
        return value, end + 2
    if type_byte == "_":
        # RESP3 null
        end = data.find(b"\r\n", pos)
        if end == -1:
            raise ValueError("Incomplete null")
        return None, end + 2
    raise ValueError(f"Unknown RESP type: {type_byte}")
def encode_command(name: str, *args: str) -> bytes:
    """Encode a Redis command and its arguments as a RESP array."""
    return encode_array([name, *args])
def parse_command(data: bytes) -> tuple[str, list[str]]:
    """Parse a RESP-encoded request; return (COMMAND, args).

    The command name is upper-cased; raises ValueError if the payload
    is not a non-empty array.
    """
    parsed, _ = parse_value(data)
    if not isinstance(parsed, list) or not parsed:
        raise ValueError("Invalid command format")
    return parsed[0].upper(), parsed[1:]
class RedisCommandBuilder:
    """Builder for common Redis commands.

    Every method returns the RESP-encoded request bytes (via
    encode_command), ready to be written to a Redis connection.
    All methods are static; the class is a namespace, not stateful.
    """
    @staticmethod
    def get(key: str) -> bytes:
        """GET key."""
        return encode_command("GET", key)
    @staticmethod
    def set(key: str, value: str, ex: int | None = None, px: int | None = None) -> bytes:
        """SET key value [EX seconds] [PX milliseconds].

        EX and PX are mutually exclusive here: if both are given, EX
        takes precedence and PX is silently ignored.
        """
        args = [key, value]
        if ex is not None:
            args.extend(["EX", str(ex)])
        elif px is not None:
            args.extend(["PX", str(px)])
        return encode_command("SET", *args)
    @staticmethod
    def del_key(*keys: str) -> bytes:
        """DEL key [key ...]."""
        return encode_command("DEL", *keys)
    @staticmethod
    def mget(*keys: str) -> bytes:
        """MGET key [key ...]."""
        return encode_command("MGET", *keys)
    @staticmethod
    def mset(**kwargs: str) -> bytes:
        """MSET key value [key value ...].

        Keys appear in keyword-argument insertion order; keys must be
        valid Python identifiers since they arrive as kwargs.
        """
        args = []
        for k, v in kwargs.items():
            args.extend([k, v])
        return encode_command("MSET", *args)
    @staticmethod
    def incr(key: str) -> bytes:
        """INCR key."""
        return encode_command("INCR", key)
    @staticmethod
    def decr(key: str) -> bytes:
        """DECR key."""
        return encode_command("DECR", key)
    @staticmethod
    def lpush(key: str, *values: str) -> bytes:
        """LPUSH key value [value ...]."""
        return encode_command("LPUSH", key, *values)
    @staticmethod
    def rpush(key: str, *values: str) -> bytes:
        """RPUSH key value [value ...]."""
        return encode_command("RPUSH", key, *values)
    @staticmethod
    def lrange(key: str, start: int, stop: int) -> bytes:
        """LRANGE key start stop."""
        return encode_command("LRANGE", key, str(start), str(stop))
    @staticmethod
    def hset(key: str, field: str, value: str) -> bytes:
        """HSET key field value."""
        return encode_command("HSET", key, field, value)
    @staticmethod
    def hget(key: str, field: str) -> bytes:
        """HGET key field."""
        return encode_command("HGET", key, field)
    @staticmethod
    def hgetall(key: str) -> bytes:
        """HGETALL key."""
        return encode_command("HGETALL", key)
    @staticmethod
    def sadd(key: str, *members: str) -> bytes:
        """SADD key member [member ...]."""
        return encode_command("SADD", key, *members)
    @staticmethod
    def smembers(key: str) -> bytes:
        """SMEMBERS key."""
        return encode_command("SMEMBERS", key)
    @staticmethod
    def zadd(key: str, score: float, member: str) -> bytes:
        """ZADD key score member."""
        return encode_command("ZADD", key, str(score), member)
    @staticmethod
    def zrange(key: str, start: int, stop: int, withscores: bool = False) -> bytes:
        """ZRANGE key start stop [WITHSCORES]."""
        args = [key, str(start), str(stop)]
        if withscores:
            args.append("WITHSCORES")
        return encode_command("ZRANGE", *args)
    @staticmethod
    def ping(message: str | None = None) -> bytes:
        """PING [message].

        Note: an empty-string message is treated as absent (truthiness
        check), producing a bare PING.
        """
        if message:
            return encode_command("PING", message)
        return encode_command("PING")
    @staticmethod
    def info(section: str | None = None) -> bytes:
        """INFO [section]."""
        if section:
            return encode_command("INFO", section)
        return encode_command("INFO")
def format_value(value: any, indent: int = 0) -> str:
    """Render a parsed RESP value for human-readable display.

    Mirrors redis-cli style output: (nil), (error), (integer), etc.
    Arrays are rendered recursively, one numbered item per line.
    """
    pad = " " * indent
    if value is None:
        return f"{pad}(nil)"
    if isinstance(value, RESPValue):
        if value.type == RESPType.ERROR:
            return f"{pad}(error) {value.value}"
        return f"{pad}{value.value}"
    if isinstance(value, bool):
        return f"{pad}(boolean) {'true' if value else 'false'}"
    if isinstance(value, int):
        return f"{pad}(integer) {value}"
    if isinstance(value, float):
        return f"{pad}(double) {value}"
    if isinstance(value, str):
        return f'{pad}"{value}"'
    if isinstance(value, list):
        if not value:
            return f"{pad}(empty array)"
        rendered = [f"{pad}(array)"]
        for index, element in enumerate(value, start=1):
            rendered.append(f"{pad}{index}) {format_value(element, indent + 1).strip()}")
        return "\n".join(rendered)
    return f"{pad}{value}"
def main() -> int:
    """CLI entry point: encode, parse, build, or demo RESP payloads."""
    parser = argparse.ArgumentParser(description="Redis protocol parser")
    parser.add_argument(
        "--mode",
        choices=["encode", "parse", "command", "builder"],
        default="command",
        help="Operation mode",
    )
    parser.add_argument("--cmd", default="PING", help="Redis command")
    parser.add_argument("args", nargs="*", help="Command arguments")
    parser.add_argument("--hex", help="Hex-encoded RESP data to parse")
    opts = parser.parse_args()
    if opts.mode == "encode":
        # Encode the first positional argument (or "OK") as a simple string.
        text = opts.args[0] if opts.args else "OK"
        payload = encode_simple_string(text)
        print(f"Encoded: {payload}")
        print(f"Hex: {payload.hex()}")
    elif opts.mode == "parse":
        # Decode hex input (spaces allowed) and pretty-print the value.
        if opts.hex:
            raw = bytes.fromhex(opts.hex.replace(" ", ""))
            parsed, _ = parse_value(raw)
            print(format_value(parsed))
    elif opts.mode == "command":
        payload = encode_command(opts.cmd, *opts.args)
        print(f"Command: {opts.cmd} {' '.join(opts.args)}")
        print(f"RESP: {payload}")
        print(f"Hex: {payload.hex()}")
    elif opts.mode == "builder":
        # Demo a handful of prebuilt commands.
        builder = RedisCommandBuilder()
        demos = [
            ("GET", builder.get("mykey")),
            ("SET", builder.set("mykey", "myvalue", ex=60)),
            ("PING", builder.ping()),
            ("LPUSH", builder.lpush("mylist", "a", "b", "c")),
            ("HSET", builder.hset("myhash", "field1", "value1")),
        ]
        for label, payload in demos:
            print(f"\n{label}:")
            print(f"  RESP: {payload}")
            print(f"  Hex: {payload.hex()}")
    return 0
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_redis_protocol/redis_cli.py (12185 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_redis_protocol/redis_cli.rs (27405 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_redis_protocol/Cargo.toml (3 dependencies)
⏱️ Parse time: 118ms
📊 Throughput: 100.2 KB/s
⏱️ Total time: 119ms
| true
|
redis_protocol
| 445
| 6
|
[
"context_manager",
"class_definition",
"exception_handling",
"decorator"
] | 0.652
| null |
example_redis_protocol
|
test_redis_cli.py
|
"""Tests for redis_cli.py"""
import pytest
from redis_cli import (
RedisCommandBuilder,
RESPType,
RESPValue,
encode_array,
encode_bulk_string,
encode_command,
encode_error,
encode_integer,
encode_simple_string,
encode_value,
format_value,
parse_array,
parse_bulk_string,
parse_command,
parse_error,
parse_integer,
parse_simple_string,
parse_value,
)
class TestEncodeSimpleString:
    """Simple strings encode as '+' + text + CRLF."""
    def test_ok(self):
        assert encode_simple_string("OK") == b"+OK\r\n"
    def test_pong(self):
        assert encode_simple_string("PONG") == b"+PONG\r\n"
    def test_empty(self):
        assert encode_simple_string("") == b"+\r\n"
class TestEncodeError:
    """Errors encode as '-' + message + CRLF."""
    def test_basic(self):
        assert encode_error("ERR unknown command") == b"-ERR unknown command\r\n"
    def test_wrong_type(self):
        assert encode_error("WRONGTYPE Operation") == b"-WRONGTYPE Operation\r\n"
class TestEncodeInteger:
    """Integers encode as ':' + digits + CRLF, sign included."""
    def test_positive(self):
        assert encode_integer(42) == b":42\r\n"
    def test_zero(self):
        assert encode_integer(0) == b":0\r\n"
    def test_negative(self):
        assert encode_integer(-1) == b":-1\r\n"
class TestEncodeBulkString:
    """Bulk strings are length-prefixed and binary-safe; None is null."""
    def test_simple(self):
        assert encode_bulk_string("hello") == b"$5\r\nhello\r\n"
    def test_empty(self):
        assert encode_bulk_string("") == b"$0\r\n\r\n"
    def test_null(self):
        assert encode_bulk_string(None) == b"$-1\r\n"
    def test_binary_safe(self):
        # Bulk strings can contain newlines
        result = encode_bulk_string("hello\r\nworld")
        assert b"$12\r\n" in result
class TestEncodeArray:
    """Arrays carry a '*' count header; None is the null array."""
    def test_simple(self):
        result = encode_array(["foo", "bar"])
        assert result.startswith(b"*2\r\n")
    def test_empty(self):
        assert encode_array([]) == b"*0\r\n"
    def test_null(self):
        assert encode_array(None) == b"*-1\r\n"
    def test_mixed(self):
        result = encode_array(["hello", 42])
        assert b"*2\r\n" in result
        assert b":42\r\n" in result
class TestEncodeValue:
    """encode_value dispatches on Python type (bool before int)."""
    def test_string(self):
        result = encode_value("hello")
        assert b"$5\r\nhello\r\n" == result
    def test_integer(self):
        assert encode_value(42) == b":42\r\n"
    def test_list(self):
        result = encode_value(["a", "b"])
        assert result.startswith(b"*2\r\n")
    def test_bool_true(self):
        assert encode_value(True) == b"#t\r\n"
    def test_bool_false(self):
        assert encode_value(False) == b"#f\r\n"
    def test_float(self):
        result = encode_value(3.14)
        assert b",3.14\r\n" == result
    def test_none(self):
        assert encode_value(None) == b"$-1\r\n"
class TestParseSimpleString:
    """Parsing returns (text, next_pos); pos skips the type byte."""
    def test_basic(self):
        value, pos = parse_simple_string(b"OK\r\n", 0)
        assert value == "OK"
        assert pos == 4
    def test_with_offset(self):
        value, pos = parse_simple_string(b"+OK\r\n", 1)
        assert value == "OK"
class TestParseError:
    """Error payload text is returned verbatim."""
    def test_basic(self):
        value, pos = parse_error(b"ERR unknown command\r\n", 0)
        assert value == "ERR unknown command"
class TestParseInteger:
    """Integers parse with sign support."""
    def test_positive(self):
        value, pos = parse_integer(b"42\r\n", 0)
        assert value == 42
    def test_negative(self):
        value, pos = parse_integer(b"-1\r\n", 0)
        assert value == -1
class TestParseBulkString:
    """Bulk strings: length header, payload, trailing CRLF; -1 is null."""
    def test_simple(self):
        value, pos = parse_bulk_string(b"5\r\nhello\r\n", 0)
        assert value == "hello"
        assert pos == 10  # Position after parsing
    def test_null(self):
        value, pos = parse_bulk_string(b"-1\r\n", 0)
        assert value is None
    def test_empty(self):
        value, pos = parse_bulk_string(b"0\r\n\r\n", 0)
        assert value == ""
class TestParseArray:
    """Arrays parse recursively; count -1 is null, 0 is empty."""
    def test_simple(self):
        data = b"2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
        value, pos = parse_array(data, 0)
        assert value == ["foo", "bar"]
    def test_null(self):
        value, pos = parse_array(b"-1\r\n", 0)
        assert value is None
    def test_empty(self):
        value, pos = parse_array(b"0\r\n", 0)
        assert value == []
class TestParseValue:
    """parse_value dispatches on the leading RESP type byte."""
    def test_simple_string(self):
        value, _ = parse_value(b"+OK\r\n")
        assert value == "OK"
    def test_error(self):
        # Errors come back wrapped in RESPValue, not raised.
        value, _ = parse_value(b"-ERR error\r\n")
        assert isinstance(value, RESPValue)
        assert value.type == RESPType.ERROR
        assert value.value == "ERR error"
    def test_integer(self):
        value, _ = parse_value(b":42\r\n")
        assert value == 42
    def test_bulk_string(self):
        value, _ = parse_value(b"$5\r\nhello\r\n")
        assert value == "hello"
    def test_array(self):
        data = b"*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
        value, _ = parse_value(data)
        assert value == ["foo", "bar"]
    def test_boolean_true(self):
        value, _ = parse_value(b"#t\r\n")
        assert value is True
    def test_boolean_false(self):
        value, _ = parse_value(b"#f\r\n")
        assert value is False
    def test_double(self):
        value, _ = parse_value(b",3.14\r\n")
        assert value == pytest.approx(3.14)
class TestEncodeCommand:
    """Commands encode as an array of bulk strings."""
    def test_simple(self):
        result = encode_command("PING")
        assert result == b"*1\r\n$4\r\nPING\r\n"
    def test_with_args(self):
        result = encode_command("GET", "mykey")
        assert b"*2\r\n" in result
        assert b"$3\r\nGET\r\n" in result
        assert b"$5\r\nmykey\r\n" in result
    def test_set(self):
        result = encode_command("SET", "key", "value")
        assert b"*3\r\n" in result
class TestParseCommand:
    """parse_command upper-cases the name and splits off args."""
    def test_simple(self):
        data = b"*1\r\n$4\r\nPING\r\n"
        cmd, args = parse_command(data)
        assert cmd == "PING"
        assert args == []
    def test_with_args(self):
        data = b"*2\r\n$3\r\nGET\r\n$5\r\nmykey\r\n"
        cmd, args = parse_command(data)
        assert cmd == "GET"
        assert args == ["mykey"]
class TestRedisCommandBuilder:
    """Round-trip builder output through parse_command to verify it."""
    def test_get(self):
        result = RedisCommandBuilder.get("mykey")
        cmd, args = parse_command(result)
        assert cmd == "GET"
        assert args == ["mykey"]
    def test_set(self):
        result = RedisCommandBuilder.set("mykey", "myvalue")
        cmd, args = parse_command(result)
        assert cmd == "SET"
        assert args == ["mykey", "myvalue"]
    def test_set_with_ex(self):
        result = RedisCommandBuilder.set("mykey", "myvalue", ex=60)
        cmd, args = parse_command(result)
        assert cmd == "SET"
        assert "EX" in args
        assert "60" in args
    def test_ping(self):
        result = RedisCommandBuilder.ping()
        cmd, args = parse_command(result)
        assert cmd == "PING"
        assert args == []
    def test_ping_with_message(self):
        result = RedisCommandBuilder.ping("hello")
        cmd, args = parse_command(result)
        assert cmd == "PING"
        assert args == ["hello"]
    def test_lpush(self):
        result = RedisCommandBuilder.lpush("mylist", "a", "b", "c")
        cmd, args = parse_command(result)
        assert cmd == "LPUSH"
        assert args[0] == "mylist"
        assert "a" in args
        assert "b" in args
        assert "c" in args
    def test_hset(self):
        result = RedisCommandBuilder.hset("myhash", "field1", "value1")
        cmd, args = parse_command(result)
        assert cmd == "HSET"
        assert args == ["myhash", "field1", "value1"]
class TestFormatValue:
    """format_value renders redis-cli style display strings."""
    def test_nil(self):
        assert "(nil)" in format_value(None)
    def test_integer(self):
        assert "(integer) 42" in format_value(42)
    def test_string(self):
        assert '"hello"' in format_value("hello")
    def test_boolean(self):
        assert "(boolean) true" in format_value(True)
        assert "(boolean) false" in format_value(False)
    def test_array(self):
        result = format_value(["a", "b"])
        assert "(array)" in result
        assert "1)" in result
        assert "2)" in result
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_redis_protocol/test_redis_cli.py (8152 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_redis_protocol/test_redis_cli.rs (16081 bytes)
⏱️ Parse time: 54ms
📊 Throughput: 146.8 KB/s
⏱️ Total time: 54ms
| true
|
redis_protocol
| 315
| 5
|
[
"class_definition"
] | 0.612
| null |
example_regex
|
pattern_matcher.py
|
#!/usr/bin/env python3
"""
Regex Example - Regular expression pattern matching
Demonstrates:
- re.match(), re.search(), re.findall()
- re.sub() for text replacement
- Compiled patterns with re.compile()
- Named groups and backreferences
- Case-insensitive matching
- Multi-line patterns
This validates depyler's ability to transpile regex operations
to Rust (regex crate).
"""
import argparse
import re
import sys
def match_pattern(pattern, text, ignore_case=False):
    """
    Match pattern at start of text.

    Prints the match (and any capture groups) or "No match".
    Returns True when the pattern matched, False otherwise.

    Args:
        pattern: Regular expression pattern
        text: Text to search
        ignore_case: Case-insensitive matching
    Depyler: proven to terminate
    """
    flags = re.IGNORECASE if ignore_case else 0
    found = re.match(pattern, text, flags)
    if found is None:
        print("No match")
        return False
    print(f"Match found: {found.group(0)}")
    if found.groups():
        print(f"Groups: {found.groups()}")
    return True
def search_pattern(pattern, text, ignore_case=False):
    """
    Search for pattern anywhere in text.

    Prints the first match position or "Pattern not found".
    Returns True when found, False otherwise.

    Args:
        pattern: Regular expression pattern
        text: Text to search
        ignore_case: Case-insensitive matching
    Depyler: proven to terminate
    """
    flags = re.IGNORECASE if ignore_case else 0
    found = re.search(pattern, text, flags)
    if found is None:
        print("Pattern not found")
        return False
    print(f"Found at position {found.start()}: {found.group(0)}")
    return True
def find_all(pattern, text, ignore_case=False):
    """
    Find all occurrences of pattern.

    Prints a numbered list of matches and returns them.

    Args:
        pattern: Regular expression pattern
        text: Text to search
        ignore_case: Case-insensitive matching
    Depyler: proven to terminate
    """
    flags = re.IGNORECASE if ignore_case else 0
    hits = re.findall(pattern, text, flags)
    print(f"Found {len(hits)} matches:")
    for index, hit in enumerate(hits, start=1):
        print(f"  {index}. {hit}")
    return hits
def substitute(pattern, replacement, text, ignore_case=False):
    """
    Replace pattern with replacement text.

    Prints the original and the substituted text; returns the result.

    Args:
        pattern: Regular expression pattern
        replacement: Replacement string
        text: Text to process
        ignore_case: Case-insensitive matching
    Depyler: proven to terminate
    """
    flags = re.IGNORECASE if ignore_case else 0
    processed = re.sub(pattern, replacement, text, flags=flags)
    print(f"Original: {text}")
    print(f"Result: {processed}")
    return processed
def validate_email(email):
    """
    Validate email address format.

    Prints a verdict line and returns True for a valid address.

    Args:
        email: Email address to validate
    Depyler: proven to terminate
    """
    pattern = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$"
    if re.compile(pattern).match(email):
        print(f"Valid email: {email}")
        return True
    print(f"Invalid email: {email}")
    return False
def extract_numbers(text):
    """
    Extract all numbers from text.

    Matches integers and floats (as strings); prints and returns them.

    Args:
        text: Text to search
    Depyler: proven to terminate
    """
    found = re.findall(r"-?\d+\.?\d*", text)
    print(f"Found {len(found)} numbers: {found}")
    return found
def main():
    """
    Main entry point for pattern matcher CLI.

    Builds an argparse interface with one subcommand per regex
    operation (match/search/findall/sub/email/numbers) and dispatches
    to the corresponding helper.  Exits with status 1 only when email
    validation fails; other commands always exit 0.
    """
    parser = argparse.ArgumentParser(
        description="Regular expression pattern matching",
        prog="pattern_matcher.py",
    )
    parser.add_argument("--version", action="version", version="1.0.0")
    # Global flag: applies to every subcommand that uses regex flags.
    parser.add_argument(
        "-i",
        "--ignore-case",
        action="store_true",
        help="Case-insensitive matching",
    )
    subparsers = parser.add_subparsers(dest="command", required=True)
    # Match command
    match_parser = subparsers.add_parser("match", help="Match pattern at start of text")
    match_parser.add_argument("pattern", help="Regular expression pattern")
    match_parser.add_argument("text", help="Text to search")
    # Search command
    search_parser = subparsers.add_parser("search", help="Search for pattern in text")
    search_parser.add_argument("pattern", help="Regular expression pattern")
    search_parser.add_argument("text", help="Text to search")
    # Findall command
    findall_parser = subparsers.add_parser("findall", help="Find all occurrences")
    findall_parser.add_argument("pattern", help="Regular expression pattern")
    findall_parser.add_argument("text", help="Text to search")
    # Substitute command
    sub_parser = subparsers.add_parser("sub", help="Replace pattern with text")
    sub_parser.add_argument("pattern", help="Regular expression pattern")
    sub_parser.add_argument("replacement", help="Replacement text")
    sub_parser.add_argument("text", help="Text to process")
    # Validate email command
    email_parser = subparsers.add_parser("email", help="Validate email address")
    email_parser.add_argument("address", help="Email address to validate")
    # Extract numbers command
    numbers_parser = subparsers.add_parser("numbers", help="Extract numbers from text")
    numbers_parser.add_argument("text", help="Text to search")
    args = parser.parse_args()
    # Execute command
    if args.command == "match":
        match_pattern(args.pattern, args.text, args.ignore_case)
    elif args.command == "search":
        search_pattern(args.pattern, args.text, args.ignore_case)
    elif args.command == "findall":
        find_all(args.pattern, args.text, args.ignore_case)
    elif args.command == "sub":
        substitute(args.pattern, args.replacement, args.text, args.ignore_case)
    elif args.command == "email":
        # Non-zero exit signals an invalid address to shell callers.
        if not validate_email(args.address):
            sys.exit(1)
    elif args.command == "numbers":
        extract_numbers(args.text)
if __name__ == "__main__":
    # Run the CLI when executed as a script.
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_regex/pattern_matcher.py (5973 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_regex/pattern_matcher.rs (7900 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_regex/Cargo.toml (2 dependencies)
⏱️ Parse time: 51ms
📊 Throughput: 113.8 KB/s
⏱️ Total time: 51ms
| true
|
regex
| 223
| 6
|
[
"context_manager",
"decorator"
] | 0.652
| null |
example_regex_basic
|
regex_basic_cli.py
|
#!/usr/bin/env python3
"""Regex Basic CLI.
Regular expression operations.
"""
import argparse
import re
import sys
def match(pattern: str, text: str) -> bool:
    """Return True if the pattern matches at the start of text."""
    return re.compile(pattern).match(text) is not None
def search(pattern: str, text: str) -> str | None:
"""Find first match of pattern in text."""
m = re.search(pattern, text)
return m.group(0) if m else None
def findall(pattern: str, text: str) -> list[str]:
    """Return every match of pattern in text, in order."""
    return re.compile(pattern).findall(text)
def findall_groups(pattern: str, text: str) -> list[tuple[str, ...]]:
    """Return all matches; with 2+ groups each match is a tuple of groups."""
    return re.compile(pattern).findall(text)
def finditer_positions(pattern: str, text: str) -> list[tuple[int, int, str]]:
    """Return (start, end, matched_text) for every match in text."""
    positions = []
    for m in re.finditer(pattern, text):
        positions.append((m.start(), m.end(), m.group(0)))
    return positions
def sub(pattern: str, replacement: str, text: str) -> str:
    """Replace every pattern match in text with replacement."""
    return re.compile(pattern).sub(replacement, text)
def sub_count(pattern: str, replacement: str, text: str, count: int) -> str:
    """Replace only the first *count* matches of pattern in text."""
    return re.compile(pattern).sub(replacement, text, count=count)
def subn(pattern: str, replacement: str, text: str) -> tuple[str, int]:
    """Replace all matches; return (new_text, number_of_replacements)."""
    new_text, n_replacements = re.subn(pattern, replacement, text)
    return new_text, n_replacements
def split(pattern: str, text: str) -> list[str]:
    """Split text on every occurrence of pattern."""
    return re.compile(pattern).split(text)
def split_maxsplit(pattern: str, text: str, maxsplit: int) -> list[str]:
    """Split text on pattern, performing at most *maxsplit* splits."""
    return re.compile(pattern).split(text, maxsplit=maxsplit)
def escape(text: str) -> str:
    """Escape regex metacharacters so *text* matches itself literally."""
    return re.escape(text)
def is_valid_pattern(pattern: str) -> bool:
    """Return True if *pattern* compiles as a valid regular expression."""
    try:
        re.compile(pattern)
    except re.error:
        return False
    return True
def match_groups(pattern: str, text: str) -> tuple[str, ...] | None:
"""Match and return groups."""
m = re.match(pattern, text)
return m.groups() if m else None
def match_groupdict(pattern: str, text: str) -> dict[str, str] | None:
"""Match and return named groups as dict."""
m = re.match(pattern, text)
return m.groupdict() if m else None
def fullmatch(pattern: str, text: str) -> bool:
    """Return True if the pattern matches the ENTIRE text."""
    return re.compile(pattern).fullmatch(text) is not None
def extract_numbers(text: str) -> list[int]:
    """Extract every (optionally signed) integer from text."""
    return list(map(int, re.findall(r"-?\d+", text)))
def extract_floats(text: str) -> list[float]:
    """Extract every number (int or float form) from text as a float."""
    return list(map(float, re.findall(r"-?\d+\.?\d*", text)))
def extract_words(text: str) -> list[str]:
    """Extract every word (alphanumeric run) from text."""
    return re.compile(r"\b\w+\b").findall(text)
def extract_emails(text: str) -> list[str]:
    """Extract email-address-shaped substrings from text."""
    return re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}").findall(text)
def extract_urls(text: str) -> list[str]:
    """Extract http/https URLs from text."""
    return re.compile(r"https?://[^\s<>\"]+").findall(text)
def extract_ipv4(text: str) -> list[str]:
    """Extract dotted-quad substrings (octet ranges are NOT validated)."""
    return re.compile(r"\b(?:\d{1,3}\.){3}\d{1,3}\b").findall(text)
def extract_hashtags(text: str) -> list[str]:
    """Extract #hashtag tokens from text."""
    return re.compile(r"#\w+").findall(text)
def extract_mentions(text: str) -> list[str]:
    """Extract @mention tokens from text."""
    return re.compile(r"@\w+").findall(text)
def is_email(text: str) -> bool:
    """Return True if the whole text looks like an email address."""
    pattern = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$"
    return re.match(pattern, text) is not None
def is_url(text: str) -> bool:
    """Return True if the whole text is an http/https URL."""
    pattern = r"^https?://[^\s<>\"]+$"
    return re.match(pattern, text) is not None
def is_ipv4(text: str) -> bool:
    """Return True for a dotted quad whose octets are all in 0..255."""
    # Shape check first (four 1-3 digit groups), then range-check octets.
    if re.match(r"^(?:\d{1,3}\.){3}\d{1,3}$", text) is None:
        return False
    return all(0 <= int(octet) <= 255 for octet in text.split("."))
def is_phone(text: str) -> bool:
    """Return True when text uses only phone characters and has 7-15 digits."""
    digits = re.sub(r"\D", "", text)
    allowed_chars = re.match(r"^[\d\s\-\(\)\+]+$", text) is not None
    return allowed_chars and 7 <= len(digits) <= 15
def remove_html_tags(text: str) -> str:
    """Strip anything between '<' and '>' (naive HTML tag removal)."""
    return re.compile(r"<[^>]+>").sub("", text)
def remove_punctuation(text: str) -> str:
    """Drop every character that is not a word character or whitespace."""
    return re.compile(r"[^\w\s]").sub("", text)
def remove_extra_spaces(text: str) -> str:
    """Collapse whitespace runs to single spaces and trim the ends."""
    collapsed = re.sub(r"\s+", " ", text)
    return collapsed.strip()
def camel_to_snake(text: str) -> str:
    """Convert camelCase/PascalCase (including acronyms) to snake_case."""
    # First pass separates acronym boundaries (HTTPReq -> HTTP_Req),
    # second pass separates lower/digit-to-upper boundaries.
    step = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", text)
    step = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", step)
    return step.lower()
def snake_to_camel(text: str) -> str:
    """Convert snake_case to camelCase (first segment stays lowercase)."""
    head, *tail = text.split("_")
    return head + "".join(part.title() for part in tail)
def count_matches(pattern: str, text: str) -> int:
    """Count the non-overlapping matches of pattern in text."""
    return len(re.compile(pattern).findall(text))
def replace_func(pattern: str, text: str, func: callable) -> str:
    """Replace each match with func(match); func receives an re.Match."""
    return re.compile(pattern).sub(func, text)
def main() -> int:
    """Parse CLI arguments and dispatch to the regex helpers.

    Subcommands: match, find, replace, split, extract.  Prints the
    result of the chosen operation and always returns exit code 0
    (unknown/missing commands print the help text).
    """
    parser = argparse.ArgumentParser(description="Regex basic CLI")
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # match
    match_p = subparsers.add_parser("match", help="Check pattern match")
    match_p.add_argument("pattern", help="Regex pattern")
    match_p.add_argument("text", help="Text to match")
    # find
    find_p = subparsers.add_parser("find", help="Find all matches")
    find_p.add_argument("pattern", help="Regex pattern")
    find_p.add_argument("text", help="Text to search")
    # replace
    replace_p = subparsers.add_parser("replace", help="Replace matches")
    replace_p.add_argument("pattern", help="Regex pattern")
    replace_p.add_argument("replacement", help="Replacement")
    replace_p.add_argument("text", help="Text to modify")
    # split
    split_p = subparsers.add_parser("split", help="Split by pattern")
    split_p.add_argument("pattern", help="Regex pattern")
    split_p.add_argument("text", help="Text to split")
    # extract
    extract_p = subparsers.add_parser("extract", help="Extract patterns")
    extract_p.add_argument("type", choices=["numbers", "emails", "urls", "words"])
    extract_p.add_argument("text", help="Text to extract from")
    args = parser.parse_args()
    if args.command == "match":
        result = match(args.pattern, args.text)
        print(f"Match: {result}")
    elif args.command == "find":
        matches = findall(args.pattern, args.text)
        print(f"Matches: {matches}")
    elif args.command == "replace":
        result = sub(args.pattern, args.replacement, args.text)
        print(f"Result: {result}")
    elif args.command == "split":
        parts = split(args.pattern, args.text)
        print(f"Parts: {parts}")
    elif args.command == "extract":
        # Inner dispatch on the extraction type; "words" is the fallback.
        if args.type == "numbers":
            result = extract_numbers(args.text)
        elif args.type == "emails":
            result = extract_emails(args.text)
        elif args.type == "urls":
            result = extract_urls(args.text)
        else:
            result = extract_words(args.text)
        print(f"Extracted: {result}")
    else:
        parser.print_help()
    return 0
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    sys.exit(main())
| false
|
regex_basic
| 265
| 0
|
[
"context_manager",
"exception_handling",
"decorator"
] | 0.652
|
Error: Unsupported type annotation: Constant(ExprConstant { range: 635..638, value: Ellipsis, kind: None })
|
|
example_regex_basic
|
test_regex_basic_cli.py
|
"""Tests for regex_basic_cli.py"""
from regex_basic_cli import (
camel_to_snake,
count_matches,
escape,
extract_emails,
extract_floats,
extract_hashtags,
extract_ipv4,
extract_mentions,
extract_numbers,
extract_urls,
extract_words,
findall,
findall_groups,
finditer_positions,
fullmatch,
is_email,
is_ipv4,
is_phone,
is_url,
is_valid_pattern,
match,
match_groupdict,
match_groups,
remove_extra_spaces,
remove_html_tags,
remove_punctuation,
search,
snake_to_camel,
split,
split_maxsplit,
sub,
sub_count,
subn,
)
class TestMatch:
    """match() anchors at the start of the text."""
    def test_match_success(self):
        assert match(r"\d+", "123abc") is True
    def test_match_fail(self):
        assert match(r"\d+", "abc123") is False
class TestSearch:
    """search() returns the first matched text or None."""
    def test_search_found(self):
        result = search(r"\d+", "abc123def")
        assert result == "123"
    def test_search_not_found(self):
        result = search(r"\d+", "abcdef")
        assert result is None
class TestFindall:
    """findall() returns all matches in order, [] when none."""
    def test_findall(self):
        result = findall(r"\d+", "a1b22c333")
        assert result == ["1", "22", "333"]
    def test_findall_empty(self):
        result = findall(r"\d+", "abc")
        assert result == []
class TestFindallGroups:
    """With multiple groups, each match is a tuple of group texts."""
    def test_findall_groups(self):
        result = findall_groups(r"(\w+)@(\w+)", "a@b c@d")
        assert result == [("a", "b"), ("c", "d")]
class TestFinditerPositions:
    """finditer_positions() reports (start, end, text) per match."""
    def test_finditer_positions(self):
        result = finditer_positions(r"\d+", "a1b22c")
        assert result == [(1, 2, "1"), (3, 5, "22")]
class TestSub:
    """sub() replaces all matches; sub_count() caps replacements."""
    def test_sub(self):
        result = sub(r"\d+", "X", "a1b2c3")
        assert result == "aXbXcX"
    def test_sub_count(self):
        result = sub_count(r"\d+", "X", "a1b2c3", 2)
        assert result == "aXbXc3"
class TestSubn:
    """subn() also reports how many replacements were made."""
    def test_subn(self):
        text, count = subn(r"\d+", "X", "a1b2c3")
        assert text == "aXbXcX"
        assert count == 3
class TestSplit:
    """split() divides text by pattern; maxsplit caps the splits."""
    def test_split(self):
        result = split(r"\s+", "a b c")
        assert result == ["a", "b", "c"]
    def test_split_maxsplit(self):
        result = split_maxsplit(r"\s+", "a b c d", 2)
        assert result == ["a", "b", "c d"]
class TestEscape:
    """escape() backslash-escapes regex metacharacters."""
    def test_escape(self):
        result = escape("hello.world")
        assert result == r"hello\.world"
class TestValidPattern:
    """is_valid_pattern() reports whether a regex compiles."""
    def test_valid_pattern(self):
        assert is_valid_pattern(r"\d+") is True
    def test_invalid_pattern(self):
        assert is_valid_pattern(r"[") is False
class TestMatchGroups:
    """Group extraction: positional tuple and named-group dict."""
    def test_match_groups(self):
        result = match_groups(r"(\d+)-(\d+)", "123-456")
        assert result == ("123", "456")
    def test_match_groupdict(self):
        result = match_groupdict(r"(?P<first>\d+)-(?P<second>\d+)", "123-456")
        assert result == {"first": "123", "second": "456"}
class TestFullmatch:
    """fullmatch() requires the pattern to cover the whole text."""
    def test_fullmatch_success(self):
        assert fullmatch(r"\d+", "123") is True
    def test_fullmatch_fail(self):
        assert fullmatch(r"\d+", "123abc") is False
class TestExtractNumbers:
    """Numeric extraction returns typed values (int / float)."""
    def test_extract_numbers(self):
        result = extract_numbers("a1b22c-3d")
        assert result == [1, 22, -3]
    def test_extract_floats(self):
        result = extract_floats("1.5 and 2.7")
        assert result == [1.5, 2.7]
class TestExtractWords:
    """Word extraction drops punctuation between words."""
    def test_extract_words(self):
        result = extract_words("hello, world!")
        assert result == ["hello", "world"]
class TestExtractEmails:
    """Email extraction finds address-shaped substrings."""
    def test_extract_emails(self):
        result = extract_emails("Contact us at test@example.com or info@site.org")
        assert "test@example.com" in result
        assert "info@site.org" in result
class TestExtractUrls:
    """URL extraction matches both http and https schemes."""
    def test_extract_urls(self):
        result = extract_urls("Visit https://example.com and http://test.org")
        assert "https://example.com" in result
        assert "http://test.org" in result
class TestExtractIPv4:
    """IPv4 extraction finds dotted quads (no octet range check)."""
    def test_extract_ipv4(self):
        result = extract_ipv4("Server at 192.168.1.1 and 10.0.0.1")
        assert "192.168.1.1" in result
        assert "10.0.0.1" in result
class TestExtractHashtagsMentions:
    """Hashtag and mention extraction keep their leading sigil."""
    def test_extract_hashtags(self):
        result = extract_hashtags("Hello #world and #test")
        assert result == ["#world", "#test"]
    def test_extract_mentions(self):
        result = extract_mentions("Hello @user1 and @user2")
        assert result == ["@user1", "@user2"]
class TestValidation:
    """Whole-string validators: email, URL, IPv4 (range-checked), phone."""
    def test_is_email_valid(self):
        assert is_email("test@example.com") is True
    def test_is_email_invalid(self):
        assert is_email("not-an-email") is False
    def test_is_url_valid(self):
        assert is_url("https://example.com") is True
    def test_is_url_invalid(self):
        assert is_url("not-a-url") is False
    def test_is_ipv4_valid(self):
        assert is_ipv4("192.168.1.1") is True
    def test_is_ipv4_invalid(self):
        assert is_ipv4("999.999.999.999") is False
    def test_is_phone_valid(self):
        assert is_phone("+1-234-567-8900") is True
    def test_is_phone_invalid(self):
        assert is_phone("123") is False
class TestRemove:
def test_remove_html_tags(self):
result = remove_html_tags("<p>Hello</p>")
assert result == "Hello"
def test_remove_punctuation(self):
result = remove_punctuation("Hello, World!")
assert result == "Hello World"
def test_remove_extra_spaces(self):
result = remove_extra_spaces(" hello world ")
assert result == "hello world"
class TestCaseConversion:
def test_camel_to_snake(self):
assert camel_to_snake("camelCase") == "camel_case"
assert camel_to_snake("HTTPRequest") == "http_request"
def test_snake_to_camel(self):
assert snake_to_camel("snake_case") == "snakeCase"
assert snake_to_camel("hello_world_test") == "helloWorldTest"
class TestCountMatches:
def test_count_matches(self):
assert count_matches(r"\d+", "a1b22c333") == 3
class TestEdgeCases:
def test_empty_text(self):
assert findall(r"\d+", "") == []
assert search(r"\d+", "") is None
def test_special_characters(self):
result = findall(r"\$\d+", "Price: $100 and $200")
assert result == ["$100", "$200"]
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_regex_basic/test_regex_basic_cli.py (6399 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_regex_basic/test_regex_basic_cli.rs (19388 bytes)
⏱️ Parse time: 51ms
📊 Throughput: 122.1 KB/s
⏱️ Total time: 51ms
| true
|
regex_basic
| 248
| 5
|
[
"class_definition",
"decorator"
] | 0.612
| null |
example_regex_matcher
|
regex_cli.py
|
#!/usr/bin/env python3
"""Regex Matcher CLI.
Simple regex engine implementation for educational purposes.
"""
import argparse
import sys
from dataclasses import dataclass
from enum import Enum, auto
class NodeType(Enum):
    """AST node types — one variant per regex construct the parser emits."""
    LITERAL = auto()       # single literal character (or "" for empty match)
    DOT = auto()           # '.' — any single character
    STAR = auto()          # '*' — zero or more of child
    PLUS = auto()          # '+' — one or more of child
    QUESTION = auto()      # '?' — zero or one of child
    CONCAT = auto()        # sequence of two children
    ALTERNATION = auto()   # '|' — either of two children
    GROUP = auto()         # '(...)'
    CHAR_CLASS = auto()    # '[...]', possibly negated
    ANCHOR_START = auto()  # '^'
    ANCHOR_END = auto()    # '$'
@dataclass
class Node:
    """Regex AST node.

    `value` holds literal text or the expanded character-class members,
    `children` the sub-nodes of compound constructs, and `negated`
    marks a negated character class ([^...]).
    """
    type: NodeType
    value: str | None = None
    children: list["Node"] | None = None
    negated: bool = False
class Lexer:
    """Single-character cursor over a regex pattern string.

    `peek` inspects the current character without consuming it and
    `advance` consumes and returns it; both yield "" once the input
    is exhausted.
    """

    def __init__(self, pattern: str):
        self.pattern = pattern
        self.pos = 0

    def peek(self) -> str:
        """Return the current character, or "" at end of input."""
        if self.pos < len(self.pattern):
            return self.pattern[self.pos]
        return ""

    def advance(self) -> str:
        """Consume and return the current character ("" at end)."""
        current = self.peek()
        self.pos += 1
        return current
class Parser:
    """Recursive-descent regex parser.

    Grammar, lowest to highest precedence:
        alternation   := concatenation ('|' alternation)?
        concatenation := quantifier*
        quantifier    := atom ('*' | '+' | '?')?
        atom          := '(' alternation ')' | '[' class ']' | '.'
                         | '^' | '$' | '\\' char | literal
    """
    def __init__(self, pattern: str):
        self.lexer = Lexer(pattern)
    def parse(self) -> Node:
        """Parse pattern into AST."""
        return self.alternation()
    def alternation(self) -> Node:
        """Parse alternation (|)."""
        left = self.concatenation()
        if self.lexer.peek() == "|":
            self.lexer.advance()
            # Right-recursive: "a|b|c" parses as a|(b|c).
            right = self.alternation()
            return Node(NodeType.ALTERNATION, children=[left, right])
        return left
    def concatenation(self) -> Node:
        """Parse concatenation."""
        nodes = []
        # Stop at '|' (alternation boundary) or ')' (end of group).
        while self.lexer.peek() and self.lexer.peek() not in "|)":
            nodes.append(self.quantifier())
        if not nodes:
            # Empty branch (e.g. "a|") matches the empty string.
            return Node(NodeType.LITERAL, value="")
        if len(nodes) == 1:
            return nodes[0]
        # Left-fold into nested binary CONCAT nodes.
        result = nodes[0]
        for node in nodes[1:]:
            result = Node(NodeType.CONCAT, children=[result, node])
        return result
    def quantifier(self) -> Node:
        """Parse quantifiers (*, +, ?)."""
        node = self.atom()
        if self.lexer.peek() == "*":
            self.lexer.advance()
            return Node(NodeType.STAR, children=[node])
        if self.lexer.peek() == "+":
            self.lexer.advance()
            return Node(NodeType.PLUS, children=[node])
        if self.lexer.peek() == "?":
            self.lexer.advance()
            return Node(NodeType.QUESTION, children=[node])
        return node
    def atom(self) -> Node:
        """Parse atoms."""
        char = self.lexer.peek()
        if char == "(":
            self.lexer.advance()
            node = self.alternation()
            # Closing ')' is optional: an unterminated group is accepted.
            if self.lexer.peek() == ")":
                self.lexer.advance()
            return Node(NodeType.GROUP, children=[node])
        if char == "[":
            return self.char_class()
        if char == ".":
            self.lexer.advance()
            return Node(NodeType.DOT)
        if char == "^":
            self.lexer.advance()
            return Node(NodeType.ANCHOR_START)
        if char == "$":
            self.lexer.advance()
            return Node(NodeType.ANCHOR_END)
        if char == "\\":
            # Backslash escape: next character is taken literally.
            self.lexer.advance()
            escaped = self.lexer.advance()
            return Node(NodeType.LITERAL, value=escaped)
        if char:
            self.lexer.advance()
            return Node(NodeType.LITERAL, value=char)
        # End of pattern: empty literal.
        return Node(NodeType.LITERAL, value="")
    def char_class(self) -> Node:
        """Parse character class [...]."""
        self.lexer.advance()  # [
        negated = False
        chars = []
        if self.lexer.peek() == "^":
            negated = True
            self.lexer.advance()
        while self.lexer.peek() and self.lexer.peek() != "]":
            char = self.lexer.advance()
            if self.lexer.peek() == "-" and self.lexer.pos + 1 < len(self.lexer.pattern):
                self.lexer.advance()  # -
                end_char = self.lexer.advance()
                # Expand range eagerly into individual characters.
                for c in range(ord(char), ord(end_char) + 1):
                    chars.append(chr(c))
            else:
                chars.append(char)
        if self.lexer.peek() == "]":
            self.lexer.advance()
        # Members are stored as one expanded string of characters.
        return Node(NodeType.CHAR_CLASS, value="".join(chars), negated=negated)
def match_node(node: Node, text: str, pos: int) -> list[int]:
    """Match node against text starting at pos. Returns list of end positions.

    Set-based (non-backtracking) matcher: each call returns every
    position the node could end at; an empty list means no match.
    """
    if node.type == NodeType.LITERAL:
        if pos < len(text) and text[pos : pos + len(node.value or "")] == node.value:
            return [pos + len(node.value or "")]
        if not node.value:
            # Empty literal matches without consuming input.
            return [pos]
        return []
    if node.type == NodeType.DOT:
        if pos < len(text):
            return [pos + 1]
        return []
    if node.type == NodeType.CHAR_CLASS:
        if pos < len(text):
            in_class = text[pos] in (node.value or "")
            if (in_class and not node.negated) or (not in_class and node.negated):
                return [pos + 1]
        return []
    if node.type == NodeType.ANCHOR_START:
        if pos == 0:
            return [0]
        return []
    if node.type == NodeType.ANCHOR_END:
        if pos == len(text):
            return [pos]
        return []
    if node.type == NodeType.STAR:
        # Zero or more
        child = node.children[0]
        results = [pos]  # Zero matches
        current = [pos]
        # Breadth-first closure; the `not in results` dedupe also
        # guarantees termination when the child matches zero-width.
        while current:
            new_positions = []
            for p in current:
                for np in match_node(child, text, p):
                    if np not in results:
                        results.append(np)
                        new_positions.append(np)
            current = new_positions
        return results
    if node.type == NodeType.PLUS:
        # One or more
        child = node.children[0]
        first = match_node(child, text, pos)
        if not first:
            return []
        results = list(first)
        current = list(first)
        while current:
            new_positions = []
            for p in current:
                for np in match_node(child, text, p):
                    if np not in results:
                        results.append(np)
                        new_positions.append(np)
            current = new_positions
        return results
    if node.type == NodeType.QUESTION:
        # Zero or one
        child = node.children[0]
        results = [pos]  # Zero matches
        results.extend(match_node(child, text, pos))
        return list(set(results))
    if node.type == NodeType.CONCAT:
        left, right = node.children[0], node.children[1]
        left_matches = match_node(left, text, pos)
        results = []
        # Feed every left end-position into the right-hand node.
        for lp in left_matches:
            results.extend(match_node(right, text, lp))
        return results
    if node.type == NodeType.ALTERNATION:
        left, right = node.children[0], node.children[1]
        results = match_node(left, text, pos)
        results.extend(match_node(right, text, pos))
        return list(set(results))
    if node.type == NodeType.GROUP:
        # Groups are non-capturing here: match the inner node directly.
        return match_node(node.children[0], text, pos)
    return []
def match(pattern: str, text: str) -> bool:
    """Return True when `pattern` can consume `text` in full."""
    ast = Parser(pattern).parse()
    return len(text) in match_node(ast, text, 0)
def search(pattern: str, text: str) -> tuple[int, int] | None:
    """Return (start, end) of the first (and longest) match, or None."""
    ast = Parser(pattern).parse()
    # Try every start offset left to right; len(text) allows an
    # empty match at the very end.
    for begin in range(len(text) + 1):
        ends = match_node(ast, text, begin)
        if ends:
            return begin, max(ends)
    return None
def find_all(pattern: str, text: str) -> list[tuple[int, int]]:
    """Return all non-overlapping (start, end) match spans, left to right."""
    ast = Parser(pattern).parse()
    spans: list[tuple[int, int]] = []
    cursor = 0
    while cursor <= len(text):
        ends = match_node(ast, text, cursor)
        stop = max(ends) if ends else cursor
        if stop > cursor:
            # Non-empty match: record it and resume after it.
            spans.append((cursor, stop))
            cursor = stop
        else:
            # No match, or only a zero-width one: step one char forward.
            cursor += 1
    return spans
def replace(pattern: str, text: str, replacement: str) -> str:
    """Substitute every non-overlapping match of `pattern` with `replacement`."""
    spans = find_all(pattern, text)
    if not spans:
        return text
    out = ""
    prev = 0
    for begin, stop in spans:
        # Keep the unmatched gap, then drop in the replacement.
        out += text[prev:begin] + replacement
        prev = stop
    return out + text[prev:]
def split(pattern: str, text: str) -> list[str]:
    """Split `text` on every match of `pattern` (separators removed).

    With no matches the whole text comes back as a single element.
    """
    pieces = []
    prev = 0
    for begin, stop in find_all(pattern, text):
        pieces.append(text[prev:begin])
        prev = stop
    pieces.append(text[prev:])
    return pieces
def main() -> int:
    """CLI entry point: run the selected regex operation on text or stdin.

    Exit code 0 on success/match, 1 on no-match or not-found.
    """
    parser = argparse.ArgumentParser(description="Simple regex matcher")
    parser.add_argument("pattern", help="Regex pattern")
    parser.add_argument("text", nargs="?", help="Text to match")
    parser.add_argument(
        "--mode",
        choices=["match", "search", "findall", "replace", "split"],
        default="match",
        help="Operation mode",
    )
    parser.add_argument("--replacement", "-r", help="Replacement string")
    args = parser.parse_args()
    # Text falls back to stdin when not given as an argument.
    if not args.text:
        text = sys.stdin.read().strip()
    else:
        text = args.text
    if args.mode == "match":
        if match(args.pattern, text):
            print("Match")
            return 0
        print("No match")
        return 1
    elif args.mode == "search":
        result = search(args.pattern, text)
        if result:
            start, end = result
            print(f"Found at [{start}:{end}]: {text[start:end]!r}")
            return 0
        print("Not found")
        return 1
    elif args.mode == "findall":
        matches = find_all(args.pattern, text)
        for start, end in matches:
            print(f"[{start}:{end}]: {text[start:end]!r}")
        print(f"Total: {len(matches)} matches")
    elif args.mode == "replace":
        # Omitted --replacement deletes matches (empty replacement).
        repl = args.replacement or ""
        result = replace(args.pattern, text, repl)
        print(result)
    elif args.mode == "split":
        parts = split(args.pattern, text)
        for i, part in enumerate(parts):
            print(f"{i}: {part!r}")
    return 0
if __name__ == "__main__":
sys.exit(main())
| false
|
regex_matcher
| 411
| 0
|
[
"context_manager",
"class_definition",
"stdin_usage",
"decorator"
] | 0.652
|
Error: Unsupported type annotation: Constant(ExprConstant { range: 605..611, value: Str("Node"), kind: None })
|
|
example_regex_matcher
|
test_regex_cli.py
|
"""Tests for regex_cli.py"""
from regex_cli import (
find_all,
match,
replace,
search,
split,
)
class TestMatch:
def test_literal(self):
assert match("hello", "hello") is True
assert match("hello", "world") is False
def test_dot(self):
assert match("h.llo", "hello") is True
assert match("h.llo", "hxllo") is True
assert match("....", "test") is True
assert match("....", "tes") is False
def test_star(self):
assert match("a*", "") is True
assert match("a*", "a") is True
assert match("a*", "aaa") is True
assert match("a*b", "b") is True
assert match("a*b", "ab") is True
assert match("a*b", "aaab") is True
def test_plus(self):
assert match("a+", "") is False
assert match("a+", "a") is True
assert match("a+", "aaa") is True
assert match("a+b", "b") is False
assert match("a+b", "ab") is True
def test_question(self):
assert match("a?b", "b") is True
assert match("a?b", "ab") is True
assert match("a?b", "aab") is False
def test_alternation(self):
assert match("a|b", "a") is True
assert match("a|b", "b") is True
assert match("a|b", "c") is False
assert match("cat|dog", "cat") is True
assert match("cat|dog", "dog") is True
def test_groups(self):
assert match("(ab)+", "ab") is True
assert match("(ab)+", "abab") is True
assert match("(a|b)+", "abba") is True
def test_char_class(self):
assert match("[abc]", "a") is True
assert match("[abc]", "b") is True
assert match("[abc]", "d") is False
assert match("[a-z]", "m") is True
assert match("[a-z]", "A") is False
def test_negated_char_class(self):
assert match("[^abc]", "d") is True
assert match("[^abc]", "a") is False
def test_anchors(self):
assert match("^hello", "hello") is True
assert match("hello$", "hello") is True
assert match("^hello$", "hello") is True
def test_escape(self):
assert match(r"\.", ".") is True
assert match(r"\.", "a") is False
assert match(r"\*", "*") is True
class TestSearch:
def test_found(self):
result = search("world", "hello world")
assert result == (6, 11)
def test_not_found(self):
result = search("xyz", "hello world")
assert result is None
def test_at_start(self):
result = search("hello", "hello world")
assert result == (0, 5)
def test_pattern(self):
result = search("w.rld", "hello world")
assert result == (6, 11)
class TestFindAll:
def test_multiple(self):
matches = find_all("a", "banana")
assert len(matches) == 3
def test_no_matches(self):
matches = find_all("x", "banana")
assert len(matches) == 0
def test_overlapping(self):
# Non-overlapping matches
matches = find_all("ana", "banana")
assert len(matches) == 1
class TestReplace:
def test_simple(self):
result = replace("world", "hello world", "there")
assert result == "hello there"
def test_multiple(self):
result = replace("a", "banana", "o")
assert result == "bonono"
def test_no_match(self):
result = replace("x", "hello", "y")
assert result == "hello"
def test_pattern(self):
result = replace("[0-9]+", "abc123def456", "X")
assert result == "abcXdefX"
class TestSplit:
def test_simple(self):
parts = split(",", "a,b,c")
assert parts == ["a", "b", "c"]
def test_no_match(self):
parts = split(",", "abc")
assert parts == ["abc"]
def test_multiple(self):
parts = split(" +", "hello world test")
assert parts == ["hello", "world", "test"]
class TestComplexPatterns:
def test_email_like(self):
# Simplified email pattern
assert match("[a-z]+@[a-z]+", "test@example") is True
def test_phone_like(self):
# Simplified phone pattern
assert match("[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]", "123-4567") is True
def test_nested_groups(self):
assert match("((ab)+c)+", "abcababc") is True
def test_mixed(self):
assert match("a.*b", "axxxb") is True
assert match("a.*b", "ab") is True
assert match("a.+b", "ab") is False
assert match("a.+b", "axb") is True
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_regex_matcher/test_regex_cli.py (4511 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_regex_matcher/test_regex_cli.rs (8772 bytes)
⏱️ Parse time: 51ms
📊 Throughput: 86.0 KB/s
⏱️ Total time: 51ms
| true
|
regex_matcher
| 158
| 5
|
[
"class_definition",
"decorator"
] | 0.612
| null |
example_rename_batch
|
rename_cli.py
|
#!/usr/bin/env python3
"""Batch file rename CLI.
Rename files using patterns and transformations.
"""
import argparse
import os
import re
import sys
def apply_pattern(
    filename: str,
    find: str,
    replace: str,
    use_regex: bool = False,
) -> str:
    """Rewrite `filename` by substituting `find` with `replace`.

    With `use_regex` true, `find` is a regular expression and `replace`
    may use backreferences; otherwise both are plain substrings.
    """
    if not use_regex:
        return filename.replace(find, replace)
    return re.sub(find, replace, filename)
def apply_case(filename: str, case: str) -> str:
    """Transform the case style of `filename`'s base name.

    Supported styles: "lower", "upper", "title", "snake", "kebab".
    The extension is lowercased (uppercased for "upper"); any other
    style returns the filename unchanged.
    """
    name, ext = os.path.splitext(filename)
    if case == "lower":
        return name.lower() + ext.lower()
    if case == "upper":
        return name.upper() + ext.upper()
    if case == "title":
        return name.title() + ext.lower()
    if case == "snake":
        # CamelCase / spaces / dashes -> snake_case
        out = re.sub(r"([A-Z])", r"_\1", name)
        out = re.sub(r"[\s-]+", "_", out)
        out = re.sub(r"_+", "_", out)
        return out.strip("_").lower() + ext.lower()
    if case == "kebab":
        # CamelCase / spaces / underscores -> kebab-case
        out = re.sub(r"([A-Z])", r"-\1", name)
        out = re.sub(r"[\s_]+", "-", out)
        out = re.sub(r"-+", "-", out)
        return out.strip("-").lower() + ext.lower()
    return filename
def apply_numbering(
    filename: str,
    index: int,
    start: int = 1,
    width: int = 3,
    position: str = "prefix",
) -> str:
    """Insert a zero-padded sequence number into `filename`.

    The number is `index + start`, padded to `width` digits, attached
    before ("prefix") or after ("suffix") the base name; any other
    `position` leaves the name untouched.
    """
    base, ext = os.path.splitext(filename)
    label = str(index + start).zfill(width)
    if position == "prefix":
        return f"{label}_{base}{ext}"
    if position == "suffix":
        return f"{base}_{label}{ext}"
    return filename
def apply_trim(filename: str, chars: int, position: str = "start") -> str:
    """Drop `chars` characters from the start or end of the base name.

    The extension is always preserved; trimming zero characters (or an
    unknown position) returns the name unchanged.
    """
    base, ext = os.path.splitext(filename)
    if position == "start":
        return base[chars:] + ext
    if position == "end" and chars > 0:
        return base[:-chars] + ext
    return base + ext
def apply_extension(filename: str, new_ext: str) -> str:
    """Swap the file extension for `new_ext` (leading dot optional)."""
    base = os.path.splitext(filename)[0]
    dotted = new_ext if new_ext.startswith(".") else "." + new_ext
    return base + dotted
def preview_rename(
    files: list[str],
    transform_fn,
) -> list[tuple[str, str]]:
    """Compute (old_path, new_path) pairs without touching the filesystem.

    `transform_fn(filename, index)` produces the new base name; entries
    whose name is unchanged are omitted from the result.
    """
    pairs = []
    for index, old_path in enumerate(files):
        folder, old_name = os.path.split(old_path)
        new_name = transform_fn(old_name, index)
        if new_name == old_name:
            continue
        pairs.append((old_path, os.path.join(folder, new_name)))
    return pairs
def check_conflicts(renames: list[tuple[str, str]]) -> list[str]:
    """Detect problems before renaming.

    Flags two kinds of conflict:
      * two entries mapping different sources to the same target path;
      * a target path that already exists on disk and is not itself one
        of the sources being renamed away.

    Returns human-readable conflict descriptions (empty list if safe).
    """
    conflicts = []
    new_names: dict[str, str] = {}
    # Hoisted: the original rebuilt this source list inside the loop,
    # making the check accidentally O(n^2).
    sources = {old for old, _ in renames}
    for old, new in renames:
        if new in new_names:
            conflicts.append(f"Conflict: {old} and {new_names[new]} -> {new}")
        new_names[new] = old
        if os.path.exists(new) and new not in sources:
            conflicts.append(f"Target exists: {new}")
    return conflicts
def execute_renames(
    renames: list[tuple[str, str]],
    dry_run: bool = False,
) -> tuple[int, int]:
    """Execute renames.

    Returns (success_count, error_count). With dry_run=True nothing is
    touched and every entry counts as a success.
    """
    success = 0
    errors = 0
    for old, new in renames:
        if dry_run:
            success += 1
            continue
        try:
            # Create target directory if needed
            new_dir = os.path.dirname(new)
            if new_dir and not os.path.exists(new_dir):
                os.makedirs(new_dir)
            os.rename(old, new)
            success += 1
        except OSError:
            # Best-effort: a failed rename is tallied, not raised.
            errors += 1
    return success, errors
def list_files(
    directory: str,
    pattern: str = "",
    recursive: bool = False,
) -> list[str]:
    """List files in directory.

    `pattern` is an optional regex tested (re.search) against the bare
    filename; directories are skipped. Results are returned sorted.
    """
    files = []
    if recursive:
        for root, _, filenames in os.walk(directory):
            for filename in filenames:
                if pattern and not re.search(pattern, filename):
                    continue
                files.append(os.path.join(root, filename))
    else:
        for entry in os.listdir(directory):
            path = os.path.join(directory, entry)
            if not os.path.isfile(path):
                continue
            if pattern and not re.search(pattern, entry):
                continue
            files.append(path)
    return sorted(files)
def main() -> int:
    """CLI entry point: collect files, build the transform, preview, run.

    Returns a process exit code (0 on success, 1 on errors/conflicts).
    """
    parser = argparse.ArgumentParser(description="Batch rename files")
    parser.add_argument("path", nargs="?", default=".", help="Directory or file")
    parser.add_argument("--find", metavar="PATTERN", help="Find pattern")
    parser.add_argument("--replace", metavar="STRING", default="", help="Replace with")
    parser.add_argument("-E", "--regex", action="store_true", help="Use regex for find/replace")
    parser.add_argument(
        "--case", choices=["lower", "upper", "title", "snake", "kebab"], help="Change case"
    )
    parser.add_argument("--number", action="store_true", help="Add sequential numbers")
    parser.add_argument("--number-start", type=int, default=1, help="Starting number")
    parser.add_argument("--number-width", type=int, default=3, help="Number padding width")
    parser.add_argument(
        "--number-pos", choices=["prefix", "suffix"], default="prefix", help="Number position"
    )
    parser.add_argument("--trim-start", type=int, help="Trim N chars from start")
    parser.add_argument("--trim-end", type=int, help="Trim N chars from end")
    parser.add_argument("--ext", metavar="EXT", help="Change extension")
    parser.add_argument("--filter", metavar="PATTERN", help="Filter files by regex")
    parser.add_argument("-r", "--recursive", action="store_true", help="Process subdirectories")
    parser.add_argument("-n", "--dry-run", action="store_true", help="Preview without renaming")
    args = parser.parse_args()
    # Get files
    if os.path.isfile(args.path):
        files = [args.path]
    elif os.path.isdir(args.path):
        files = list_files(args.path, args.filter, args.recursive)
    else:
        print(f"Error: {args.path} not found", file=sys.stderr)
        return 1
    if not files:
        print("No files found")
        return 0
    # Build transform function
    def transform(filename: str, index: int) -> str:
        # Transformations apply in a fixed order; numbering runs last
        # so the sequence number is never case-folded or trimmed.
        result = filename
        if args.find:
            result = apply_pattern(result, args.find, args.replace, args.regex)
        if args.case:
            result = apply_case(result, args.case)
        if args.trim_start:
            result = apply_trim(result, args.trim_start, "start")
        if args.trim_end:
            result = apply_trim(result, args.trim_end, "end")
        if args.ext:
            result = apply_extension(result, args.ext)
        if args.number:
            result = apply_numbering(
                result, index, args.number_start, args.number_width, args.number_pos
            )
        return result
    # Preview and execute
    renames = preview_rename(files, transform)
    if not renames:
        print("No files to rename")
        return 0
    conflicts = check_conflicts(renames)
    if conflicts:
        print("Conflicts detected:", file=sys.stderr)
        for c in conflicts:
            print(f"  {c}", file=sys.stderr)
        return 1
    # Show preview
    print(f"{'[DRY RUN] ' if args.dry_run else ''}Renaming {len(renames)} files:")
    for old, new in renames:
        old_name = os.path.basename(old)
        new_name = os.path.basename(new)
        print(f"  {old_name} -> {new_name}")
    success, errors = execute_renames(renames, args.dry_run)
    print(f"\nRenamed: {success}, Errors: {errors}")
    return 0 if errors == 0 else 1
if __name__ == "__main__":
sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_rename_batch/rename_cli.py (7981 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_rename_batch/rename_cli.rs (17839 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_rename_batch/Cargo.toml (4 dependencies)
⏱️ Parse time: 56ms
📊 Throughput: 138.3 KB/s
⏱️ Total time: 56ms
| true
|
rename_batch
| 274
| 6
|
[
"exception_handling",
"multiprocessing"
] | 0.577
| null |
example_rename_batch
|
test_rename_cli.py
|
"""Tests for rename_cli.py"""
import os
import tempfile
import pytest
from rename_cli import (
apply_case,
apply_extension,
apply_numbering,
apply_pattern,
apply_trim,
check_conflicts,
execute_renames,
list_files,
preview_rename,
)
@pytest.fixture
def temp_dir():
    """Create directory with test files.

    Yields the directory path; cleanup is automatic on teardown.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        # Mixed-case and spaced names exercise the case-transform paths.
        for name in ["file1.txt", "file2.txt", "Document.PDF", "Test File.doc"]:
            path = os.path.join(tmpdir, name)
            with open(path, "w") as f:
                f.write("test")
        yield tmpdir
class TestApplyPattern:
def test_simple_replace(self):
assert apply_pattern("file.txt", ".txt", ".md") == "file.md"
def test_no_match(self):
assert apply_pattern("file.txt", ".doc", ".md") == "file.txt"
def test_regex_replace(self):
result = apply_pattern("file_001.txt", r"_\d+", "", use_regex=True)
assert result == "file.txt"
def test_regex_group(self):
result = apply_pattern("IMG_20231225.jpg", r"IMG_(\d+)", r"photo_\1", use_regex=True)
assert result == "photo_20231225.jpg"
class TestApplyCase:
def test_lowercase(self):
assert apply_case("FILE.TXT", "lower") == "file.txt"
def test_uppercase(self):
assert apply_case("file.txt", "upper") == "FILE.TXT"
def test_titlecase(self):
assert apply_case("hello world.txt", "title") == "Hello World.txt"
def test_snakecase(self):
assert apply_case("HelloWorld.txt", "snake") == "hello_world.txt"
assert apply_case("hello world.txt", "snake") == "hello_world.txt"
def test_kebabcase(self):
assert apply_case("HelloWorld.txt", "kebab") == "hello-world.txt"
assert apply_case("hello_world.txt", "kebab") == "hello-world.txt"
class TestApplyNumbering:
def test_prefix(self):
result = apply_numbering("file.txt", 0, start=1, width=3, position="prefix")
assert result == "001_file.txt"
def test_suffix(self):
result = apply_numbering("file.txt", 0, start=1, width=3, position="suffix")
assert result == "file_001.txt"
def test_custom_start(self):
result = apply_numbering("file.txt", 0, start=100, width=4)
assert result == "0100_file.txt"
def test_increment(self):
result = apply_numbering("file.txt", 5, start=1, width=2)
assert result == "06_file.txt"
class TestApplyTrim:
def test_trim_start(self):
assert apply_trim("prefix_file.txt", 7, "start") == "file.txt"
def test_trim_end(self):
assert apply_trim("file_suffix.txt", 7, "end") == "file.txt"
def test_trim_zero(self):
assert apply_trim("file.txt", 0, "start") == "file.txt"
class TestApplyExtension:
def test_change_extension(self):
assert apply_extension("file.txt", ".md") == "file.md"
def test_without_dot(self):
assert apply_extension("file.txt", "md") == "file.md"
def test_no_extension(self):
assert apply_extension("file", ".txt") == "file.txt"
class TestPreviewRename:
def test_preview(self, temp_dir):
files = list_files(temp_dir)
def transform(name, i):
return name.lower()
renames = preview_rename(files, transform)
# Should include Document.PDF and Test File.doc (uppercase)
old_names = [os.path.basename(old) for old, new in renames]
assert "Document.PDF" in old_names
def test_no_changes(self, temp_dir):
files = list_files(temp_dir)
def transform(name, i):
return name # No change
renames = preview_rename(files, transform)
assert len(renames) == 0
class TestCheckConflicts:
def test_no_conflicts(self):
renames = [("/a/file1.txt", "/a/new1.txt"), ("/a/file2.txt", "/a/new2.txt")]
conflicts = check_conflicts(renames)
assert len(conflicts) == 0
def test_target_conflict(self):
renames = [("/a/file1.txt", "/a/same.txt"), ("/a/file2.txt", "/a/same.txt")]
conflicts = check_conflicts(renames)
assert len(conflicts) == 1
assert "Conflict" in conflicts[0]
class TestExecuteRenames:
def test_dry_run(self, temp_dir):
files = list_files(temp_dir)
def transform(name, i):
return "renamed_" + name
renames = preview_rename(files, transform)
success, errors = execute_renames(renames, dry_run=True)
# Original files should still exist
assert os.path.exists(os.path.join(temp_dir, "file1.txt"))
assert success > 0
assert errors == 0
def test_actual_rename(self, temp_dir):
original = os.path.join(temp_dir, "file1.txt")
renamed = os.path.join(temp_dir, "renamed.txt")
success, errors = execute_renames([(original, renamed)])
assert success == 1
assert errors == 0
assert os.path.exists(renamed)
assert not os.path.exists(original)
class TestListFiles:
def test_list_all(self, temp_dir):
files = list_files(temp_dir)
assert len(files) == 4
def test_filter_pattern(self, temp_dir):
files = list_files(temp_dir, pattern=r"\.txt$")
assert len(files) == 2
assert all(f.endswith(".txt") for f in files)
def test_sorted(self, temp_dir):
files = list_files(temp_dir)
assert files == sorted(files)
| false
|
rename_batch
| 182
| 0
|
[
"generator",
"context_manager",
"class_definition",
"decorator"
] | 0.927
|
Type inference hints:
Hint: str for variable 'path' [Medium] (usage patterns suggest this type)
Profiling Report
══════════════════════════════════════════════════
Summary
Total estimated instructions: 1
Total estimated allocations: 0
Functions analyzed: 1
Hot Paths
[1] temp_dir (100.0% of execution time)
Function Metrics
🔥 temp_dir 100.0% time | 1 inst | 0 alloc
Performance Predictions
• Rust's memory layout is more cache-friendly than Python (1.3x
|
|
example_replace
|
replace_tool.py
|
#!/usr/bin/env python3
"""Replace Example - String replace operations CLI.
Examples:
>>> replace_char("hello", "l", "x")
'hexxo'
>>> replace_all("abc", "X")
'XXX'
"""
import argparse
def replace_char(text: str, old: str, new: str) -> str:
    """Substitute every occurrence of `old` in `text` with `new`.

    >>> replace_char("hello", "l", "x")
    'hexxo'
    >>> replace_char("banana", "a", "o")
    'bonono'
    >>> replace_char("test", "x", "y")
    'test'
    """
    # str.replace already replaces all occurrences.
    return text.replace(old, new)
def replace_all(text: str, new: str) -> str:
    """Replace each character with new.

    Output length is len(text) * len(new).

    >>> replace_all("abc", "X")
    'XXX'
    >>> replace_all("", "X")
    ''
    >>> replace_all("hello", "*")
    '*****'
    """
    # One repetition per input character; replaces the original
    # quadratic string-concatenation while-loop.
    return new * len(text)
def main():
    """Parse CLI arguments and dispatch to the requested replace operation."""
    parser = argparse.ArgumentParser(description="String replace tool")
    subs = parser.add_subparsers(dest="cmd", required=True)

    char_cmd = subs.add_parser("char")
    char_cmd.add_argument("text")
    char_cmd.add_argument("old")
    char_cmd.add_argument("new")

    first_cmd = subs.add_parser("first")
    first_cmd.add_argument("text")
    first_cmd.add_argument("new")

    all_cmd = subs.add_parser("all")
    all_cmd.add_argument("text")
    all_cmd.add_argument("new")

    args = parser.parse_args()
    if args.cmd == "char":
        print(replace_char(args.text, args.old, args.new))
    elif args.cmd == "first":
        # The original scanned for the first occurrence of the text's
        # own first character, which is always index 0 — i.e. replace
        # the leading character (empty input stays empty).
        print(args.new + args.text[1:] if args.text else "")
    elif args.cmd == "all":
        print(replace_all(args.text, args.new))
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_replace/replace_tool.py (1869 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_replace/replace_tool.rs (4539 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_replace/Cargo.toml (1 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 36.6 KB/s
⏱️ Total time: 50ms
| true
|
replace
| 80
| 6
|
[
"context_manager"
] | 0.652
| null |
example_replace
|
test_replace_tool.py
|
"""Tests for replace_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "replace_tool.py"
def run(cmd):
    # Invoke the CLI under test in a fresh interpreter; `cmd` is a
    # space-separated argument string (no shell quoting support).
    return subprocess.run(
        ["python3", str(SCRIPT)] + cmd.split(),
        capture_output=True,
        text=True,
    )
def test_char():
    # "char" replaces every occurrence: '_' -> '-'.
    r = run("char hello_world _ -")
    assert r.returncode == 0
    assert r.stdout.strip() == "hello-world"
def test_first():
    # "first" replaces only the leading character.
    r = run("first aaa x")
    assert r.returncode == 0
    assert r.stdout.strip() == "xaa"
def test_all():
    # "all" replaces every character with the replacement.
    r = run("all aaa x")
    assert r.returncode == 0
    assert r.stdout.strip() == "xxx"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_replace/test_replace_tool.py (636 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_replace/test_replace_tool.rs (1842 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_replace/Cargo.toml (2 dependencies)
⏱️ Parse time: 45ms
📊 Throughput: 13.6 KB/s
⏱️ Total time: 45ms
| true
|
replace
| 32
| 6
|
[] | 0
| null |
example_repr
|
repr_tool.py
|
#!/usr/bin/env python3
"""Repr Example - Representation operations CLI."""
import argparse
def main():
    """CLI entry point: echo values in quoted / repr form."""
    parser = argparse.ArgumentParser(description="Representation tool")
    subs = parser.add_subparsers(dest="cmd", required=True)

    string_cmd = subs.add_parser("string")
    string_cmd.add_argument("text")

    number_cmd = subs.add_parser("number")
    number_cmd.add_argument("num", type=int)

    escape_cmd = subs.add_parser("escape")
    escape_cmd.add_argument("name")

    args = parser.parse_args()
    if args.cmd == "string":
        print(f"'{args.text}'")
    elif args.cmd == "number":
        print(args.num)
    elif args.cmd == "escape":
        # Named escapes map to their control character; anything else
        # is repr'd verbatim.
        named = {"tab": "\t", "newline": "\n"}
        if args.name in named:
            print(repr(named[args.name]))
        else:
            print(repr(args.name))
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_repr/repr_tool.py (827 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_repr/repr_tool.rs (1317 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_repr/Cargo.toml (1 dependencies)
⏱️ Parse time: 46ms
📊 Throughput: 17.2 KB/s
⏱️ Total time: 47ms
| true
|
repr
| 33
| 6
|
[] | 0
| null |
example_repr
|
test_repr_tool.py
|
"""Tests for repr_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "repr_tool.py"
def run(cmd):
    # Invoke the CLI under test in a fresh interpreter; `cmd` is a
    # space-separated argument string (no shell quoting support).
    return subprocess.run(
        ["python3", str(SCRIPT)] + cmd.split(),
        capture_output=True,
        text=True,
    )
def test_string():
    # "string" wraps the text in single quotes.
    r = run("string hello")
    assert r.returncode == 0
    assert r.stdout.strip() == "'hello'"
def test_number():
    # "number" echoes the integer as-is.
    r = run("number 42")
    assert r.returncode == 0
    assert r.stdout.strip() == "42"
def test_escape():
r = run("escape tab")
assert r.returncode == 0
assert "\\t" in r.stdout
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_repr/test_repr_tool.py (614 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_repr/test_repr_tool.rs (1824 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_repr/Cargo.toml (2 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 12.6 KB/s
⏱️ Total time: 47ms
| true
|
repr
| 32
| 6
|
[] | 0
| null |
example_result_mapping
|
result_mapping_cli.py
|
#!/usr/bin/env python3
"""Result Mapping CLI.
Result<T,E> pattern mapping from exceptions to explicit error handling.
"""
import argparse
import sys
from collections.abc import Callable
from dataclasses import dataclass
from typing import TypeVar
T = TypeVar("T")
U = TypeVar("U")
E = TypeVar("E")
@dataclass
class Ok:
"""Success result."""
value: object
def is_ok(self) -> bool:
return True
def is_err(self) -> bool:
return False
@dataclass
class Err:
"""Error result."""
error: str
def is_ok(self) -> bool:
return False
def is_err(self) -> bool:
return True
Result = Ok | Err
def ok(value: object) -> Ok:
"""Create success result."""
return Ok(value)
def err(error: str) -> Err:
"""Create error result."""
return Err(error)
def is_ok(result: Result) -> bool:
"""Check if result is Ok."""
return isinstance(result, Ok)
def is_err(result: Result) -> bool:
"""Check if result is Err."""
return isinstance(result, Err)
def unwrap(result: Result) -> object:
"""Unwrap Ok value, raise on Err."""
if isinstance(result, Ok):
return result.value
raise ValueError(f"Called unwrap on Err: {result.error}")
def unwrap_or(result: Result, default: object) -> object:
"""Unwrap Ok value or return default."""
if isinstance(result, Ok):
return result.value
return default
def unwrap_err(result: Result) -> str:
"""Unwrap Err value, raise on Ok."""
if isinstance(result, Err):
return result.error
raise ValueError("Called unwrap_err on Ok")
def map_result(result: Result, f: Callable[[object], object]) -> Result:
"""Map function over Ok value."""
if isinstance(result, Ok):
return Ok(f(result.value))
return result
def map_err(result: Result, f: Callable[[str], str]) -> Result:
"""Map function over Err value."""
if isinstance(result, Err):
return Err(f(result.error))
return result
def and_then(result: Result, f: Callable[[object], Result]) -> Result:
"""Chain Result-returning function."""
if isinstance(result, Ok):
return f(result.value)
return result
def or_else(result: Result, f: Callable[[str], Result]) -> Result:
"""Chain on Err."""
if isinstance(result, Err):
return f(result.error)
return result
def parse_int(value: str) -> Result:
"""Parse string to int, return Result."""
try:
return Ok(int(value))
except ValueError:
return Err(f"Invalid integer: {value}")
def parse_float(value: str) -> Result:
"""Parse string to float, return Result."""
try:
return Ok(float(value))
except ValueError:
return Err(f"Invalid float: {value}")
def safe_divide(a: float, b: float) -> Result:
"""Safely divide, return Result."""
if b == 0:
return Err("Division by zero")
return Ok(a / b)
def safe_index(lst: list[int], idx: int) -> Result:
"""Safely index list, return Result."""
if 0 <= idx < len(lst):
return Ok(lst[idx])
return Err(f"Index out of bounds: {idx}")
def safe_key(d: dict[str, int], key: str) -> Result:
"""Safely get dict value, return Result."""
if key in d:
return Ok(d[key])
return Err(f"Key not found: {key}")
def chain_parse_and_double(value: str) -> Result:
"""Parse and double value using and_then."""
return and_then(parse_int(value), lambda x: Ok(x * 2))
def chain_multiple(value: str) -> Result:
"""Chain multiple operations."""
result = parse_int(value)
result = and_then(result, lambda x: Ok(x * 2))
result = and_then(result, lambda x: Ok(x + 10))
return result
def validate_positive(value: int) -> Result:
"""Validate value is positive."""
if value > 0:
return Ok(value)
return Err(f"Value must be positive: {value}")
def validate_range(value: int, min_val: int, max_val: int) -> Result:
"""Validate value is in range."""
if min_val <= value <= max_val:
return Ok(value)
return Err(f"Value {value} not in range [{min_val}, {max_val}]")
def parse_and_validate(value: str, min_val: int, max_val: int) -> Result:
"""Parse and validate in one chain."""
result = parse_int(value)
result = and_then(result, lambda x: validate_range(x, min_val, max_val))
return result
def collect_results(values: list[str]) -> Result:
"""Parse all values, fail on first error."""
results: list[int] = []
for v in values:
result = parse_int(v)
if isinstance(result, Err):
return result
results.append(result.value)
return Ok(results)
def collect_ok_values(values: list[str]) -> list[int]:
"""Collect only successful parses."""
results: list[int] = []
for v in values:
result = parse_int(v)
if isinstance(result, Ok):
results.append(result.value)
return results
def partition_results(values: list[str]) -> tuple[list[int], list[str]]:
"""Partition into successes and errors."""
oks: list[int] = []
errs: list[str] = []
for v in values:
result = parse_int(v)
if isinstance(result, Ok):
oks.append(result.value)
else:
errs.append(result.error)
return (oks, errs)
def first_ok(results: list[Result]) -> Result:
"""Return first Ok, or last Err."""
last_err: Result = Err("No results")
for r in results:
if isinstance(r, Ok):
return r
last_err = r
return last_err
def all_ok(results: list[Result]) -> bool:
"""Check if all results are Ok."""
return all(isinstance(r, Ok) for r in results)
def any_ok(results: list[Result]) -> bool:
"""Check if any result is Ok."""
return any(isinstance(r, Ok) for r in results)
def count_ok(results: list[Result]) -> int:
"""Count Ok results."""
return sum(1 for r in results if isinstance(r, Ok))
def count_err(results: list[Result]) -> int:
"""Count Err results."""
return sum(1 for r in results if isinstance(r, Err))
def try_operations(value: str) -> Result:
"""Try multiple operations, return first success."""
ops = [
lambda v: parse_int(v),
lambda v: Ok(0) if v == "zero" else Err("not zero"),
lambda v: Ok(-1) if v == "negative" else Err("not negative"),
]
for op in ops:
result = op(value)
if isinstance(result, Ok):
return result
return Err(f"All operations failed for: {value}")
def result_to_option(result: Result) -> object | None:
"""Convert Result to Option (Ok value or None)."""
if isinstance(result, Ok):
return result.value
return None
def option_to_result(value: object | None, err_msg: str) -> Result:
"""Convert Option to Result."""
if value is not None:
return Ok(value)
return Err(err_msg)
def flatten_result(result: Result) -> Result:
"""Flatten nested Result."""
if isinstance(result, Ok) and isinstance(result.value, (Ok, Err)):
return result.value
return result
def main() -> int:
parser = argparse.ArgumentParser(description="Result mapping CLI")
subparsers = parser.add_subparsers(dest="command", help="Commands")
# parse
parse_p = subparsers.add_parser("parse", help="Parse value")
parse_p.add_argument("value")
parse_p.add_argument("--type", choices=["int", "float"], default="int")
# divide
div_p = subparsers.add_parser("divide", help="Safe divide")
div_p.add_argument("a", type=float)
div_p.add_argument("b", type=float)
# chain
chain_p = subparsers.add_parser("chain", help="Chain operations")
chain_p.add_argument("value")
# validate
val_p = subparsers.add_parser("validate", help="Parse and validate")
val_p.add_argument("value")
val_p.add_argument("--min", type=int, default=0)
val_p.add_argument("--max", type=int, default=100)
# collect
collect_p = subparsers.add_parser("collect", help="Collect results")
collect_p.add_argument("values", nargs="+")
args = parser.parse_args()
if args.command == "parse":
if args.type == "int":
result = parse_int(args.value)
else:
result = parse_float(args.value)
if isinstance(result, Ok):
print(f"Ok: {result.value}")
else:
print(f"Err: {result.error}")
elif args.command == "divide":
result = safe_divide(args.a, args.b)
if isinstance(result, Ok):
print(f"Result: {result.value}")
else:
print(f"Error: {result.error}")
elif args.command == "chain":
result = chain_multiple(args.value)
if isinstance(result, Ok):
print(f"Final: {result.value}")
else:
print(f"Error: {result.error}")
elif args.command == "validate":
result = parse_and_validate(args.value, args.min, args.max)
if isinstance(result, Ok):
print(f"Valid: {result.value}")
else:
print(f"Invalid: {result.error}")
elif args.command == "collect":
oks, errs = partition_results(args.values)
print(f"Successes: {oks}")
print(f"Errors: {len(errs)}")
else:
parser.print_help()
return 0
if __name__ == "__main__":
sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_result_mapping/result_mapping_cli.py (9362 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_result_mapping/result_mapping_cli.rs (15806 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_result_mapping/Cargo.toml (3 dependencies)
⏱️ Parse time: 55ms
📊 Throughput: 165.0 KB/s
⏱️ Total time: 55ms
| true
|
result_mapping
| 359
| 6
|
[
"lambda",
"class_definition",
"exception_handling",
"decorator"
] | 0.783
| null |
example_result_mapping
|
test_result_mapping_cli.py
|
"""Tests for result_mapping_cli.py"""
import pytest
from result_mapping_cli import (
Err,
Ok,
all_ok,
and_then,
any_ok,
chain_multiple,
chain_parse_and_double,
collect_ok_values,
collect_results,
count_err,
count_ok,
err,
first_ok,
flatten_result,
is_err,
is_ok,
map_err,
map_result,
ok,
option_to_result,
or_else,
parse_and_validate,
parse_float,
parse_int,
partition_results,
result_to_option,
safe_divide,
safe_index,
safe_key,
try_operations,
unwrap,
unwrap_err,
unwrap_or,
validate_positive,
validate_range,
)
class TestOkErr:
def test_ok_is_ok(self):
result = Ok(42)
assert result.is_ok() is True
assert result.is_err() is False
def test_err_is_err(self):
result = Err("error")
assert result.is_ok() is False
assert result.is_err() is True
class TestConstructors:
def test_ok(self):
result = ok(42)
assert isinstance(result, Ok)
assert result.value == 42
def test_err(self):
result = err("error")
assert isinstance(result, Err)
assert result.error == "error"
class TestIsOkIsErr:
def test_is_ok(self):
assert is_ok(Ok(42)) is True
assert is_ok(Err("e")) is False
def test_is_err(self):
assert is_err(Err("e")) is True
assert is_err(Ok(42)) is False
class TestUnwrap:
def test_unwrap_ok(self):
assert unwrap(Ok(42)) == 42
def test_unwrap_err_raises(self):
with pytest.raises(ValueError):
unwrap(Err("error"))
class TestUnwrapOr:
def test_ok_returns_value(self):
assert unwrap_or(Ok(42), 0) == 42
def test_err_returns_default(self):
assert unwrap_or(Err("e"), 0) == 0
class TestUnwrapErr:
def test_err_returns_error(self):
assert unwrap_err(Err("error")) == "error"
def test_ok_raises(self):
with pytest.raises(ValueError):
unwrap_err(Ok(42))
class TestMapResult:
def test_map_ok(self):
result = map_result(Ok(2), lambda x: x * 2)
assert isinstance(result, Ok)
assert result.value == 4
def test_map_err_unchanged(self):
result = map_result(Err("e"), lambda x: x * 2)
assert isinstance(result, Err)
class TestMapErr:
def test_map_err(self):
result = map_err(Err("e"), lambda x: f"Error: {x}")
assert isinstance(result, Err)
assert result.error == "Error: e"
def test_map_ok_unchanged(self):
result = map_err(Ok(42), lambda x: f"Error: {x}")
assert isinstance(result, Ok)
class TestAndThen:
def test_ok_chains(self):
result = and_then(Ok(2), lambda x: Ok(x * 2))
assert isinstance(result, Ok)
assert result.value == 4
def test_err_short_circuits(self):
result = and_then(Err("e"), lambda x: Ok(x * 2))
assert isinstance(result, Err)
def test_chain_to_err(self):
result = and_then(Ok(0), lambda x: Err("zero") if x == 0 else Ok(1 / x))
assert isinstance(result, Err)
class TestOrElse:
def test_ok_unchanged(self):
result = or_else(Ok(42), lambda e: Ok(0))
assert isinstance(result, Ok)
assert result.value == 42
def test_err_recovers(self):
result = or_else(Err("e"), lambda e: Ok(0))
assert isinstance(result, Ok)
assert result.value == 0
class TestParseInt:
def test_valid(self):
result = parse_int("42")
assert isinstance(result, Ok)
assert result.value == 42
def test_invalid(self):
result = parse_int("abc")
assert isinstance(result, Err)
class TestParseFloat:
def test_valid(self):
result = parse_float("3.14")
assert isinstance(result, Ok)
assert result.value == 3.14
def test_invalid(self):
result = parse_float("xyz")
assert isinstance(result, Err)
class TestSafeDivide:
def test_normal(self):
result = safe_divide(10.0, 2.0)
assert isinstance(result, Ok)
assert result.value == 5.0
def test_zero_division(self):
result = safe_divide(10.0, 0.0)
assert isinstance(result, Err)
class TestSafeIndex:
def test_valid_index(self):
result = safe_index([1, 2, 3], 1)
assert isinstance(result, Ok)
assert result.value == 2
def test_invalid_index(self):
result = safe_index([1, 2, 3], 10)
assert isinstance(result, Err)
class TestSafeKey:
def test_valid_key(self):
result = safe_key({"a": 1}, "a")
assert isinstance(result, Ok)
assert result.value == 1
def test_invalid_key(self):
result = safe_key({"a": 1}, "b")
assert isinstance(result, Err)
class TestChainParseAndDouble:
def test_valid(self):
result = chain_parse_and_double("21")
assert isinstance(result, Ok)
assert result.value == 42
def test_invalid(self):
result = chain_parse_and_double("abc")
assert isinstance(result, Err)
class TestChainMultiple:
def test_valid(self):
result = chain_multiple("5")
assert isinstance(result, Ok)
assert result.value == 20 # (5 * 2) + 10
def test_invalid(self):
result = chain_multiple("abc")
assert isinstance(result, Err)
class TestValidatePositive:
def test_positive(self):
assert isinstance(validate_positive(5), Ok)
def test_zero(self):
assert isinstance(validate_positive(0), Err)
def test_negative(self):
assert isinstance(validate_positive(-1), Err)
class TestValidateRange:
def test_in_range(self):
assert isinstance(validate_range(5, 0, 10), Ok)
def test_below_range(self):
assert isinstance(validate_range(-1, 0, 10), Err)
def test_above_range(self):
assert isinstance(validate_range(20, 0, 10), Err)
class TestParseAndValidate:
def test_valid(self):
result = parse_and_validate("5", 0, 10)
assert isinstance(result, Ok)
def test_invalid_parse(self):
result = parse_and_validate("abc", 0, 10)
assert isinstance(result, Err)
def test_invalid_range(self):
result = parse_and_validate("20", 0, 10)
assert isinstance(result, Err)
class TestCollectResults:
def test_all_valid(self):
result = collect_results(["1", "2", "3"])
assert isinstance(result, Ok)
assert result.value == [1, 2, 3]
def test_first_invalid(self):
result = collect_results(["1", "abc", "3"])
assert isinstance(result, Err)
class TestCollectOkValues:
def test_all_valid(self):
assert collect_ok_values(["1", "2", "3"]) == [1, 2, 3]
def test_some_invalid(self):
assert collect_ok_values(["1", "abc", "3"]) == [1, 3]
class TestPartitionResults:
def test_all_valid(self):
oks, errs = partition_results(["1", "2", "3"])
assert oks == [1, 2, 3]
assert errs == []
def test_mixed(self):
oks, errs = partition_results(["1", "abc", "3"])
assert oks == [1, 3]
assert len(errs) == 1
class TestFirstOk:
def test_first_is_ok(self):
results = [Ok(1), Ok(2), Err("e")]
result = first_ok(results)
assert isinstance(result, Ok)
assert result.value == 1
def test_all_err(self):
results = [Err("a"), Err("b")]
result = first_ok(results)
assert isinstance(result, Err)
def test_empty(self):
result = first_ok([])
assert isinstance(result, Err)
class TestAllOk:
def test_all_ok(self):
assert all_ok([Ok(1), Ok(2)]) is True
def test_some_err(self):
assert all_ok([Ok(1), Err("e")]) is False
class TestAnyOk:
def test_any_ok(self):
assert any_ok([Err("e"), Ok(1)]) is True
def test_all_err(self):
assert any_ok([Err("a"), Err("b")]) is False
class TestCountOkErr:
def test_count_ok(self):
results = [Ok(1), Err("e"), Ok(2)]
assert count_ok(results) == 2
def test_count_err(self):
results = [Ok(1), Err("e"), Ok(2)]
assert count_err(results) == 1
class TestTryOperations:
def test_valid_int(self):
result = try_operations("42")
assert isinstance(result, Ok)
def test_zero_keyword(self):
result = try_operations("zero")
assert isinstance(result, Ok)
assert result.value == 0
def test_all_fail(self):
result = try_operations("unknown")
assert isinstance(result, Err)
class TestResultToOption:
def test_ok_to_value(self):
assert result_to_option(Ok(42)) == 42
def test_err_to_none(self):
assert result_to_option(Err("e")) is None
class TestOptionToResult:
def test_value_to_ok(self):
result = option_to_result(42, "error")
assert isinstance(result, Ok)
def test_none_to_err(self):
result = option_to_result(None, "error")
assert isinstance(result, Err)
class TestFlattenResult:
def test_nested_ok(self):
result = flatten_result(Ok(Ok(42)))
assert isinstance(result, Ok)
assert result.value == 42
def test_nested_err(self):
result = flatten_result(Ok(Err("e")))
assert isinstance(result, Err)
def test_not_nested(self):
result = flatten_result(Ok(42))
assert isinstance(result, Ok)
assert result.value == 42
class TestEdgeCases:
def test_empty_list_collect(self):
result = collect_results([])
assert isinstance(result, Ok)
assert result.value == []
def test_negative_index(self):
result = safe_index([1, 2, 3], -1)
assert isinstance(result, Err)
def test_parse_whitespace(self):
result = parse_int(" 42 ")
assert isinstance(result, Ok)
| false
|
result_mapping
| 400
| 0
|
[
"lambda",
"context_manager",
"class_definition"
] | 0.783
|
Error: Expression type not yet supported: IfExpr { test: Binary { op: Eq, left: Var("x"), right: Literal(Int(0)) }, body: Call { func: "Err", args: [Literal(String("zero"))], kwargs: [] }, orelse: Call { func: "Ok", args: [Binary { op: Div, left: Literal(Int(1)), right: Var("x") }], kwargs: [] } }
|
|
example_retry_logic
|
retry_cli.py
|
#!/usr/bin/env python3
"""Retry logic CLI.
Retry strategies with exponential backoff.
"""
import argparse
import random
import sys
import time
from collections.abc import Callable
from dataclasses import dataclass
from enum import Enum
class RetryStrategy(Enum):
FIXED = "fixed"
LINEAR = "linear"
EXPONENTIAL = "exponential"
FIBONACCI = "fibonacci"
@dataclass
class RetryConfig:
"""Retry configuration."""
max_attempts: int
initial_delay: float
max_delay: float
strategy: RetryStrategy
jitter: float # 0.0 to 1.0
timeout: float | None # Total timeout
def __init__(
self,
max_attempts: int = 3,
initial_delay: float = 1.0,
max_delay: float = 60.0,
strategy: RetryStrategy = RetryStrategy.EXPONENTIAL,
jitter: float = 0.0,
timeout: float | None = None,
):
self.max_attempts = max_attempts
self.initial_delay = initial_delay
self.max_delay = max_delay
self.strategy = strategy
self.jitter = jitter
self.timeout = timeout
@dataclass
class RetryResult:
"""Result of a retry operation."""
success: bool
attempts: int
total_time: float
final_error: str | None
delays: list[float]
def calculate_delay(
config: RetryConfig,
attempt: int,
) -> float:
"""Calculate delay for a given attempt."""
if config.strategy == RetryStrategy.FIXED:
base_delay = config.initial_delay
elif config.strategy == RetryStrategy.LINEAR:
base_delay = config.initial_delay * attempt
elif config.strategy == RetryStrategy.EXPONENTIAL:
base_delay = config.initial_delay * (2 ** (attempt - 1))
elif config.strategy == RetryStrategy.FIBONACCI:
# Fibonacci sequence for delays
a, b = 1, 1
for _ in range(attempt - 1):
a, b = b, a + b
base_delay = config.initial_delay * a
else:
base_delay = config.initial_delay
# Apply jitter
if config.jitter > 0:
jitter_range = base_delay * config.jitter
base_delay += random.uniform(-jitter_range, jitter_range)
# Cap at max delay
return min(max(0, base_delay), config.max_delay)
def simulate_operation(
success_probability: float,
fail_until: int = 0,
attempt: int = 0,
) -> tuple[bool, str]:
"""Simulate an operation that might fail.
Returns (success, error_message).
"""
if attempt < fail_until:
return False, f"Simulated failure (attempt {attempt + 1})"
if random.random() < success_probability:
return True, ""
return False, "Random failure"
def retry_operation(
operation: Callable[[], tuple[bool, str]],
config: RetryConfig,
verbose: bool = False,
) -> RetryResult:
"""Retry an operation according to config."""
start_time = time.time()
delays = []
for attempt in range(1, config.max_attempts + 1):
# Check timeout
if config.timeout:
elapsed = time.time() - start_time
if elapsed >= config.timeout:
return RetryResult(
success=False,
attempts=attempt - 1,
total_time=elapsed,
final_error="Timeout exceeded",
delays=delays,
)
# Execute operation
success, error = operation()
if success:
return RetryResult(
success=True,
attempts=attempt,
total_time=time.time() - start_time,
final_error=None,
delays=delays,
)
if verbose:
print(f"Attempt {attempt} failed: {error}")
# Calculate delay for next attempt
if attempt < config.max_attempts:
delay = calculate_delay(config, attempt)
delays.append(delay)
if verbose:
print(f"Waiting {delay:.2f}s before retry...")
time.sleep(delay)
return RetryResult(
success=False,
attempts=config.max_attempts,
total_time=time.time() - start_time,
final_error=error,
delays=delays,
)
def preview_delays(config: RetryConfig) -> list[float]:
"""Preview delay sequence without running."""
delays = []
for attempt in range(1, config.max_attempts):
# Use deterministic delays for preview (no jitter)
temp_config = RetryConfig(
max_attempts=config.max_attempts,
initial_delay=config.initial_delay,
max_delay=config.max_delay,
strategy=config.strategy,
jitter=0.0,
)
delays.append(calculate_delay(temp_config, attempt))
return delays
def total_max_time(config: RetryConfig) -> float:
"""Calculate maximum total time for all retries."""
delays = preview_delays(config)
return sum(delays)
def main() -> int:
parser = argparse.ArgumentParser(description="Retry logic with backoff strategies")
parser.add_argument("--attempts", type=int, default=5, help="Maximum retry attempts")
parser.add_argument("--delay", type=float, default=1.0, help="Initial delay in seconds")
parser.add_argument("--max-delay", type=float, default=60.0, help="Maximum delay in seconds")
parser.add_argument(
"--strategy",
choices=["fixed", "linear", "exponential", "fibonacci"],
default="exponential",
help="Backoff strategy",
)
parser.add_argument("--jitter", type=float, default=0.0, help="Jitter factor (0.0 to 1.0)")
parser.add_argument("--timeout", type=float, help="Total timeout in seconds")
parser.add_argument("--preview", action="store_true", help="Preview delay sequence")
parser.add_argument("--simulate", action="store_true", help="Simulate with random failures")
parser.add_argument(
"--success-rate", type=float, default=0.5, help="Success probability for simulation"
)
parser.add_argument("--fail-until", type=int, default=0, help="Force failure until attempt N")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
args = parser.parse_args()
config = RetryConfig(
max_attempts=args.attempts,
initial_delay=args.delay,
max_delay=args.max_delay,
strategy=RetryStrategy(args.strategy),
jitter=args.jitter,
timeout=args.timeout,
)
if args.preview:
delays = preview_delays(config)
total = sum(delays)
print(f"Strategy: {config.strategy.value}")
print(f"Attempts: {config.max_attempts}")
print("\nDelay sequence:")
for i, delay in enumerate(delays, 1):
print(f" After attempt {i}: {delay:.2f}s")
print(f"\nTotal max wait: {total:.2f}s")
return 0
if args.simulate:
attempt_counter = [0]
def operation():
result = simulate_operation(
args.success_rate,
args.fail_until,
attempt_counter[0],
)
attempt_counter[0] += 1
return result
result = retry_operation(operation, config, args.verbose)
print(f"\nResult: {'SUCCESS' if result.success else 'FAILED'}")
print(f"Attempts: {result.attempts}")
print(f"Total time: {result.total_time:.2f}s")
if result.final_error:
print(f"Final error: {result.final_error}")
return 0 if result.success else 1
# Default: show config
print("Retry Configuration:")
print(f" Max attempts: {config.max_attempts}")
print(f" Initial delay: {config.initial_delay}s")
print(f" Max delay: {config.max_delay}s")
print(f" Strategy: {config.strategy.value}")
print(f" Jitter: {config.jitter}")
if config.timeout:
print(f" Timeout: {config.timeout}s")
print(f"\nMax total wait: {total_max_time(config):.2f}s")
return 0
if __name__ == "__main__":
sys.exit(main())
| false
|
retry_logic
| 272
| 0
|
[
"context_manager",
"class_definition",
"decorator"
] | 0.652
|
Type inference hints:
Hint: int for variable 'attempt' [High] (usage patterns suggest this type)
Hint: int for variable 'a' [Medium] (usage patterns suggest this type)
Hint: int for variable 'base_delay' [Medium] (usage patterns suggest this type)
Type inference hints:
Hint: str for variable 'attempt' [Medium] (usage patterns suggest this type)
Hint: list[Any] for variable 'delays' [High] (usage patterns suggest this type)
Hint: str for variable 'delay' [Medium] (usage patterns suggest this typ
|
|
example_retry_logic
|
test_retry_cli.py
|
"""Tests for retry_cli.py"""
from retry_cli import (
RetryConfig,
RetryStrategy,
calculate_delay,
preview_delays,
retry_operation,
simulate_operation,
total_max_time,
)
class TestRetryConfig:
def test_defaults(self):
config = RetryConfig()
assert config.max_attempts == 3
assert config.initial_delay == 1.0
assert config.max_delay == 60.0
assert config.strategy == RetryStrategy.EXPONENTIAL
assert config.jitter == 0.0
def test_custom(self):
config = RetryConfig(
max_attempts=5,
initial_delay=0.5,
strategy=RetryStrategy.LINEAR,
)
assert config.max_attempts == 5
assert config.initial_delay == 0.5
assert config.strategy == RetryStrategy.LINEAR
class TestCalculateDelay:
def test_fixed(self):
config = RetryConfig(
initial_delay=1.0,
strategy=RetryStrategy.FIXED,
)
assert calculate_delay(config, 1) == 1.0
assert calculate_delay(config, 2) == 1.0
assert calculate_delay(config, 5) == 1.0
def test_linear(self):
config = RetryConfig(
initial_delay=1.0,
strategy=RetryStrategy.LINEAR,
)
assert calculate_delay(config, 1) == 1.0
assert calculate_delay(config, 2) == 2.0
assert calculate_delay(config, 5) == 5.0
def test_exponential(self):
config = RetryConfig(
initial_delay=1.0,
strategy=RetryStrategy.EXPONENTIAL,
)
assert calculate_delay(config, 1) == 1.0
assert calculate_delay(config, 2) == 2.0
assert calculate_delay(config, 3) == 4.0
assert calculate_delay(config, 4) == 8.0
def test_fibonacci(self):
config = RetryConfig(
initial_delay=1.0,
strategy=RetryStrategy.FIBONACCI,
)
assert calculate_delay(config, 1) == 1.0
assert calculate_delay(config, 2) == 1.0
assert calculate_delay(config, 3) == 2.0
assert calculate_delay(config, 4) == 3.0
assert calculate_delay(config, 5) == 5.0
def test_max_delay_cap(self):
config = RetryConfig(
initial_delay=10.0,
max_delay=15.0,
strategy=RetryStrategy.EXPONENTIAL,
)
# 10 * 2^2 = 40, but capped at 15
assert calculate_delay(config, 3) == 15.0
def test_jitter(self):
config = RetryConfig(
initial_delay=10.0,
max_delay=60.0,
strategy=RetryStrategy.FIXED,
jitter=0.5,
)
# With 50% jitter, delay should be 5-15
delays = [calculate_delay(config, 1) for _ in range(100)]
assert all(5.0 <= d <= 15.0 for d in delays)
class TestSimulateOperation:
def test_always_success(self):
success, error = simulate_operation(1.0)
assert success is True
assert error == ""
def test_always_fail(self):
success, error = simulate_operation(0.0)
assert success is False
assert error != ""
def test_fail_until(self):
# Should fail for first 3 attempts
success, _ = simulate_operation(1.0, fail_until=3, attempt=0)
assert success is False
success, _ = simulate_operation(1.0, fail_until=3, attempt=2)
assert success is False
success, _ = simulate_operation(1.0, fail_until=3, attempt=3)
assert success is True
class TestRetryOperation:
def test_immediate_success(self):
def always_succeed():
return True, ""
config = RetryConfig(max_attempts=3, initial_delay=0.01)
result = retry_operation(always_succeed, config)
assert result.success is True
assert result.attempts == 1
assert len(result.delays) == 0
def test_retry_then_succeed(self):
counter = [0]
def succeed_on_third():
counter[0] += 1
if counter[0] < 3:
return False, "Not yet"
return True, ""
config = RetryConfig(max_attempts=5, initial_delay=0.01)
result = retry_operation(succeed_on_third, config)
assert result.success is True
assert result.attempts == 3
assert len(result.delays) == 2
def test_all_fail(self):
def always_fail():
return False, "Always fails"
config = RetryConfig(max_attempts=3, initial_delay=0.01)
result = retry_operation(always_fail, config)
assert result.success is False
assert result.attempts == 3
assert result.final_error == "Always fails"
def test_timeout(self):
def slow_fail():
return False, "Failed"
config = RetryConfig(
max_attempts=10,
initial_delay=0.5,
timeout=0.1,
)
result = retry_operation(slow_fail, config)
assert result.success is False
assert "Timeout" in result.final_error
class TestPreviewDelays:
def test_preview(self):
config = RetryConfig(
max_attempts=5,
initial_delay=1.0,
strategy=RetryStrategy.EXPONENTIAL,
)
delays = preview_delays(config)
assert len(delays) == 4 # max_attempts - 1
assert delays == [1.0, 2.0, 4.0, 8.0]
class TestTotalMaxTime:
def test_total(self):
config = RetryConfig(
max_attempts=4,
initial_delay=1.0,
strategy=RetryStrategy.FIXED,
)
total = total_max_time(config)
assert total == 3.0 # 3 delays of 1 second each
def test_exponential_total(self):
config = RetryConfig(
max_attempts=4,
initial_delay=1.0,
strategy=RetryStrategy.EXPONENTIAL,
)
total = total_max_time(config)
assert total == 7.0 # 1 + 2 + 4
| false
|
retry_logic
| 202
| 0
|
[
"class_definition"
] | 0.612
|
Error: Expression type not yet supported: GeneratorExp { element: Binary { op: And, left: Binary { op: LtEq, left: Literal(Float(5.0)), right: Var("d") }, right: Binary { op: LtEq, left: Var("d"), right: Literal(Float(15.0)) } }, generators: [HirComprehension { target: "d", iter: Var("delays"), conditions: [] }] }
|
|
example_reversed
|
reversed_tool.py
|
#!/usr/bin/env python3
"""Reversed Example - Reverse operations CLI."""
import argparse
def main():
parser = argparse.ArgumentParser(description="Reverse tool")
subs = parser.add_subparsers(dest="cmd", required=True)
s = subs.add_parser("string")
s.add_argument("text")
d = subs.add_parser("digits")
d.add_argument("num", type=int)
w = subs.add_parser("words")
w.add_argument("text")
args = parser.parse_args()
if args.cmd == "string":
result = ""
i = len(args.text) - 1
while i >= 0:
result = result + args.text[i]
i = i - 1
print(result)
elif args.cmd == "digits":
n = args.num
result = 0
while n > 0:
result = result * 10 + n % 10
n = n // 10
print(result)
elif args.cmd == "words":
parts = args.text.split("_")
result = parts[2] + "_" + parts[1] + "_" + parts[0]
print(result)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_reversed/reversed_tool.py (1010 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_reversed/reversed_tool.rs (3755 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_reversed/Cargo.toml (1 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 20.9 KB/s
⏱️ Total time: 47ms
| true
|
reversed
| 40
| 6
|
[] | 0
| null |
example_reversed
|
test_reversed_tool.py
|
"""Tests for reversed_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "reversed_tool.py"
def run(cmd):
return subprocess.run(
["python3", str(SCRIPT)] + cmd.split(),
capture_output=True,
text=True,
)
def test_string():
r = run("string hello")
assert r.returncode == 0
assert r.stdout.strip() == "olleh"
def test_digits():
r = run("digits 12345")
assert r.returncode == 0
assert r.stdout.strip() == "54321"
def test_words():
    """'words one_two_three' prints the fields in reverse order."""
    result = run("words one_two_three")
    assert result.returncode == 0
    assert result.stdout.strip() == "three_two_one"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_reversed/test_reversed_tool.py (652 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_reversed/test_reversed_tool.rs (1857 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_reversed/Cargo.toml (2 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 13.4 KB/s
⏱️ Total time: 47ms
| true
|
reversed
| 32
| 6
|
[] | 0
| null |
example_round
|
round_tool.py
|
#!/usr/bin/env python3
"""Round Example - Rounding operations CLI.
Examples:
>>> round_nearest(2.4)
2
>>> round_nearest(2.6)
3
>>> round_floor(2.9)
2
>>> round_ceil(2.1)
3
"""
import argparse
import math
def round_nearest(x: float) -> int:
    """Round *x* to the nearest integer.

    Uses Python's built-in round(), which applies banker's rounding:
    exact halves go to the nearest even integer.

    >>> round_nearest(2.5)
    2
    >>> round_nearest(3.5)
    4
    """
    nearest = round(x)
    return nearest
def round_floor(x: float) -> int:
    """Return the largest integer that is <= *x*.

    >>> round_floor(2.9)
    2
    >>> round_floor(-2.1)
    -3
    """
    floored = math.floor(x)
    return floored
def round_ceil(x: float) -> int:
    """Return the smallest integer that is >= *x*.

    >>> round_ceil(2.1)
    3
    >>> round_ceil(-2.9)
    -2
    """
    ceiled = math.ceil(x)
    return ceiled
def main():
    """CLI entry point: round a float using the selected strategy."""
    parser = argparse.ArgumentParser(description="Rounding tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    # All three subcommands take a single float positional.
    for name in ("nearest", "floor", "ceil"):
        sub = subs.add_parser(name)
        sub.add_argument("x", type=float)
    args = parser.parse_args()
    # Dispatch table instead of an if/elif chain; required=True guarantees
    # args.cmd is one of the three keys.
    handlers = {"nearest": round_nearest, "floor": round_floor, "ceil": round_ceil}
    print(handlers[args.cmd](args.x))


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_round/round_tool.py (1487 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_round/round_tool.rs (1854 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_round/Cargo.toml (1 dependencies)
⏱️ Parse time: 46ms
📊 Throughput: 31.3 KB/s
⏱️ Total time: 46ms
| true
|
round
| 81
| 6
|
[] | 0
| null |
example_round
|
test_round_tool.py
|
"""Tests for round_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "round_tool.py"
def run(cmd):
    """Execute round_tool.py with *cmd* (whitespace-split) and capture output."""
    argv = ["python3", str(SCRIPT)] + cmd.split()
    return subprocess.run(argv, capture_output=True, text=True)
def test_nearest():
    """'nearest 3.7' rounds up to 4."""
    result = run("nearest 3.7")
    assert result.returncode == 0
    assert result.stdout.strip() == "4"
def test_floor():
    """'floor 3.7' rounds down to 3."""
    result = run("floor 3.7")
    assert result.returncode == 0
    assert result.stdout.strip() == "3"
def test_ceil():
    """'ceil 3.2' rounds up to 4."""
    result = run("ceil 3.2")
    assert result.returncode == 0
    assert result.stdout.strip() == "4"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_round/test_round_tool.py (610 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_round/test_round_tool.rs (1818 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_round/Cargo.toml (2 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 12.6 KB/s
⏱️ Total time: 47ms
| true
|
round
| 32
| 6
|
[] | 0
| null |
example_schedule_parser
|
schedule_cli.py
|
#!/usr/bin/env python3
"""Schedule parser CLI.
Parse and evaluate time-based schedules.
"""
import argparse
import re
import sys
from datetime import datetime, timedelta
def parse_time_range(spec: str) -> tuple[int, int] | None:
"""Parse time range like '09:00-17:00'.
Returns (start_minutes, end_minutes) from midnight.
"""
match = re.match(r"(\d{1,2}):(\d{2})-(\d{1,2}):(\d{2})", spec)
if not match:
return None
start_h, start_m, end_h, end_m = map(int, match.groups())
start = start_h * 60 + start_m
end = end_h * 60 + end_m
return start, end
def parse_days(spec: str) -> list[int]:
    """Parse a day specification into sorted, de-duplicated weekday numbers.

    Accepted forms (case-insensitive): named shortcuts ('weekdays',
    'weekends', 'daily', '*'), comma-separated items, single days by name
    ('Mon') or number ('0'..'6', 0=Monday), and ranges by name ('Mon-Fri')
    or number ('1-5').  Backwards ranges wrap around the week — this was
    already the behavior for named ranges ('Fri-Mon'); numeric ranges
    ('5-1') now wrap the same way instead of silently producing nothing.
    Unrecognized tokens are skipped.
    """
    spec = spec.lower().strip()
    # Named shortcuts
    if spec == "weekdays":
        return [0, 1, 2, 3, 4]
    if spec == "weekends":
        return [5, 6]
    if spec == "daily" or spec == "*":
        return [0, 1, 2, 3, 4, 5, 6]
    day_names = {
        "mon": 0,
        "monday": 0,
        "tue": 1,
        "tuesday": 1,
        "wed": 2,
        "wednesday": 2,
        "thu": 3,
        "thursday": 3,
        "fri": 4,
        "friday": 4,
        "sat": 5,
        "saturday": 5,
        "sun": 6,
        "sunday": 6,
    }
    result = []
    # Handle comma-separated
    for part in spec.split(","):
        part = part.strip()
        # Handle range
        if "-" in part:
            start, end = part.split("-", 1)
            start = start.strip()
            end = end.strip()
            if start.isdigit() and end.isdigit():
                # Numeric range
                start_d = int(start)
                end_d = int(end)
                if start_d <= end_d:
                    for d in range(start_d, end_d + 1):
                        if 0 <= d <= 6:
                            result.append(d)
                elif start_d <= 6 and end_d <= 6:
                    # Wrap around (e.g. '5-1'), consistent with named ranges
                    result.extend(range(start_d, 7))
                    result.extend(range(0, end_d + 1))
            else:
                # Named range
                start_d = day_names.get(start)
                end_d = day_names.get(end)
                if start_d is not None and end_d is not None:
                    if start_d <= end_d:
                        result.extend(range(start_d, end_d + 1))
                    else:
                        # Wrap around (Fri-Mon)
                        result.extend(range(start_d, 7))
                        result.extend(range(0, end_d + 1))
        else:
            # Single day
            if part.isdigit():
                d = int(part)
                if 0 <= d <= 6:
                    result.append(d)
            else:
                d = day_names.get(part)
                if d is not None:
                    result.append(d)
    return sorted(set(result))
def parse_schedule(spec: str) -> dict:
    """Parse a full schedule spec ('days time_range', e.g. 'Mon-Fri 09:00-17:00').

    A single token may be either a time range (applies daily) or a day spec
    (applies all day).  Missing parts default to all days / 00:00-24:00.
    """
    tokens = spec.strip().split()
    if not tokens:
        return {"days": [], "start": 0, "end": 1440}
    if len(tokens) == 1:
        # Lone token: try time range first, otherwise treat it as days.
        rng = parse_time_range(tokens[0])
        if rng:
            return {"days": list(range(7)), "start": rng[0], "end": rng[1]}
        return {"days": parse_days(tokens[0]), "start": 0, "end": 1440}
    rng = parse_time_range(tokens[1])
    start, end = (rng[0], rng[1]) if rng else (0, 1440)
    return {"days": parse_days(tokens[0]), "start": start, "end": end}
def is_in_schedule(schedule: dict, dt: datetime) -> bool:
    """Return True if *dt* falls on a scheduled day and inside the time window.

    Start is inclusive, end exclusive; windows with start > end wrap past
    midnight.
    """
    if dt.weekday() not in schedule["days"]:
        return False
    minute_of_day = dt.hour * 60 + dt.minute
    start, end = schedule["start"], schedule["end"]
    if start <= end:
        # Normal same-day window.
        return start <= minute_of_day < end
    # Overnight window wrapping past midnight.
    return minute_of_day >= start or minute_of_day < end
def next_occurrence(schedule: dict, after: datetime) -> datetime | None:
    """First minute strictly after *after* inside the schedule, or None.

    Scans minute-by-minute for at most 7 days (7*24*60 steps), which always
    covers a weekly schedule.
    """
    probe = after.replace(second=0, microsecond=0)
    one_minute = timedelta(minutes=1)
    remaining = 7 * 24 * 60
    while remaining > 0:
        probe = probe + one_minute
        if is_in_schedule(schedule, probe):
            return probe
        remaining -= 1
    return None
def time_until_schedule(schedule: dict, dt: datetime) -> timedelta | None:
    """Time remaining until the schedule is next active (zero if already active)."""
    if is_in_schedule(schedule, dt):
        return timedelta(0)
    upcoming = next_occurrence(schedule, dt)
    return (upcoming - dt) if upcoming else None
def format_schedule(schedule: dict) -> str:
    """Render a schedule dict as 'Days HH:MM-HH:MM'.

    Well-known day sets get their shorthand label (Daily / Weekdays /
    Weekends); anything else is a comma-separated list of day names.
    """
    labels = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
    days = schedule["days"]
    shorthand = {
        tuple(range(7)): "Daily",
        (0, 1, 2, 3, 4): "Weekdays",
        (5, 6): "Weekends",
    }
    days_str = shorthand.get(tuple(days))
    if days_str is None:
        days_str = ", ".join(labels[d] for d in days)
    sh, sm = divmod(schedule["start"], 60)
    eh, em = divmod(schedule["end"], 60)
    return f"{days_str} {sh:02d}:{sm:02d}-{eh:02d}:{em:02d}"
def main() -> int:
    """CLI entry point: parse a schedule spec and report on it.

    Exit status: 0 on success; 1 on a missing/invalid schedule spec or an
    unparseable --check time.
    """
    parser = argparse.ArgumentParser(description="Parse and evaluate schedules")
    parser.add_argument("schedule", nargs="?", help="Schedule specification")
    parser.add_argument("--check", metavar="TIME", help="Check if time matches schedule")
    parser.add_argument("--next", action="store_true", help="Show next occurrence")
    parser.add_argument("--until", action="store_true", help="Show time until schedule")
    parser.add_argument("--format", action="store_true", help="Show formatted schedule")
    args = parser.parse_args()
    if not args.schedule:
        print("Usage: schedule_cli.py 'Mon-Fri 09:00-17:00'")
        return 1
    schedule = parse_schedule(args.schedule)
    # An empty day list means the spec could not be parsed into any days.
    if not schedule["days"]:
        print("Invalid schedule specification", file=sys.stderr)
        return 1
    if args.format:
        print(format_schedule(schedule))
        return 0
    # Get check time: explicit --check value, otherwise "now"
    if args.check:
        # Parse time to check: ISO 8601 ('T' separator) or 'YYYY-MM-DD HH:MM'
        try:
            if "T" in args.check:
                check_dt = datetime.fromisoformat(args.check)
            else:
                check_dt = datetime.strptime(args.check, "%Y-%m-%d %H:%M")
        except ValueError:
            print(f"Invalid time format: {args.check}", file=sys.stderr)
            return 1
    else:
        check_dt = datetime.now()
    # Check/display
    in_schedule = is_in_schedule(schedule, check_dt)
    if args.next:
        next_dt = next_occurrence(schedule, check_dt)
        if next_dt:
            print(f"Next: {next_dt.strftime('%Y-%m-%d %H:%M')}")
        else:
            print("No upcoming occurrence")
        return 0
    if args.until:
        remaining = time_until_schedule(schedule, check_dt)
        # NOTE(review): a zero timedelta is falsy, so "already in schedule"
        # also prints "Schedule not found" here — confirm intended.
        if remaining:
            hours = int(remaining.total_seconds() // 3600)
            minutes = int((remaining.total_seconds() % 3600) // 60)
            if hours > 0:
                print(f"Time until schedule: {hours}h {minutes}m")
            else:
                print(f"Time until schedule: {minutes}m")
        else:
            print("Schedule not found")
        return 0
    # Default: check current status
    status = "IN SCHEDULE" if in_schedule else "NOT IN SCHEDULE"
    print(f"Schedule: {format_schedule(schedule)}")
    print(f"Status: {status}")
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_schedule_parser/schedule_cli.py (7565 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_schedule_parser/schedule_cli.rs (14040 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_schedule_parser/Cargo.toml (5 dependencies)
⏱️ Parse time: 55ms
📊 Throughput: 134.2 KB/s
⏱️ Total time: 55ms
| true
|
schedule_parser
| 266
| 6
|
[
"exception_handling"
] | 0.577
| null |
example_schedule_parser
|
test_schedule_cli.py
|
"""Tests for schedule_cli.py"""
from datetime import datetime, timedelta
from schedule_cli import (
format_schedule,
is_in_schedule,
next_occurrence,
parse_days,
parse_schedule,
parse_time_range,
time_until_schedule,
)
class TestParseTimeRange:
    """parse_time_range: HH:MM-HH:MM parsing and rejection of malformed specs."""
    def test_simple_range(self):
        result = parse_time_range("09:00-17:00")
        assert result == (9 * 60, 17 * 60)
    def test_midnight(self):
        result = parse_time_range("00:00-23:59")
        assert result == (0, 23 * 60 + 59)
    def test_single_digit_hour(self):
        result = parse_time_range("9:00-5:00")
        assert result == (9 * 60, 5 * 60)
    def test_invalid(self):
        assert parse_time_range("invalid") is None
        assert parse_time_range("9-5") is None
class TestParseDays:
    """parse_days: shortcuts, named/numeric ranges (incl. wrap-around), comma lists."""
    def test_weekdays(self):
        assert parse_days("weekdays") == [0, 1, 2, 3, 4]
    def test_weekends(self):
        assert parse_days("weekends") == [5, 6]
    def test_daily(self):
        assert parse_days("daily") == [0, 1, 2, 3, 4, 5, 6]
    def test_star(self):
        assert parse_days("*") == [0, 1, 2, 3, 4, 5, 6]
    def test_named_range(self):
        assert parse_days("Mon-Fri") == [0, 1, 2, 3, 4]
    def test_numeric_range(self):
        assert parse_days("0-4") == [0, 1, 2, 3, 4]
    def test_comma_separated(self):
        assert parse_days("Mon,Wed,Fri") == [0, 2, 4]
    def test_wrap_around(self):
        result = parse_days("Fri-Mon")
        assert set(result) == {0, 4, 5, 6}
class TestParseSchedule:
    """parse_schedule: full 'days time' specs plus days-only / time-only forms."""
    def test_full_spec(self):
        result = parse_schedule("Mon-Fri 09:00-17:00")
        assert result["days"] == [0, 1, 2, 3, 4]
        assert result["start"] == 9 * 60
        assert result["end"] == 17 * 60
    def test_days_only(self):
        result = parse_schedule("weekdays")
        assert result["days"] == [0, 1, 2, 3, 4]
        assert result["start"] == 0
        assert result["end"] == 1440
    def test_time_only(self):
        result = parse_schedule("09:00-17:00")
        assert result["days"] == list(range(7))
        assert result["start"] == 9 * 60
class TestIsInSchedule:
    """is_in_schedule: day membership and window bounds (start inclusive, end exclusive)."""
    def test_in_schedule(self):
        schedule = parse_schedule("Mon-Fri 09:00-17:00")
        # Wednesday at 12:00
        dt = datetime(2023, 12, 27, 12, 0)  # Wednesday
        assert is_in_schedule(schedule, dt) is True
    def test_not_in_schedule_time(self):
        schedule = parse_schedule("Mon-Fri 09:00-17:00")
        # Wednesday at 18:00
        dt = datetime(2023, 12, 27, 18, 0)  # Wednesday
        assert is_in_schedule(schedule, dt) is False
    def test_not_in_schedule_day(self):
        schedule = parse_schedule("Mon-Fri 09:00-17:00")
        # Saturday at 12:00
        dt = datetime(2023, 12, 30, 12, 0)  # Saturday
        assert is_in_schedule(schedule, dt) is False
    def test_edge_start(self):
        schedule = parse_schedule("daily 09:00-17:00")
        dt = datetime(2023, 12, 27, 9, 0)
        assert is_in_schedule(schedule, dt) is True
    def test_edge_end(self):
        schedule = parse_schedule("daily 09:00-17:00")
        dt = datetime(2023, 12, 27, 17, 0)
        # End is exclusive
        assert is_in_schedule(schedule, dt) is False
class TestNextOccurrence:
    """next_occurrence: same-day, next-day, and weekend-skipping scans."""
    def test_same_day(self):
        schedule = parse_schedule("daily 09:00-17:00")
        dt = datetime(2023, 12, 27, 8, 0)  # Wednesday 8am
        result = next_occurrence(schedule, dt)
        assert result is not None
        assert result.hour == 9
        assert result.day == 27
    def test_next_day(self):
        schedule = parse_schedule("daily 09:00-17:00")
        dt = datetime(2023, 12, 27, 18, 0)  # Wednesday 6pm
        result = next_occurrence(schedule, dt)
        assert result is not None
        assert result.hour == 9
        assert result.day == 28
    def test_skip_weekend(self):
        schedule = parse_schedule("Mon-Fri 09:00-17:00")
        dt = datetime(2023, 12, 29, 18, 0)  # Friday 6pm
        result = next_occurrence(schedule, dt)
        assert result is not None
        # Should be Monday
        assert result.weekday() == 0
class TestTimeUntilSchedule:
    """time_until_schedule: zero inside the window, positive delta before it."""
    def test_in_schedule(self):
        schedule = parse_schedule("daily 09:00-17:00")
        dt = datetime(2023, 12, 27, 12, 0)
        result = time_until_schedule(schedule, dt)
        assert result == timedelta(0)
    def test_before_schedule(self):
        schedule = parse_schedule("daily 09:00-17:00")
        dt = datetime(2023, 12, 27, 8, 0)
        result = time_until_schedule(schedule, dt)
        assert result is not None
        # Should be about 1 hour
        assert 59 <= result.total_seconds() // 60 <= 61
class TestFormatSchedule:
    """format_schedule: shorthand day-set labels plus the HH:MM-HH:MM window."""
    def test_weekdays(self):
        schedule = parse_schedule("Mon-Fri 09:00-17:00")
        result = format_schedule(schedule)
        assert "Weekdays" in result
        assert "09:00-17:00" in result
    def test_daily(self):
        schedule = parse_schedule("daily 00:00-23:59")
        result = format_schedule(schedule)
        assert "Daily" in result
    def test_weekends(self):
        schedule = parse_schedule("Sat,Sun 10:00-18:00")
        result = format_schedule(schedule)
        assert "Weekends" in result
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_schedule_parser/test_schedule_cli.py (5248 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_schedule_parser/test_schedule_cli.rs (10094 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_schedule_parser/Cargo.toml (1 dependencies)
⏱️ Parse time: 51ms
📊 Throughput: 98.7 KB/s
⏱️ Total time: 52ms
| true
|
schedule_parser
| 168
| 6
|
[
"class_definition"
] | 0.612
| null |
example_schema_checker
|
schema_cli.py
|
#!/usr/bin/env python3
"""Schema Checker CLI.
JSON Schema-like validation for data structures.
"""
import argparse
import json
import sys
from dataclasses import dataclass, field
from enum import Enum, auto
from typing import Any
class SchemaType(Enum):
    """Primitive value kinds recognized by the validator (JSON Schema-like)."""
    STRING = auto()
    INTEGER = auto()
    NUMBER = auto()
    BOOLEAN = auto()
    ARRAY = auto()
    OBJECT = auto()
    NULL = auto()
    ANY = auto()  # wildcard: matches any value in check_type()
@dataclass
class SchemaError:
    """A single schema validation failure: where it happened and why."""
    path: str  # location of the failure ('$' denotes the document root)
    message: str  # human-readable description of the violated constraint
@dataclass
class Schema:
    """Schema definition mirroring a subset of JSON Schema keywords.

    Every constraint defaults to "unset" (None / empty), in which case
    validate() skips it.  See parse_schema() for the JSON keyword spelling
    of each field.
    """
    schema_type: SchemaType | list[SchemaType] = SchemaType.ANY  # expected type(s); a list is a union
    properties: dict[str, "Schema"] = field(default_factory=dict)  # per-key sub-schemas (objects)
    required: list[str] = field(default_factory=list)  # keys that must be present (objects)
    items: "Schema | None" = None  # element schema (arrays)
    minimum: float | None = None  # inclusive numeric lower bound
    maximum: float | None = None  # inclusive numeric upper bound
    min_length: int | None = None  # string length lower bound
    max_length: int | None = None  # string length upper bound
    min_items: int | None = None  # array length lower bound
    max_items: int | None = None  # array length upper bound
    pattern: str | None = None  # regex the string must match (re.match, anchored at start)
    enum: list = field(default_factory=list)  # allowed values; empty list = unrestricted
    # Exact required value.  None doubles as "no const", so a const of None
    # cannot be expressed (validate() guards with `is not None`).
    const: Any = None
    additional_properties: bool = True  # allow keys beyond `properties` (objects)
    unique_items: bool = False  # forbid duplicate array elements
def get_type(value: Any) -> SchemaType:
    """Classify a Python value as a SchemaType.

    bool is tested before int because bool is a subclass of int; any
    unrecognized type falls through to ANY.
    """
    if value is None:
        return SchemaType.NULL
    if isinstance(value, bool):
        return SchemaType.BOOLEAN
    if isinstance(value, int):
        return SchemaType.INTEGER
    if isinstance(value, float):
        return SchemaType.NUMBER
    if isinstance(value, str):
        return SchemaType.STRING
    if isinstance(value, list):
        return SchemaType.ARRAY
    if isinstance(value, dict):
        return SchemaType.OBJECT
    return SchemaType.ANY
def type_name(t: SchemaType) -> str:
    """Return the lower-case display name of a schema type (for error messages)."""
    label = t.name
    return label.lower()
def check_type(value: Any, expected: SchemaType | list[SchemaType]) -> bool:
    """Return True if *value*'s type satisfies *expected*.

    A list means "any of these types"; ANY matches everything; an int is
    accepted where NUMBER is expected (as in JSON Schema).
    """
    actual = get_type(value)
    if isinstance(expected, list):
        return actual in expected
    if expected == SchemaType.ANY:
        return True
    # Integer is valid number
    if expected == SchemaType.NUMBER and actual == SchemaType.INTEGER:
        return True
    return actual == expected
def validate(data: Any, schema: Schema, path: str = "") -> list[SchemaError]:
    """Validate *data* against *schema*; return a list of errors (empty = valid).

    Recurses into array items and object properties, threading *path* so
    each SchemaError points at the offending location ('$' is the root).
    A type mismatch short-circuits: no other constraints are checked there.
    NOTE(review): the `const is not None` guard means a const of None
    cannot be required via this representation.
    """
    errors = []
    # Type check
    if not check_type(data, schema.schema_type):
        if isinstance(schema.schema_type, list):
            expected = " or ".join(type_name(t) for t in schema.schema_type)
        else:
            expected = type_name(schema.schema_type)
        actual = type_name(get_type(data))
        errors.append(SchemaError(path or "$", f"Expected {expected}, got {actual}"))
        return errors
    # Const check
    if schema.const is not None and data != schema.const:
        errors.append(SchemaError(path or "$", f"Value must be {schema.const!r}"))
    # Enum check
    if schema.enum and data not in schema.enum:
        errors.append(SchemaError(path or "$", f"Value must be one of: {schema.enum}"))
    # String validations
    if isinstance(data, str):
        if schema.min_length is not None and len(data) < schema.min_length:
            errors.append(
                SchemaError(
                    path or "$",
                    f"String length {len(data)} is less than minimum {schema.min_length}",
                )
            )
        if schema.max_length is not None and len(data) > schema.max_length:
            errors.append(
                SchemaError(
                    path or "$", f"String length {len(data)} exceeds maximum {schema.max_length}"
                )
            )
        if schema.pattern:
            import re
            if not re.match(schema.pattern, data):
                errors.append(
                    SchemaError(path or "$", f"String does not match pattern '{schema.pattern}'")
                )
    # Number validations (bool excluded: it is an int subclass)
    if isinstance(data, (int, float)) and not isinstance(data, bool):
        if schema.minimum is not None and data < schema.minimum:
            errors.append(
                SchemaError(path or "$", f"Value {data} is less than minimum {schema.minimum}")
            )
        if schema.maximum is not None and data > schema.maximum:
            errors.append(
                SchemaError(path or "$", f"Value {data} exceeds maximum {schema.maximum}")
            )
    # Array validations
    if isinstance(data, list):
        if schema.min_items is not None and len(data) < schema.min_items:
            errors.append(
                SchemaError(
                    path or "$", f"Array length {len(data)} is less than minimum {schema.min_items}"
                )
            )
        if schema.max_items is not None and len(data) > schema.max_items:
            errors.append(
                SchemaError(
                    path or "$", f"Array length {len(data)} exceeds maximum {schema.max_items}"
                )
            )
        if schema.unique_items:
            # Canonical JSON text makes unhashable items (dicts/lists) comparable.
            seen = []
            for item in data:
                item_str = json.dumps(item, sort_keys=True)
                if item_str in seen:
                    errors.append(SchemaError(path or "$", "Array contains duplicate items"))
                    break
                seen.append(item_str)
        if schema.items:
            for i, item in enumerate(data):
                item_path = f"{path}[{i}]" if path else f"[{i}]"
                errors.extend(validate(item, schema.items, item_path))
    # Object validations
    if isinstance(data, dict):
        # Required properties
        for prop in schema.required:
            if prop not in data:
                prop_path = f"{path}.{prop}" if path else prop
                errors.append(SchemaError(prop_path, f"Required property '{prop}' is missing"))
        # Property validations
        for prop, prop_schema in schema.properties.items():
            if prop in data:
                prop_path = f"{path}.{prop}" if path else prop
                errors.extend(validate(data[prop], prop_schema, prop_path))
        # Additional properties check
        if not schema.additional_properties:
            for prop in data:
                if prop not in schema.properties:
                    prop_path = f"{path}.{prop}" if path else prop
                    errors.append(
                        SchemaError(prop_path, f"Additional property '{prop}' is not allowed")
                    )
    return errors
def parse_type_str(type_str: str | list[str]) -> SchemaType | list[SchemaType]:
    """Convert JSON Schema type name(s) to SchemaType, defaulting unknown names to ANY."""
    mapping = {
        "string": SchemaType.STRING,
        "integer": SchemaType.INTEGER,
        "number": SchemaType.NUMBER,
        "boolean": SchemaType.BOOLEAN,
        "array": SchemaType.ARRAY,
        "object": SchemaType.OBJECT,
        "null": SchemaType.NULL,
    }
    if isinstance(type_str, list):
        resolved = []
        for name in type_str:
            resolved.append(mapping.get(name.lower(), SchemaType.ANY))
        return resolved
    return mapping.get(type_str.lower(), SchemaType.ANY)
def parse_schema(schema_dict: dict) -> Schema:
    """Build a Schema from a JSON-Schema-like dict, ignoring unknown keywords."""
    schema = Schema()
    # Keywords that copy straight through to a Schema attribute.
    simple_keys = {
        "required": "required",
        "minimum": "minimum",
        "maximum": "maximum",
        "minLength": "min_length",
        "maxLength": "max_length",
        "minItems": "min_items",
        "maxItems": "max_items",
        "pattern": "pattern",
        "enum": "enum",
        "const": "const",
        "additionalProperties": "additional_properties",
        "uniqueItems": "unique_items",
    }
    for key, attr in simple_keys.items():
        if key in schema_dict:
            setattr(schema, attr, schema_dict[key])
    # Keywords that need conversion or recursion.
    if "type" in schema_dict:
        schema.schema_type = parse_type_str(schema_dict["type"])
    if "properties" in schema_dict:
        schema.properties = {
            name: parse_schema(sub) for name, sub in schema_dict["properties"].items()
        }
    if "items" in schema_dict:
        schema.items = parse_schema(schema_dict["items"])
    return schema
def infer_schema(data: Any, strict: bool = False) -> dict:
    """Infer a JSON-Schema-like dict describing *data*.

    With strict=True, string/array lengths are pinned (min == max == actual
    length), object keys become required, and additional properties are
    disallowed.  For arrays, an 'items' schema is emitted only when every
    element infers to the same schema.
    """
    if data is None:
        return {"type": "null"}
    if isinstance(data, bool):
        # Must precede the int check: bool subclasses int.
        return {"type": "boolean"}
    if isinstance(data, int):
        return {"type": "integer"}
    if isinstance(data, float):
        return {"type": "number"}
    if isinstance(data, str):
        result = {"type": "string"}
        if strict:
            result["minLength"] = len(data)
            result["maxLength"] = len(data)
        return result
    if isinstance(data, list):
        result = {"type": "array"}
        if strict:
            result["minItems"] = len(data)
            result["maxItems"] = len(data)
        if data:
            # Try to find common item schema
            item_schemas = [infer_schema(item, strict) for item in data]
            if all(s == item_schemas[0] for s in item_schemas):
                result["items"] = item_schemas[0]
        return result
    if isinstance(data, dict):
        result = {"type": "object", "properties": {}}
        if strict:
            result["required"] = list(data.keys())
            result["additionalProperties"] = False
        for key, value in data.items():
            result["properties"][key] = infer_schema(value, strict)
        return result
    return {}
def generate_sample(schema: Schema) -> Any:
    """Generate minimal sample data that satisfies *schema*.

    Preference order: const value, first enum entry, then a type-specific
    minimal value honoring minimum / min_length / min_items where set.
    Returns None for NULL (and for unrecognized types).
    """
    if schema.const is not None:
        return schema.const
    if schema.enum:
        return schema.enum[0]
    schema_type = schema.schema_type
    if isinstance(schema_type, list):
        # For a type union, sample the first listed type.
        schema_type = schema_type[0]
    if schema_type == SchemaType.NULL:
        return None
    if schema_type == SchemaType.BOOLEAN:
        return False
    if schema_type == SchemaType.INTEGER:
        if schema.minimum is not None:
            return int(schema.minimum)
        return 0
    if schema_type == SchemaType.NUMBER:
        if schema.minimum is not None:
            return schema.minimum
        return 0.0
    if schema_type == SchemaType.STRING:
        if schema.min_length:
            return "x" * schema.min_length
        return ""
    if schema_type == SchemaType.ARRAY:
        result = []
        # Emit just enough items to satisfy minItems (0 when unset).
        count = schema.min_items or 0
        if schema.items:
            for _ in range(count):
                result.append(generate_sample(schema.items))
        return result
    if schema_type == SchemaType.OBJECT:
        result = {}
        # Only required properties that have a schema are populated.
        for prop in schema.required:
            if prop in schema.properties:
                result[prop] = generate_sample(schema.properties[prop])
        return result
    return None
def main() -> int:
    """CLI entry point for the validate / infer / sample subcommands.

    Returns the process exit code: 0 on success, 1 on validation failure.
    File-read and JSON-parse errors propagate as exceptions.
    """
    parser = argparse.ArgumentParser(description="JSON Schema checker")
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # validate command
    validate_parser = subparsers.add_parser("validate", help="Validate data against schema")
    validate_parser.add_argument("data", help="Data file (JSON)")
    validate_parser.add_argument("--schema", "-s", required=True, help="Schema file (JSON)")
    # infer command
    infer_parser = subparsers.add_parser("infer", help="Infer schema from data")
    infer_parser.add_argument("data", help="Data file (JSON)")
    infer_parser.add_argument("--strict", action="store_true", help="Generate strict schema")
    # sample command
    sample_parser = subparsers.add_parser("sample", help="Generate sample data from schema")
    sample_parser.add_argument("schema", help="Schema file (JSON)")
    args = parser.parse_args()
    if args.command == "validate":
        with open(args.data) as f:
            data = json.load(f)
        with open(args.schema) as f:
            schema_dict = json.load(f)
        schema = parse_schema(schema_dict)
        errors = validate(data, schema)
        if errors:
            for error in errors:
                print(f"Error at '{error.path}': {error.message}", file=sys.stderr)
            return 1
        print("Validation passed")
        return 0
    if args.command == "infer":
        with open(args.data) as f:
            data = json.load(f)
        schema = infer_schema(data, args.strict)
        print(json.dumps(schema, indent=2))
        return 0
    if args.command == "sample":
        with open(args.schema) as f:
            schema_dict = json.load(f)
        schema = parse_schema(schema_dict)
        sample = generate_sample(schema)
        print(json.dumps(sample, indent=2))
        return 0
    # No subcommand given: show usage.
    parser.print_help()
    return 0
if __name__ == "__main__":
    sys.exit(main())
| false
|
schema_checker
| 431
| 0
|
[
"context_manager",
"class_definition",
"decorator"
] | 0.652
|
Error: Unsupported type annotation: Constant(ExprConstant { range: 662..670, value: Str("Schema"), kind: None })
|
|
example_schema_checker
|
test_schema_cli.py
|
"""Tests for schema_cli.py"""
from schema_cli import (
Schema,
SchemaType,
check_type,
generate_sample,
get_type,
infer_schema,
parse_schema,
parse_type_str,
validate,
)
class TestGetType:
    """get_type: Python value -> SchemaType classification (bool before int)."""
    def test_null(self):
        assert get_type(None) == SchemaType.NULL
    def test_boolean(self):
        assert get_type(True) == SchemaType.BOOLEAN
        assert get_type(False) == SchemaType.BOOLEAN
    def test_integer(self):
        assert get_type(42) == SchemaType.INTEGER
        assert get_type(-1) == SchemaType.INTEGER
    def test_number(self):
        assert get_type(3.14) == SchemaType.NUMBER
        assert get_type(-0.5) == SchemaType.NUMBER
    def test_string(self):
        assert get_type("hello") == SchemaType.STRING
        assert get_type("") == SchemaType.STRING
    def test_array(self):
        assert get_type([1, 2, 3]) == SchemaType.ARRAY
        assert get_type([]) == SchemaType.ARRAY
    def test_object(self):
        assert get_type({"a": 1}) == SchemaType.OBJECT
        assert get_type({}) == SchemaType.OBJECT
class TestCheckType:
    """check_type: exact matches, ANY wildcard, int-as-number, and type unions."""
    def test_exact_match(self):
        assert check_type("hello", SchemaType.STRING) is True
        assert check_type(42, SchemaType.INTEGER) is True
        assert check_type(True, SchemaType.BOOLEAN) is True
    def test_any_matches_all(self):
        assert check_type("hello", SchemaType.ANY) is True
        assert check_type(42, SchemaType.ANY) is True
        assert check_type(None, SchemaType.ANY) is True
    def test_integer_is_number(self):
        assert check_type(42, SchemaType.NUMBER) is True
    def test_type_list(self):
        assert check_type("hello", [SchemaType.STRING, SchemaType.NULL]) is True
        assert check_type(None, [SchemaType.STRING, SchemaType.NULL]) is True
        assert check_type(42, [SchemaType.STRING, SchemaType.NULL]) is False
    def test_mismatch(self):
        assert check_type("hello", SchemaType.INTEGER) is False
        assert check_type(42, SchemaType.STRING) is False
class TestValidateTypes:
    """validate: top-level type checks, including nullable type unions."""
    def test_string(self):
        schema = Schema(schema_type=SchemaType.STRING)
        assert len(validate("hello", schema)) == 0
        assert len(validate(42, schema)) == 1
    def test_integer(self):
        schema = Schema(schema_type=SchemaType.INTEGER)
        assert len(validate(42, schema)) == 0
        assert len(validate("hello", schema)) == 1
    def test_number(self):
        schema = Schema(schema_type=SchemaType.NUMBER)
        assert len(validate(3.14, schema)) == 0
        assert len(validate(42, schema)) == 0  # int is valid number
        assert len(validate("hello", schema)) == 1
    def test_boolean(self):
        schema = Schema(schema_type=SchemaType.BOOLEAN)
        assert len(validate(True, schema)) == 0
        assert len(validate(False, schema)) == 0
        assert len(validate(1, schema)) == 1
    def test_null(self):
        schema = Schema(schema_type=SchemaType.NULL)
        assert len(validate(None, schema)) == 0
        assert len(validate("null", schema)) == 1
    def test_nullable_string(self):
        schema = Schema(schema_type=[SchemaType.STRING, SchemaType.NULL])
        assert len(validate("hello", schema)) == 0
        assert len(validate(None, schema)) == 0
        assert len(validate(42, schema)) == 1
class TestValidateStringConstraints:
    """validate: minLength / maxLength / pattern constraints on strings."""
    def test_min_length(self):
        schema = Schema(schema_type=SchemaType.STRING, min_length=3)
        assert len(validate("hello", schema)) == 0
        assert len(validate("ab", schema)) == 1
    def test_max_length(self):
        schema = Schema(schema_type=SchemaType.STRING, max_length=5)
        assert len(validate("hello", schema)) == 0
        assert len(validate("hello world", schema)) == 1
    def test_pattern(self):
        schema = Schema(schema_type=SchemaType.STRING, pattern=r"^\d{3}-\d{4}$")
        assert len(validate("123-4567", schema)) == 0
        assert len(validate("invalid", schema)) == 1
class TestValidateNumberConstraints:
    """validate: inclusive minimum / maximum bounds on numbers."""
    def test_minimum(self):
        schema = Schema(schema_type=SchemaType.NUMBER, minimum=0)
        assert len(validate(10, schema)) == 0
        assert len(validate(-5, schema)) == 1
    def test_maximum(self):
        schema = Schema(schema_type=SchemaType.NUMBER, maximum=100)
        assert len(validate(50, schema)) == 0
        assert len(validate(150, schema)) == 1
    def test_range(self):
        schema = Schema(schema_type=SchemaType.NUMBER, minimum=0, maximum=100)
        assert len(validate(50, schema)) == 0
        assert len(validate(-1, schema)) == 1
        assert len(validate(101, schema)) == 1
class TestValidateArrayConstraints:
    """validate: item counts, uniqueness, and per-item schemas on arrays."""
    def test_min_items(self):
        schema = Schema(schema_type=SchemaType.ARRAY, min_items=2)
        assert len(validate([1, 2, 3], schema)) == 0
        assert len(validate([1], schema)) == 1
    def test_max_items(self):
        schema = Schema(schema_type=SchemaType.ARRAY, max_items=3)
        assert len(validate([1, 2], schema)) == 0
        assert len(validate([1, 2, 3, 4], schema)) == 1
    def test_unique_items(self):
        schema = Schema(schema_type=SchemaType.ARRAY, unique_items=True)
        assert len(validate([1, 2, 3], schema)) == 0
        assert len(validate([1, 2, 2], schema)) == 1
    def test_items_schema(self):
        schema = Schema(schema_type=SchemaType.ARRAY, items=Schema(schema_type=SchemaType.INTEGER))
        assert len(validate([1, 2, 3], schema)) == 0
        errors = validate([1, "two", 3], schema)
        assert len(errors) == 1
        assert "[1]" in errors[0].path
class TestValidateObjectConstraints:
    """validate: required keys, property schemas, and additionalProperties=False."""
    def test_required(self):
        schema = Schema(schema_type=SchemaType.OBJECT, required=["name"])
        assert len(validate({"name": "test"}, schema)) == 0
        assert len(validate({}, schema)) == 1
    def test_properties(self):
        schema = Schema(
            schema_type=SchemaType.OBJECT, properties={"name": Schema(schema_type=SchemaType.STRING), "age": Schema(schema_type=SchemaType.INTEGER)}
        )
        assert len(validate({"name": "Alice", "age": 30}, schema)) == 0
        errors = validate({"name": 123, "age": "thirty"}, schema)
        assert len(errors) == 2
    def test_additional_properties_false(self):
        schema = Schema(schema_type=SchemaType.OBJECT, properties={"name": Schema(schema_type=SchemaType.STRING)}, additional_properties=False)
        assert len(validate({"name": "test"}, schema)) == 0
        errors = validate({"name": "test", "extra": "field"}, schema)
        assert len(errors) == 1
        assert "extra" in errors[0].path
class TestValidateEnumConst:
    """validate: enum membership and const equality checks."""
    def test_enum(self):
        schema = Schema(schema_type=SchemaType.STRING, enum=["a", "b", "c"])
        assert len(validate("a", schema)) == 0
        assert len(validate("d", schema)) == 1
    def test_const(self):
        schema = Schema(schema_type=SchemaType.STRING, const="fixed")
        assert len(validate("fixed", schema)) == 0
        assert len(validate("other", schema)) == 1
class TestParseTypeStr:
    """parse_type_str: single names, case-insensitivity, and type lists."""
    def test_single_type(self):
        assert parse_type_str("string") == SchemaType.STRING
        assert parse_type_str("integer") == SchemaType.INTEGER
        assert parse_type_str("number") == SchemaType.NUMBER
        assert parse_type_str("boolean") == SchemaType.BOOLEAN
        assert parse_type_str("array") == SchemaType.ARRAY
        assert parse_type_str("object") == SchemaType.OBJECT
        assert parse_type_str("null") == SchemaType.NULL
    def test_case_insensitive(self):
        assert parse_type_str("STRING") == SchemaType.STRING
        assert parse_type_str("Integer") == SchemaType.INTEGER
    def test_type_list(self):
        result = parse_type_str(["string", "null"])
        assert result == [SchemaType.STRING, SchemaType.NULL]
class TestParseSchema:
    """parse_schema: raw JSON-schema dicts into Schema objects."""

    def test_simple_type(self):
        s = parse_schema({"type": "string"})
        assert s.schema_type == SchemaType.STRING

    def test_with_constraints(self):
        s = parse_schema({"type": "integer", "minimum": 0, "maximum": 100})
        assert s.schema_type == SchemaType.INTEGER
        assert s.minimum == 0
        assert s.maximum == 100

    def test_with_string_constraints(self):
        raw = {"type": "string", "minLength": 1, "maxLength": 100, "pattern": "^[a-z]+$"}
        s = parse_schema(raw)
        assert s.min_length == 1
        assert s.max_length == 100
        assert s.pattern == "^[a-z]+$"

    def test_with_array_constraints(self):
        raw = {
            "type": "array",
            "minItems": 1,
            "maxItems": 10,
            "uniqueItems": True,
            "items": {"type": "integer"},
        }
        s = parse_schema(raw)
        assert s.min_items == 1
        assert s.max_items == 10
        assert s.unique_items is True
        assert s.items.schema_type == SchemaType.INTEGER

    def test_with_object_constraints(self):
        raw = {
            "type": "object",
            "required": ["name"],
            "properties": {"name": {"type": "string"}},
            "additionalProperties": False,
        }
        s = parse_schema(raw)
        assert "name" in s.required
        assert "name" in s.properties
        assert s.additional_properties is False
class TestInferSchema:
    """infer_schema: deriving schema dicts from sample JSON values."""

    def test_null(self):
        assert infer_schema(None) == {"type": "null"}

    def test_boolean(self):
        assert infer_schema(True) == {"type": "boolean"}

    def test_integer(self):
        assert infer_schema(42) == {"type": "integer"}

    def test_number(self):
        assert infer_schema(3.14) == {"type": "number"}

    def test_string(self):
        assert infer_schema("hello") == {"type": "string"}

    def test_array(self):
        inferred = infer_schema([1, 2, 3])
        assert inferred["type"] == "array"
        assert inferred["items"] == {"type": "integer"}

    def test_object(self):
        inferred = infer_schema({"name": "test", "age": 30})
        assert inferred["type"] == "object"
        assert "name" in inferred["properties"]
        assert "age" in inferred["properties"]

    def test_strict_mode(self):
        inferred = infer_schema({"name": "test"}, strict=True)
        assert "required" in inferred
        assert "name" in inferred["required"]
        assert inferred["additionalProperties"] is False
class TestGenerateSample:
    """generate_sample: minimal values satisfying a schema."""

    def test_null(self):
        assert generate_sample(Schema(schema_type=SchemaType.NULL)) is None

    def test_boolean(self):
        assert generate_sample(Schema(schema_type=SchemaType.BOOLEAN)) is False

    def test_integer(self):
        assert generate_sample(Schema(schema_type=SchemaType.INTEGER)) == 0

    def test_integer_with_minimum(self):
        assert generate_sample(Schema(schema_type=SchemaType.INTEGER, minimum=10)) == 10

    def test_string(self):
        assert generate_sample(Schema(schema_type=SchemaType.STRING)) == ""

    def test_string_with_min_length(self):
        sample = generate_sample(Schema(schema_type=SchemaType.STRING, min_length=5))
        assert len(sample) == 5

    def test_enum(self):
        assert generate_sample(Schema(enum=["a", "b", "c"])) == "a"

    def test_const(self):
        assert generate_sample(Schema(const="fixed")) == "fixed"

    def test_array(self):
        s = Schema(
            schema_type=SchemaType.ARRAY,
            min_items=2,
            items=Schema(schema_type=SchemaType.INTEGER),
        )
        sample = generate_sample(s)
        assert len(sample) == 2
        assert all(isinstance(x, int) for x in sample)

    def test_object(self):
        s = Schema(
            schema_type=SchemaType.OBJECT,
            required=["name"],
            properties={"name": Schema(schema_type=SchemaType.STRING, min_length=3)},
        )
        sample = generate_sample(s)
        assert "name" in sample
        assert len(sample["name"]) >= 3
class TestNestedValidation:
    """Error paths for nested objects and arrays of objects."""

    def test_nested_object(self):
        inner = Schema(
            schema_type=SchemaType.OBJECT,
            required=["name"],
            properties={"name": Schema(schema_type=SchemaType.STRING)},
        )
        outer = Schema(schema_type=SchemaType.OBJECT, properties={"user": inner})
        assert len(validate({"user": {"name": "Alice"}}, outer)) == 0
        problems = validate({"user": {}}, outer)
        assert len(problems) == 1
        assert "user.name" in problems[0].path

    def test_array_of_objects(self):
        element = Schema(
            schema_type=SchemaType.OBJECT,
            required=["id"],
            properties={"id": Schema(schema_type=SchemaType.INTEGER)},
        )
        s = Schema(schema_type=SchemaType.ARRAY, items=element)
        assert len(validate([{"id": 1}, {"id": 2}], s)) == 0
        problems = validate([{"id": 1}, {"name": "missing id"}], s)
        assert len(problems) == 1
        assert "[1]" in problems[0].path
| false
|
schema_checker
| 355
| 0
|
[
"class_definition"
] | 0.612
|
Error: Expression type not yet supported: GeneratorExp { element: Call { func: "isinstance", args: [Var("x"), Var("int")], kwargs: [] }, generators: [HirComprehension { target: "x", iter: Var("sample"), conditions: [] }] }
|
|
example_secrets
|
secret_tool.py
|
#!/usr/bin/env python3
"""Secrets Example - Cryptographic random CLI."""
import argparse
import secrets
import string
def cmd_token(args):
    """Print a secure random token in the requested encoding. Depyler: proven to terminate"""
    size = args.bytes
    if args.type == "hex":
        out = secrets.token_hex(size)
    elif args.type == "url":
        out = secrets.token_urlsafe(size)
    else:
        # "bytes" mode: raw random bytes rendered as hex for printability.
        out = secrets.token_bytes(size).hex()
    print(out)
def cmd_password(args):
    """Print a secure password drawn from letters, digits and punctuation. Depyler: proven to terminate"""
    pool = string.ascii_letters + string.digits + string.punctuation
    chars = [secrets.choice(pool) for _ in range(args.length)]
    print("".join(chars))
def cmd_randbelow(args):
    """Print a cryptographically random int in [0, n). Depyler: proven to terminate"""
    value = secrets.randbelow(args.n)
    print(value)
def main():
    """Build the argument parser and dispatch to the chosen subcommand."""
    parser = argparse.ArgumentParser(description="Secure random tool")
    sub = parser.add_subparsers(dest="command", required=True)

    token_p = sub.add_parser("token")
    token_p.add_argument("--type", choices=["hex", "url", "bytes"], default="hex")
    token_p.add_argument("--bytes", type=int, default=32)

    pwd_p = sub.add_parser("password")
    pwd_p.add_argument("--length", type=int, default=16)

    rand_p = sub.add_parser("randbelow")
    rand_p.add_argument("n", type=int)

    args = parser.parse_args()
    # Table-driven dispatch keyed on the chosen subcommand name.
    handlers = {"token": cmd_token, "password": cmd_password, "randbelow": cmd_randbelow}
    handlers[args.command](args)
if __name__ == "__main__":
main()
| false
|
secrets
| 50
| 0
|
[] | 0
|
Error: Unsupported function call type: Subscript(ExprSubscript { range: 1366..1454, value: Dict(ExprDict { range: 1366..1440, keys: [Some(Constant(ExprConstant { range: 1367..1374, value: Str("token"), kind: None })), Some(Constant(ExprConstant { range: 1387..1397, value: Str("password"), kind: None })), Some(Constant(ExprConstant { range: 1413..1424, value: Str("randbelow"), kind: None }))], values: [Name(ExprName { range: 1376..1385, id: Identifier("cmd_token"), ctx: Load }), Name(ExprName { r
|
|
example_secrets
|
test_secret_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for secrets CLI."""
import subprocess
from pathlib import Path
# The script is referenced by bare name; run() sets cwd to this test file's
# directory, so the relative path resolves correctly.
# Fix: the previous Path(__file__)-based value assigned on the line above was
# immediately overwritten by this string — that dead assignment is removed.
SCRIPT = "secret_tool.py"
def run(args):
    """Invoke the CLI under python3 from this test file's directory, capturing text output."""
    script_dir = __file__.rsplit("/", 1)[0]
    command = ["python3", SCRIPT] + args
    return subprocess.run(command, capture_output=True, text=True, cwd=script_dir)
class TestToken:
    """token subcommand."""

    def test_token_hex(self):
        res = run(["token", "--type", "hex", "--bytes", "16"])
        assert res.returncode == 0
        # 16 random bytes encode to 32 hex characters.
        assert len(res.stdout.strip()) == 32

    def test_token_url(self):
        res = run(["token", "--type", "url", "--bytes", "16"])
        assert res.returncode == 0
class TestPassword:
    """password subcommand."""

    def test_password_default(self):
        res = run(["password"])
        assert res.returncode == 0
        assert len(res.stdout.strip()) >= 12

    def test_password_length(self):
        res = run(["password", "--length", "20"])
        assert res.returncode == 0
        assert len(res.stdout.strip()) == 20
class TestRandom:
    """randbelow subcommand."""

    def test_randbelow(self):
        res = run(["randbelow", "100"])
        assert res.returncode == 0
        value = int(res.stdout.strip())
        assert 0 <= value < 100
class TestHelp:
    """--help exits successfully."""

    def test_help(self):
        res = run(["--help"])
        assert res.returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_secrets/test_secret_tool.py (1350 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_secrets/test_secret_tool.rs (3233 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_secrets/Cargo.toml (2 dependencies)
⏱️ Parse time: 50ms
📊 Throughput: 26.2 KB/s
⏱️ Total time: 50ms
| true
|
secrets
| 45
| 6
|
[
"class_definition"
] | 0.612
| null |
example_serial_csv
|
serial_csv_cli.py
|
"""CSV Parser and Writer CLI with Type Inference.
Demonstrates CSV parsing, writing, type inference, and transformation patterns.
"""
import re
import sys
from dataclasses import dataclass, field
from typing import Any
@dataclass
class CsvConfig:
    """CSV configuration options."""
    # Field separator character.
    delimiter: str = ","
    # Character that quotes fields containing special characters.
    quote_char: str = '"'
    # Character that escapes a quote char inside a quoted field.
    escape_char: str = "\\"
    # Whether the first line is treated as a header row.
    has_header: bool = True
    # Skip blank lines instead of emitting empty rows.
    skip_empty: bool = True
    # Strip leading/trailing whitespace from each parsed field.
    trim_whitespace: bool = True
@dataclass
class CsvDocument:
    """CSV document with optional headers."""

    headers: list[str] = field(default_factory=list)
    rows: list[list[str]] = field(default_factory=list)
    config: CsvConfig = field(default_factory=CsvConfig)

    def __len__(self) -> int:
        """Number of data rows (the header is not counted)."""
        return len(self.rows)

    def column(self, name_or_index: str | int) -> list[str]:
        """Get a column by header name or positional index ([] for unknown names)."""
        if isinstance(name_or_index, str):
            try:
                idx = self.headers.index(name_or_index)
            except ValueError:
                return []
        else:
            idx = name_or_index
        return [r[idx] if idx < len(r) else "" for r in self.rows]

    def row(self, index: int) -> list[str]:
        """Row at *index*, or [] when out of range."""
        return self.rows[index] if 0 <= index < len(self.rows) else []

    def as_dicts(self) -> list[dict[str, str]]:
        """Rows as header-keyed dicts; [] when there are no headers."""
        if not self.headers:
            return []
        records = []
        for r in self.rows:
            records.append({h: (r[i] if i < len(r) else "") for i, h in enumerate(self.headers)})
        return records

    def filter_rows(self, column: str, value: str) -> list[list[str]]:
        """Rows whose cell in *column* equals *value*; [] for unknown columns."""
        if column not in self.headers:
            return []
        idx = self.headers.index(column)
        matches = []
        for r in self.rows:
            if idx < len(r) and r[idx] == value:
                matches.append(r)
        return matches

    def sort_by(self, column: str, reverse: bool = False) -> list[list[str]]:
        """Rows sorted by *column* (missing cells sort as ""); a copy if unknown."""
        if column not in self.headers:
            return self.rows.copy()
        idx = self.headers.index(column)

        def sort_key(r: list[str]) -> str:
            return r[idx] if idx < len(r) else ""

        return sorted(self.rows, key=sort_key, reverse=reverse)
class CsvParser:
    """CSV parser.

    Line-based: quoted fields may contain delimiters, but a field cannot
    span multiple lines (text is split with splitlines() up front).
    """

    def __init__(self, config: CsvConfig | None = None) -> None:
        # Fall back to a default configuration when none is supplied.
        self.config = config or CsvConfig()

    def parse(self, text: str) -> CsvDocument:
        """Parse CSV text into a CsvDocument; line 0 becomes the header row when configured."""
        doc = CsvDocument(config=self.config)
        lines = text.splitlines()
        for i, line in enumerate(lines):
            # Optionally skip blank lines entirely (they produce no row).
            if self.config.skip_empty and not line.strip():
                continue
            row = self._parse_line(line)
            if i == 0 and self.config.has_header:
                doc.headers = row
            else:
                doc.rows.append(row)
        return doc

    def _parse_line(self, line: str) -> list[str]:
        """Parse a single CSV line into fields using stateful quote tracking."""
        fields = []
        current = ""
        in_quotes = False
        prev_char = ""
        for char in line:
            # An unescaped quote toggles quoted mode; the quote itself is
            # not appended to the field.
            if char == self.config.quote_char and prev_char != self.config.escape_char:
                in_quotes = not in_quotes
            elif char == self.config.delimiter and not in_quotes:
                # A delimiter outside quotes terminates the current field.
                fields.append(self._clean_field(current))
                current = ""
            else:
                current += char
            prev_char = char
        # Flush the final field (every line yields at least one field).
        fields.append(self._clean_field(current))
        return fields

    def _clean_field(self, value: str) -> str:
        """Clean a field value: trim, drop surrounding quotes, unescape doubled quotes."""
        if self.config.trim_whitespace:
            value = value.strip()
        # Remove surrounding quotes
        if len(value) >= 2:
            if value.startswith(self.config.quote_char) and value.endswith(self.config.quote_char):
                value = value[1:-1]
        # Unescape quotes
        value = value.replace(self.config.quote_char * 2, self.config.quote_char)
        return value
class CsvWriter:
    """CSV writer."""

    def __init__(self, config: CsvConfig | None = None) -> None:
        self.config = config or CsvConfig()

    def write(self, doc: CsvDocument) -> str:
        """Serialize *doc* to CSV text, ending with a newline."""
        out: list[str] = []
        if self.config.has_header and doc.headers:
            out.append(self._format_row(doc.headers))
        out.extend(self._format_row(row) for row in doc.rows)
        return "\n".join(out) + "\n"

    def _format_row(self, row: list[str]) -> str:
        """Join a row's fields with the configured delimiter, quoting as needed."""
        rendered = [self._quote_field(value) for value in row]
        return self.config.delimiter.join(rendered)

    def _quote_field(self, value: str) -> str:
        """Wrap *value* in quote chars (doubling embedded quotes) when it contains specials."""
        specials = (self.config.delimiter, self.config.quote_char, "\n", "\r")
        if any(ch in value for ch in specials):
            q = self.config.quote_char
            return f"{q}{value.replace(q, q * 2)}{q}"
        return value
def csv_parse(text: str, config: CsvConfig | None = None) -> CsvDocument:
    """Parse CSV text."""
    parser = CsvParser(config)
    return parser.parse(text)
def csv_dump(doc: CsvDocument) -> str:
    """Dump CSV document to string."""
    writer = CsvWriter(doc.config)
    return writer.write(doc)
def infer_type(value: str) -> Any:
    """Infer a typed value from a raw CSV cell string.

    Check order matters: null-ish markers first, then boolean words
    (note that "1"/"0" become booleans, not ints, because this check
    runs before the numeric ones), then int, float, and scientific
    notation. Anything unmatched stays a str.
    """
    lowered = value.lower()
    if not value or lowered in ("null", "none", "na", "n/a", ""):
        return None
    if lowered in ("true", "yes", "1"):
        return True
    if lowered in ("false", "no", "0"):
        return False
    if re.match(r"^-?\d+$", value):
        return int(value)
    float_patterns = (r"^-?\d*\.\d+$", r"^-?\d+\.\d*$")
    if any(re.match(p, value) for p in float_patterns):
        return float(value)
    if re.match(r"^-?\d+\.?\d*[eE][+-]?\d+$", value):
        return float(value)
    return value
def infer_column_type(values: list[str]) -> str:
    """Pick the most common non-null inferred type name for a column.

    Columns with only null-ish values fall back to "str".
    """
    tally = {"null": 0, "bool": 0, "int": 0, "float": 0, "str": 0}
    for raw in values:
        typed = infer_type(raw)
        if typed is None:
            kind = "null"
        elif isinstance(typed, bool):  # bool before int: bool is an int subclass
            kind = "bool"
        elif isinstance(typed, int):
            kind = "int"
        elif isinstance(typed, float):
            kind = "float"
        else:
            kind = "str"
        tally[kind] += 1
    # Drop null counts and empty buckets, then take the most frequent.
    candidates = {name: count for name, count in tally.items() if name != "null" and count > 0}
    if not candidates:
        return "str"
    return max(candidates, key=candidates.get)
def csv_schema(doc: CsvDocument) -> dict[str, str]:
    """Infer a {header: type-name} schema for the document's columns."""
    schema: dict[str, str] = {}
    for idx, header in enumerate(doc.headers):
        cells = [r[idx] if idx < len(r) else "" for r in doc.rows]
        schema[header] = infer_column_type(cells)
    return schema
def csv_to_typed(doc: CsvDocument) -> list[dict[str, Any]]:
    """Convert rows to dicts with cell values coerced via infer_type."""
    if not doc.headers:
        return []
    typed_rows = []
    for r in doc.rows:
        typed_rows.append(
            {h: infer_type(r[i] if i < len(r) else "") for i, h in enumerate(doc.headers)}
        )
    return typed_rows
def csv_stats(doc: CsvDocument, column: str) -> dict[str, Any]:
    """Count/min/max/sum/mean/median over the numeric cells of *column*.

    Non-numeric cells are ignored; {} for unknown columns or when no
    numeric cells exist. (Boolean cells count as 1.0/0.0, since bool is
    an int subclass.)
    """
    if column not in doc.headers:
        return {}
    numbers: list[float] = []
    for cell in doc.column(column):
        typed = infer_type(cell)
        if isinstance(typed, (int, float)):
            numbers.append(float(typed))
    if not numbers:
        return {}
    ordered = sorted(numbers)
    n = len(numbers)
    if n % 2:
        median = ordered[n // 2]
    else:
        median = (ordered[n // 2 - 1] + ordered[n // 2]) / 2
    return {
        "count": n,
        "min": min(numbers),
        "max": max(numbers),
        "sum": sum(numbers),
        "mean": sum(numbers) / n,
        "median": median,
    }
def csv_select(doc: CsvDocument, columns: list[str]) -> CsvDocument:
    """Project the document onto the named columns (unknown names are dropped)."""
    keep: list[tuple[int, str]] = []
    for name in columns:
        if name in doc.headers:
            keep.append((doc.headers.index(name), name))
    new_headers = [name for _, name in keep]
    new_rows = []
    for r in doc.rows:
        new_rows.append([r[i] if i < len(r) else "" for i, _ in keep])
    return CsvDocument(headers=new_headers, rows=new_rows, config=doc.config)
def csv_join(left: CsvDocument, right: CsvDocument, on: str) -> CsvDocument:
    """Join two CSV documents on a column.

    Inner join: only left rows with at least one matching key in *right*
    are emitted (one output row per matching right row). The key column
    appears once, taken from *left*.

    NOTE(review): the result is built as a bare CsvDocument(), so it does
    not inherit left.config — confirm whether that is intended.
    """
    if on not in left.headers or on not in right.headers:
        return CsvDocument()
    left_idx = left.headers.index(on)
    right_idx = right.headers.index(on)
    # Build lookup from right
    right_lookup: dict[str, list[list[str]]] = {}
    for row in right.rows:
        key = row[right_idx] if right_idx < len(row) else ""
        if key not in right_lookup:
            right_lookup[key] = []
        right_lookup[key].append(row)
    # New headers (exclude duplicate key column from right)
    new_headers = left.headers + [h for i, h in enumerate(right.headers) if i != right_idx]
    new_rows = []
    for left_row in left.rows:
        key = left_row[left_idx] if left_idx < len(left_row) else ""
        if key in right_lookup:
            for right_row in right_lookup[key]:
                new_row = list(left_row) + [v for i, v in enumerate(right_row) if i != right_idx]
                new_rows.append(new_row)
    return CsvDocument(headers=new_headers, rows=new_rows)
def csv_group_by(doc: CsvDocument, column: str) -> dict[str, list[list[str]]]:
    """Group rows by their value in *column*; {} for unknown columns."""
    if column not in doc.headers:
        return {}
    idx = doc.headers.index(column)
    groups: dict[str, list[list[str]]] = {}
    for r in doc.rows:
        key = r[idx] if idx < len(r) else ""
        groups.setdefault(key, []).append(r)
    return groups
def csv_aggregate(doc: CsvDocument, group_col: str, agg_col: str, func: str) -> dict[str, float]:
    """Aggregate numeric *agg_col* values per *group_col* group.

    *func* is one of sum/avg/min/max/count. Groups with no numeric
    values — and any group when *func* is unrecognized — are omitted.
    """
    groups = csv_group_by(doc, group_col)
    if agg_col not in doc.headers:
        return {}
    agg_idx = doc.headers.index(agg_col)
    result: dict[str, float] = {}
    for key, rows in groups.items():
        numbers: list[float] = []
        for r in rows:
            if agg_idx < len(r):
                typed = infer_type(r[agg_idx])
                if isinstance(typed, (int, float)):
                    numbers.append(float(typed))
        if not numbers:
            continue
        if func == "sum":
            result[key] = sum(numbers)
        elif func == "avg":
            result[key] = sum(numbers) / len(numbers)
        elif func == "min":
            result[key] = min(numbers)
        elif func == "max":
            result[key] = max(numbers)
        elif func == "count":
            result[key] = float(len(numbers))
    return result
def simulate_csv(operations: list[str]) -> list[str]:
    """Simulate CSV operations.

    Each op is "cmd" or "cmd:arg". "parse:<text>" loads a document into
    the running context; later commands (rows, cols, headers, schema,
    column, filter, stats) query it, each appending one result string.
    Commands with no loaded context append nothing.

    NOTE(review): the "and context" guards use truthiness, and
    CsvDocument.__len__ counts data rows — so queries against a parsed
    document with zero data rows are also silently skipped.
    """
    results = []
    context: CsvDocument | None = None
    for op in operations:
        # Split off at most one argument; ops without ":" have no arg.
        parts = op.split(":", 1)
        cmd = parts[0]
        if cmd == "parse":
            context = csv_parse(parts[1])
            results.append("ok")
        elif cmd == "rows" and context:
            results.append(str(len(context)))
        elif cmd == "cols" and context:
            results.append(str(len(context.headers)))
        elif cmd == "headers" and context:
            results.append(",".join(context.headers))
        elif cmd == "schema" and context:
            schema = csv_schema(context)
            results.append(",".join(f"{k}:{v}" for k, v in schema.items()))
        elif cmd == "column" and context:
            col = context.column(parts[1])
            results.append(",".join(col))
        elif cmd == "filter" and context:
            # Argument has the form "column=value".
            col_val = parts[1].split("=", 1)
            if len(col_val) == 2:
                filtered = context.filter_rows(col_val[0], col_val[1])
                results.append(str(len(filtered)))
        elif cmd == "stats" and context:
            stats = csv_stats(context, parts[1])
            results.append(",".join(f"{k}:{v}" for k, v in stats.items()))
    return results
def main() -> int:
    """CLI entry point.

    Reads CSV from stdin and dispatches on sys.argv[1]:
    parse, schema, stats <column>, select <cols>, filter <column> <value>.
    Returns 0 on success, 1 on usage errors or unknown commands.

    Fixes: unknown commands previously fell through silently with exit
    code 0, and the usage line advertised a "join" command that has no
    CLI wiring (csv_join exists only as a library function).
    """
    import json
    if len(sys.argv) < 2:
        print("Usage: serial_csv_cli.py <command> [args...]")
        print("Commands: parse, schema, stats, select, filter")
        return 1
    cmd = sys.argv[1]
    if cmd == "parse":
        doc = csv_parse(sys.stdin.read())
        print(json.dumps(csv_to_typed(doc), indent=2))
    elif cmd == "schema":
        doc = csv_parse(sys.stdin.read())
        print(json.dumps(csv_schema(doc), indent=2))
    elif cmd == "stats":
        if len(sys.argv) < 3:
            print("Usage: stats <column>", file=sys.stderr)
            return 1
        doc = csv_parse(sys.stdin.read())
        print(json.dumps(csv_stats(doc, sys.argv[2]), indent=2))
    elif cmd == "select":
        if len(sys.argv) < 3:
            print("Usage: select <col1,col2,...>", file=sys.stderr)
            return 1
        doc = csv_parse(sys.stdin.read())
        print(csv_dump(csv_select(doc, sys.argv[2].split(","))))
    elif cmd == "filter":
        if len(sys.argv) < 4:
            print("Usage: filter <column> <value>", file=sys.stderr)
            return 1
        doc = csv_parse(sys.stdin.read())
        result = CsvDocument(
            headers=doc.headers,
            rows=doc.filter_rows(sys.argv[2], sys.argv[3]),
            config=doc.config,
        )
        print(csv_dump(result))
    else:
        print(f"Unknown command: {cmd}", file=sys.stderr)
        return 1
    return 0
if __name__ == "__main__":
sys.exit(main())
| false
|
serial_csv
| 478
| 0
|
[
"lambda",
"context_manager",
"class_definition",
"stdin_usage",
"decorator"
] | 0.783
|
Type inference hints:
Hint: bool for return type [High] (explicit return, explicit return)
Hint: str for variable 'value' [High] (usage patterns suggest this type)
Type inference hints:
Hint: list[Any] for variable 'result' [High] (usage patterns suggest this type)
Type inference hints:
Hint: list[Any] for variable 'values' [High] (usage patterns suggest this type)
Type inference hints:
Hint: list[Any] for variable 'new_headers' [High] (usage patterns suggest this type)
Hint: list[Any] for va
|
|
example_serial_csv
|
test_serial_csv_cli.py
|
"""Tests for serial_csv_cli.py"""
from serial_csv_cli import (
CsvConfig,
CsvDocument,
csv_aggregate,
csv_dump,
csv_group_by,
csv_join,
csv_parse,
csv_schema,
csv_select,
csv_stats,
csv_to_typed,
infer_column_type,
infer_type,
simulate_csv,
)
class TestCsvParser:
def test_simple(self):
text = "name,age\nAlice,30\nBob,25"
doc = csv_parse(text)
assert doc.headers == ["name", "age"]
assert len(doc.rows) == 2
def test_no_header(self):
config = CsvConfig(has_header=False)
text = "Alice,30\nBob,25"
doc = csv_parse(text, config)
assert doc.headers == []
assert len(doc.rows) == 2
def test_quoted_field(self):
text = 'name,bio\nAlice,"Hello, World"'
doc = csv_parse(text)
assert doc.rows[0][1] == "Hello, World"
def test_escaped_quote(self):
text = 'name,quote\nAlice,"He said Hi"'
doc = csv_parse(text)
assert "He said Hi" in doc.rows[0][1]
def test_custom_delimiter(self):
config = CsvConfig(delimiter=";")
text = "name;age\nAlice;30"
doc = csv_parse(text, config)
assert doc.headers == ["name", "age"]
def test_whitespace_trim(self):
text = "name , age\n Alice , 30 "
doc = csv_parse(text)
assert doc.headers == ["name", "age"]
assert doc.rows[0] == ["Alice", "30"]
class TestCsvDocument:
def test_len(self):
doc = CsvDocument(headers=["a"], rows=[["1"], ["2"], ["3"]])
assert len(doc) == 3
def test_column_by_name(self):
doc = CsvDocument(headers=["name", "age"], rows=[["Alice", "30"], ["Bob", "25"]])
assert doc.column("name") == ["Alice", "Bob"]
def test_column_by_index(self):
doc = CsvDocument(headers=["name", "age"], rows=[["Alice", "30"], ["Bob", "25"]])
assert doc.column(1) == ["30", "25"]
def test_row(self):
doc = CsvDocument(headers=["name"], rows=[["Alice"], ["Bob"]])
assert doc.row(0) == ["Alice"]
assert doc.row(1) == ["Bob"]
def test_as_dicts(self):
doc = CsvDocument(headers=["name", "age"], rows=[["Alice", "30"]])
dicts = doc.as_dicts()
assert dicts == [{"name": "Alice", "age": "30"}]
def test_filter_rows(self):
doc = CsvDocument(headers=["name", "city"], rows=[["Alice", "NYC"], ["Bob", "LA"], ["Carol", "NYC"]])
filtered = doc.filter_rows("city", "NYC")
assert len(filtered) == 2
def test_sort_by(self):
doc = CsvDocument(headers=["name", "age"], rows=[["Bob", "25"], ["Alice", "30"]])
sorted_rows = doc.sort_by("name")
assert sorted_rows[0][0] == "Alice"
class TestCsvWriter:
def test_simple(self):
doc = CsvDocument(headers=["name", "age"], rows=[["Alice", "30"]])
result = csv_dump(doc)
assert "name,age" in result
assert "Alice,30" in result
def test_quote_comma(self):
doc = CsvDocument(headers=["name"], rows=[["Hello, World"]])
result = csv_dump(doc)
assert '"Hello, World"' in result
def test_no_header(self):
config = CsvConfig(has_header=False)
doc = CsvDocument(headers=[], rows=[["Alice", "30"]], config=config)
result = csv_dump(doc)
assert "name" not in result
class TestInferType:
def test_null(self):
assert infer_type("") is None
assert infer_type("null") is None
assert infer_type("NA") is None
def test_boolean_true(self):
assert infer_type("true") is True
assert infer_type("yes") is True
assert infer_type("1") is True
def test_boolean_false(self):
assert infer_type("false") is False
assert infer_type("no") is False
assert infer_type("0") is False
def test_integer(self):
assert infer_type("42") == 42
assert infer_type("-10") == -10
def test_float(self):
assert infer_type("3.14") == 3.14
assert infer_type("-2.5") == -2.5
assert infer_type(".5") == 0.5
def test_scientific(self):
assert infer_type("1e10") == 1e10
assert infer_type("1.5E-3") == 1.5e-3
def test_string(self):
assert infer_type("hello") == "hello"
class TestInferColumnType:
def test_int_column(self):
values = ["1", "2", "3"]
assert infer_column_type(values) == "int"
def test_float_column(self):
values = ["1.5", "2.5", "3.5"]
assert infer_column_type(values) == "float"
def test_bool_column(self):
values = ["true", "false", "true"]
assert infer_column_type(values) == "bool"
def test_mixed_numeric(self):
values = ["10", "2.5", "30"]
result = infer_column_type(values)
assert result in ("int", "float")
def test_string_column(self):
values = ["Alice", "Bob", "Carol"]
assert infer_column_type(values) == "str"
class TestCsvSchema:
    """csv_schema infers per-column type names."""

    def test_schema(self):
        doc = CsvDocument(
            headers=["name", "age", "active"],
            rows=[["Alice", "30", "true"], ["Bob", "25", "false"]],
        )
        inferred = csv_schema(doc)
        assert inferred["name"] == "str"
        assert inferred["age"] == "int"
        assert inferred["active"] == "bool"
class TestCsvToTyped:
    """csv_to_typed coerces cell values to native types."""

    def test_conversion(self):
        doc = CsvDocument(headers=["name", "age", "rate"], rows=[["Alice", "30", "3.14"]])
        records = csv_to_typed(doc)
        assert records[0]["name"] == "Alice"
        assert records[0]["age"] == 30
        assert records[0]["rate"] == 3.14
class TestCsvStats:
def test_basic_stats(self):
doc = CsvDocument(headers=["value"], rows=[["10"], ["20"], ["30"]])
stats = csv_stats(doc, "value")
assert stats["count"] == 3
assert stats["min"] == 10
assert stats["max"] == 30
assert stats["sum"] == 60
assert stats["mean"] == 20
def test_median_odd(self):
doc = CsvDocument(headers=["value"], rows=[["1"], ["2"], ["3"]])
stats = csv_stats(doc, "value")
assert stats["median"] == 2
def test_median_even(self):
doc = CsvDocument(headers=["value"], rows=[["1"], ["2"], ["3"], ["4"]])
stats = csv_stats(doc, "value")
assert stats["median"] == 2.5
class TestCsvSelect:
    """csv_select projects onto named columns."""

    def test_select(self):
        doc = CsvDocument(headers=["a", "b", "c"], rows=[["1", "2", "3"]])
        projected = csv_select(doc, ["a", "c"])
        assert projected.headers == ["a", "c"]
        assert projected.rows[0] == ["1", "3"]
class TestCsvJoin:
    """csv_join performs an inner join on the key column."""

    def test_inner_join(self):
        people = CsvDocument(headers=["id", "name"], rows=[["1", "Alice"], ["2", "Bob"]])
        ages = CsvDocument(headers=["id", "age"], rows=[["1", "30"], ["3", "25"]])
        joined = csv_join(people, ages, "id")
        assert len(joined.rows) == 1
        assert joined.headers == ["id", "name", "age"]
class TestCsvGroupBy:
    """csv_group_by buckets rows by a column's value."""

    def test_group(self):
        doc = CsvDocument(
            headers=["city", "name"], rows=[["NYC", "Alice"], ["LA", "Bob"], ["NYC", "Carol"]]
        )
        buckets = csv_group_by(doc, "city")
        assert len(buckets["NYC"]) == 2
        assert len(buckets["LA"]) == 1
class TestCsvAggregate:
def test_sum(self):
doc = CsvDocument(
headers=["city", "sales"],
rows=[["NYC", "100"], ["LA", "200"], ["NYC", "150"]],
)
result = csv_aggregate(doc, "city", "sales", "sum")
assert result["NYC"] == 250
assert result["LA"] == 200
def test_avg(self):
doc = CsvDocument(
headers=["city", "sales"],
rows=[["NYC", "100"], ["NYC", "200"]],
)
result = csv_aggregate(doc, "city", "sales", "avg")
assert result["NYC"] == 150
class TestSimulateCsv:
def test_parse(self):
result = simulate_csv(["parse:name,age\nAlice,30"])
assert result == ["ok"]
def test_rows(self):
result = simulate_csv(["parse:name,age\nAlice,30\nBob,25", "rows"])
assert result[1] == "2"
def test_cols(self):
result = simulate_csv(["parse:name,age\nAlice,30", "cols"])
assert result[1] == "2"
def test_headers(self):
result = simulate_csv(["parse:name,age\nAlice,30", "headers"])
assert result[1] == "name,age"
def test_schema(self):
result = simulate_csv(["parse:name,age\nAlice,30", "schema"])
assert "name:str" in result[1]
assert "age:int" in result[1]
def test_column(self):
result = simulate_csv(["parse:name,age\nAlice,30\nBob,25", "column:name"])
assert result[1] == "Alice,Bob"
def test_filter(self):
result = simulate_csv(["parse:name,age\nAlice,30\nBob,25", "filter:age=30"])
assert result[1] == "1"
def test_stats(self):
result = simulate_csv(["parse:value\n10\n20\n30", "stats:value"])
assert "count:3" in result[1]
class TestRoundTrip:
    """parse -> dump -> parse preserves headers and rows."""

    def test_roundtrip(self):
        text = "name,age\nAlice,30\nBob,25\n"
        first = csv_parse(text)
        second = csv_parse(csv_dump(first))
        assert second.headers == first.headers
        assert second.rows == first.rows
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_serial_csv/test_serial_csv_cli.py (9284 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_serial_csv/test_serial_csv_cli.rs (16834 bytes)
⏱️ Parse time: 58ms
📊 Throughput: 154.6 KB/s
⏱️ Total time: 58ms
| true
|
serial_csv
| 297
| 5
|
[
"class_definition"
] | 0.612
| null |
example_serial_ini
|
serial_ini_cli.py
|
"""INI File Parser and Writer CLI.
Demonstrates INI parsing, writing, and manipulation patterns.
"""
import re
import sys
from dataclasses import dataclass, field
from typing import Any
@dataclass
class IniSection:
    """INI file section."""
    # Section name as it appears between the [brackets].
    name: str
    # Key/value pairs in declaration order.
    values: dict[str, str] = field(default_factory=dict)
    # Comment lines that immediately preceded the section header.
    comments: list[str] = field(default_factory=list)
@dataclass
class IniDocument:
    """INI document."""

    sections: dict[str, IniSection] = field(default_factory=dict)
    global_values: dict[str, str] = field(default_factory=dict)
    header_comments: list[str] = field(default_factory=list)

    def get(self, section: str, key: str, default: str = "") -> str:
        """Value of *key* in *section*, or *default* when either is absent."""
        found = self.sections.get(section)
        if found is None:
            return default
        return found.values.get(key, default)

    def set(self, section: str, key: str, value: str) -> None:
        """Set *key* in *section*, creating the section on demand."""
        if section not in self.sections:
            self.sections[section] = IniSection(name=section)
        self.sections[section].values[key] = value

    def has_section(self, name: str) -> bool:
        """True when a section called *name* exists."""
        return name in self.sections

    def has_key(self, section: str, key: str) -> bool:
        """True when *section* exists and contains *key*."""
        return section in self.sections and key in self.sections[section].values

    def remove_key(self, section: str, key: str) -> bool:
        """Delete *key* from *section*; True when something was removed."""
        if not self.has_key(section, key):
            return False
        del self.sections[section].values[key]
        return True

    def remove_section(self, section: str) -> bool:
        """Delete *section*; True when something was removed."""
        if section not in self.sections:
            return False
        del self.sections[section]
        return True

    def section_names(self) -> list[str]:
        """Section names in declaration order."""
        return list(self.sections)

    def keys(self, section: str) -> list[str]:
        """Keys of *section* in declaration order; [] for unknown sections."""
        found = self.sections.get(section)
        return list(found.values) if found is not None else []
class IniParser:
    """INI file parser.

    Line-oriented: recognizes [section] headers, key = value pairs, and
    ;/# comments. Comments immediately preceding a section header are
    attached to that section; a blank line discards pending comments.
    """

    # [section name] — captures everything between the brackets.
    SECTION_PATTERN = re.compile(r"^\s*\[([^\]]+)\]\s*$")
    # key = value — key matched lazily so surrounding spaces trim cleanly.
    KEY_VALUE_PATTERN = re.compile(r"^\s*([^=]+?)\s*=\s*(.*)$")
    # Lines starting with ';' or '#' are comments.
    COMMENT_PATTERN = re.compile(r"^\s*[;#](.*)$")

    def __init__(self, text: str) -> None:
        self.text = text
        self.lines = text.splitlines()
        # NOTE(review): self.pos is never read by parse() — looks vestigial.
        self.pos = 0

    def parse(self) -> IniDocument:
        """Parse INI text into an IniDocument."""
        doc = IniDocument()
        current_section: IniSection | None = None
        pending_comments: list[str] = []
        for line in self.lines:
            # Comment
            comment_match = self.COMMENT_PATTERN.match(line)
            if comment_match:
                pending_comments.append(comment_match.group(1).strip())
                continue
            # Empty line
            if not line.strip():
                pending_comments.clear()
                continue
            # Section header
            section_match = self.SECTION_PATTERN.match(line)
            if section_match:
                section_name = section_match.group(1).strip()
                current_section = IniSection(name=section_name, comments=pending_comments.copy())
                doc.sections[section_name] = current_section
                pending_comments.clear()
                continue
            # Key-value pair
            kv_match = self.KEY_VALUE_PATTERN.match(line)
            if kv_match:
                key = kv_match.group(1).strip()
                value = kv_match.group(2).strip()
                # Remove quotes if present
                if len(value) >= 2:
                    if (value.startswith('"') and value.endswith('"')) or (
                        value.startswith("'") and value.endswith("'")
                    ):
                        value = value[1:-1]
                # Keys seen before any [section] land in the globals.
                if current_section:
                    current_section.values[key] = value
                else:
                    doc.global_values[key] = value
                pending_comments.clear()
        return doc
class IniWriter:
"""INI file writer."""
def __init__(self, doc: IniDocument) -> None:
self.doc = doc
def write(self) -> str:
"""Write INI document to string."""
lines = []
# Header comments
for comment in self.doc.header_comments:
lines.append(f"; {comment}")
# Global values
for key, value in self.doc.global_values.items():
lines.append(self._format_value(key, value))
if self.doc.global_values:
lines.append("")
# Sections
for section_name, section in self.doc.sections.items():
# Section comments
for comment in section.comments:
lines.append(f"; {comment}")
lines.append(f"[{section_name}]")
for key, value in section.values.items():
lines.append(self._format_value(key, value))
lines.append("")
return "\n".join(lines).rstrip() + "\n"
def _format_value(self, key: str, value: str) -> str:
"""Format a key-value pair."""
# Quote values with special characters
if " " in value or ";" in value or "#" in value or "=" in value:
value = f'"{value}"'
return f"{key} = {value}"
def ini_parse(text: str) -> IniDocument:
"""Parse INI text."""
return IniParser(text).parse()
def ini_dump(doc: IniDocument) -> str:
"""Dump INI document to string."""
return IniWriter(doc).write()
def ini_get(doc: IniDocument, section: str, key: str, default: str = "") -> str:
"""Get value from INI document."""
return doc.get(section, key, default)
def ini_set(doc: IniDocument, section: str, key: str, value: str) -> None:
"""Set value in INI document."""
doc.set(section, key, value)
def ini_to_dict(doc: IniDocument) -> dict[str, dict[str, str]]:
"""Convert INI document to nested dict."""
result: dict[str, dict[str, str]] = {}
for section_name, section in doc.sections.items():
result[section_name] = dict(section.values)
return result
def dict_to_ini(data: dict[str, dict[str, str]]) -> IniDocument:
"""Convert nested dict to INI document."""
doc = IniDocument()
for section_name, values in data.items():
section = IniSection(name=section_name, values=dict(values))
doc.sections[section_name] = section
return doc
def ini_merge(base: IniDocument, overlay: IniDocument) -> IniDocument:
"""Merge two INI documents."""
result = IniDocument()
# Copy base
for name, section in base.sections.items():
result.sections[name] = IniSection(
name=name, values=dict(section.values), comments=list(section.comments)
)
# Merge overlay
for name, section in overlay.sections.items():
if name not in result.sections:
result.sections[name] = IniSection(name=name)
result.sections[name].values.update(section.values)
return result
def ini_diff(doc1: IniDocument, doc2: IniDocument) -> list[str]:
"""Find differences between two INI documents."""
diffs = []
all_sections = set(doc1.sections.keys()) | set(doc2.sections.keys())
for section in sorted(all_sections):
if section not in doc1.sections:
diffs.append(f"+ [{section}]")
continue
if section not in doc2.sections:
diffs.append(f"- [{section}]")
continue
keys1 = doc1.sections[section].values
keys2 = doc2.sections[section].values
all_keys = set(keys1.keys()) | set(keys2.keys())
for key in sorted(all_keys):
if key not in keys1:
diffs.append(f"+ [{section}] {key} = {keys2[key]}")
elif key not in keys2:
diffs.append(f"- [{section}] {key} = {keys1[key]}")
elif keys1[key] != keys2[key]:
diffs.append(f"~ [{section}] {key}: {keys1[key]} -> {keys2[key]}")
return diffs
def ini_validate(doc: IniDocument, schema: dict[str, list[str]]) -> list[str]:
"""Validate INI document against schema of required keys per section."""
errors = []
for section_name, required_keys in schema.items():
if section_name not in doc.sections:
errors.append(f"Missing section: [{section_name}]")
continue
section = doc.sections[section_name]
for key in required_keys:
if key not in section.values:
errors.append(f"Missing key: [{section_name}] {key}")
return errors
def ini_interpolate(doc: IniDocument) -> IniDocument:
"""Expand ${section.key} references in values."""
result = IniDocument()
# Copy structure
for name, section in doc.sections.items():
result.sections[name] = IniSection(name=name, values=dict(section.values))
# Interpolate
pattern = re.compile(r"\$\{([^}]+)\}")
for section in result.sections.values():
for key, value in section.values.items():
new_value = value
for match in pattern.finditer(value):
ref = match.group(1)
parts = ref.split(".")
if len(parts) == 2:
ref_section, ref_key = parts
ref_value = result.get(ref_section, ref_key, "")
new_value = new_value.replace(match.group(0), ref_value)
section.values[key] = new_value
return result
def simulate_ini(operations: list[str]) -> list[str]:
"""Simulate INI operations."""
results = []
context: IniDocument | None = None
for op in operations:
parts = op.split(":", 1)
cmd = parts[0]
if cmd == "parse":
context = ini_parse(parts[1])
results.append("ok")
elif cmd == "get" and context:
path = parts[1].split(".")
if len(path) == 2:
value = context.get(path[0], path[1])
results.append(value if value else "null")
elif cmd == "set" and context:
path_value = parts[1].split("=", 1)
path = path_value[0].split(".")
if len(path) == 2:
context.set(path[0], path[1], path_value[1])
results.append("ok")
elif cmd == "sections" and context:
results.append(",".join(context.section_names()))
elif cmd == "keys" and context:
results.append(",".join(context.keys(parts[1])))
elif cmd == "has_section" and context:
results.append("1" if context.has_section(parts[1]) else "0")
elif cmd == "has_key" and context:
path = parts[1].split(".")
if len(path) == 2:
results.append("1" if context.has_key(path[0], path[1]) else "0")
return results
def type_infer(value: str) -> Any:
"""Infer type from string value."""
if value.lower() in ("true", "yes", "on", "1"):
return True
if value.lower() in ("false", "no", "off", "0"):
return False
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def ini_to_typed_dict(doc: IniDocument) -> dict[str, dict[str, Any]]:
"""Convert INI document to dict with type inference."""
result: dict[str, dict[str, Any]] = {}
for section_name, section in doc.sections.items():
result[section_name] = {key: type_infer(value) for key, value in section.values.items()}
return result
def main() -> int:
"""CLI entry point."""
import json
if len(sys.argv) < 2:
print("Usage: serial_ini_cli.py <command> [args...]")
print("Commands: parse, get, set, dump, sections, validate")
return 1
cmd = sys.argv[1]
if cmd == "parse":
text = sys.stdin.read()
doc = ini_parse(text)
print(json.dumps(ini_to_dict(doc), indent=2))
elif cmd == "get":
if len(sys.argv) < 4:
print("Usage: get <section> <key>", file=sys.stderr)
return 1
text = sys.stdin.read()
doc = ini_parse(text)
value = doc.get(sys.argv[2], sys.argv[3])
print(value)
elif cmd == "sections":
text = sys.stdin.read()
doc = ini_parse(text)
for section in doc.section_names():
print(section)
elif cmd == "dump":
data = json.loads(sys.stdin.read())
doc = dict_to_ini(data)
print(ini_dump(doc))
elif cmd == "validate":
if len(sys.argv) < 3:
print("Usage: validate <schema_json>", file=sys.stderr)
return 1
schema = json.loads(sys.argv[2])
text = sys.stdin.read()
doc = ini_parse(text)
errors = ini_validate(doc, schema)
if errors:
for err in errors:
print(err)
return 1
print("Valid")
return 0
if __name__ == "__main__":
sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_serial_ini/serial_ini_cli.py (13238 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_serial_ini/serial_ini_cli.rs (28237 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_serial_ini/Cargo.toml (3 dependencies)
⏱️ Parse time: 61ms
📊 Throughput: 208.9 KB/s
⏱️ Total time: 62ms
| true
|
serial_ini
| 432
| 6
|
[
"context_manager",
"class_definition",
"exception_handling",
"stdin_usage",
"decorator"
] | 0.652
| null |
example_serial_ini
|
test_serial_ini_cli.py
|
"""Tests for serial_ini_cli.py"""
from serial_ini_cli import (
IniDocument,
IniSection,
dict_to_ini,
ini_diff,
ini_dump,
ini_interpolate,
ini_merge,
ini_parse,
ini_to_dict,
ini_to_typed_dict,
ini_validate,
simulate_ini,
type_infer,
)
class TestIniParser:
def test_simple(self):
text = "[section]\nkey = value"
doc = ini_parse(text)
assert "section" in doc.sections
assert doc.sections["section"].values["key"] == "value"
def test_multiple_keys(self):
text = "[section]\nkey1 = value1\nkey2 = value2"
doc = ini_parse(text)
assert doc.sections["section"].values["key1"] == "value1"
assert doc.sections["section"].values["key2"] == "value2"
def test_multiple_sections(self):
text = "[section1]\na = 1\n\n[section2]\nb = 2"
doc = ini_parse(text)
assert "section1" in doc.sections
assert "section2" in doc.sections
def test_quoted_value(self):
text = '[section]\nkey = "value with spaces"'
doc = ini_parse(text)
assert doc.sections["section"].values["key"] == "value with spaces"
def test_comment(self):
text = "; comment\n[section]\nkey = value"
doc = ini_parse(text)
assert doc.sections["section"].values["key"] == "value"
def test_hash_comment(self):
text = "# comment\n[section]\nkey = value"
doc = ini_parse(text)
assert doc.sections["section"].values["key"] == "value"
class TestIniDocument:
def test_get(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"host": "localhost"})
assert doc.get("db", "host") == "localhost"
def test_get_default(self):
doc = IniDocument()
assert doc.get("missing", "key", "default") == "default"
def test_set(self):
doc = IniDocument()
doc.set("db", "host", "localhost")
assert doc.sections["db"].values["host"] == "localhost"
def test_has_section(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db")
assert doc.has_section("db")
assert not doc.has_section("missing")
def test_has_key(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"host": "localhost"})
assert doc.has_key("db", "host")
assert not doc.has_key("db", "missing")
def test_remove_key(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"host": "localhost"})
assert doc.remove_key("db", "host")
assert "host" not in doc.sections["db"].values
def test_remove_section(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db")
assert doc.remove_section("db")
assert "db" not in doc.sections
def test_section_names(self):
doc = IniDocument()
doc.sections["a"] = IniSection(name="a")
doc.sections["b"] = IniSection(name="b")
assert set(doc.section_names()) == {"a", "b"}
def test_keys(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"host": "localhost", "port": "5432"})
assert set(doc.keys("db")) == {"host", "port"}
class TestIniWriter:
def test_simple(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"host": "localhost"})
result = ini_dump(doc)
assert "[db]" in result
assert "host = localhost" in result
def test_quoting(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"name": "with spaces"})
result = ini_dump(doc)
assert '"with spaces"' in result
class TestIniToDict:
def test_conversion(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"host": "localhost", "port": "5432"})
result = ini_to_dict(doc)
assert result == {"db": {"host": "localhost", "port": "5432"}}
class TestDictToIni:
def test_conversion(self):
data = {"db": {"host": "localhost"}}
doc = dict_to_ini(data)
assert "db" in doc.sections
assert doc.sections["db"].values["host"] == "localhost"
class TestIniMerge:
def test_simple(self):
doc1 = IniDocument()
doc1.sections["a"] = IniSection(name="a", values={"x": "1"})
doc2 = IniDocument()
doc2.sections["b"] = IniSection(name="b", values={"y": "2"})
result = ini_merge(doc1, doc2)
assert "a" in result.sections
assert "b" in result.sections
def test_override(self):
doc1 = IniDocument()
doc1.sections["a"] = IniSection(name="a", values={"x": "1"})
doc2 = IniDocument()
doc2.sections["a"] = IniSection(name="a", values={"x": "2"})
result = ini_merge(doc1, doc2)
assert result.sections["a"].values["x"] == "2"
class TestIniDiff:
def test_added_section(self):
doc1 = IniDocument()
doc2 = IniDocument()
doc2.sections["new"] = IniSection(name="new")
diffs = ini_diff(doc1, doc2)
assert any("+ [new]" in d for d in diffs)
def test_removed_section(self):
doc1 = IniDocument()
doc1.sections["old"] = IniSection(name="old")
doc2 = IniDocument()
diffs = ini_diff(doc1, doc2)
assert any("- [old]" in d for d in diffs)
def test_changed_value(self):
doc1 = IniDocument()
doc1.sections["a"] = IniSection(name="a", values={"x": "1"})
doc2 = IniDocument()
doc2.sections["a"] = IniSection(name="a", values={"x": "2"})
diffs = ini_diff(doc1, doc2)
assert len(diffs) == 1
assert "~" in diffs[0]
class TestIniValidate:
def test_valid(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={"host": "localhost"})
schema = {"db": ["host"]}
errors = ini_validate(doc, schema)
assert errors == []
def test_missing_section(self):
doc = IniDocument()
schema = {"db": ["host"]}
errors = ini_validate(doc, schema)
assert any("Missing section" in e for e in errors)
def test_missing_key(self):
doc = IniDocument()
doc.sections["db"] = IniSection(name="db", values={})
schema = {"db": ["host"]}
errors = ini_validate(doc, schema)
assert any("Missing key" in e for e in errors)
class TestIniInterpolate:
def test_simple(self):
doc = IniDocument()
doc.sections["paths"] = IniSection(name="paths", values={"base": "/home"})
doc.sections["app"] = IniSection(name="app", values={"data": "${paths.base}/data"})
result = ini_interpolate(doc)
assert result.sections["app"].values["data"] == "/home/data"
class TestTypeInfer:
def test_true(self):
assert type_infer("true") is True
assert type_infer("yes") is True
assert type_infer("on") is True
def test_false(self):
assert type_infer("false") is False
assert type_infer("no") is False
assert type_infer("off") is False
def test_integer(self):
assert type_infer("42") == 42
assert type_infer("-10") == -10
def test_float(self):
assert type_infer("3.14") == 3.14
def test_string(self):
assert type_infer("hello") == "hello"
class TestIniToTypedDict:
def test_conversion(self):
doc = IniDocument()
doc.sections["config"] = IniSection(
name="config", values={"enabled": "true", "port": "8080", "rate": "3.14", "name": "test"}
)
result = ini_to_typed_dict(doc)
assert result["config"]["enabled"] is True
assert result["config"]["port"] == 8080
assert result["config"]["rate"] == 3.14
assert result["config"]["name"] == "test"
class TestSimulateIni:
def test_parse(self):
result = simulate_ini(["parse:[db]\nhost = localhost"])
assert result == ["ok"]
def test_get(self):
result = simulate_ini(["parse:[db]\nhost = localhost", "get:db.host"])
assert result[1] == "localhost"
def test_set(self):
result = simulate_ini(["parse:[db]\nhost = old", "set:db.host=new", "get:db.host"])
assert result[2] == "new"
def test_sections(self):
result = simulate_ini(["parse:[a]\nx = 1\n[b]\ny = 2", "sections"])
assert "a" in result[1]
assert "b" in result[1]
def test_keys(self):
result = simulate_ini(["parse:[db]\nhost = localhost\nport = 5432", "keys:db"])
assert "host" in result[1]
assert "port" in result[1]
def test_has_section(self):
result = simulate_ini(["parse:[db]\nhost = localhost", "has_section:db", "has_section:missing"])
assert result[1] == "1"
assert result[2] == "0"
def test_has_key(self):
result = simulate_ini(["parse:[db]\nhost = localhost", "has_key:db.host", "has_key:db.missing"])
assert result[1] == "1"
assert result[2] == "0"
class TestRoundTrip:
def test_roundtrip(self):
original = {"database": {"host": "localhost", "port": "5432"}}
doc = dict_to_ini(original)
dumped = ini_dump(doc)
parsed = ini_parse(dumped)
result = ini_to_dict(parsed)
assert result == original
| false
|
serial_ini
| 292
| 0
|
[
"context_manager",
"class_definition"
] | 0.652
|
Error: Expression type not yet supported: GeneratorExp { element: Binary { op: In, left: Literal(String("+ [new]")), right: Var("d") }, generators: [HirComprehension { target: "d", iter: Var("diffs"), conditions: [] }] }
|
|
example_serial_json
|
serial_json_cli.py
|
"""JSON Schema Validation and Manipulation CLI.
Demonstrates JSON parsing, validation, schema checking, and transformation patterns.
"""
import json
import sys
from dataclasses import dataclass
from typing import Any
@dataclass
class SchemaType:
"""Represents a JSON schema type."""
type_name: str
required: bool = True
default: Any = None
min_value: float | None = None
max_value: float | None = None
min_length: int | None = None
max_length: int | None = None
pattern: str | None = None
enum_values: list[Any] | None = None
@dataclass
class Schema:
"""JSON Schema definition."""
fields: dict[str, SchemaType]
allow_extra: bool = False
def validate_type(value: Any, schema_type: SchemaType) -> tuple[bool, str]:
"""Validate a value against a schema type."""
type_map = {
"string": str,
"number": (int, float),
"integer": int,
"boolean": bool,
"array": list,
"object": dict,
"null": type(None),
}
expected = type_map.get(schema_type.type_name)
if expected is None:
return False, f"Unknown type: {schema_type.type_name}"
if not isinstance(value, expected):
return False, f"Expected {schema_type.type_name}, got {type(value).__name__}"
if schema_type.type_name == "string":
if schema_type.min_length is not None and len(value) < schema_type.min_length:
return False, f"String too short: {len(value)} < {schema_type.min_length}"
if schema_type.max_length is not None and len(value) > schema_type.max_length:
return False, f"String too long: {len(value)} > {schema_type.max_length}"
if schema_type.type_name in ("number", "integer"):
if schema_type.min_value is not None and value < schema_type.min_value:
return False, f"Value too small: {value} < {schema_type.min_value}"
if schema_type.max_value is not None and value > schema_type.max_value:
return False, f"Value too large: {value} > {schema_type.max_value}"
if schema_type.enum_values is not None and value not in schema_type.enum_values:
return False, f"Value not in enum: {value}"
return True, ""
def validate_object(data: dict[str, Any], schema: Schema) -> list[str]:
"""Validate a JSON object against a schema."""
errors = []
for field_name, field_type in schema.fields.items():
if field_name not in data:
if field_type.required and field_type.default is None:
errors.append(f"Missing required field: {field_name}")
continue
valid, msg = validate_type(data[field_name], field_type)
if not valid:
errors.append(f"Field '{field_name}': {msg}")
if not schema.allow_extra:
for key in data:
if key not in schema.fields:
errors.append(f"Unexpected field: {key}")
return errors
def json_parse(text: str) -> tuple[Any, str | None]:
"""Parse JSON string safely."""
try:
return json.loads(text), None
except json.JSONDecodeError as e:
return None, str(e)
def json_stringify(data: Any, pretty: bool = False) -> str:
"""Convert data to JSON string."""
if pretty:
return json.dumps(data, indent=2, sort_keys=True)
return json.dumps(data, separators=(",", ":"))
def json_get(data: Any, path: str) -> Any:
"""Get value at JSON path (dot notation)."""
if not path:
return data
parts = path.split(".")
current = data
for part in parts:
if isinstance(current, dict) and part in current:
current = current[part]
elif isinstance(current, list):
try:
idx = int(part)
if 0 <= idx < len(current):
current = current[idx]
else:
return None
except ValueError:
return None
else:
return None
return current
def json_set(data: dict[str, Any], path: str, value: Any) -> dict[str, Any]:
"""Set value at JSON path (returns new dict)."""
result = json.loads(json.dumps(data))
parts = path.split(".")
current = result
for part in parts[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[parts[-1]] = value
return result
def json_delete(data: dict[str, Any], path: str) -> dict[str, Any]:
"""Delete value at JSON path (returns new dict)."""
result = json.loads(json.dumps(data))
parts = path.split(".")
current = result
for part in parts[:-1]:
if part not in current:
return result
current = current[part]
if parts[-1] in current:
del current[parts[-1]]
return result
def json_merge(base: dict[str, Any], overlay: dict[str, Any]) -> dict[str, Any]:
"""Deep merge two JSON objects."""
result = json.loads(json.dumps(base))
for key, value in overlay.items():
if key in result and isinstance(result[key], dict) and isinstance(value, dict):
result[key] = json_merge(result[key], value)
else:
result[key] = json.loads(json.dumps(value))
return result
def json_diff(obj1: Any, obj2: Any, path: str = "") -> list[str]:
"""Find differences between two JSON values."""
diffs = []
if type(obj1) is not type(obj2):
diffs.append(f"{path}: type {type(obj1).__name__} != {type(obj2).__name__}")
return diffs
if isinstance(obj1, dict):
all_keys = set(obj1.keys()) | set(obj2.keys())
for key in sorted(all_keys):
new_path = f"{path}.{key}" if path else key
if key not in obj1:
diffs.append(f"{new_path}: added")
elif key not in obj2:
diffs.append(f"{new_path}: removed")
else:
diffs.extend(json_diff(obj1[key], obj2[key], new_path))
elif isinstance(obj1, list):
if len(obj1) != len(obj2):
diffs.append(f"{path}: length {len(obj1)} != {len(obj2)}")
else:
for i, (a, b) in enumerate(zip(obj1, obj2, strict=False)):
diffs.extend(json_diff(a, b, f"{path}[{i}]"))
elif obj1 != obj2:
diffs.append(f"{path}: {obj1!r} != {obj2!r}")
return diffs
def json_flatten(data: Any, prefix: str = "") -> dict[str, Any]:
"""Flatten nested JSON to dot-notation keys."""
result = {}
if isinstance(data, dict):
for key, value in data.items():
new_key = f"{prefix}.{key}" if prefix else key
result.update(json_flatten(value, new_key))
elif isinstance(data, list):
for i, value in enumerate(data):
new_key = f"{prefix}[{i}]"
result.update(json_flatten(value, new_key))
else:
result[prefix] = data
return result
def json_unflatten(data: dict[str, Any]) -> dict[str, Any]:
"""Unflatten dot-notation keys to nested JSON."""
result: dict[str, Any] = {}
for key, value in data.items():
parts = key.replace("[", ".").replace("]", "").split(".")
current = result
for i, part in enumerate(parts[:-1]):
next_part = parts[i + 1]
is_array = next_part.isdigit()
if part not in current:
current[part] = [] if is_array else {}
current = current[part]
if is_array and isinstance(current, list):
idx = int(next_part)
while len(current) <= idx:
current.append({})
final_part = parts[-1]
if final_part.isdigit() and isinstance(current, list):
idx = int(final_part)
while len(current) <= idx:
current.append(None)
current[idx] = value
else:
current[final_part] = value
return result
def json_query(data: list[dict[str, Any]], conditions: dict[str, Any]) -> list[dict[str, Any]]:
"""Query array of objects with conditions."""
results = []
for item in data:
matches = True
for key, expected in conditions.items():
actual = json_get(item, key)
if actual != expected:
matches = False
break
if matches:
results.append(item)
return results
def simulate_json(operations: list[str]) -> list[str]:
"""Simulate JSON operations from command strings."""
results = []
context: dict[str, Any] = {}
for op in operations:
parts = op.split(":", 1)
cmd = parts[0]
if cmd == "parse":
data, err = json_parse(parts[1])
if err:
results.append(f"error:{err}")
else:
context["data"] = data
results.append("ok")
elif cmd == "stringify":
results.append(json_stringify(context.get("data", {})))
elif cmd == "get":
value = json_get(context.get("data"), parts[1])
results.append(json.dumps(value))
elif cmd == "set":
path_value = parts[1].split("=", 1)
value, _ = json_parse(path_value[1])
context["data"] = json_set(context.get("data", {}), path_value[0], value)
results.append("ok")
elif cmd == "delete":
context["data"] = json_delete(context.get("data", {}), parts[1])
results.append("ok")
elif cmd == "flatten":
flat = json_flatten(context.get("data", {}))
results.append(json_stringify(flat))
return results
def main() -> int:
"""CLI entry point."""
if len(sys.argv) < 2:
print("Usage: serial_json_cli.py <command> [args...]")
print("Commands: parse, validate, get, set, merge, diff, flatten")
return 1
cmd = sys.argv[1]
if cmd == "parse":
text = sys.stdin.read()
data, err = json_parse(text)
if err:
print(f"Error: {err}", file=sys.stderr)
return 1
print(json_stringify(data, pretty=True))
elif cmd == "validate":
if len(sys.argv) < 3:
print("Usage: validate <schema_json>", file=sys.stderr)
return 1
schema_data, _ = json_parse(sys.argv[2])
data, _ = json_parse(sys.stdin.read())
fields = {}
for name, spec in schema_data.get("fields", {}).items():
fields[name] = SchemaType(
type_name=spec.get("type", "string"),
required=spec.get("required", True),
)
schema = Schema(fields=fields)
errors = validate_object(data, schema)
if errors:
for err in errors:
print(err)
return 1
print("Valid")
elif cmd == "get":
if len(sys.argv) < 3:
print("Usage: get <path>", file=sys.stderr)
return 1
data, _ = json_parse(sys.stdin.read())
value = json_get(data, sys.argv[2])
print(json_stringify(value, pretty=True))
elif cmd == "flatten":
data, _ = json_parse(sys.stdin.read())
flat = json_flatten(data)
print(json_stringify(flat, pretty=True))
return 0
if __name__ == "__main__":
sys.exit(main())
| false
|
serial_json
| 370
| 0
|
[
"context_manager",
"class_definition",
"exception_handling",
"stdin_usage",
"decorator"
] | 0.652
|
Error: 'is not' operator not yet supported (use != for value comparison)
|
|
example_serial_json
|
test_serial_json_cli.py
|
"""Tests for serial_json_cli.py"""
from serial_json_cli import (
Schema,
SchemaType,
json_delete,
json_diff,
json_flatten,
json_get,
json_merge,
json_parse,
json_query,
json_set,
json_stringify,
json_unflatten,
simulate_json,
validate_object,
validate_type,
)
class TestJsonParse:
def test_simple_object(self):
data, err = json_parse('{"name": "test"}')
assert err is None
assert data == {"name": "test"}
def test_nested_object(self):
data, err = json_parse('{"a": {"b": 1}}')
assert err is None
assert data["a"]["b"] == 1
def test_array(self):
data, err = json_parse("[1, 2, 3]")
assert err is None
assert data == [1, 2, 3]
def test_invalid_json(self):
data, err = json_parse("{invalid}")
assert err is not None
assert data is None
def test_empty_object(self):
data, err = json_parse("{}")
assert err is None
assert data == {}
class TestJsonStringify:
def test_simple(self):
result = json_stringify({"a": 1})
assert result == '{"a":1}'
def test_pretty(self):
result = json_stringify({"a": 1}, pretty=True)
assert '"a": 1' in result
assert "\n" in result
def test_nested(self):
result = json_stringify({"a": {"b": 1}})
assert '"a"' in result
assert '"b"' in result
class TestValidateType:
def test_string(self):
valid, msg = validate_type("hello", SchemaType("string"))
assert valid
def test_integer(self):
valid, msg = validate_type(42, SchemaType("integer"))
assert valid
def test_number(self):
valid, msg = validate_type(3.14, SchemaType("number"))
assert valid
def test_boolean(self):
valid, msg = validate_type(True, SchemaType("boolean"))
assert valid
def test_array(self):
valid, msg = validate_type([1, 2, 3], SchemaType("array"))
assert valid
def test_object(self):
valid, msg = validate_type({"a": 1}, SchemaType("object"))
assert valid
def test_null(self):
valid, msg = validate_type(None, SchemaType("null"))
assert valid
def test_type_mismatch(self):
valid, msg = validate_type("hello", SchemaType("integer"))
assert not valid
def test_min_length(self):
valid, msg = validate_type("hi", SchemaType("string", min_length=5))
assert not valid
def test_max_length(self):
valid, msg = validate_type("hello world", SchemaType("string", max_length=5))
assert not valid
def test_min_value(self):
valid, msg = validate_type(5, SchemaType("integer", min_value=10))
assert not valid
def test_max_value(self):
valid, msg = validate_type(15, SchemaType("integer", max_value=10))
assert not valid
def test_enum_valid(self):
valid, msg = validate_type("red", SchemaType("string", enum_values=["red", "blue"]))
assert valid
def test_enum_invalid(self):
valid, msg = validate_type("green", SchemaType("string", enum_values=["red", "blue"]))
assert not valid
class TestValidateObject:
def test_valid_object(self):
schema = Schema(
fields={
"name": SchemaType("string"),
"age": SchemaType("integer"),
}
)
errors = validate_object({"name": "John", "age": 30}, schema)
assert errors == []
def test_missing_required(self):
schema = Schema(fields={"name": SchemaType("string", required=True)})
errors = validate_object({}, schema)
assert len(errors) == 1
def test_unexpected_field(self):
schema = Schema(fields={"name": SchemaType("string")}, allow_extra=False)
errors = validate_object({"name": "John", "extra": "value"}, schema)
assert any("Unexpected" in e for e in errors)
def test_allow_extra(self):
schema = Schema(fields={"name": SchemaType("string")}, allow_extra=True)
errors = validate_object({"name": "John", "extra": "value"}, schema)
assert errors == []
class TestJsonGet:
def test_simple(self):
data = {"name": "test"}
assert json_get(data, "name") == "test"
def test_nested(self):
data = {"a": {"b": {"c": 1}}}
assert json_get(data, "a.b.c") == 1
def test_array_index(self):
data = {"items": [10, 20, 30]}
assert json_get(data, "items.1") == 20
def test_missing(self):
data = {"name": "test"}
assert json_get(data, "missing") is None
def test_empty_path(self):
data = {"name": "test"}
assert json_get(data, "") == data
class TestJsonSet:
def test_simple(self):
data = {"name": "old"}
result = json_set(data, "name", "new")
assert result["name"] == "new"
def test_nested(self):
data = {"a": {"b": 1}}
result = json_set(data, "a.b", 2)
assert result["a"]["b"] == 2
def test_new_key(self):
data = {"a": 1}
result = json_set(data, "b", 2)
assert result["b"] == 2
def test_deep_new(self):
data = {}
result = json_set(data, "a.b.c", 1)
assert result["a"]["b"]["c"] == 1
class TestJsonDelete:
def test_simple(self):
data = {"a": 1, "b": 2}
result = json_delete(data, "a")
assert "a" not in result
assert "b" in result
def test_nested(self):
data = {"a": {"b": 1, "c": 2}}
result = json_delete(data, "a.b")
assert "b" not in result["a"]
def test_missing(self):
data = {"a": 1}
result = json_delete(data, "missing")
assert result == {"a": 1}
class TestJsonMerge:
def test_simple(self):
base = {"a": 1}
overlay = {"b": 2}
result = json_merge(base, overlay)
assert result == {"a": 1, "b": 2}
def test_override(self):
base = {"a": 1}
overlay = {"a": 2}
result = json_merge(base, overlay)
assert result["a"] == 2
def test_deep_merge(self):
base = {"a": {"b": 1, "c": 2}}
overlay = {"a": {"c": 3, "d": 4}}
result = json_merge(base, overlay)
assert result["a"] == {"b": 1, "c": 3, "d": 4}
class TestJsonDiff:
def test_equal(self):
obj = {"a": 1}
diffs = json_diff(obj, obj)
assert diffs == []
def test_added(self):
obj1 = {"a": 1}
obj2 = {"a": 1, "b": 2}
diffs = json_diff(obj1, obj2)
assert any("added" in d for d in diffs)
def test_removed(self):
obj1 = {"a": 1, "b": 2}
obj2 = {"a": 1}
diffs = json_diff(obj1, obj2)
assert any("removed" in d for d in diffs)
def test_changed(self):
obj1 = {"a": 1}
obj2 = {"a": 2}
diffs = json_diff(obj1, obj2)
assert len(diffs) == 1
class TestJsonFlatten:
def test_simple(self):
data = {"a": {"b": 1}}
result = json_flatten(data)
assert result == {"a.b": 1}
def test_array(self):
data = {"items": [1, 2]}
result = json_flatten(data)
assert result["items[0]"] == 1
assert result["items[1]"] == 2
def test_deep(self):
data = {"a": {"b": {"c": 1}}}
result = json_flatten(data)
assert result == {"a.b.c": 1}
class TestJsonUnflatten:
def test_simple(self):
data = {"a.b": 1}
result = json_unflatten(data)
assert result["a"]["b"] == 1
class TestJsonQuery:
def test_single_condition(self):
data = [{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}]
result = json_query(data, {"name": "Alice"})
assert len(result) == 1
assert result[0]["age"] == 30
def test_no_match(self):
data = [{"name": "Alice"}]
result = json_query(data, {"name": "Bob"})
assert result == []
class TestSimulateJson:
    """simulate_json: scripted parse/stringify/get/set/delete/flatten ops, one result per op."""
    def test_parse(self):
        result = simulate_json(['parse:{"a":1}'])
        assert result == ["ok"]
    def test_stringify(self):
        result = simulate_json(['parse:{"a":1}', "stringify"])
        assert '{"a":1}' in result[1]
    def test_get(self):
        result = simulate_json(['parse:{"a":{"b":2}}', "get:a.b"])
        assert result[1] == "2"
    def test_set(self):
        result = simulate_json(['parse:{"a":1}', "set:b=2"])
        assert result[1] == "ok"
    def test_delete(self):
        result = simulate_json(['parse:{"a":1,"b":2}', "delete:a"])
        assert result[1] == "ok"
    def test_flatten(self):
        result = simulate_json(['parse:{"a":{"b":1}}', "flatten"])
        assert "a.b" in result[1]
| false
|
serial_json
| 318
| 0
|
[
"class_definition"
] | 0.612
|
Error: Expression type not yet supported: GeneratorExp { element: Binary { op: In, left: Literal(String("Unexpected")), right: Var("e") }, generators: [HirComprehension { target: "e", iter: Var("errors"), conditions: [] }] }
|
|
example_serial_msgpack
|
serial_msgpack_cli.py
|
"""MessagePack-style Binary Encoding CLI.
Demonstrates binary serialization patterns without external dependencies.
Implements a subset of MessagePack format for educational purposes.
"""
import struct
import sys
from dataclasses import dataclass
from typing import Any
# MessagePack format markers
FIXINT_POS_MAX = 0x7F
FIXINT_NEG_MIN = 0xE0
FIXMAP_MAX = 0x8F
FIXARRAY_MAX = 0x9F
FIXSTR_MAX = 0xBF
NIL = 0xC0
FALSE = 0xC2
TRUE = 0xC3
BIN8 = 0xC4
BIN16 = 0xC5
BIN32 = 0xC6
FLOAT32 = 0xCA
FLOAT64 = 0xCB
UINT8 = 0xCC
UINT16 = 0xCD
UINT32 = 0xCE
UINT64 = 0xCF
INT8 = 0xD0
INT16 = 0xD1
INT32 = 0xD2
INT64 = 0xD3
STR8 = 0xD9
STR16 = 0xDA
STR32 = 0xDB
ARRAY16 = 0xDC
ARRAY32 = 0xDD
MAP16 = 0xDE
MAP32 = 0xDF
@dataclass
class EncodeResult:
    """Result of encoding operation."""
    # Encoded MessagePack payload.  NOTE(review): not referenced in this
    # file's visible code — presumably part of the public API surface.
    data: bytes
    # Length of `data` in bytes.
    size: int
@dataclass
class DecodeResult:
    """Result of decoding operation."""
    # Decoded Python value.  NOTE(review): not referenced in this file's
    # visible code — presumably part of the public API surface.
    value: Any
    # Number of input bytes consumed to produce `value`.
    bytes_consumed: int
class MsgPackEncoder:
    """MessagePack encoder.

    Builds the encoded form into an internal bytearray.  A single
    instance is reusable: encode() clears the buffer on every call.
    """

    def __init__(self) -> None:
        self.buffer = bytearray()

    def encode(self, value: Any) -> bytes:
        """Encode a value to MessagePack format and return the bytes."""
        self.buffer.clear()
        self._encode_value(value)
        return bytes(self.buffer)

    def _encode_value(self, value: Any) -> None:
        """Dispatch on the Python type of `value`.

        bool is tested before int because bool subclasses int.
        Raises ValueError for unsupported types.
        """
        if value is None:
            self.buffer.append(NIL)
        elif isinstance(value, bool):
            self.buffer.append(TRUE if value else FALSE)
        elif isinstance(value, int):
            self._encode_int(value)
        elif isinstance(value, float):
            self._encode_float(value)
        elif isinstance(value, str):
            self._encode_str(value)
        elif isinstance(value, bytes):
            self._encode_bin(value)
        elif isinstance(value, list):
            self._encode_array(value)
        elif isinstance(value, dict):
            self._encode_map(value)
        else:
            raise ValueError(f"Cannot encode type: {type(value)}")

    def _encode_int(self, value: int) -> None:
        """Encode an integer in the smallest matching representation."""
        if 0 <= value <= FIXINT_POS_MAX:
            # Positive fixint: the value itself is the marker byte.
            self.buffer.append(value)
        elif -32 <= value < 0:
            # Negative fixint: two's-complement byte in 0xE0..0xFF.
            self.buffer.append(value & 0xFF)
        elif 0 <= value <= 0xFF:
            self.buffer.append(UINT8)
            self.buffer.append(value)
        elif 0 <= value <= 0xFFFF:
            self.buffer.append(UINT16)
            self.buffer.extend(struct.pack(">H", value))
        elif 0 <= value <= 0xFFFFFFFF:
            self.buffer.append(UINT32)
            self.buffer.extend(struct.pack(">I", value))
        elif 0 <= value <= 0xFFFFFFFFFFFFFFFF:
            self.buffer.append(UINT64)
            self.buffer.extend(struct.pack(">Q", value))
        elif -128 <= value < 0:
            # Only -128..-33 reach here; -32..-1 took the fixint branch.
            self.buffer.append(INT8)
            self.buffer.extend(struct.pack(">b", value))
        elif -32768 <= value < 0:
            self.buffer.append(INT16)
            self.buffer.extend(struct.pack(">h", value))
        elif -2147483648 <= value < 0:
            self.buffer.append(INT32)
            self.buffer.extend(struct.pack(">i", value))
        else:
            # struct.pack raises if the value exceeds signed 64-bit range.
            self.buffer.append(INT64)
            self.buffer.extend(struct.pack(">q", value))

    def _encode_float(self, value: float) -> None:
        """Encode a float as big-endian IEEE-754 64-bit."""
        self.buffer.append(FLOAT64)
        self.buffer.extend(struct.pack(">d", value))

    def _encode_str(self, value: str) -> None:
        """Encode a string: length header, then UTF-8 bytes."""
        data = value.encode("utf-8")
        length = len(data)
        if length <= 31:
            # Fixstr: length lives in the marker's low 5 bits.
            self.buffer.append(0xA0 | length)
        elif length <= 0xFF:
            self.buffer.append(STR8)
            self.buffer.append(length)
        elif length <= 0xFFFF:
            self.buffer.append(STR16)
            self.buffer.extend(struct.pack(">H", length))
        else:
            self.buffer.append(STR32)
            self.buffer.extend(struct.pack(">I", length))
        self.buffer.extend(data)

    def _encode_bin(self, value: bytes) -> None:
        """Encode binary data: length header, then raw bytes."""
        length = len(value)
        if length <= 0xFF:
            self.buffer.append(BIN8)
            self.buffer.append(length)
        elif length <= 0xFFFF:
            self.buffer.append(BIN16)
            self.buffer.extend(struct.pack(">H", length))
        else:
            self.buffer.append(BIN32)
            self.buffer.extend(struct.pack(">I", length))
        self.buffer.extend(value)

    def _encode_array(self, value: list[Any]) -> None:
        """Encode an array: length header, then each element recursively."""
        length = len(value)
        if length <= 15:
            # Fixarray: length lives in the marker's low 4 bits.
            self.buffer.append(0x90 | length)
        elif length <= 0xFFFF:
            self.buffer.append(ARRAY16)
            self.buffer.extend(struct.pack(">H", length))
        else:
            self.buffer.append(ARRAY32)
            self.buffer.extend(struct.pack(">I", length))
        for item in value:
            self._encode_value(item)

    def _encode_map(self, value: dict[str, Any]) -> None:
        """Encode a map: length header, then alternating key/value pairs."""
        length = len(value)
        if length <= 15:
            # Fixmap: length lives in the marker's low 4 bits.
            self.buffer.append(0x80 | length)
        elif length <= 0xFFFF:
            self.buffer.append(MAP16)
            self.buffer.extend(struct.pack(">H", length))
        else:
            self.buffer.append(MAP32)
            self.buffer.extend(struct.pack(">I", length))
        for key, val in value.items():
            self._encode_value(key)
            self._encode_value(val)
class MsgPackDecoder:
    """MessagePack decoder.

    Reads sequentially from `data`; `pos` points at the next unread byte,
    so repeated decode() calls consume consecutive values.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        self.pos = 0

    def decode(self) -> Any:
        """Decode one MessagePack value starting at the current position.

        Raises ValueError on truncated input or an unrecognized marker.
        """
        if self.pos >= len(self.data):
            raise ValueError("Unexpected end of data")
        marker = self.data[self.pos]
        self.pos += 1
        # Positive fixint: the marker byte is the value.
        if marker <= FIXINT_POS_MAX:
            return marker
        # Fixmap: pair count in the low 4 bits.
        if 0x80 <= marker <= FIXMAP_MAX:
            return self._decode_map(marker & 0x0F)
        # Fixarray: element count in the low 4 bits.
        if 0x90 <= marker <= FIXARRAY_MAX:
            return self._decode_array(marker & 0x0F)
        # Fixstr: byte length in the low 5 bits.
        if 0xA0 <= marker <= FIXSTR_MAX:
            return self._decode_str(marker & 0x1F)
        # Negative fixint: reinterpret the marker byte as signed (-32..-1).
        if marker >= FIXINT_NEG_MIN:
            return struct.unpack(">b", bytes([marker]))[0]
        # Other types
        if marker == NIL:
            return None
        if marker == FALSE:
            return False
        if marker == TRUE:
            return True
        if marker == BIN8:
            return self._decode_bin(self._read_uint8())
        if marker == BIN16:
            return self._decode_bin(self._read_uint16())
        if marker == BIN32:
            return self._decode_bin(self._read_uint32())
        if marker == FLOAT32:
            return self._read_float32()
        if marker == FLOAT64:
            return self._read_float64()
        if marker == UINT8:
            return self._read_uint8()
        if marker == UINT16:
            return self._read_uint16()
        if marker == UINT32:
            return self._read_uint32()
        if marker == UINT64:
            return self._read_uint64()
        if marker == INT8:
            return self._read_int8()
        if marker == INT16:
            return self._read_int16()
        if marker == INT32:
            return self._read_int32()
        if marker == INT64:
            return self._read_int64()
        if marker == STR8:
            return self._decode_str(self._read_uint8())
        if marker == STR16:
            return self._decode_str(self._read_uint16())
        if marker == STR32:
            return self._decode_str(self._read_uint32())
        if marker == ARRAY16:
            return self._decode_array(self._read_uint16())
        if marker == ARRAY32:
            return self._decode_array(self._read_uint32())
        if marker == MAP16:
            return self._decode_map(self._read_uint16())
        if marker == MAP32:
            return self._decode_map(self._read_uint32())
        raise ValueError(f"Unknown marker: 0x{marker:02X}")

    def _read_bytes(self, n: int) -> bytes:
        """Read exactly n bytes; raises ValueError if fewer remain."""
        if self.pos + n > len(self.data):
            raise ValueError("Unexpected end of data")
        result = self.data[self.pos : self.pos + n]
        self.pos += n
        return result

    # Fixed-width big-endian readers; each consumes its own byte count.
    def _read_uint8(self) -> int:
        return self._read_bytes(1)[0]

    def _read_uint16(self) -> int:
        return struct.unpack(">H", self._read_bytes(2))[0]

    def _read_uint32(self) -> int:
        return struct.unpack(">I", self._read_bytes(4))[0]

    def _read_uint64(self) -> int:
        return struct.unpack(">Q", self._read_bytes(8))[0]

    def _read_int8(self) -> int:
        return struct.unpack(">b", self._read_bytes(1))[0]

    def _read_int16(self) -> int:
        return struct.unpack(">h", self._read_bytes(2))[0]

    def _read_int32(self) -> int:
        return struct.unpack(">i", self._read_bytes(4))[0]

    def _read_int64(self) -> int:
        return struct.unpack(">q", self._read_bytes(8))[0]

    def _read_float32(self) -> float:
        return struct.unpack(">f", self._read_bytes(4))[0]

    def _read_float64(self) -> float:
        return struct.unpack(">d", self._read_bytes(8))[0]

    def _decode_str(self, length: int) -> str:
        """Decode `length` bytes as UTF-8 text."""
        return self._read_bytes(length).decode("utf-8")

    def _decode_bin(self, length: int) -> bytes:
        return bytes(self._read_bytes(length))

    def _decode_array(self, length: int) -> list[Any]:
        """Decode `length` consecutive values into a list."""
        return [self.decode() for _ in range(length)]

    def _decode_map(self, length: int) -> dict[str, Any]:
        """Decode `length` key/value pairs into a dict."""
        result: dict[str, Any] = {}
        for _ in range(length):
            key = self.decode()
            value = self.decode()
            result[key] = value
        return result
def msgpack_encode(value: Any) -> bytes:
    """Serialize *value* into MessagePack bytes using a fresh encoder."""
    encoder = MsgPackEncoder()
    return encoder.encode(value)
def msgpack_decode(data: bytes) -> Any:
    """Deserialize the first MessagePack value found in *data*."""
    decoder = MsgPackDecoder(data)
    return decoder.decode()
def msgpack_size(value: Any) -> int:
    """Return how many bytes *value* occupies once MessagePack-encoded."""
    encoded = msgpack_encode(value)
    return len(encoded)
def bytes_to_hex(data: bytes) -> str:
    """Render *data* as a lowercase hexadecimal string (two chars per byte)."""
    return "".join(format(byte, "02x") for byte in data)
def hex_to_bytes(hex_str: str) -> bytes:
    """Parse a hexadecimal string back into raw bytes."""
    return bytes.fromhex(hex_str)
def compare_encoded(val1: Any, val2: Any) -> bool:
    """True when both values produce identical MessagePack byte streams."""
    left = msgpack_encode(val1)
    right = msgpack_encode(val2)
    return left == right
def encode_batch(values: list[Any]) -> bytes:
    """Pack several values into one MessagePack array payload."""
    return msgpack_encode(values)
def decode_batch(data: bytes) -> list[Any]:
    """Decode a MessagePack array payload; raises ValueError for non-arrays."""
    decoded = msgpack_decode(data)
    if isinstance(decoded, list):
        return decoded
    raise ValueError("Expected array")
def simulate_msgpack(operations: list[str]) -> list[str]:
    """Simulate MessagePack operations.

    Each operation is "cmd" or "cmd:arg". Supported commands:
      encode:<json>    -> hex string of the encoding (cached in context)
      decode:<hex>     -> JSON text of the decoded value
      size             -> byte length of the most recently encoded value
      roundtrip:<json> -> "ok" if encode/decode round-trips, else "fail"

    Returns one result string per recognized operation; unrecognized
    commands produce no output (unchanged behavior).
    """
    # Fix: the original re-ran `import json` inside the loop for every
    # encode/decode/roundtrip operation; hoist it to a single import.
    import json

    results = []
    context: dict[str, Any] = {}
    for op in operations:
        parts = op.split(":", 1)
        cmd = parts[0]
        if cmd == "encode":
            value = json.loads(parts[1])
            encoded = msgpack_encode(value)
            context["encoded"] = encoded
            results.append(bytes_to_hex(encoded))
        elif cmd == "decode":
            data = hex_to_bytes(parts[1])
            value = msgpack_decode(data)
            results.append(json.dumps(value))
        elif cmd == "size":
            results.append(str(len(context.get("encoded", b""))))
        elif cmd == "roundtrip":
            value = json.loads(parts[1])
            encoded = msgpack_encode(value)
            decoded = msgpack_decode(encoded)
            results.append("ok" if value == decoded else "fail")
    return results
def main() -> int:
    """CLI entry point.

    Reads from stdin, writes to stdout:
      encode -> raw MessagePack bytes for JSON read from stdin
      decode -> pretty-printed JSON for MessagePack bytes on stdin
      size   -> encoded byte count for JSON read from stdin
      hex    -> hex dump of the encoding for JSON read from stdin

    Returns a process exit status: 0 on success, 1 on usage error.
    """
    import json

    if len(sys.argv) < 2:
        print("Usage: serial_msgpack_cli.py <command> [args...]")
        # Fix: "hex" was implemented below but missing from the help text.
        print("Commands: encode, decode, size, hex")
        return 1
    cmd = sys.argv[1]
    if cmd == "encode":
        data = json.loads(sys.stdin.read())
        encoded = msgpack_encode(data)
        sys.stdout.buffer.write(encoded)
    elif cmd == "decode":
        data = sys.stdin.buffer.read()
        decoded = msgpack_decode(data)
        print(json.dumps(decoded, indent=2))
    elif cmd == "size":
        data = json.loads(sys.stdin.read())
        encoded = msgpack_encode(data)
        print(len(encoded))
    elif cmd == "hex":
        data = json.loads(sys.stdin.read())
        encoded = msgpack_encode(data)
        print(bytes_to_hex(encoded))
    else:
        # Fix: an unknown command previously exited 0 with no output.
        print(f"Unknown command: {cmd}", file=sys.stderr)
        return 1
    return 0
if __name__ == "__main__":
    # Script entry: propagate main()'s return value as the exit status.
    sys.exit(main())
| false
|
serial_msgpack
| 452
| 0
|
[
"class_definition",
"exception_handling",
"stdin_usage",
"decorator"
] | 0.612
|
Type inference hints:
Hint: list[Any] for variable 'parts' [High] (usage patterns suggest this type)
Hint: list[Any] for variable 'results' [High] (usage patterns suggest this type)
Hint: str for variable 'op' [Medium] (usage patterns suggest this type)
Type inference hints:
Hint: list[Any] for variable 'encoded' [High] (usage patterns suggest this type)
Migration Suggestions
══════════════════════════════════════════════════
[1] [Warning] Consider filter_map() for conditional transformation
|
|
example_serial_msgpack
|
test_serial_msgpack_cli.py
|
"""Tests for serial_msgpack_cli.py"""
from serial_msgpack_cli import (
MsgPackDecoder,
MsgPackEncoder,
bytes_to_hex,
compare_encoded,
decode_batch,
encode_batch,
hex_to_bytes,
msgpack_decode,
msgpack_encode,
msgpack_size,
simulate_msgpack,
)
class TestMsgPackEncode:
    """msgpack_encode: each value type gets its expected marker byte."""
    def test_nil(self):
        encoded = msgpack_encode(None)
        assert encoded == bytes([0xC0])
    def test_true(self):
        encoded = msgpack_encode(True)
        assert encoded == bytes([0xC3])
    def test_false(self):
        encoded = msgpack_encode(False)
        assert encoded == bytes([0xC2])
    def test_positive_fixint(self):
        encoded = msgpack_encode(42)
        assert encoded == bytes([42])
    def test_negative_fixint(self):
        encoded = msgpack_encode(-1)
        assert encoded == bytes([0xFF])
    def test_uint8(self):
        encoded = msgpack_encode(200)
        assert encoded[0] == 0xCC
        assert encoded[1] == 200
    def test_uint16(self):
        encoded = msgpack_encode(1000)
        assert encoded[0] == 0xCD
    def test_uint32(self):
        encoded = msgpack_encode(100000)
        assert encoded[0] == 0xCE
    def test_int8(self):
        encoded = msgpack_encode(-100)
        assert encoded[0] == 0xD0
    def test_float(self):
        encoded = msgpack_encode(3.14)
        assert encoded[0] == 0xCB
    def test_fixstr(self):
        encoded = msgpack_encode("hello")
        assert (encoded[0] & 0xA0) == 0xA0
    def test_str8(self):
        long_str = "a" * 100
        encoded = msgpack_encode(long_str)
        assert encoded[0] == 0xD9
    def test_fixarray(self):
        encoded = msgpack_encode([1, 2, 3])
        assert (encoded[0] & 0x90) == 0x90
    def test_fixmap(self):
        encoded = msgpack_encode({"a": 1})
        assert (encoded[0] & 0x80) == 0x80
    def test_bin(self):
        encoded = msgpack_encode(b"\x00\x01\x02")
        assert encoded[0] == 0xC4
class TestMsgPackDecode:
    """msgpack_decode: hand-built byte sequences decode to the expected values."""
    def test_nil(self):
        decoded = msgpack_decode(bytes([0xC0]))
        assert decoded is None
    def test_true(self):
        decoded = msgpack_decode(bytes([0xC3]))
        assert decoded is True
    def test_false(self):
        decoded = msgpack_decode(bytes([0xC2]))
        assert decoded is False
    def test_positive_fixint(self):
        decoded = msgpack_decode(bytes([42]))
        assert decoded == 42
    def test_negative_fixint(self):
        decoded = msgpack_decode(bytes([0xFF]))
        assert decoded == -1
    def test_uint8(self):
        decoded = msgpack_decode(bytes([0xCC, 200]))
        assert decoded == 200
    def test_string(self):
        encoded = msgpack_encode("hello")
        decoded = msgpack_decode(encoded)
        assert decoded == "hello"
    def test_array(self):
        encoded = msgpack_encode([1, 2, 3])
        decoded = msgpack_decode(encoded)
        assert decoded == [1, 2, 3]
    def test_map(self):
        encoded = msgpack_encode({"a": 1, "b": 2})
        decoded = msgpack_decode(encoded)
        assert decoded == {"a": 1, "b": 2}
class TestRoundTrip:
    """encode-then-decode returns the original value for every supported type."""
    def test_nil(self):
        assert msgpack_decode(msgpack_encode(None)) is None
    def test_bool(self):
        assert msgpack_decode(msgpack_encode(True)) is True
        assert msgpack_decode(msgpack_encode(False)) is False
    def test_integers(self):
        # Values straddle the fixint/uint8/uint16/int8/int16 width boundaries.
        for i in [0, 1, 127, 128, 255, 256, 65535, 65536, -1, -32, -128, -32768]:
            assert msgpack_decode(msgpack_encode(i)) == i
    def test_float(self):
        value = 3.14159
        decoded = msgpack_decode(msgpack_encode(value))
        assert abs(decoded - value) < 0.0001
    def test_string(self):
        assert msgpack_decode(msgpack_encode("")) == ""
        assert msgpack_decode(msgpack_encode("hello")) == "hello"
        assert msgpack_decode(msgpack_encode("a" * 100)) == "a" * 100
    def test_binary(self):
        data = bytes([0, 1, 2, 255])
        assert msgpack_decode(msgpack_encode(data)) == data
    def test_array(self):
        assert msgpack_decode(msgpack_encode([])) == []
        assert msgpack_decode(msgpack_encode([1, 2, 3])) == [1, 2, 3]
    def test_nested_array(self):
        data = [[1, 2], [3, 4]]
        assert msgpack_decode(msgpack_encode(data)) == data
    def test_map(self):
        assert msgpack_decode(msgpack_encode({})) == {}
        data = {"a": 1, "b": "hello"}
        assert msgpack_decode(msgpack_encode(data)) == data
    def test_nested_map(self):
        data = {"outer": {"inner": 42}}
        assert msgpack_decode(msgpack_encode(data)) == data
    def test_complex(self):
        data = {
            "name": "test",
            "values": [1, 2, 3],
            "nested": {"a": True, "b": None},
        }
        assert msgpack_decode(msgpack_encode(data)) == data
class TestMsgPackSize:
    """msgpack_size: encoded byte counts for small values."""
    def test_small(self):
        # nil, bool, and positive fixint are single marker bytes.
        assert msgpack_size(None) == 1
        assert msgpack_size(True) == 1
        assert msgpack_size(42) == 1
    def test_string(self):
        size = msgpack_size("hello")
        assert size == 6  # 1 byte header + 5 bytes
    def test_array(self):
        size = msgpack_size([1, 2, 3])
        assert size == 4  # 1 byte header + 3 bytes
class TestHexConversion:
    """bytes_to_hex / hex_to_bytes are exact inverses."""
    def test_to_hex(self):
        assert bytes_to_hex(bytes([0xDE, 0xAD])) == "dead"
    def test_from_hex(self):
        assert hex_to_bytes("dead") == bytes([0xDE, 0xAD])
    def test_roundtrip(self):
        data = bytes([1, 2, 3, 255])
        assert hex_to_bytes(bytes_to_hex(data)) == data
class TestCompareEncoded:
    """compare_encoded: equality of values via their encoded byte streams."""
    def test_equal(self):
        assert compare_encoded({"a": 1}, {"a": 1})
    def test_different(self):
        assert not compare_encoded({"a": 1}, {"a": 2})
class TestBatch:
    """encode_batch / decode_batch round-trip a list of mixed values."""
    def test_encode_decode(self):
        values = [1, "hello", True]
        encoded = encode_batch(values)
        decoded = decode_batch(encoded)
        assert decoded == values
class TestEncoder:
    """MsgPackEncoder instances are reusable across encode() calls."""
    def test_reusable(self):
        encoder = MsgPackEncoder()
        data1 = encoder.encode({"a": 1})
        data2 = encoder.encode({"b": 2})
        assert msgpack_decode(data1) == {"a": 1}
        assert msgpack_decode(data2) == {"b": 2}
class TestDecoder:
    """MsgPackDecoder can be driven directly, outside the helper functions."""
    def test_decoder(self):
        data = msgpack_encode([1, 2, 3])
        decoder = MsgPackDecoder(data)
        result = decoder.decode()
        assert result == [1, 2, 3]
class TestSimulateMsgpack:
    """simulate_msgpack: scripted encode/decode/size/roundtrip operations."""
    def test_encode(self):
        result = simulate_msgpack(['encode:{"a":1}'])
        assert len(result) == 1
        assert len(result[0]) > 0
    def test_decode(self):
        encoded = bytes_to_hex(msgpack_encode({"a": 1}))
        result = simulate_msgpack([f"decode:{encoded}"])
        assert '"a"' in result[0]
    def test_roundtrip(self):
        result = simulate_msgpack(['roundtrip:{"a":1}'])
        assert result[0] == "ok"
    def test_size(self):
        result = simulate_msgpack(['encode:{"a":1}', "size"])
        assert int(result[1]) > 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_serial_msgpack/test_serial_msgpack_cli.py (7021 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_serial_msgpack/test_serial_msgpack_cli.rs (15053 bytes)
⏱️ Parse time: 50ms
📊 Throughput: 135.2 KB/s
⏱️ Total time: 50ms
| true
|
serial_msgpack
| 252
| 5
|
[
"class_definition"
] | 0.612
| null |
example_serial_toml
|
serial_toml_cli.py
|
"""TOML Parser and Writer CLI.
Demonstrates TOML parsing, writing, and manipulation patterns.
Implements a subset of TOML format for educational purposes.
"""
import re
import sys
from dataclasses import dataclass
from datetime import date, datetime, time
from typing import Any
@dataclass
class TomlToken:
    """Token from TOML lexer."""
    # Token category, e.g. "string", "integer", "table", "newline".
    kind: str
    # Token text; for strings the escapes have already been resolved.
    value: str
    # 1-based source line where the token starts.
    line: int
    # 1-based source column where the token starts.
    col: int
class TomlLexer:
    """TOML lexer: turns raw text into a flat list of TomlToken objects."""

    def __init__(self, text: str) -> None:
        self.text = text
        self.pos = 0
        self.line = 1
        self.col = 1

    def tokenize(self) -> list[TomlToken]:
        """Tokenize TOML text."""
        tokens = []
        while self.pos < len(self.text):
            ch = self.text[self.pos]
            # Skip whitespace (not newlines)
            if ch in " \t":
                self._advance()
                continue
            # Newline
            if ch == "\n":
                tokens.append(TomlToken("newline", "\n", self.line, self.col))
                self._advance()
                self.line += 1
                self.col = 1
                continue
            # Comment — consumed to end of line, no token emitted.
            if ch == "#":
                while self.pos < len(self.text) and self.text[self.pos] != "\n":
                    self._advance()
                continue
            # Table header
            if ch == "[":
                tokens.append(self._read_table_header())
                continue
            # String
            if ch in "\"'":
                tokens.append(self._read_string())
                continue
            # Number or date
            if ch.isdigit() or ch == "-" or ch == "+":
                tokens.append(self._read_number_or_date())
                continue
            # Boolean or identifier
            if ch.isalpha() or ch == "_":
                tokens.append(self._read_identifier())
                continue
            # Operators
            if ch == "=":
                tokens.append(TomlToken("equals", "=", self.line, self.col))
                self._advance()
                continue
            if ch == ",":
                tokens.append(TomlToken("comma", ",", self.line, self.col))
                self._advance()
                continue
            if ch == "{":
                tokens.append(TomlToken("lbrace", "{", self.line, self.col))
                self._advance()
                continue
            if ch == "}":
                tokens.append(TomlToken("rbrace", "}", self.line, self.col))
                self._advance()
                continue
            # Any other character is silently skipped.
            self._advance()
        return tokens

    def _advance(self) -> str:
        """Consume and return the current character, advancing pos and col."""
        ch = self.text[self.pos]
        self.pos += 1
        self.col += 1
        return ch

    def _read_table_header(self) -> TomlToken:
        """Read table header [name] or [[name]]."""
        start_line, start_col = self.line, self.col
        self._advance()  # Skip first [
        is_array = False
        if self.pos < len(self.text) and self.text[self.pos] == "[":
            is_array = True
            self._advance()
        name = ""
        while self.pos < len(self.text) and self.text[self.pos] != "]":
            name += self._advance()
        self._advance()  # Skip ]
        if is_array and self.pos < len(self.text) and self.text[self.pos] == "]":
            self._advance()
        kind = "array_table" if is_array else "table"
        return TomlToken(kind, name.strip(), start_line, start_col)

    def _read_string(self) -> TomlToken:
        """Read string literal (single- or double-quoted, same escape rules)."""
        start_line, start_col = self.line, self.col
        quote = self._advance()
        value = ""
        while self.pos < len(self.text) and self.text[self.pos] != quote:
            ch = self._advance()
            if ch == "\\":
                # Resolve \n, \t, \\, \"; any other escape keeps the char.
                if self.pos < len(self.text):
                    escaped = self._advance()
                    if escaped == "n":
                        value += "\n"
                    elif escaped == "t":
                        value += "\t"
                    elif escaped == "\\":
                        value += "\\"
                    elif escaped == '"':
                        value += '"'
                    else:
                        value += escaped
            else:
                value += ch
        if self.pos < len(self.text):
            self._advance()  # Skip closing quote
        return TomlToken("string", value, start_line, start_col)

    def _read_number_or_date(self) -> TomlToken:
        """Read number or date/time literal; the kind is decided afterwards."""
        start_line, start_col = self.line, self.col
        value = ""
        while self.pos < len(self.text):
            ch = self.text[self.pos]
            if ch in "0123456789+-.:TZeE_":
                value += self._advance()
            elif ch.isalpha():
                value += self._advance()
            else:
                break
        # Remove underscores from numbers
        clean_value = value.replace("_", "")
        # Determine type: datetime/date/time by shape, then float, else int.
        if re.match(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", clean_value):
            return TomlToken("datetime", clean_value, start_line, start_col)
        if re.match(r"^\d{4}-\d{2}-\d{2}$", clean_value):
            return TomlToken("date", clean_value, start_line, start_col)
        if re.match(r"^\d{2}:\d{2}:\d{2}", clean_value):
            return TomlToken("time", clean_value, start_line, start_col)
        if "." in clean_value or "e" in clean_value.lower():
            return TomlToken("float", clean_value, start_line, start_col)
        return TomlToken("integer", clean_value, start_line, start_col)

    def _read_identifier(self) -> TomlToken:
        """Read identifier or boolean ("true"/"false" become boolean tokens)."""
        start_line, start_col = self.line, self.col
        value = ""
        while self.pos < len(self.text):
            ch = self.text[self.pos]
            if ch.isalnum() or ch in "_-":
                value += self._advance()
            else:
                break
        if value == "true":
            return TomlToken("boolean", "true", start_line, start_col)
        if value == "false":
            return TomlToken("boolean", "false", start_line, start_col)
        return TomlToken("identifier", value, start_line, start_col)
class TomlParser:
    """TOML parser: consumes a token list and builds a nested dict."""

    def __init__(self, tokens: list[TomlToken]) -> None:
        self.tokens = tokens
        self.pos = 0
        self.result: dict[str, Any] = {}
        # Table that subsequent key/value pairs are written into.
        self.current_table: dict[str, Any] = self.result

    def parse(self) -> dict[str, Any]:
        """Parse tokens into a dict."""
        while self.pos < len(self.tokens):
            token = self.tokens[self.pos]
            if token.kind == "newline":
                self.pos += 1
                continue
            if token.kind == "table":
                self._handle_table(token.value)
                self.pos += 1
                continue
            if token.kind == "array_table":
                self._handle_array_table(token.value)
                self.pos += 1
                continue
            if token.kind == "identifier":
                # _handle_key_value advances self.pos itself.
                self._handle_key_value()
                continue
            # Any other statement-level token is skipped.
            self.pos += 1
        return self.result

    def _handle_table(self, name: str) -> None:
        """Handle table header: create/descend the dotted path from the root."""
        parts = name.split(".")
        self.current_table = self.result
        for part in parts:
            if part not in self.current_table:
                self.current_table[part] = {}
            self.current_table = self.current_table[part]

    def _handle_array_table(self, name: str) -> None:
        """Handle array-of-tables header: append a fresh table to the list."""
        parts = name.split(".")
        current = self.result
        for _i, part in enumerate(parts[:-1]):
            if part not in current:
                current[part] = {}
            current = current[part]
        final = parts[-1]
        if final not in current:
            current[final] = []
        new_table: dict[str, Any] = {}
        current[final].append(new_table)
        self.current_table = new_table

    def _handle_key_value(self) -> None:
        """Handle key = value, storing the result in the current table."""
        key = self.tokens[self.pos].value
        self.pos += 1
        # Skip newlines
        while self.pos < len(self.tokens) and self.tokens[self.pos].kind == "newline":
            self.pos += 1
        # Expect equals
        if self.pos < len(self.tokens) and self.tokens[self.pos].kind == "equals":
            self.pos += 1
        # Skip newlines
        while self.pos < len(self.tokens) and self.tokens[self.pos].kind == "newline":
            self.pos += 1
        # Get value
        if self.pos < len(self.tokens):
            value = self._parse_value()
            self.current_table[key] = value

    def _parse_value(self) -> Any:
        """Parse a value token into its Python representation."""
        token = self.tokens[self.pos]
        self.pos += 1
        if token.kind == "string":
            return token.value
        if token.kind == "integer":
            return int(token.value)
        if token.kind == "float":
            return float(token.value)
        if token.kind == "boolean":
            return token.value == "true"
        if token.kind == "datetime":
            # Normalize a trailing Z so fromisoformat accepts it.
            return datetime.fromisoformat(token.value.replace("Z", "+00:00"))
        if token.kind == "date":
            return date.fromisoformat(token.value)
        if token.kind == "time":
            return time.fromisoformat(token.value)
        if token.kind == "lbrace":
            return self._parse_inline_table()
        # Unrecognized kinds fall back to the raw token text.
        return token.value

    def _parse_inline_table(self) -> dict[str, Any]:
        """Parse inline table { key = value, ... }.

        NOTE(review): a token that is neither identifier, comma, nor rbrace
        is not consumed by any branch here, which would loop forever on
        malformed input — confirm whether that can occur in practice.
        """
        result: dict[str, Any] = {}
        while self.pos < len(self.tokens):
            token = self.tokens[self.pos]
            if token.kind == "rbrace":
                self.pos += 1
                break
            if token.kind == "comma":
                self.pos += 1
                continue
            if token.kind == "identifier":
                key = token.value
                self.pos += 1
                if self.pos < len(self.tokens) and self.tokens[self.pos].kind == "equals":
                    self.pos += 1
                if self.pos < len(self.tokens):
                    result[key] = self._parse_value()
        return result
def toml_parse(text: str) -> dict[str, Any]:
    """Tokenize and parse TOML text into a nested dict."""
    tokens = TomlLexer(text).tokenize()
    return TomlParser(tokens).parse()
def toml_dump(data: dict[str, Any], prefix: str = "") -> str:
    """Serialize a nested dict to TOML text.

    Scalar entries are emitted first as `key = value` lines; each sub-dict
    becomes a dotted [table] section rendered recursively.
    """
    scalar_lines: list[str] = []
    subtables: list[tuple[str, dict[str, Any]]] = []
    for key, value in data.items():
        if isinstance(value, dict):
            full_name = f"{prefix}.{key}" if prefix else key
            subtables.append((full_name, value))
        else:
            scalar_lines.append(f"{key} = {_value_to_toml(value)}")
    out = "\n".join(scalar_lines)
    for full_name, body in subtables:
        if out:
            out += "\n\n"
        out += f"[{full_name}]\n"
        out += toml_dump(body, full_name)
    return out
def _value_to_toml(value: Any) -> str:
"""Convert a value to TOML representation."""
if isinstance(value, bool):
return "true" if value else "false"
if isinstance(value, int):
return str(value)
if isinstance(value, float):
return str(value)
if isinstance(value, str):
escaped = value.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
return f'"{escaped}"'
if isinstance(value, datetime):
return value.isoformat()
if isinstance(value, date):
return value.isoformat()
if isinstance(value, time):
return value.isoformat()
if isinstance(value, list):
items = ", ".join(_value_to_toml(v) for v in value)
return f"[{items}]"
if isinstance(value, dict):
items = ", ".join(f"{k} = {_value_to_toml(v)}" for k, v in value.items())
return f"{{ {items} }}"
return str(value)
def toml_get(data: dict[str, Any], path: str) -> Any:
    """Look up a value by dotted path; None when any segment is missing."""
    node: Any = data
    for segment in path.split("."):
        if not isinstance(node, dict) or segment not in node:
            return None
        node = node[segment]
    return node
def toml_set(data: dict[str, Any], path: str, value: Any) -> dict[str, Any]:
    """Return a copy of *data* with *value* stored at the dotted *path*.

    The input is deep-copied via a JSON round-trip (non-JSON values are
    stringified by default=str), so the original dict is left untouched.
    """
    import json

    copied = json.loads(json.dumps(data, default=str))
    *parents, leaf = path.split(".")
    node = copied
    for segment in parents:
        if segment not in node:
            node[segment] = {}
        node = node[segment]
    node[leaf] = value
    return copied
def toml_merge(base: dict[str, Any], overlay: dict[str, Any]) -> dict[str, Any]:
    """Merge two TOML documents: overlay wins; dicts merge recursively.

    Both sides are copied via JSON round-trips (default=str), so neither
    input is mutated.
    """
    import json

    merged = json.loads(json.dumps(base, default=str))
    for key, value in overlay.items():
        nested = key in merged and isinstance(merged[key], dict) and isinstance(value, dict)
        if nested:
            merged[key] = toml_merge(merged[key], value)
        else:
            merged[key] = json.loads(json.dumps(value, default=str))
    return merged
def toml_tables(data: dict[str, Any], prefix: str = "") -> list[str]:
    """List all (dotted) table names found in TOML data, depth-first."""
    names: list[str] = []
    for key, value in data.items():
        if not isinstance(value, dict):
            continue
        full_name = f"{prefix}.{key}" if prefix else key
        names.append(full_name)
        names.extend(toml_tables(value, full_name))
    return names
def simulate_toml(operations: list[str]) -> list[str]:
    """Run a scripted list of "cmd[:arg]" TOML operations, one output per op."""
    outputs = []
    state: dict[str, Any] = {}
    for operation in operations:
        pieces = operation.split(":", 1)
        command = pieces[0]
        if command == "parse":
            state["data"] = toml_parse(pieces[1])
            outputs.append("ok")
        elif command == "get":
            found = toml_get(state.get("data", {}), pieces[1])
            outputs.append("null" if found is None else str(found))
        elif command == "set":
            kv = pieces[1].split("=", 1)
            state["data"] = toml_set(state.get("data", {}), kv[0], kv[1])
            outputs.append("ok")
        elif command == "tables":
            outputs.append(",".join(toml_tables(state.get("data", {}))))
        elif command == "dump":
            outputs.append(toml_dump(state.get("data", {})))
    return outputs
def main() -> int:
    """CLI entry point.

    Commands (TOML text is read from stdin unless noted):
      parse        -> print the document as indented JSON
      get <path>   -> print the value at a dotted path
      dump         -> read JSON from stdin, print it as TOML
      tables       -> print one table name per line

    Returns a process exit status: 0 on success, 1 on usage error.
    """
    if len(sys.argv) < 2:
        print("Usage: serial_toml_cli.py <command> [args...]")
        print("Commands: parse, get, dump, tables")
        return 1
    cmd = sys.argv[1]
    if cmd == "parse":
        text = sys.stdin.read()
        data = toml_parse(text)
        import json
        # default=str renders dates/times that json cannot serialize.
        print(json.dumps(data, indent=2, default=str))
    elif cmd == "get":
        if len(sys.argv) < 3:
            print("Usage: get <path>", file=sys.stderr)
            return 1
        text = sys.stdin.read()
        data = toml_parse(text)
        value = toml_get(data, sys.argv[2])
        print(value)
    elif cmd == "dump":
        import json
        data = json.loads(sys.stdin.read())
        print(toml_dump(data))
    elif cmd == "tables":
        text = sys.stdin.read()
        data = toml_parse(text)
        for table in toml_tables(data):
            print(table)
    else:
        # Fix: an unknown command previously exited 0 with no output.
        print(f"Unknown command: {cmd}", file=sys.stderr)
        return 1
    return 0
if __name__ == "__main__":
    # Script entry: propagate main()'s return value as the exit status.
    sys.exit(main())
| false
|
serial_toml
| 532
| 0
|
[
"class_definition",
"stdin_usage",
"decorator"
] | 0.612
|
Type inference hints:
Hint: list[Any] for variable 'lines' [High] (usage patterns suggest this type)
Hint: list[Any] for variable 'tables' [High] (usage patterns suggest this type)
Hint: str for variable 'key' [Medium] (usage patterns suggest this type)
Hint: str for variable 'table_name' [Medium] (usage patterns suggest this type)
Hint: int for variable 'result' [High] (usage patterns suggest this type)
Type inference hints:
Hint: str for variable 'escaped' [Medium] (usage patterns suggest thi
|
|
example_serial_toml
|
test_serial_toml_cli.py
|
"""Tests for serial_toml_cli.py"""
from serial_toml_cli import (
TomlLexer,
simulate_toml,
toml_dump,
toml_get,
toml_merge,
toml_parse,
toml_set,
toml_tables,
)
class TestTomlLexer:
    """TomlLexer: each construct yields the expected token kind; comments are dropped."""
    def test_simple_key_value(self):
        lexer = TomlLexer('name = "test"')
        tokens = lexer.tokenize()
        assert any(t.kind == "identifier" for t in tokens)
        assert any(t.kind == "string" for t in tokens)
    def test_integer(self):
        lexer = TomlLexer("port = 8080")
        tokens = lexer.tokenize()
        assert any(t.kind == "integer" and t.value == "8080" for t in tokens)
    def test_float(self):
        lexer = TomlLexer("pi = 3.14")
        tokens = lexer.tokenize()
        assert any(t.kind == "float" for t in tokens)
    def test_boolean(self):
        lexer = TomlLexer("enabled = true")
        tokens = lexer.tokenize()
        assert any(t.kind == "boolean" and t.value == "true" for t in tokens)
    def test_table_header(self):
        lexer = TomlLexer("[database]")
        tokens = lexer.tokenize()
        assert any(t.kind == "table" for t in tokens)
    def test_array_table(self):
        lexer = TomlLexer("[[servers]]")
        tokens = lexer.tokenize()
        assert any(t.kind == "array_table" for t in tokens)
    def test_comment(self):
        lexer = TomlLexer("# this is a comment\nname = 'test'")
        tokens = lexer.tokenize()
        assert not any(t.kind == "comment" for t in tokens)
class TestTomlParser:
    """toml_parse: scalars, tables, nesting, and inline tables produce nested dicts."""
    def test_simple(self):
        data = toml_parse('name = "test"')
        assert data["name"] == "test"
    def test_integer(self):
        data = toml_parse("port = 8080")
        assert data["port"] == 8080
    def test_float(self):
        data = toml_parse("pi = 3.14")
        assert abs(data["pi"] - 3.14) < 0.001
    def test_boolean(self):
        data = toml_parse("enabled = true\ndisabled = false")
        assert data["enabled"] is True
        assert data["disabled"] is False
    def test_table(self):
        data = toml_parse("[database]\nhost = 'localhost'\nport = 5432")
        assert data["database"]["host"] == "localhost"
        assert data["database"]["port"] == 5432
    def test_nested_table(self):
        data = toml_parse("[servers.alpha]\nip = '10.0.0.1'")
        assert data["servers"]["alpha"]["ip"] == "10.0.0.1"
    def test_multiple_tables(self):
        data = toml_parse("[a]\nx = 1\n\n[b]\ny = 2")
        assert data["a"]["x"] == 1
        assert data["b"]["y"] == 2
    def test_inline_table(self):
        data = toml_parse("point = { x = 1, y = 2 }")
        assert data["point"]["x"] == 1
        assert data["point"]["y"] == 2
class TestTomlDump:
def test_simple(self):
data = {"name": "test"}
result = toml_dump(data)
assert 'name = "test"' in result
def test_integer(self):
data = {"port": 8080}
result = toml_dump(data)
assert "port = 8080" in result
def test_boolean(self):
data = {"enabled": True}
result = toml_dump(data)
assert "enabled = true" in result
def test_nested(self):
data = {"database": {"host": "localhost"}}
result = toml_dump(data)
assert "[database]" in result
assert 'host = "localhost"' in result
class TestTomlGet:
def test_simple(self):
data = {"name": "test"}
assert toml_get(data, "name") == "test"
def test_nested(self):
data = {"database": {"host": "localhost"}}
assert toml_get(data, "database.host") == "localhost"
def test_missing(self):
data = {"name": "test"}
assert toml_get(data, "missing") is None
class TestTomlSet:
def test_simple(self):
data = {"name": "old"}
result = toml_set(data, "name", "new")
assert result["name"] == "new"
def test_nested(self):
data = {"database": {"host": "old"}}
result = toml_set(data, "database.host", "new")
assert result["database"]["host"] == "new"
def test_new_key(self):
data = {}
result = toml_set(data, "name", "test")
assert result["name"] == "test"
class TestTomlMerge:
def test_simple(self):
base = {"a": 1}
overlay = {"b": 2}
result = toml_merge(base, overlay)
assert result == {"a": 1, "b": 2}
def test_override(self):
base = {"a": 1}
overlay = {"a": 2}
result = toml_merge(base, overlay)
assert result["a"] == 2
def test_deep(self):
base = {"db": {"host": "old", "port": 5432}}
overlay = {"db": {"host": "new"}}
result = toml_merge(base, overlay)
assert result["db"]["host"] == "new"
assert result["db"]["port"] == 5432
class TestTomlTables:
def test_flat(self):
data = {"database": {"host": "localhost"}}
tables = toml_tables(data)
assert "database" in tables
def test_nested(self):
data = {"servers": {"alpha": {"ip": "10.0.0.1"}}}
tables = toml_tables(data)
assert "servers" in tables
assert "servers.alpha" in tables
class TestSimulateToml:
def test_parse(self):
result = simulate_toml(['parse:name = "test"'])
assert result == ["ok"]
def test_get(self):
result = simulate_toml(['parse:name = "test"', "get:name"])
assert result[1] == "test"
def test_set(self):
result = simulate_toml(['parse:name = "old"', "set:name=new", "get:name"])
assert result[2] == "new"
def test_tables(self):
result = simulate_toml(["parse:[database]\nhost = 'localhost'", "tables"])
assert "database" in result[1]
class TestTomlRoundTrip:
def test_roundtrip(self):
original = {"name": "test", "port": 8080, "enabled": True}
dumped = toml_dump(original)
parsed = toml_parse(dumped)
assert parsed["name"] == original["name"]
assert parsed["port"] == original["port"]
assert parsed["enabled"] == original["enabled"]
def test_nested_roundtrip(self):
original = {"database": {"host": "localhost", "port": 5432}}
dumped = toml_dump(original)
parsed = toml_parse(dumped)
assert parsed["database"]["host"] == original["database"]["host"]
class TestTomlEscaping:
def test_newline_in_string(self):
data = {"text": "line1\nline2"}
dumped = toml_dump(data)
assert "\\n" in dumped
| false
|
serial_toml
| 217
| 0
|
[
"class_definition"
] | 0.612
|
Error: Expression type not yet supported: GeneratorExp { element: Binary { op: Eq, left: Attribute { value: Var("t"), attr: "kind" }, right: Literal(String("identifier")) }, generators: [HirComprehension { target: "t", iter: Var("tokens"), conditions: [] }] }
|
|
example_set_comprehension
|
set_comprehension_cli.py
|
#!/usr/bin/env python3
"""Set Comprehension CLI.
Set comprehension patterns with transforms and filtering.
"""
import argparse
import sys
def unique_squares(items: list[int]) -> set[int]:
    """Return the set of distinct squares of the given integers."""
    squares: set[int] = set()
    for value in items:
        squares.add(value * value)
    return squares
# --- Small set-comprehension helpers: each builds a set in a single pass ---
def unique_lengths(words: list[str]) -> set[int]:
    """Get unique lengths of words."""
    return {len(w) for w in words}
def unique_first_chars(words: list[str]) -> set[str]:
    """Get unique first characters."""
    # Empty strings are filtered out to avoid IndexError on w[0]
    return {w[0] for w in words if w}
def unique_last_chars(words: list[str]) -> set[str]:
    """Get unique last characters."""
    # Empty strings are filtered out to avoid IndexError on w[-1]
    return {w[-1] for w in words if w}
def even_numbers(items: list[int]) -> set[int]:
    """Get unique even numbers."""
    return {x for x in items if x % 2 == 0}
def odd_numbers(items: list[int]) -> set[int]:
    """Get unique odd numbers."""
    # Python's % yields 1 for negative odd values too, so negatives are included
    return {x for x in items if x % 2 == 1}
def positive_numbers(items: list[int]) -> set[int]:
    """Get unique positive numbers (strictly > 0)."""
    return {x for x in items if x > 0}
def negative_numbers(items: list[int]) -> set[int]:
    """Get unique negative numbers (strictly < 0)."""
    return {x for x in items if x < 0}
def divisible_by(items: list[int], divisor: int) -> set[int]:
    """Get numbers divisible by divisor.

    Raises ZeroDivisionError when divisor is 0.
    """
    return {x for x in items if x % divisor == 0}
def in_range(items: list[int], min_val: int, max_val: int) -> set[int]:
    """Get numbers in range (inclusive on both ends)."""
    return {x for x in items if min_val <= x <= max_val}
def vowels_in_text(text: str) -> set[str]:
    """Get unique vowels in text (case-insensitive; results are lowercase)."""
    return {c for c in text.lower() if c in "aeiou"}
def consonants_in_text(text: str) -> set[str]:
    """Get unique consonants in text (case-insensitive; results are lowercase)."""
    return {c for c in text.lower() if c.isalpha() and c not in "aeiou"}
def digits_in_text(text: str) -> set[str]:
    """Get unique digit characters in text."""
    return {c for c in text if c.isdigit()}
def uppercase_chars(text: str) -> set[str]:
    """Get unique uppercase characters."""
    return {c for c in text if c.isupper()}
def lowercase_chars(text: str) -> set[str]:
    """Get unique lowercase characters."""
    return {c for c in text if c.islower()}
def words_with_length(words: list[str], length: int) -> set[str]:
    """Get words with specific length."""
    return {w for w in words if len(w) == length}
def words_starting_with(words: list[str], prefix: str) -> set[str]:
    """Get words starting with prefix."""
    return {w for w in words if w.startswith(prefix)}
def words_ending_with(words: list[str], suffix: str) -> set[str]:
    """Get words ending with suffix."""
    return {w for w in words if w.endswith(suffix)}
def words_containing(words: list[str], substring: str) -> set[str]:
    """Get words containing substring."""
    return {w for w in words if substring in w}
def flatten_to_set(lists: list[list[int]]) -> set[int]:
    """Flatten list of lists to set."""
    return {x for lst in lists for x in lst}
def cross_product_set(list1: list[int], list2: list[int]) -> set[tuple[int, int]]:
    """Cartesian product as set of tuples."""
    return {(x, y) for x in list1 for y in list2}
def pair_sums(items: list[int]) -> set[int]:
    """Get all possible pair sums (a pair may reuse the same element)."""
    return {x + y for x in items for y in items}
def pair_products(items: list[int]) -> set[int]:
    """Get all possible pair products (a pair may reuse the same element)."""
    return {x * y for x in items for y in items}
def symmetric_difference_comprehension(set1: set[int], set2: set[int]) -> set[int]:
    """Symmetric difference using comprehension (equivalent to set1 ^ set2)."""
    return {x for x in set1 if x not in set2} | {x for x in set2 if x not in set1}
def intersection_comprehension(set1: set[int], set2: set[int]) -> set[int]:
    """Intersection using comprehension (equivalent to set1 & set2)."""
    return {x for x in set1 if x in set2}
def difference_comprehension(set1: set[int], set2: set[int]) -> set[int]:
    """Difference using comprehension (equivalent to set1 - set2)."""
    return {x for x in set1 if x not in set2}
def common_elements(lists: list[list[int]]) -> set[int]:
    """Return the elements present in every one of the given lists."""
    if not lists:
        return set()
    first, *rest = lists
    shared = set(first)
    for other in rest:
        shared &= set(other)
    return shared
def unique_from_dict_values(d: dict[str, list[int]]) -> set[int]:
    """Get unique values from dict of lists (flattened across all lists)."""
    return {x for values in d.values() for x in values}
def unique_from_dict_keys(d: dict[str, int]) -> set[str]:
    """Get unique keys (already unique but with transform)."""
    # Upper-casing can merge keys that differ only in case ("a" and "A")
    return {k.upper() for k in d.keys()}
def prime_factors(n: int) -> set[int]:
    """Return the set of prime factors of n using trial division."""
    found: set[int] = set()
    remaining = n
    candidate = 2
    while candidate * candidate <= remaining:
        if remaining % candidate == 0:
            found.add(candidate)
            # Strip every power of this factor before moving on
            while remaining % candidate == 0:
                remaining //= candidate
        else:
            candidate += 1
    if remaining > 1:
        # Whatever is left is itself prime
        found.add(remaining)
    return found
def perfect_squares_in_range(start: int, end: int) -> set[int]:
    """Get perfect squares in [start, end]."""
    # Candidate roots run 1..end-1; the start <= i*i <= end filter does the real bounding
    return {i * i for i in range(1, end) if start <= i * i <= end}
def multiples_set(base: int, count: int) -> set[int]:
    """Get first count multiples of base (base*1 .. base*count)."""
    return {base * i for i in range(1, count + 1)}
def string_transforms(text: str) -> set[str]:
    """Get various case transforms of text (lower/upper/title/swapcase)."""
    # Duplicates collapse, e.g. an all-lowercase input yields fewer than 4 entries
    return {text.lower(), text.upper(), text.title(), text.swapcase()}
def word_lengths_distribution(words: list[str]) -> set[int]:
    """Get the set of distinct word lengths."""
    return {len(w) for w in words}
def main() -> int:
    """CLI entry point: dispatch squares/vowels/filter/words subcommands.

    Returns 0 in all paths (argparse itself exits on usage errors).
    """
    parser = argparse.ArgumentParser(description="Set comprehension CLI")
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # squares
    sq_p = subparsers.add_parser("squares", help="Unique squares")
    sq_p.add_argument("items", type=int, nargs="+")
    # vowels
    vow_p = subparsers.add_parser("vowels", help="Vowels in text")
    vow_p.add_argument("text")
    # filter
    filt_p = subparsers.add_parser("filter", help="Filter numbers")
    filt_p.add_argument("items", type=int, nargs="+")
    filt_p.add_argument("--even", action="store_true")
    filt_p.add_argument("--odd", action="store_true")
    # words
    word_p = subparsers.add_parser("words", help="Filter words")
    word_p.add_argument("words", nargs="+")
    word_p.add_argument("--prefix")
    word_p.add_argument("--suffix")
    args = parser.parse_args()
    if args.command == "squares":
        result = unique_squares(args.items)
        print(sorted(result))
    elif args.command == "vowels":
        result = vowels_in_text(args.text)
        print(sorted(result))
    elif args.command == "filter":
        # --even wins when both flags are given; no flags means dedupe only
        if args.even:
            result = even_numbers(args.items)
        elif args.odd:
            result = odd_numbers(args.items)
        else:
            result = set(args.items)
        print(sorted(result))
    elif args.command == "words":
        # Prefix and suffix filters compose (both applied when both given)
        words = set(args.words)
        if args.prefix:
            words = words_starting_with(list(words), args.prefix)
        if args.suffix:
            words = words_ending_with(list(words), args.suffix)
        print(sorted(words))
    else:
        parser.print_help()
    return 0
if __name__ == "__main__":
sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_set_comprehension/set_comprehension_cli.py (7221 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_set_comprehension/set_comprehension_cli.rs (17303 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_set_comprehension/Cargo.toml (2 dependencies)
⏱️ Parse time: 56ms
📊 Throughput: 124.5 KB/s
⏱️ Total time: 56ms
| true
|
set_comprehension
| 253
| 6
|
[
"context_manager"
] | 0.652
| null |
example_set_comprehension
|
test_set_comprehension_cli.py
|
"""Tests for set_comprehension_cli.py"""
from set_comprehension_cli import (
common_elements,
consonants_in_text,
cross_product_set,
difference_comprehension,
digits_in_text,
divisible_by,
even_numbers,
flatten_to_set,
in_range,
intersection_comprehension,
lowercase_chars,
multiples_set,
negative_numbers,
odd_numbers,
pair_products,
pair_sums,
perfect_squares_in_range,
positive_numbers,
prime_factors,
string_transforms,
symmetric_difference_comprehension,
unique_first_chars,
unique_from_dict_keys,
unique_from_dict_values,
unique_last_chars,
unique_lengths,
unique_squares,
uppercase_chars,
vowels_in_text,
word_lengths_distribution,
words_containing,
words_ending_with,
words_starting_with,
words_with_length,
)
class TestUniqueSquares:
def test_basic(self):
assert unique_squares([1, 2, 3]) == {1, 4, 9}
def test_duplicates(self):
assert unique_squares([2, 2, 3, 3]) == {4, 9}
def test_negatives(self):
assert unique_squares([-2, 2]) == {4}
class TestUniqueLengths:
def test_basic(self):
assert unique_lengths(["a", "bb", "ccc"]) == {1, 2, 3}
def test_duplicates(self):
assert unique_lengths(["a", "b", "cc"]) == {1, 2}
class TestUniqueFirstChars:
def test_basic(self):
assert unique_first_chars(["apple", "banana"]) == {"a", "b"}
def test_empty_words(self):
assert unique_first_chars(["", "apple"]) == {"a"}
class TestUniqueLastChars:
def test_basic(self):
assert unique_last_chars(["apple", "banana"]) == {"e", "a"}
class TestEvenNumbers:
def test_basic(self):
assert even_numbers([1, 2, 3, 4, 5]) == {2, 4}
class TestOddNumbers:
def test_basic(self):
assert odd_numbers([1, 2, 3, 4, 5]) == {1, 3, 5}
class TestPositiveNumbers:
def test_basic(self):
assert positive_numbers([-1, 0, 1, 2]) == {1, 2}
class TestNegativeNumbers:
def test_basic(self):
assert negative_numbers([-2, -1, 0, 1]) == {-2, -1}
class TestDivisibleBy:
def test_by_3(self):
assert divisible_by([1, 2, 3, 6, 9], 3) == {3, 6, 9}
class TestInRange:
def test_range(self):
assert in_range([1, 5, 10, 15, 20], 5, 15) == {5, 10, 15}
class TestVowelsInText:
def test_basic(self):
assert vowels_in_text("hello") == {"e", "o"}
def test_case_insensitive(self):
assert vowels_in_text("AEIOU") == {"a", "e", "i", "o", "u"}
class TestConsonantsInText:
def test_basic(self):
assert consonants_in_text("hello") == {"h", "l"}
class TestDigitsInText:
def test_basic(self):
assert digits_in_text("abc123def") == {"1", "2", "3"}
class TestUppercaseChars:
def test_basic(self):
assert uppercase_chars("Hello World") == {"H", "W"}
class TestLowercaseChars:
def test_basic(self):
assert lowercase_chars("Hello") == {"e", "l", "o"}
class TestWordsWithLength:
def test_basic(self):
assert words_with_length(["a", "bb", "ccc", "dd"], 2) == {"bb", "dd"}
class TestWordsStartingWith:
def test_basic(self):
assert words_starting_with(["apple", "apricot", "banana"], "ap") == {"apple", "apricot"}
class TestWordsEndingWith:
def test_basic(self):
assert words_ending_with(["apple", "maple", "banana"], "le") == {"apple", "maple"}
class TestWordsContaining:
def test_basic(self):
assert words_containing(["apple", "happy", "banana"], "pp") == {"apple", "happy"}
class TestFlattenToSet:
def test_basic(self):
assert flatten_to_set([[1, 2], [2, 3], [3, 4]]) == {1, 2, 3, 4}
class TestCrossProductSet:
def test_basic(self):
result = cross_product_set([1, 2], [3, 4])
assert result == {(1, 3), (1, 4), (2, 3), (2, 4)}
class TestPairSums:
def test_basic(self):
assert pair_sums([1, 2, 3]) == {2, 3, 4, 5, 6}
class TestPairProducts:
def test_basic(self):
assert pair_products([1, 2, 3]) == {1, 2, 3, 4, 6, 9}
class TestSymmetricDifferenceComprehension:
def test_basic(self):
result = symmetric_difference_comprehension({1, 2, 3}, {2, 3, 4})
assert result == {1, 4}
class TestIntersectionComprehension:
def test_basic(self):
result = intersection_comprehension({1, 2, 3}, {2, 3, 4})
assert result == {2, 3}
class TestDifferenceComprehension:
def test_basic(self):
result = difference_comprehension({1, 2, 3}, {2, 3, 4})
assert result == {1}
class TestCommonElements:
def test_common(self):
result = common_elements([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
assert result == {3}
def test_empty(self):
assert common_elements([]) == set()
class TestUniqueFromDictValues:
def test_basic(self):
d = {"a": [1, 2], "b": [2, 3]}
assert unique_from_dict_values(d) == {1, 2, 3}
class TestUniqueFromDictKeys:
def test_basic(self):
d = {"a": 1, "b": 2}
assert unique_from_dict_keys(d) == {"A", "B"}
class TestPrimeFactors:
def test_12(self):
assert prime_factors(12) == {2, 3}
def test_prime(self):
assert prime_factors(7) == {7}
class TestPerfectSquaresInRange:
def test_range(self):
assert perfect_squares_in_range(1, 20) == {1, 4, 9, 16}
class TestMultiplesSet:
def test_multiples(self):
assert multiples_set(3, 4) == {3, 6, 9, 12}
class TestStringTransforms:
def test_transforms(self):
result = string_transforms("Hello")
assert "hello" in result
assert "HELLO" in result
class TestWordLengthsDistribution:
def test_distribution(self):
result = word_lengths_distribution(["a", "bb", "ccc", "dd"])
assert result == {1, 2, 3}
class TestEdgeCases:
def test_empty_list(self):
assert unique_squares([]) == set()
assert even_numbers([]) == set()
def test_empty_string(self):
assert vowels_in_text("") == set()
assert digits_in_text("") == set()
def test_no_match(self):
assert even_numbers([1, 3, 5]) == set()
assert vowels_in_text("xyz") == set()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_set_comprehension/test_set_comprehension_cli.py (6215 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_set_comprehension/test_set_comprehension_cli.rs (29551 bytes)
⏱️ Parse time: 55ms
📊 Throughput: 108.6 KB/s
⏱️ Total time: 56ms
| true
|
set_comprehension
| 253
| 5
|
[
"class_definition"
] | 0.612
| null |
example_settings_loader
|
settings_cli.py
|
#!/usr/bin/env python3
"""Settings Loader CLI.
Load settings from various file formats (INI, JSON, TOML-like).
"""
import argparse
import json
import sys
from dataclasses import dataclass
from enum import Enum, auto
class FileFormat(Enum):
    """Settings file formats recognized by detect_format()/load()."""
    JSON = auto()        # .json
    INI = auto()         # .ini / .cfg (also the fallback for unknown extensions)
    TOML = auto()        # .toml
    PROPERTIES = auto()  # .properties (Java-style key=value / key:value)
@dataclass
class ParseError:
    """A single parsing error: source line number plus message."""
    # 1-based line number within the input (0 when no specific line applies)
    line: int
    # Human-readable description of the problem
    message: str
def detect_format(filename: str) -> FileFormat:
    """Detect the settings file format from the filename extension.

    Falls back to INI when the extension is not recognized.
    """
    name = filename.lower()
    extension_table = (
        ((".json",), FileFormat.JSON),
        ((".ini", ".cfg"), FileFormat.INI),
        ((".toml",), FileFormat.TOML),
        ((".properties",), FileFormat.PROPERTIES),
    )
    for suffixes, fmt in extension_table:
        if name.endswith(suffixes):
            return fmt
    return FileFormat.INI  # Default for unknown extensions
def parse_json(content: str) -> tuple[dict, list[ParseError]]:
    """Parse JSON content.

    Returns (settings, errors); on failure settings is {} and errors holds a
    single ParseError carrying the decoder's line number and message.
    """
    try:
        # NOTE(review): json.loads may return a non-dict (e.g. a top-level
        # list); callers appear to assume a dict — confirm inputs are objects.
        return json.loads(content), []
    except json.JSONDecodeError as e:
        return {}, [ParseError(e.lineno, str(e))]
def parse_ini(content: str) -> tuple[dict, list[ParseError]]:
    """Parse INI content.

    Keys seen before any [section] header land at the top level of the
    result; keys inside a section land in result[section]. Values go
    through convert_ini_value() for type coercion. Returns
    (settings, errors) where errors collects per-line problems.
    """
    result = {}
    errors = []
    current_section = None  # None until the first [section] header
    for line_num, line in enumerate(content.split("\n"), 1):
        line = line.strip()
        # Skip empty lines and comments
        if not line or line.startswith("#") or line.startswith(";"):
            continue
        # Section header
        if line.startswith("[") and line.endswith("]"):
            section_name = line[1:-1].strip()
            if section_name:
                current_section = section_name
                # Re-opening an existing section appends to it rather than replacing
                if section_name not in result:
                    result[section_name] = {}
            else:
                errors.append(ParseError(line_num, "Empty section name"))
            continue
        # Key-value pair
        if "=" in line:
            key, value = line.split("=", 1)
            key = key.strip()
            value = value.strip()
            # Remove quotes (only when the value is fully wrapped in matching quotes)
            if (value.startswith('"') and value.endswith('"')) or (
                value.startswith("'") and value.endswith("'")
            ):
                value = value[1:-1]
            # Type conversion
            value = convert_ini_value(value)
            if current_section:
                result[current_section][key] = value
            else:
                result[key] = value
        else:
            errors.append(ParseError(line_num, f"Invalid line: {line}"))
    return result, errors
def convert_ini_value(value: str) -> object:
    """Convert an INI string value to bool, int, float, list[str], or str.

    Fix: the return annotation was ``-> any``, which annotates with the
    builtin ``any()`` function rather than a type; ``object`` is the honest
    "could be anything" annotation. Behavior is unchanged.

    Note: "1"/"0" are checked as booleans BEFORE integer parsing, so the
    numbers 1 and 0 can never be returned as ints — this ordering is relied
    upon by existing tests and callers.
    """
    # Boolean (deliberately first: "1"/"0" map to True/False)
    if value.lower() in ("true", "yes", "on", "1"):
        return True
    if value.lower() in ("false", "no", "off", "0"):
        return False
    # Integer
    try:
        return int(value)
    except ValueError:
        pass
    # Float
    try:
        return float(value)
    except ValueError:
        pass
    # List (comma-separated, items whitespace-stripped)
    if "," in value:
        return [v.strip() for v in value.split(",")]
    return value
def parse_toml(content: str) -> tuple[dict, list[ParseError]]:
    """Parse simple TOML-like content.

    Supports [table], dotted [a.b] tables, [[array.of.tables]], and
    key = value pairs parsed via parse_toml_value(). Returns
    (settings, errors).
    """
    result = {}
    errors = []
    current_section = result  # Keys before any header go to the top level
    for line_num, line in enumerate(content.split("\n"), 1):
        line = line.strip()
        # Skip empty lines and comments
        if not line or line.startswith("#"):
            continue
        # Table header
        if line.startswith("["):
            if line.startswith("[[") and line.endswith("]]"):
                # Array of tables
                section_name = line[2:-2].strip()
                parts = section_name.split(".")
                # Navigate to parent
                parent = result
                for part in parts[:-1]:
                    if part not in parent:
                        parent[part] = {}
                    parent = parent[part]
                # Create array entry
                if parts[-1] not in parent:
                    parent[parts[-1]] = []
                parent[parts[-1]].append({})
                current_section = parent[parts[-1]][-1]
            elif line.endswith("]"):
                # Regular table
                section_name = line[1:-1].strip()
                parts = section_name.split(".")
                # Navigate/create nested sections
                current = result
                for part in parts:
                    if part not in current:
                        current[part] = {}
                    current = current[part]
                current_section = current
            # NOTE(review): a malformed header like "[table" (no closing
            # bracket) is silently skipped with no error recorded — confirm
            # that is intended.
            continue
        # Key-value pair
        if "=" in line:
            key, value = line.split("=", 1)
            key = key.strip()
            value = value.strip()
            parsed_value = parse_toml_value(value)
            current_section[key] = parsed_value
        else:
            errors.append(ParseError(line_num, f"Invalid line: {line}"))
    return result, errors
def parse_toml_value(value: str) -> object:
    """Parse a single TOML value string into str/bool/list/int/float.

    Recurses into arrays via split_array_items(). Anything that matches no
    rule is returned as the raw string.
    """
    value = value.strip()
    # String (basic) — only the escaped double quote is unescaped
    if value.startswith('"') and value.endswith('"'):
        return value[1:-1].replace('\\"', '"')
    # String (literal) — no escape processing
    if value.startswith("'") and value.endswith("'"):
        return value[1:-1]
    # Boolean
    if value == "true":
        return True
    if value == "false":
        return False
    # Array
    if value.startswith("[") and value.endswith("]"):
        inner = value[1:-1].strip()
        if not inner:
            return []
        items = []
        for item in split_array_items(inner):
            items.append(parse_toml_value(item.strip()))
        return items
    # Integer
    try:
        return int(value)
    except ValueError:
        pass
    # Float
    try:
        return float(value)
    except ValueError:
        pass
    return value
def split_array_items(s: str) -> list[str]:
    """Split comma-separated array items, respecting nested brackets and quotes.

    Commas inside quoted strings or inside nested [...] are not split points.
    Surrounding whitespace of each item is preserved.
    """
    pieces = []
    buf = []
    bracket_depth = 0
    quote = None  # The active quote character, or None when outside a string
    for ch in s:
        if quote is None and ch in ('"', "'"):
            quote = ch
        elif quote is not None and ch == quote:
            quote = None
        elif ch == "[" and quote is None:
            bracket_depth += 1
        elif ch == "]" and quote is None:
            bracket_depth -= 1
        elif ch == "," and bracket_depth == 0 and quote is None:
            # Top-level separator: flush the current item without the comma
            pieces.append("".join(buf))
            buf = []
            continue
        buf.append(ch)
    if buf:
        pieces.append("".join(buf))
    return pieces
def parse_properties(content: str) -> tuple[dict, list[ParseError]]:
    """Parse Java properties format (key=value or key:value per line).

    Values are type-coerced via convert_ini_value(). The errors list is
    always returned empty — no line is ever treated as an error here.
    """
    result = {}
    errors = []  # Never populated; kept for signature parity with the other parsers
    for _line_num, line in enumerate(content.split("\n"), 1):
        line = line.strip()
        # Skip empty lines and comments
        if not line or line.startswith("#") or line.startswith("!"):
            continue
        # Find separator (= or :)
        sep_idx = -1
        for i, char in enumerate(line):
            if char == "\\":
                continue  # Skip escaped chars
            # NOTE(review): the continue above skips only the backslash itself;
            # the character AFTER it is still inspected, so "\=" does NOT
            # actually escape the separator — confirm intended behavior.
            if char in ("=", ":"):
                sep_idx = i
                break
        if sep_idx > 0:
            key = line[:sep_idx].strip()
            value = line[sep_idx + 1 :].strip()
            result[key] = convert_ini_value(value)
        else:
            # Key without value
            result[line] = ""
    return result, errors
def load(content: str, file_format: FileFormat) -> tuple[dict, list[ParseError]]:
    """Load settings from content, dispatching on file_format.

    Returns (settings, errors); an unrecognized format yields ({}, [error]).
    """
    if file_format == FileFormat.JSON:
        return parse_json(content)
    if file_format == FileFormat.INI:
        return parse_ini(content)
    if file_format == FileFormat.TOML:
        return parse_toml(content)
    if file_format == FileFormat.PROPERTIES:
        return parse_properties(content)
    # Defensive fallback: line 0 signals "no specific input line"
    return {}, [ParseError(0, f"Unknown format: {file_format}")]
def dump_json(settings: dict) -> str:
    """Dump settings to a 2-space-indented JSON string."""
    return json.dumps(settings, indent=2)
def dump_ini(settings: dict, section: str = "") -> str:
    """Dump settings to INI format.

    Scalar values are emitted first, then one [section] per nested dict.
    *section* is the dotted prefix used when recursing into nested dicts.
    """
    lines = []
    # Top-level values
    for key, value in settings.items():
        if not isinstance(value, dict):
            if isinstance(value, bool):
                # bool before the generic case: INI uses lowercase true/false
                lines.append(f"{key} = {'true' if value else 'false'}")
            elif isinstance(value, list):
                lines.append(f"{key} = {', '.join(str(v) for v in value)}")
            else:
                lines.append(f"{key} = {value}")
    # Sections
    for key, value in settings.items():
        if isinstance(value, dict):
            section_name = f"{section}.{key}" if section else key
            lines.append(f"\n[{section_name}]")
            for k, v in value.items():
                if isinstance(v, dict):
                    # Nested dict becomes a dotted sub-section via recursion
                    lines.append(dump_ini({k: v}, section_name))
                else:
                    if isinstance(v, bool):
                        lines.append(f"{k} = {'true' if v else 'false'}")
                    elif isinstance(v, list):
                        lines.append(f"{k} = {', '.join(str(x) for x in v)}")
                    else:
                        lines.append(f"{k} = {v}")
    return "\n".join(lines)
def dump_toml(settings: dict, prefix: str = "") -> str:
    """Dump settings to TOML format.

    Scalars/arrays are written first, then nested dicts as dotted [tables].
    *prefix* carries the dotted path during recursion.
    """
    lines = []
    sections = []  # Nested dicts are deferred so scalars precede tables
    for key, value in settings.items():
        if isinstance(value, dict):
            sections.append((key, value))
        elif isinstance(value, list):
            formatted = ", ".join(format_toml_value(v) for v in value)
            lines.append(f"{key} = [{formatted}]")
        else:
            lines.append(f"{key} = {format_toml_value(value)}")
    for section_name, section_value in sections:
        full_name = f"{prefix}.{section_name}" if prefix else section_name
        lines.append(f"\n[{full_name}]")
        lines.append(dump_toml(section_value, full_name))
    return "\n".join(lines)
def format_toml_value(value: object) -> str:
    """Format a Python value as a TOML literal string.

    Fixes: the parameter was annotated ``any`` (the builtin function, not a
    type) — now ``object``; and the explicit ``isinstance(value, (int, float))``
    branch duplicated the final ``str(value)`` fallback, so it is removed.
    Output is byte-identical to before.
    """
    # bool must be checked before the numeric/str fallback (bool subclasses int)
    if isinstance(value, bool):
        return "true" if value else "false"
    if isinstance(value, str):
        return f'"{value}"'
    # ints, floats, and anything else stringify as-is
    return str(value)
def get_value(settings: dict, path: str) -> object:
    """Get a value by dot-notation path, or None when any segment is absent.

    Fix: the return annotation was ``-> any`` (the builtin function, not a
    type); it is now ``object``. Behavior is unchanged.
    """
    current: object = settings
    for part in path.split("."):
        # Descend only while we are in a dict that actually has the key
        if isinstance(current, dict) and part in current:
            current = current[part]
        else:
            return None
    return current
def set_value(settings: dict, path: str, value: object) -> dict:
    """Set *value* at dot-notation *path*, creating missing intermediate dicts.

    Mutates *settings* in place and also returns it for convenience.
    Fix: the ``value`` parameter was annotated ``any`` (the builtin function,
    not a type); it is now ``object``. Behavior is unchanged.
    """
    parts = path.split(".")
    current = settings
    for part in parts[:-1]:
        if part not in current:
            current[part] = {}
        current = current[part]
    current[parts[-1]] = value
    return settings
def merge_settings(base: dict, override: dict) -> dict:
    """Deep-merge two settings dicts: *override* wins, nested dicts recurse.

    Returns a new top-level dict; neither input's top level is mutated.
    """
    merged = dict(base)
    for key, value in override.items():
        existing = merged.get(key)
        if isinstance(existing, dict) and isinstance(value, dict):
            merged[key] = merge_settings(existing, value)
        else:
            merged[key] = value
    return merged
def main() -> int:
    """CLI entry point: load / get / set / merge / convert subcommands.

    Returns 0 on success, 1 when any parser reported errors or a get path
    was not found. Parse errors are printed to stderr.
    """
    parser = argparse.ArgumentParser(description="Settings loader")
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # load command
    load_parser = subparsers.add_parser("load", help="Load settings file")
    load_parser.add_argument("file", help="Settings file")
    load_parser.add_argument(
        "--format",
        "-f",
        choices=["json", "ini", "toml", "properties"],
        help="File format (auto-detected)",
    )
    load_parser.add_argument(
        "--output", "-o", choices=["json", "ini", "toml"], default="json", help="Output format"
    )
    # get command
    get_parser = subparsers.add_parser("get", help="Get value by path")
    get_parser.add_argument("file", help="Settings file")
    get_parser.add_argument("path", help="Dot-notation path")
    # set command
    set_parser = subparsers.add_parser("set", help="Set value by path")
    set_parser.add_argument("file", help="Settings file")
    set_parser.add_argument("path", help="Dot-notation path")
    set_parser.add_argument("value", help="Value to set")
    # merge command
    merge_parser = subparsers.add_parser("merge", help="Merge multiple settings files")
    merge_parser.add_argument("files", nargs="+", help="Settings files to merge")
    merge_parser.add_argument(
        "--output", "-o", choices=["json", "ini", "toml"], default="json", help="Output format"
    )
    # convert command
    convert_parser = subparsers.add_parser("convert", help="Convert between formats")
    convert_parser.add_argument("input", help="Input file")
    convert_parser.add_argument(
        "--to", "-t", required=True, choices=["json", "ini", "toml"], help="Target format"
    )
    args = parser.parse_args()
    # Maps the --format CLI choice to the FileFormat enum
    format_map = {
        "json": FileFormat.JSON,
        "ini": FileFormat.INI,
        "toml": FileFormat.TOML,
        "properties": FileFormat.PROPERTIES,
    }
    if args.command == "load":
        with open(args.file) as f:
            content = f.read()
        # Explicit --format wins; otherwise detect from the file extension
        file_format = format_map.get(args.format) if args.format else detect_format(args.file)
        settings, errors = load(content, file_format)
        if errors:
            for err in errors:
                print(f"Line {err.line}: {err.message}", file=sys.stderr)
            return 1
        if args.output == "json":
            print(dump_json(settings))
        elif args.output == "ini":
            print(dump_ini(settings))
        elif args.output == "toml":
            print(dump_toml(settings))
        return 0
    if args.command == "get":
        with open(args.file) as f:
            content = f.read()
        file_format = detect_format(args.file)
        settings, errors = load(content, file_format)
        if errors:
            for err in errors:
                print(f"Line {err.line}: {err.message}", file=sys.stderr)
            return 1
        value = get_value(settings, args.path)
        if value is None:
            print(f"Path '{args.path}' not found", file=sys.stderr)
            return 1
        # Dict results are pretty-printed as JSON; scalars print raw
        if isinstance(value, dict):
            print(dump_json(value))
        else:
            print(value)
        return 0
    if args.command == "set":
        with open(args.file) as f:
            content = f.read()
        file_format = detect_format(args.file)
        settings, errors = load(content, file_format)
        if errors:
            for err in errors:
                print(f"Line {err.line}: {err.message}", file=sys.stderr)
            return 1
        # Convert value
        value = convert_ini_value(args.value)
        # Result is printed, not written back to the file
        settings = set_value(settings, args.path, value)
        print(dump_json(settings))
        return 0
    if args.command == "merge":
        result = {}
        # Later files override earlier ones (deep merge)
        for file_path in args.files:
            with open(file_path) as f:
                content = f.read()
            file_format = detect_format(file_path)
            settings, errors = load(content, file_format)
            if errors:
                for err in errors:
                    print(f"{file_path} line {err.line}: {err.message}", file=sys.stderr)
                return 1
            result = merge_settings(result, settings)
        if args.output == "json":
            print(dump_json(result))
        elif args.output == "ini":
            print(dump_ini(result))
        elif args.output == "toml":
            print(dump_toml(result))
        return 0
    if args.command == "convert":
        with open(args.input) as f:
            content = f.read()
        file_format = detect_format(args.input)
        settings, errors = load(content, file_format)
        if errors:
            for err in errors:
                print(f"Line {err.line}: {err.message}", file=sys.stderr)
            return 1
        if args.to == "json":
            print(dump_json(settings))
        elif args.to == "ini":
            print(dump_ini(settings))
        elif args.to == "toml":
            print(dump_toml(settings))
        return 0
    parser.print_help()
    return 0
if __name__ == "__main__":
sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_settings_loader/settings_cli.py (16747 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_settings_loader/settings_cli.rs (41368 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_settings_loader/Cargo.toml (3 dependencies)
⏱️ Parse time: 69ms
📊 Throughput: 234.9 KB/s
⏱️ Total time: 69ms
| true
|
settings_loader
| 586
| 6
|
[
"context_manager",
"class_definition",
"exception_handling",
"decorator"
] | 0.652
| null |
example_settings_loader
|
test_settings_cli.py
|
"""Tests for settings_cli.py"""
from settings_cli import (
FileFormat,
convert_ini_value,
detect_format,
dump_ini,
dump_json,
dump_toml,
format_toml_value,
get_value,
load,
merge_settings,
parse_ini,
parse_json,
parse_properties,
parse_toml,
parse_toml_value,
set_value,
split_array_items,
)
class TestDetectFormat:
def test_json(self):
assert detect_format("config.json") == FileFormat.JSON
assert detect_format("settings.JSON") == FileFormat.JSON
def test_ini(self):
assert detect_format("config.ini") == FileFormat.INI
assert detect_format("settings.cfg") == FileFormat.INI
def test_toml(self):
assert detect_format("config.toml") == FileFormat.TOML
def test_properties(self):
assert detect_format("app.properties") == FileFormat.PROPERTIES
def test_unknown(self):
assert detect_format("config.txt") == FileFormat.INI # Default
class TestParseJson:
def test_simple(self):
settings, errors = parse_json('{"key": "value"}')
assert errors == []
assert settings == {"key": "value"}
def test_nested(self):
settings, errors = parse_json('{"db": {"host": "localhost", "port": 5432}}')
assert errors == []
assert settings["db"]["host"] == "localhost"
def test_invalid(self):
settings, errors = parse_json("{invalid}")
assert len(errors) == 1
class TestConvertIniValue:
def test_string(self):
assert convert_ini_value("hello") == "hello"
def test_integer(self):
assert convert_ini_value("42") == 42
assert convert_ini_value("-10") == -10
def test_float(self):
assert convert_ini_value("3.14") == 3.14
def test_boolean_true(self):
assert convert_ini_value("true") is True
assert convert_ini_value("yes") is True
assert convert_ini_value("on") is True
assert convert_ini_value("1") is True
def test_boolean_false(self):
assert convert_ini_value("false") is False
assert convert_ini_value("no") is False
assert convert_ini_value("off") is False
assert convert_ini_value("0") is False
def test_list(self):
assert convert_ini_value("a, b, c") == ["a", "b", "c"]
class TestParseIni:
def test_simple(self):
content = """
key = value
number = 42
"""
settings, errors = parse_ini(content)
assert errors == []
assert settings["key"] == "value"
assert settings["number"] == 42
def test_sections(self):
content = """
[database]
host = localhost
port = 5432
"""
settings, errors = parse_ini(content)
assert errors == []
assert settings["database"]["host"] == "localhost"
assert settings["database"]["port"] == 5432
def test_comments(self):
content = """
# This is a comment
; This is also a comment
key = value
"""
settings, errors = parse_ini(content)
assert errors == []
assert settings["key"] == "value"
def test_quoted_values(self):
content = '''
key1 = "value with spaces"
key2 = 'single quoted'
'''
settings, errors = parse_ini(content)
assert settings["key1"] == "value with spaces"
assert settings["key2"] == "single quoted"
class TestParseTomlValue:
def test_string(self):
assert parse_toml_value('"hello"') == "hello"
assert parse_toml_value("'literal'") == "literal"
def test_integer(self):
assert parse_toml_value("42") == 42
def test_float(self):
assert parse_toml_value("3.14") == 3.14
def test_boolean(self):
assert parse_toml_value("true") is True
assert parse_toml_value("false") is False
def test_array(self):
assert parse_toml_value("[1, 2, 3]") == [1, 2, 3]
assert parse_toml_value('["a", "b"]') == ["a", "b"]
class TestSplitArrayItems:
def test_simple(self):
assert split_array_items("1, 2, 3") == ["1", " 2", " 3"]
def test_nested(self):
items = split_array_items("[1, 2], [3, 4]")
assert len(items) == 2
def test_strings(self):
items = split_array_items('"a, b", "c"')
assert len(items) == 2
class TestParseToml:
def test_simple(self):
content = """
key = "value"
number = 42
"""
settings, errors = parse_toml(content)
assert errors == []
assert settings["key"] == "value"
assert settings["number"] == 42
def test_tables(self):
content = """
[database]
host = "localhost"
port = 5432
"""
settings, errors = parse_toml(content)
assert errors == []
assert settings["database"]["host"] == "localhost"
assert settings["database"]["port"] == 5432
def test_nested_tables(self):
content = """
[server.http]
port = 8080
[server.https]
port = 443
"""
settings, errors = parse_toml(content)
assert settings["server"]["http"]["port"] == 8080
assert settings["server"]["https"]["port"] == 443
def test_array_of_tables(self):
content = """
[[servers]]
host = "alpha"
[[servers]]
host = "beta"
"""
settings, errors = parse_toml(content)
assert len(settings["servers"]) == 2
assert settings["servers"][0]["host"] == "alpha"
assert settings["servers"][1]["host"] == "beta"
class TestParseProperties:
def test_simple(self):
content = """
key = value
another.key = another value
"""
settings, errors = parse_properties(content)
assert errors == []
assert settings["key"] == "value"
assert settings["another.key"] == "another value"
def test_colon_separator(self):
content = "key: value"
settings, errors = parse_properties(content)
assert settings["key"] == "value"
def test_comments(self):
content = """
# Comment
! Another comment
key = value
"""
settings, errors = parse_properties(content)
assert settings == {"key": "value"}
class TestLoad:
def test_json(self):
content = '{"key": "value"}'
settings, errors = load(content, FileFormat.JSON)
assert settings == {"key": "value"}
def test_ini(self):
content = "key = value"
settings, errors = load(content, FileFormat.INI)
assert settings == {"key": "value"}
def test_toml(self):
content = 'key = "value"'
settings, errors = load(content, FileFormat.TOML)
assert settings == {"key": "value"}
def test_properties(self):
content = "key = value"
settings, errors = load(content, FileFormat.PROPERTIES)
assert settings == {"key": "value"}
class TestDumpJson:
def test_simple(self):
settings = {"key": "value"}
result = dump_json(settings)
assert '"key"' in result
assert '"value"' in result
class TestDumpIni:
def test_simple(self):
settings = {"key": "value", "number": 42}
result = dump_ini(settings)
assert "key = value" in result
assert "number = 42" in result
def test_sections(self):
settings = {"database": {"host": "localhost", "port": 5432}}
result = dump_ini(settings)
assert "[database]" in result
assert "host = localhost" in result
def test_boolean(self):
settings = {"enabled": True, "disabled": False}
result = dump_ini(settings)
assert "enabled = true" in result
assert "disabled = false" in result
def test_list(self):
settings = {"items": ["a", "b", "c"]}
result = dump_ini(settings)
assert "items = a, b, c" in result
class TestFormatTomlValue:
def test_string(self):
assert format_toml_value("hello") == '"hello"'
def test_integer(self):
assert format_toml_value(42) == "42"
def test_boolean(self):
assert format_toml_value(True) == "true"
assert format_toml_value(False) == "false"
class TestDumpToml:
def test_simple(self):
settings = {"key": "value", "number": 42}
result = dump_toml(settings)
assert 'key = "value"' in result
assert "number = 42" in result
def test_sections(self):
settings = {"database": {"host": "localhost"}}
result = dump_toml(settings)
assert "[database]" in result
class TestGetValue:
def test_simple(self):
settings = {"key": "value"}
assert get_value(settings, "key") == "value"
def test_nested(self):
settings = {"db": {"host": "localhost"}}
assert get_value(settings, "db.host") == "localhost"
def test_not_found(self):
settings = {"key": "value"}
assert get_value(settings, "missing") is None
def test_nested_not_found(self):
settings = {"db": {"host": "localhost"}}
assert get_value(settings, "db.port") is None
class TestSetValue:
def test_simple(self):
settings = {}
result = set_value(settings, "key", "value")
assert result["key"] == "value"
def test_nested(self):
settings = {}
result = set_value(settings, "db.host", "localhost")
assert result["db"]["host"] == "localhost"
def test_existing(self):
settings = {"db": {"host": "old"}}
result = set_value(settings, "db.host", "new")
assert result["db"]["host"] == "new"
class TestMergeSettings:
def test_simple(self):
base = {"a": 1, "b": 2}
override = {"b": 3, "c": 4}
result = merge_settings(base, override)
assert result == {"a": 1, "b": 3, "c": 4}
def test_nested(self):
base = {"db": {"host": "localhost", "port": 5432}}
override = {"db": {"port": 3306}}
result = merge_settings(base, override)
assert result["db"]["host"] == "localhost"
assert result["db"]["port"] == 3306
def test_override_dict_with_value(self):
base = {"db": {"host": "localhost"}}
override = {"db": "sqlite:///db.sqlite"}
result = merge_settings(base, override)
assert result["db"] == "sqlite:///db.sqlite"
class TestRoundTrip:
def test_ini_roundtrip(self):
original = {"key": "value", "section": {"nested": "data"}}
ini = dump_ini(original)
restored, _ = parse_ini(ini)
assert restored["key"] == "value"
assert restored["section"]["nested"] == "data"
def test_json_roundtrip(self):
original = {"key": "value", "nested": {"data": [1, 2, 3]}}
json_str = dump_json(original)
restored, _ = parse_json(json_str)
assert restored == original
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_settings_loader/test_settings_cli.py (10696 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_settings_loader/test_settings_cli.rs (23946 bytes)
⏱️ Parse time: 55ms
📊 Throughput: 187.5 KB/s
⏱️ Total time: 55ms
| true
|
settings_loader
| 383
| 5
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_shutil
|
file_ops.py
|
#!/usr/bin/env python3
"""Shutil Example - File operations CLI."""
import argparse
import shutil
def cmd_copy(args):
    """Copy file. Depyler: proven to terminate"""
    src, dst = args.src, args.dst
    # copy2 preserves metadata (mtime, permissions) in addition to contents.
    shutil.copy2(src, dst)
    print("Copied: {} -> {}".format(src, dst))
def cmd_move(args):
    """Move file. Depyler: proven to terminate"""
    src, dst = args.src, args.dst
    # shutil.move falls back to copy+delete across filesystems.
    shutil.move(src, dst)
    print("Moved: {} -> {}".format(src, dst))
def main():
    """Parse CLI arguments and dispatch to the requested file operation."""
    parser = argparse.ArgumentParser(description="File operations tool")
    subparsers = parser.add_subparsers(dest="command", required=True)
    # Both subcommands share the same positional arguments.
    for name in ("copy", "move"):
        sub = subparsers.add_parser(name)
        sub.add_argument("src")
        sub.add_argument("dst")
    args = parser.parse_args()
    handlers = {"copy": cmd_copy, "move": cmd_move}
    handler = handlers.get(args.command)
    if handler is not None:
        handler(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_shutil/file_ops.py (935 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_shutil/file_ops.rs (1403 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_shutil/Cargo.toml (1 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 18.5 KB/s
⏱️ Total time: 49ms
| true
|
shutil
| 40
| 6
|
[] | 0
| null |
example_shutil
|
test_file_ops.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for shutil CLI."""
import os
import subprocess
import tempfile
SCRIPT = "file_ops.py"
def run(args):
    """Run the CLI under test from the script's own directory and capture output."""
    cmd = ["python3", SCRIPT] + args
    workdir = __file__.rsplit("/", 1)[0]
    return subprocess.run(cmd, capture_output=True, text=True, cwd=workdir)
class TestCopy:
    """Exercise the "copy" subcommand end to end via a temp file."""

    def test_copy_file(self):
        # delete=False so the file survives the context manager for the CLI run.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as src:
            src.write(b"test content")
            src_path = src.name
        dst_path = src_path + ".copy"
        try:
            result = run(["copy", src_path, dst_path])
            assert result.returncode == 0
            assert os.path.exists(dst_path)
        finally:
            # Clean up both files even when the assertions fail.
            os.unlink(src_path)
            if os.path.exists(dst_path):
                os.unlink(dst_path)
class TestMove:
    """Exercise the "move" subcommand: destination appears, source disappears."""

    def test_move_file(self):
        # delete=False so the CLI can move the file after the handle closes.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as src:
            src.write(b"move me")
            src_path = src.name
        dst_path = src_path + ".moved"
        try:
            result = run(["move", src_path, dst_path])
            assert result.returncode == 0
            assert os.path.exists(dst_path)
            assert not os.path.exists(src_path)
        finally:
            # Only the destination can remain; the source was moved away.
            if os.path.exists(dst_path):
                os.unlink(dst_path)
class TestHelp:
    """The CLI must print usage and exit 0 for --help."""

    def test_help(self):
        outcome = run(["--help"])
        assert outcome.returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_shutil/test_file_ops.py (1445 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_shutil/test_file_ops.rs (2901 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_shutil/Cargo.toml (1 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 29.6 KB/s
⏱️ Total time: 47ms
| true
|
shutil
| 46
| 6
|
[
"context_manager",
"class_definition",
"exception_handling"
] | 0.652
| null |
example_signal
|
signal_tool.py
|
#!/usr/bin/env python3
"""Signal Example - Signal info CLI."""
import argparse
def main():
    """Signal info CLI: map numbers to names, names to numbers, or list signals."""
    parser = argparse.ArgumentParser(description="Signal info tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    name_parser = subs.add_parser("name")
    name_parser.add_argument("num", type=int)
    number_parser = subs.add_parser("number")
    number_parser.add_argument("name")
    subs.add_parser("list")
    args = parser.parse_args()
    if args.cmd == "name":
        # Unknown numbers print nothing, matching the original elif chain.
        num_to_name = {9: "SIGKILL", 15: "SIGTERM", 2: "SIGINT"}
        if args.num in num_to_name:
            print(num_to_name[args.num])
    elif args.cmd == "number":
        name_to_num = {"KILL": 9, "TERM": 15, "INT": 2}
        if args.name in name_to_num:
            print(name_to_num[args.name])
    elif args.cmd == "list":
        print("SIGINT SIGTERM SIGKILL")
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_signal/signal_tool.py (914 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_signal/signal_tool.rs (1777 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_signal/Cargo.toml (1 dependencies)
⏱️ Parse time: 45ms
📊 Throughput: 19.4 KB/s
⏱️ Total time: 46ms
| true
|
signal
| 37
| 6
|
[] | 0
| null |
example_signal
|
test_signal_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for signal CLI."""
import subprocess
SCRIPT = "signal_tool.py"
def run(args):
    """Run signal_tool.py with *args* from the test's own directory."""
    workdir = __file__.rsplit("/", 1)[0]
    return subprocess.run(["python3", SCRIPT] + args, capture_output=True, text=True, cwd=workdir)
class TestSignal:
    """Smoke tests for the three subcommands."""

    def test_name(self):
        result = run(["name", "9"])
        assert result.returncode == 0 and "KILL" in result.stdout

    def test_number(self):
        result = run(["number", "TERM"])
        assert result.returncode == 0 and "15" in result.stdout

    def test_list(self):
        result = run(["list"])
        assert result.returncode == 0
class TestHelp:
    """--help must exit successfully."""

    def test_help(self):
        outcome = run(["--help"])
        assert outcome.returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_signal/test_signal_tool.py (606 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_signal/test_signal_tool.rs (1830 bytes)
⏱️ Parse time: 47ms
📊 Throughput: 12.4 KB/s
⏱️ Total time: 47ms
| true
|
signal
| 14
| 5
|
[
"class_definition"
] | 0.612
| null |
example_simple
|
test_trivial_cli.py
|
"""
Test suite for trivial_cli.py
Ensures 100% coverage before transpilation
Following extreme TDD methodology:
1. Write tests first (RED)
2. Implement code to pass tests (GREEN)
3. Refactor (REFACTOR)
"""
import subprocess
from pathlib import Path
import pytest
# Path to the CLI script
SCRIPT = Path(__file__).parent / "trivial_cli.py"
def run_cli(*args):
    """
    Helper to run CLI and capture output.

    Args:
        *args: Command-line arguments to pass to the script

    Returns:
        subprocess.CompletedProcess: Result with returncode, stdout, stderr
    """
    cmd = ["python3", str(SCRIPT), *args]
    return subprocess.run(cmd, capture_output=True, text=True)
class TestTrivialCLI:
"""Test suite for trivial_cli.py"""
def test_help_flag(self):
"""Test --help displays usage information"""
result = run_cli("--help")
assert result.returncode == 0, "Help should exit successfully"
assert "usage:" in result.stdout.lower(), "Help should show usage"
assert "trivial_cli.py" in result.stdout, "Should show script name"
assert "--name" in result.stdout, "--name argument should be documented"
assert "Name to greet" in result.stdout or "name to greet" in result.stdout.lower(), (
"Should describe --name argument"
)
def test_version_flag(self):
"""Test --version displays version information"""
result = run_cli("--version")
assert result.returncode == 0, "Version should exit successfully"
assert "1.0.0" in result.stdout, "Should display version 1.0.0"
def test_basic_execution_with_name(self):
"""Test basic CLI execution with --name argument"""
result = run_cli("--name", "Alice")
assert result.returncode == 0, "Should execute successfully"
assert "Hello, Alice!" in result.stdout, "Should greet Alice"
def test_missing_required_argument(self):
"""Test error handling when required --name argument is missing"""
result = run_cli()
assert result.returncode != 0, "Should fail when --name is missing"
assert "required" in result.stderr.lower(), "Error should mention 'required'"
assert "--name" in result.stderr, "Error should mention --name"
@pytest.mark.parametrize(
"name,expected",
[
("Alice", "Hello, Alice!"),
("Bob", "Hello, Bob!"),
("Charlie", "Hello, Charlie!"),
("Dr. Smith", "Hello, Dr. Smith!"),
("123", "Hello, 123!"),
("", "Hello, !"), # Edge case: empty string
],
)
def test_parametrized_names(self, name, expected):
"""
Test various input names to ensure consistent behavior
Args:
name: Name to pass to --name argument
expected: Expected output string
"""
result = run_cli("--name", name)
assert result.returncode == 0, f"Should succeed for name='{name}'"
assert expected in result.stdout, f"Should output '{expected}' for name='{name}'"
def test_name_with_special_characters(self):
"""Test handling of special characters in names"""
special_names = [
"O'Brien",
"José",
"François",
"北京", # Chinese characters
"مرحبا", # Arabic
]
for name in special_names:
result = run_cli("--name", name)
assert result.returncode == 0, f"Should handle special chars: {name}"
assert name in result.stdout, f"Should preserve special chars in output: {name}"
def test_long_name(self):
"""Test handling of very long names"""
long_name = "A" * 1000
result = run_cli("--name", long_name)
assert result.returncode == 0, "Should handle long names"
assert long_name in result.stdout, "Should output full long name"
def test_name_with_whitespace(self):
"""Test handling of names with leading/trailing whitespace"""
result = run_cli("--name", " Alice ")
assert result.returncode == 0, "Should handle whitespace"
# Argparse preserves whitespace
assert " Alice " in result.stdout, "Should preserve whitespace"
def test_invalid_flag(self):
"""Test error handling for invalid flags"""
result = run_cli("--name", "Alice", "--invalid-flag")
assert result.returncode != 0, "Should fail for invalid flag"
assert "unrecognized" in result.stderr.lower() or "invalid" in result.stderr.lower(), (
"Error should mention unrecognized/invalid argument"
)
def test_name_flag_without_value(self):
"""Test error handling when --name is provided without a value"""
result = run_cli("--name")
assert result.returncode != 0, "Should fail when --name has no value"
assert (
"expected one argument" in result.stderr.lower()
or "argument --name" in result.stderr.lower()
), "Error should mention missing value for --name"
def test_duplicate_name_flag(self):
"""Test behavior when --name is specified multiple times"""
result = run_cli("--name", "Alice", "--name", "Bob")
assert result.returncode == 0, "Should accept duplicate flags"
# Argparse uses the last value
assert "Hello, Bob!" in result.stdout, "Should use last provided value"
def test_stderr_is_empty_on_success(self):
"""Test that stderr is empty on successful execution"""
result = run_cli("--name", "Alice")
assert result.returncode == 0
assert result.stderr == "", "stderr should be empty on success"
def test_stdout_ends_with_newline(self):
"""Test that output ends with newline (proper CLI behavior)"""
result = run_cli("--name", "Alice")
assert result.returncode == 0
assert result.stdout.endswith("\n"), "Output should end with newline"
def test_script_is_executable_as_main(self):
"""Test that script can be executed as __main__"""
# This is tested implicitly by all other tests
# but we explicitly verify the script structure
result = run_cli("--name", "Test")
assert result.returncode == 0
# If this passes, __main__ block is working
def test_deterministic_output(self):
"""Test that output is deterministic across multiple runs"""
results = [run_cli("--name", "Alice") for _ in range(3)]
# All should succeed
assert all(r.returncode == 0 for r in results), "All runs should succeed"
# All should have identical output
first_output = results[0].stdout
assert all(r.stdout == first_output for r in results), "Output should be deterministic"
class TestEdgeCases:
    """Additional edge case tests for robustness"""
    def test_empty_args_list(self):
        """Test behavior with absolutely no arguments"""
        # argparse exits non-zero when the required --name is absent.
        result = run_cli()
        assert result.returncode != 0, "Should fail with no arguments"
    def test_help_with_other_args(self):
        """Test that --help takes precedence over other arguments"""
        # argparse handles --help eagerly and exits 0 before validation.
        result = run_cli("--help", "--name", "Alice")
        assert result.returncode == 0, "Help should succeed"
        assert "usage:" in result.stdout.lower(), "Should show help"
        # When --help is present, other args are ignored
    def test_version_with_other_args(self):
        """Test that --version takes precedence over other arguments"""
        # action="version" likewise prints and exits 0 immediately.
        result = run_cli("--version", "--name", "Alice")
        assert result.returncode == 0, "Version should succeed"
        assert "1.0.0" in result.stdout, "Should show version"
        # When --version is present, other args are ignored
| false
|
simple
| 195
| 0
|
[
"context_manager",
"class_definition",
"decorator",
"multiprocessing"
] | 0.652
|
Performance Warnings
══════════════════════════════════════════════════
[1] [Medium] Large value 'args' passed by copy
Location: run_cli, line 0
Impact: Complexity: O(n), Scales: Yes, Hot path: No
Why: Passing large values by copy is inefficient
Fix: Consider passing by reference (&) or using Box/Arc for large types
Summary: Found 1 warnings (0 critical, 0 high severity)
Profiling Report
══════════════════════════════════════════════════
Summary
Total estimated instructions:
|
|
example_simple
|
trivial_cli.py
|
#!/usr/bin/env python3
"""
Trivial CLI - Simplest argparse example
This script demonstrates basic argparse usage:
- Required argument (--name)
- Version flag (--version)
- Help flag (--help)
This is the simplest example in the reprorusted-python-cli suite,
designed to validate basic Python-to-Rust transpilation via depyler.
"""
import argparse
def main():
    """
    Main entry point for the trivial CLI.

    Builds the argument parser, parses sys.argv, and prints a greeting.
    """
    parser = argparse.ArgumentParser(
        prog="trivial_cli.py",
        description="A trivial CLI example for argparse-to-Rust validation",
    )
    parser.add_argument("--name", type=str, required=True, help="Name to greet")
    parser.add_argument("--version", action="version", version="1.0.0")
    parsed = parser.parse_args()
    greeting = f"Hello, {parsed.name}!"
    print(greeting)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_simple/trivial_cli.py (916 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_simple/trivial_cli.rs (605 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_simple/Cargo.toml (1 dependencies)
⏱️ Parse time: 46ms
📊 Throughput: 19.1 KB/s
⏱️ Total time: 46ms
| true
|
simple
| 38
| 6
|
[] | 0
| null |
example_sklearn_dtree
|
dtree_tool.py
|
#!/usr/bin/env python3
"""DecisionTreeClassifier CLI tool.
A CLI for sklearn-style DecisionTree classification.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Breiman (2001) Random Forests [8] - GINI criterion
Usage:
echo '{"X": [[0], [1], [2], [3]], "y": [0, 0, 1, 1]}' | python dtree_tool.py fit
"""
import argparse
import json
import sys
from typing import Any
def gini_impurity(y: list[int]) -> float:
    """Calculate GINI impurity: 1 - sum(p_c^2) over class probabilities.

    Args:
        y: Class labels (may be empty).

    Returns:
        0.0 for an empty or pure set; up to 1 - 1/k for k balanced classes.
    """
    n = len(y)
    if n == 0:
        return 0.0
    # Single pass over y instead of re-scanning y once per distinct class
    # (the original was O(n * k); this is O(n + k)).
    counts: dict[int, int] = {}
    for yi in y:
        counts[yi] = counts.get(yi, 0) + 1
    return 1.0 - sum((c / n) ** 2 for c in counts.values())
def best_split(X: list[list[float]], y: list[int], max_features: int) -> tuple:
    """Find best split using GINI criterion.

    Returns (feature, threshold, left_indices, right_indices), or four Nones
    when no valid split exists. Ties are broken by the first feature/threshold
    encountered (strict `<` comparison), which keeps trees deterministic —
    do not reorder the loops below.
    """
    n_samples = len(X)
    n_features = len(X[0]) if n_samples > 0 else 0
    if n_samples == 0:
        return None, None, None, None
    best_gini = float("inf")
    best_feature = None
    best_threshold = None
    best_left_idx = None
    best_right_idx = None
    # Only the first min(n_features, max_features) columns are considered.
    features_to_check = list(range(min(n_features, max_features)))
    for feature in features_to_check:
        # Candidate thresholds are midpoints between consecutive distinct values.
        values = sorted({X[i][feature] for i in range(n_samples)})
        thresholds = [(values[i] + values[i + 1]) / 2 for i in range(len(values) - 1)]
        for threshold in thresholds:
            left_idx = [i for i in range(n_samples) if X[i][feature] <= threshold]
            right_idx = [i for i in range(n_samples) if X[i][feature] > threshold]
            if len(left_idx) == 0 or len(right_idx) == 0:
                continue
            left_y = [y[i] for i in left_idx]
            right_y = [y[i] for i in right_idx]
            # Weighted GINI of the two children.
            gini = (
                len(left_y) * gini_impurity(left_y) + len(right_y) * gini_impurity(right_y)
            ) / n_samples
            if gini < best_gini:
                best_gini = gini
                best_feature = feature
                best_threshold = threshold
                best_left_idx = left_idx
                best_right_idx = right_idx
    return best_feature, best_threshold, best_left_idx, best_right_idx
def build_tree(X: list[list[float]], y: list[int], depth: int, max_depth: int) -> dict[str, Any]:
    """Recursively build decision tree.

    Returns either a leaf ``{"class": c}`` or an internal node with
    ``feature``, ``threshold``, ``left`` and ``right`` entries.
    """
    # Stop on a pure node, the depth limit, or too few samples to split.
    if len(set(y)) == 1 or depth >= max_depth or len(y) < 2:
        return {"class": _majority_class(y)}
    feature, threshold, left_idx, right_idx = best_split(X, y, len(X[0]))
    if feature is None:
        # No valid split found (e.g. all feature values identical).
        return {"class": _majority_class(y)}
    left_X = [X[i] for i in left_idx]
    left_y = [y[i] for i in left_idx]
    right_X = [X[i] for i in right_idx]
    right_y = [y[i] for i in right_idx]
    return {
        "feature": feature,
        "threshold": threshold,
        "left": build_tree(left_X, left_y, depth + 1, max_depth),
        "right": build_tree(right_X, right_y, depth + 1, max_depth),
    }


def _majority_class(y: list[int]) -> int:
    """Return the most common label in y (first-seen label wins on ties).

    Extracted from build_tree, which previously duplicated this computation
    in both of its leaf-creation branches.
    """
    class_counts: dict[int, int] = {}
    for yi in y:
        class_counts[yi] = class_counts.get(yi, 0) + 1
    return max(class_counts.keys(), key=lambda k: class_counts[k])
def predict_single(tree: dict[str, Any], x: list[float]) -> int:
    """Predict class for single sample."""
    node = tree
    # Walk down until a leaf (a node carrying a "class" entry) is reached.
    while "class" not in node:
        branch = "left" if x[node["feature"]] <= node["threshold"] else "right"
        node = node[branch]
    return node["class"]
def fit(X: list[list[float]], y: list[int], max_depth: int = 10) -> dict[str, Any]:
    """Fit decision tree classifier.

    Raises:
        ValueError: On empty input or mismatched X/y lengths.
    """
    if not X or not y:
        raise ValueError("Empty input data")
    if len(X) != len(y):
        raise ValueError("X and y must have same length")
    return {"tree": build_tree(X, y, 0, max_depth)}
def predict(X: list[list[float]], tree: dict[str, Any]) -> list[int]:
    """Predict class labels."""
    labels = []
    for sample in X:
        labels.append(predict_single(tree, sample))
    return labels
def score(X: list[list[float]], y: list[int], tree: dict[str, Any]) -> float:
    """Calculate accuracy score (fraction of samples predicted correctly)."""
    predicted = predict(X, tree)
    correct = 0
    for i in range(len(y)):
        if predicted[i] == y[i]:
            correct += 1
    return correct / len(y)
def cmd_fit(args: argparse.Namespace) -> None:
    """Handle fit subcommand: read {"X", "y"[, "max_depth"]} from stdin."""
    # Malformed JSON, missing keys, or fit() errors all exit with status 1.
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if not ("X" in payload and "y" in payload):
        print("Error: Missing 'X' or 'y'", file=sys.stderr)
        sys.exit(1)
    depth_limit = payload.get("max_depth", 10)
    try:
        model = fit(payload["X"], payload["y"], depth_limit)
        print(json.dumps(model))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
def cmd_predict(args: argparse.Namespace) -> None:
    """Handle predict subcommand: read {"X", "tree"} from stdin."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if not ("X" in payload and "tree" in payload):
        print("Error: Missing 'X' or 'tree'", file=sys.stderr)
        sys.exit(1)
    labels = predict(payload["X"], payload["tree"])
    print(json.dumps({"predictions": labels}))
def cmd_fit_predict(args: argparse.Namespace) -> None:
    """Handle fit-predict subcommand: train on X_train/y_train, label X_test."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    # All three keys are mandatory; report the first one that is missing.
    for field in ("X_train", "y_train", "X_test"):
        if field not in payload:
            print(f"Error: Missing '{field}'", file=sys.stderr)
            sys.exit(1)
    depth_limit = payload.get("max_depth", 10)
    try:
        model = fit(payload["X_train"], payload["y_train"], depth_limit)
        labels = predict(payload["X_test"], model["tree"])
        print(json.dumps({"tree": model["tree"], "predictions": labels}))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
def cmd_score(args: argparse.Namespace) -> None:
    """Handle score subcommand: read {"X", "y", "tree"}, print accuracy."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    has_all = "X" in payload and "y" in payload and "tree" in payload
    if not has_all:
        print("Error: Missing 'X', 'y', or 'tree'", file=sys.stderr)
        sys.exit(1)
    accuracy = score(payload["X"], payload["y"], payload["tree"])
    print(json.dumps({"accuracy": accuracy}))
def main() -> None:
    """Main entry point: register subcommands and dispatch via set_defaults."""
    parser = argparse.ArgumentParser(
        description="DecisionTreeClassifier CLI - GINI-based classification (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    commands = [
        ("fit", "Fit decision tree", cmd_fit),
        ("predict", "Predict class labels", cmd_predict),
        ("fit-predict", "Fit and predict", cmd_fit_predict),
        ("score", "Calculate accuracy", cmd_score),
    ]
    for name, help_text, handler in commands:
        subparsers.add_parser(name, help=help_text).set_defaults(func=handler)
    args = parser.parse_args()
    # No subcommand: show usage and exit cleanly.
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_dtree/dtree_tool.py (7707 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_dtree/dtree_tool.rs (18870 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_dtree/Cargo.toml (3 dependencies)
⏱️ Parse time: 59ms
📊 Throughput: 127.1 KB/s
⏱️ Total time: 59ms
| true
|
sklearn_dtree
| 245
| 6
|
[
"lambda",
"class_definition",
"exception_handling",
"stdin_usage"
] | 0.783
| null |
example_sklearn_dtree
|
test_dtree_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn DecisionTreeClassifier CLI.
Academic Reference: Breiman (2001) Random Forests [8] - GINI criterion
Tests decision tree classification with fit/predict/score pattern.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "dtree_tool.py"
def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    proc = subprocess.run(
        ["python3", str(SCRIPT)] + args,
        capture_output=True,
        text=True,
        input=input_data,
    )
    return proc.stdout, proc.stderr, proc.returncode
class TestDtreeFit:
"""Test tree fitting."""
def test_fit_simple_binary(self):
"""Test fitting on simple binary classification."""
# XOR-like pattern
data = json.dumps({
"X": [[0, 0], [0, 1], [1, 0], [1, 1]],
"y": [0, 1, 1, 0]
})
stdout, stderr, code = run(["fit"], data)
assert code == 0
result = json.loads(stdout)
assert "tree" in result
def test_fit_with_max_depth(self):
"""Test fitting with max_depth parameter."""
data = json.dumps({
"X": [[0], [1], [2], [3]],
"y": [0, 0, 1, 1],
"max_depth": 2
})
stdout, stderr, code = run(["fit"], data)
assert code == 0
result = json.loads(stdout)
assert "tree" in result
def test_fit_empty_fails(self):
"""Test that fitting on empty data fails."""
data = json.dumps({"X": [], "y": []})
stdout, stderr, code = run(["fit"], data)
assert code == 1
class TestDtreePredict:
"""Test tree prediction."""
def test_predict_simple(self):
"""Test prediction on simple data."""
# Precomputed tree for x > 1.5 -> 1, else 0
data = json.dumps({
"X": [[0], [3]],
"tree": {
"feature": 0,
"threshold": 1.5,
"left": {"class": 0},
"right": {"class": 1}
}
})
stdout, stderr, code = run(["predict"], data)
assert code == 0
result = json.loads(stdout)
assert "predictions" in result
assert result["predictions"] == [0, 1]
class TestDtreeFitPredict:
"""Test combined fit-predict."""
def test_fit_predict_linearly_separable(self):
"""Test fit-predict on linearly separable data."""
data = json.dumps({
"X_train": [[0], [1], [2], [3]],
"y_train": [0, 0, 1, 1],
"X_test": [[0.5], [2.5]]
})
stdout, stderr, code = run(["fit-predict"], data)
assert code == 0
result = json.loads(stdout)
assert result["predictions"][0] == 0
assert result["predictions"][1] == 1
class TestDtreeScore:
"""Test tree scoring."""
def test_score_perfect(self):
"""Test score = 1.0 for perfect predictions."""
data = json.dumps({
"X": [[0], [1], [2], [3]],
"y": [0, 0, 1, 1],
"tree": {
"feature": 0,
"threshold": 1.5,
"left": {"class": 0},
"right": {"class": 1}
}
})
stdout, stderr, code = run(["score"], data)
assert code == 0
result = json.loads(stdout)
assert result["accuracy"] == 1.0
class TestDtreeHelp:
"""Test help messages."""
def test_help(self):
"""Test --help flag."""
stdout, stderr, code = run(["--help"])
assert code == 0
assert "DecisionTree" in stdout or "tree" in stdout.lower()
def test_subcommand_help(self):
"""Test subcommand help."""
stdout, stderr, code = run(["fit", "--help"])
assert code == 0
class TestDtreeEdgeCases:
    """Error handling and degenerate inputs."""

    def test_invalid_json_fails(self):
        """Malformed stdin exits 1 rather than crashing with a traceback."""
        out, err, rc = run(["fit"], "not json")
        assert rc == 1

    def test_single_class(self):
        """All-identical labels still fit successfully."""
        payload = {"X": [[0], [1], [2]], "y": [0, 0, 0]}
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_dtree/test_dtree_tool.py (4300 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_dtree/test_dtree_tool.rs (7665 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_dtree/Cargo.toml (2 dependencies)
⏱️ Parse time: 50ms
📊 Throughput: 82.6 KB/s
⏱️ Total time: 51ms
| true
|
sklearn_dtree
| 150
| 6
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_sklearn_kfold
|
kfold_tool.py
|
#!/usr/bin/env python3
"""KFold CLI tool.
A CLI for sklearn-style K-fold cross-validation.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Pedregosa et al. (2011) sklearn model selection [1]
Usage:
echo '{"n_samples": 10, "n_splits": 5}' | python kfold_tool.py split
"""
import argparse
import json
import math
import sys
def kfold_split(n_samples: int, n_splits: int = 5) -> list[dict[str, list[int]]]:
    """Produce consecutive K-fold train/test index splits.

    The first ``n_samples % n_splits`` folds receive one extra test sample,
    mirroring sklearn's KFold sizing. Raises ValueError when there are more
    folds than samples.
    """
    if n_splits > n_samples:
        raise ValueError(f"n_splits ({n_splits}) > n_samples ({n_samples})")
    base, extra = divmod(n_samples, n_splits)
    all_idx = list(range(n_samples))
    splits = []
    start = 0
    for fold_no in range(n_splits):
        stop = start + base + (1 if fold_no < extra else 0)
        splits.append(
            {"train": all_idx[:start] + all_idx[stop:], "test": all_idx[start:stop]}
        )
        start = stop
    return splits
def linear_regression_fit(X: list[list[float]], y: list[float]) -> tuple[list[float], float]:
    """Fit simple ordinary-least-squares regression; returns ([coef...], intercept).

    Only the single-feature case is solved (closed-form least squares);
    multi-feature input falls back to all-zero coefficients, matching the
    original behavior. Degenerate (constant-x) data yields a flat line at
    the mean of y; empty input yields ([0.0], 0.0).
    """
    n = len(X)
    if n == 0:
        return [0.0], 0.0
    n_features = len(X[0])
    if n_features == 1:
        xs = [row[0] for row in X]
        sum_x = sum(xs)
        sum_y = sum(y)
        sum_xy = sum(x * yi for x, yi in zip(xs, y))
        sum_xx = sum(x * x for x in xs)
        denom = n * sum_xx - sum_x**2
        if abs(denom) < 1e-10:
            # All x values (nearly) identical: slope is undefined, fit a flat
            # line at the mean. n > 0 is guaranteed here (empty input returned
            # above), so the original's redundant n-check is dropped.
            return [0.0], sum_y / n
        coef = (n * sum_xy - sum_x * sum_y) / denom
        intercept = (sum_y - coef * sum_x) / n
        return [coef], intercept
    return [0.0] * n_features, 0.0
def linear_regression_predict(
    X: list[list[float]], coef: list[float], intercept: float
) -> list[float]:
    """Apply a fitted linear model row by row: y = intercept + x · coef."""
    preds = []
    for row in X:
        acc = intercept
        for j in range(len(coef)):
            acc += row[j] * coef[j]
        preds.append(acc)
    return preds
def r2_score(y_true: list[float], y_pred: list[float]) -> float:
    """Coefficient of determination: R² = 1 - SS_res / SS_tot."""
    n = len(y_true)
    if n == 0:
        return 0.0
    mean = sum(y_true) / n
    ss_res = 0.0
    ss_tot = 0.0
    for i in range(n):
        ss_res += (y_true[i] - y_pred[i]) ** 2
        ss_tot += (y_true[i] - mean) ** 2
    if ss_tot == 0:
        # Constant target: perfect score only when residuals are exactly zero.
        return 1.0 if ss_res == 0 else 0.0
    return 1.0 - ss_res / ss_tot
def cross_val_score(
    X: list[list[float]], y: list[float], model: str, n_splits: int = 5
) -> list[float]:
    """Run K-fold cross-validation and return one R² score per fold.

    Only "linear_regression" is recognized; any other model name scores 0.0
    for every fold.
    """
    scores = []
    for split in kfold_split(len(X), n_splits):
        train, test = split["train"], split["test"]
        if model == "linear_regression":
            coef, intercept = linear_regression_fit(
                [X[i] for i in train], [y[i] for i in train]
            )
            preds = linear_regression_predict([X[i] for i in test], coef, intercept)
            scores.append(r2_score([y[i] for i in test], preds))
        else:
            scores.append(0.0)
    return scores
def cmd_split(args: argparse.Namespace) -> None:
    """Handle the split subcommand: read JSON from stdin, emit fold indices."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "n_samples" not in payload:
        print("Error: Missing 'n_samples'", file=sys.stderr)
        sys.exit(1)
    try:
        result = kfold_split(payload["n_samples"], payload.get("n_splits", 5))
    except ValueError as e:
        # kfold_split rejects n_splits > n_samples.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    print(json.dumps({"folds": result}))
def cmd_cross_val_score(args: argparse.Namespace) -> None:
    """Handle cross-val-score: read JSON from stdin, emit scores plus mean/std."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    for field in ("X", "y", "model"):
        if field not in payload:
            print(f"Error: Missing '{field}'", file=sys.stderr)
            sys.exit(1)
    try:
        scores = cross_val_score(
            payload["X"], payload["y"], payload["model"], payload.get("n_splits", 5)
        )
        mean_score = sum(scores) / len(scores)
        # Population standard deviation across folds.
        variance = sum((s - mean_score) ** 2 for s in scores) / len(scores)
        print(
            json.dumps(
                {"scores": scores, "mean_score": mean_score, "std_score": math.sqrt(variance)}
            )
        )
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
def main() -> None:
    """CLI entry point: build the parser and dispatch to the chosen handler."""
    parser = argparse.ArgumentParser(
        description="KFold CLI - cross-validation splitting (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    split_p = subparsers.add_parser("split", help="Generate K-fold splits")
    split_p.set_defaults(func=cmd_split)
    cv_p = subparsers.add_parser("cross-val-score", help="Cross-validation scoring")
    cv_p.set_defaults(func=cmd_cross_val_score)
    args = parser.parse_args()
    if args.command is None:
        # No subcommand given: print usage and exit successfully.
        parser.print_help()
        sys.exit(0)
    args.func(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kfold/kfold_tool.py (5463 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kfold/kfold_tool.rs (14781 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kfold/Cargo.toml (3 dependencies)
⏱️ Parse time: 57ms
📊 Throughput: 93.3 KB/s
⏱️ Total time: 57ms
| true
|
sklearn_kfold
| 182
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_sklearn_kfold
|
test_kfold_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn KFold cross-validation CLI.
Academic Reference: Pedregosa et al. (2011) sklearn model selection [1]
Tests K-fold cross-validation splitting and scoring.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "kfold_tool.py"
def run(args, input_data=None):
    """Invoke the CLI under python3 and return (stdout, stderr, returncode)."""
    proc = subprocess.run(
        ["python3", str(SCRIPT), *args],
        capture_output=True,
        text=True,
        input=input_data,
    )
    return proc.stdout, proc.stderr, proc.returncode
class TestKfoldSplit:
    """K-fold split generation."""

    def test_split_5fold(self):
        """10 samples over 5 folds: every fold has 2 test / 8 train indices."""
        out, err, rc = run(["split"], json.dumps({"n_samples": 10, "n_splits": 5}))
        assert rc == 0
        reply = json.loads(out)
        assert "folds" in reply
        assert len(reply["folds"]) == 5
        for fold in reply["folds"]:
            assert len(fold["test"]) == 2
            assert len(fold["train"]) == 8

    def test_split_default_5fold(self):
        """Omitting n_splits falls back to the default of 5."""
        out, err, rc = run(["split"], json.dumps({"n_samples": 10}))
        assert rc == 0
        assert len(json.loads(out)["folds"]) == 5

    def test_split_no_overlap(self):
        """Test sets partition the index range: each index appears exactly once."""
        out, err, rc = run(["split"], json.dumps({"n_samples": 6, "n_splits": 3}))
        assert rc == 0
        collected = []
        for fold in json.loads(out)["folds"]:
            collected.extend(fold["test"])
        assert sorted(collected) == list(range(6))
class TestKfoldCrossValScore:
    """Cross-validation scoring."""

    def test_cross_val_score_linreg(self):
        """Perfectly linear data (y = 2x) scores near-perfect R² on every fold."""
        payload = {
            "X": [[v] for v in range(1, 11)],
            "y": [2 * v for v in range(1, 11)],
            "model": "linear_regression",
            "n_splits": 5,
        }
        out, err, rc = run(["cross-val-score"], json.dumps(payload))
        assert rc == 0
        reply = json.loads(out)
        assert "scores" in reply
        assert len(reply["scores"]) == 5
        assert reply["mean_score"] > 0.9

    def test_cross_val_score_returns_mean_std(self):
        """Both aggregate statistics are present in the reply."""
        payload = {
            "X": [[v] for v in range(1, 7)],
            "y": list(range(1, 7)),
            "model": "linear_regression",
            "n_splits": 3,
        }
        out, err, rc = run(["cross-val-score"], json.dumps(payload))
        assert rc == 0
        reply = json.loads(out)
        assert "mean_score" in reply
        assert "std_score" in reply
class TestKfoldHelp:
    """Help output."""

    def test_help(self):
        """--help exits 0 and mentions K-fold."""
        out, err, rc = run(["--help"])
        assert rc == 0
        assert "KFold" in out or "fold" in out.lower()

    def test_subcommand_help(self):
        """Subcommand help exits 0."""
        out, err, rc = run(["split", "--help"])
        assert rc == 0
class TestKfoldEdgeCases:
    """Error handling."""

    def test_invalid_json_fails(self):
        """Malformed stdin exits 1."""
        out, err, rc = run(["split"], "not json")
        assert rc == 1

    def test_k_greater_than_n_fails(self):
        """Requesting more folds than samples is rejected with exit 1."""
        out, err, rc = run(["split"], json.dumps({"n_samples": 3, "n_splits": 5}))
        assert rc == 1
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kfold/test_kfold_tool.py (4043 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kfold/test_kfold_tool.rs (6317 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kfold/Cargo.toml (2 dependencies)
⏱️ Parse time: 50ms
📊 Throughput: 77.8 KB/s
⏱️ Total time: 50ms
| true
|
sklearn_kfold
| 133
| 6
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_sklearn_kmeans
|
kmeans_flat.py
|
#!/usr/bin/env python3
"""KMeans CLI - flat structure for depyler compatibility.
Simple 1D k-means with 4 fixed data points for demonstration.
"""
import argparse
def main():
    """Main entry point.

    Runs fixed-size 1-D k-means (4 data points, 2 centroids) using only
    scalar variables: the data/centroid/label "arrays" are deliberately
    unrolled into x0..x3 / c0..c1 / l0..l3 so the code transpiles without
    list support (see module docstring). --mode cluster prints the final
    labels; --mode centroid prints the final centroid positions.
    """
    parser = argparse.ArgumentParser(description="KMeans CLI")
    parser.add_argument(
        "--mode", type=str, required=True, choices=["cluster", "centroid"], help="Mode"
    )
    # 4 data points
    parser.add_argument("--x0", type=float, default=0.0, help="Data point 0")
    parser.add_argument("--x1", type=float, default=1.0, help="Data point 1")
    parser.add_argument("--x2", type=float, default=5.0, help="Data point 2")
    parser.add_argument("--x3", type=float, default=6.0, help="Data point 3")
    # 2 initial centroids
    parser.add_argument("--c0", type=float, default=0.5, help="Centroid 0")
    parser.add_argument("--c1", type=float, default=5.5, help="Centroid 1")
    # Iterations
    # NOTE(review): float (not int) — presumably to keep every CLI value the
    # same type for the transpiler; the loop below counts with floats too.
    parser.add_argument("--iters", type=float, default=3.0, help="Number of iterations")
    args = parser.parse_args()
    x0 = args.x0
    x1 = args.x1
    x2 = args.x2
    x3 = args.x3
    c0 = args.c0
    c1 = args.c1
    iters = args.iters
    # Initialize labels
    # Labels are floats as well: 0.0 = cluster 0, 1.0 = cluster 1.
    l0 = 0.0
    l1 = 0.0
    l2 = 0.0
    l3 = 0.0
    # K-means iterations
    iter_count = 0.0
    while iter_count < iters:
        # Assign labels (0 or 1) based on closest centroid
        # abs() via conditional
        d0_c0 = x0 - c0
        if d0_c0 < 0.0:
            d0_c0 = 0.0 - d0_c0
        d0_c1 = x0 - c1
        if d0_c1 < 0.0:
            d0_c1 = 0.0 - d0_c1
        if d0_c0 < d0_c1:
            l0 = 0.0
        else:
            l0 = 1.0
        d1_c0 = x1 - c0
        if d1_c0 < 0.0:
            d1_c0 = 0.0 - d1_c0
        d1_c1 = x1 - c1
        if d1_c1 < 0.0:
            d1_c1 = 0.0 - d1_c1
        if d1_c0 < d1_c1:
            l1 = 0.0
        else:
            l1 = 1.0
        d2_c0 = x2 - c0
        if d2_c0 < 0.0:
            d2_c0 = 0.0 - d2_c0
        d2_c1 = x2 - c1
        if d2_c1 < 0.0:
            d2_c1 = 0.0 - d2_c1
        if d2_c0 < d2_c1:
            l2 = 0.0
        else:
            l2 = 1.0
        d3_c0 = x3 - c0
        if d3_c0 < 0.0:
            d3_c0 = 0.0 - d3_c0
        d3_c1 = x3 - c1
        if d3_c1 < 0.0:
            d3_c1 = 0.0 - d3_c1
        if d3_c0 < d3_c1:
            l3 = 0.0
        else:
            l3 = 1.0
        # Update centroids
        # Each centroid moves to the mean of the points currently assigned to it.
        sum0 = 0.0
        count0 = 0.0
        sum1 = 0.0
        count1 = 0.0
        if l0 < 0.5:
            sum0 = sum0 + x0
            count0 = count0 + 1.0
        else:
            sum1 = sum1 + x0
            count1 = count1 + 1.0
        if l1 < 0.5:
            sum0 = sum0 + x1
            count0 = count0 + 1.0
        else:
            sum1 = sum1 + x1
            count1 = count1 + 1.0
        if l2 < 0.5:
            sum0 = sum0 + x2
            count0 = count0 + 1.0
        else:
            sum1 = sum1 + x2
            count1 = count1 + 1.0
        if l3 < 0.5:
            sum0 = sum0 + x3
            count0 = count0 + 1.0
        else:
            sum1 = sum1 + x3
            count1 = count1 + 1.0
        # An empty cluster keeps its previous centroid (avoids divide-by-zero).
        if count0 > 0.0:
            c0 = sum0 / count0
        if count1 > 0.0:
            c1 = sum1 / count1
        iter_count = iter_count + 1.0
    if args.mode == "cluster":
        # Output final labels
        print(f"l0={l0} l1={l1} l2={l2} l3={l3}")
    elif args.mode == "centroid":
        # Output final centroids
        print(f"c0={c0} c1={c1}")
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/kmeans_flat.py (3540 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/kmeans_flat.rs (4310 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/Cargo.toml (1 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 72.2 KB/s
⏱️ Total time: 48ms
| true
|
sklearn_kmeans
| 142
| 6
|
[
"context_manager"
] | 0.652
| null |
example_sklearn_kmeans
|
kmeans_tool.py
|
#!/usr/bin/env python3
"""KMeans CLI tool.
A CLI for sklearn-style KMeans clustering with fit/predict/labels pattern.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Lloyd (1982) Least Squares Quantization in PCM [9]
Usage:
echo '{"X": [[1,1], [2,2], [8,8], [9,9]], "n_clusters": 2}' | python kmeans_tool.py fit
"""
import argparse
import json
import math
import random
import sys
def euclidean_distance(a: list[float], b: list[float]) -> float:
    """Return the Euclidean (L2) distance between points *a* and *b*."""
    squared = 0.0
    for i in range(len(a)):
        diff = a[i] - b[i]
        squared += diff * diff
    return math.sqrt(squared)
def assign_labels(X: list[list[float]], centroids: list[list[float]]) -> list[int]:
    """Label each point with the index of its nearest centroid (ties go to the first)."""
    result = []
    for point in X:
        best = 0
        best_dist = float("inf")
        for idx, centroid in enumerate(centroids):
            d = euclidean_distance(point, centroid)
            if d < best_dist:
                best_dist = d
                best = idx
        result.append(best)
    return result
def update_centroids(
    X: list[list[float]], labels: list[int], n_clusters: int, n_features: int
) -> list[list[float]]:
    """Recompute each centroid as the mean of its assigned points.

    A cluster with no assigned points keeps an all-zero centroid, since there
    is nothing to average.
    """
    sums = [[0.0] * n_features for _ in range(n_clusters)]
    counts = [0] * n_clusters
    for i, point in enumerate(X):
        cluster = labels[i]
        counts[cluster] += 1
        for j in range(n_features):
            sums[cluster][j] += point[j]
    for c in range(n_clusters):
        if counts[c] > 0:
            sums[c] = [total / counts[c] for total in sums[c]]
    return sums
def compute_inertia(X: list[list[float]], labels: list[int], centroids: list[list[float]]) -> float:
    """Within-cluster sum of squared distances (sklearn's ``inertia_``)."""
    total = 0.0
    for i, point in enumerate(X):
        total += euclidean_distance(point, centroids[labels[i]]) ** 2
    return total
def fit(
    X: list[list[float]],
    n_clusters: int = 2,
    max_iter: int = 300,
    random_state: int = None,
) -> dict:
    """Fit KMeans using Lloyd's algorithm.

    Alternates label assignment and centroid updates until the labels stop
    changing or max_iter is reached.

    Args:
        X: Data points (n_samples, n_features)
        n_clusters: Number of clusters
        max_iter: Maximum iterations
        random_state: Random seed for reproducibility; None leaves the global
            RNG unseeded, so initialization is nondeterministic

    Returns:
        dict with "labels" (per-sample cluster index), "centroids"
        (n_clusters x n_features), and "inertia" (within-cluster SSE).

    Raises:
        ValueError: if X is empty or n_clusters exceeds the sample count.
    """
    if len(X) == 0:
        raise ValueError("Empty input data")
    n_samples = len(X)
    n_features = len(X[0])
    if n_clusters > n_samples:
        raise ValueError(
            f"n_clusters ({n_clusters}) cannot be greater than n_samples ({n_samples})"
        )
    # Set random seed
    if random_state is not None:
        random.seed(random_state)
    # Initialize centroids using random points from X
    # (sampled without replacement; the [:] copy keeps updates off the data)
    indices = random.sample(range(n_samples), n_clusters)
    centroids = [X[i][:] for i in indices]
    labels = []
    for _ in range(max_iter):
        # Assign labels
        new_labels = assign_labels(X, centroids)
        # Check for convergence
        if labels == new_labels:
            break
        labels = new_labels
        # Update centroids
        centroids = update_centroids(X, labels, n_clusters, n_features)
    inertia = compute_inertia(X, labels, centroids)
    return {"labels": labels, "centroids": centroids, "inertia": inertia}
def predict(X: list[list[float]], centroids: list[list[float]]) -> list[int]:
    """Predict cluster labels for new data.

    Thin wrapper over assign_labels: each row of X gets the index of its
    nearest fitted centroid.
    """
    return assign_labels(X, centroids)
def cmd_fit(args: argparse.Namespace) -> None:
    """Handle the fit subcommand: JSON on stdin -> labels/centroids/inertia JSON."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
        sys.exit(1)
    if "X" not in payload:
        print("Error: Missing required field 'X'", file=sys.stderr)
        sys.exit(1)
    try:
        fitted = fit(
            payload["X"],
            payload.get("n_clusters", 2),
            payload.get("max_iter", 300),
            payload.get("random_state", None),
        )
    except ValueError as e:
        # fit() rejects empty data and n_clusters > n_samples.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    print(json.dumps(fitted))
def cmd_predict(args: argparse.Namespace) -> None:
    """Handle the predict subcommand: JSON with X and centroids -> labels JSON."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
        sys.exit(1)
    for field in ("X", "centroids"):
        if field not in payload:
            print(f"Error: Missing required field '{field}'", file=sys.stderr)
            sys.exit(1)
    print(json.dumps({"labels": predict(payload["X"], payload["centroids"])}))
def main() -> None:
    """CLI entry point: parse arguments and dispatch to fit/predict handlers."""
    parser = argparse.ArgumentParser(
        description="KMeans CLI - clustering with Lloyd's algorithm (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    subparsers.add_parser("fit", help="Fit KMeans model").set_defaults(func=cmd_fit)
    subparsers.add_parser("predict", help="Predict cluster labels").set_defaults(
        func=cmd_predict
    )
    args = parser.parse_args()
    if args.command is None:
        # No subcommand: print usage and exit successfully.
        parser.print_help()
        sys.exit(0)
    args.func(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/kmeans_tool.py (5537 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/kmeans_tool.rs (11628 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/Cargo.toml (4 dependencies)
⏱️ Parse time: 54ms
📊 Throughput: 98.9 KB/s
⏱️ Total time: 54ms
| true
|
sklearn_kmeans
| 193
| 6
|
[
"context_manager",
"exception_handling",
"stdin_usage"
] | 0.652
| null |
example_sklearn_kmeans
|
test_kmeans_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn KMeans CLI.
Academic Reference: Lloyd (1982) Least Squares Quantization in PCM [9]
Tests the fit/predict/labels pattern for K-means clustering.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "kmeans_tool.py"
def run(args, input_data=None):
    """Invoke the CLI under python3 and return (stdout, stderr, returncode)."""
    proc = subprocess.run(
        ["python3", str(SCRIPT), *args],
        capture_output=True,
        text=True,
        input=input_data,
    )
    return proc.stdout, proc.stderr, proc.returncode
class TestKmeansFit:
    """Model fitting."""

    def test_fit_two_clusters(self):
        """Two well-separated blobs get two internally-consistent labelings."""
        payload = {
            "X": [[1, 1], [1, 2], [2, 1], [2, 2],
                  [8, 8], [8, 9], [9, 8], [9, 9]],
            "n_clusters": 2,
            "random_state": 42,
        }
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
        reply = json.loads(out)
        assert "labels" in reply
        assert "centroids" in reply
        assert len(reply["labels"]) == 8
        assert len(reply["centroids"]) == 2
        # First 4 points share a label, last 4 share the other one.
        assert len(set(reply["labels"][:4])) == 1
        assert len(set(reply["labels"][4:])) == 1
        assert reply["labels"][0] != reply["labels"][4]

    def test_fit_three_clusters(self):
        """Three blobs produce three centroids and nine labels."""
        payload = {
            "X": [[0, 0], [0, 1], [1, 0],
                  [5, 5], [5, 6], [6, 5],
                  [10, 0], [10, 1], [11, 0]],
            "n_clusters": 3,
            "random_state": 42,
        }
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
        reply = json.loads(out)
        assert len(reply["centroids"]) == 3
        assert len(reply["labels"]) == 9

    def test_fit_default_clusters(self):
        """With no n_clusters supplied the tool defaults to 2."""
        payload = {"X": [[1, 1], [2, 2], [8, 8], [9, 9]]}
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
        assert len(json.loads(out)["centroids"]) == 2

    def test_fit_empty_data_fails(self):
        """Empty X is rejected with exit 1."""
        out, err, rc = run(["fit"], json.dumps({"X": [], "n_clusters": 2}))
        assert rc == 1
class TestKmeansPredict:
    """Label prediction for new data."""

    def test_predict_with_centroids(self):
        """Each point is assigned to its nearest centroid."""
        payload = {"X": [[0, 0], [10, 10]], "centroids": [[1, 1], [9, 9]]}
        out, err, rc = run(["predict"], json.dumps(payload))
        assert rc == 0
        reply = json.loads(out)
        assert "labels" in reply
        # [0,0] is nearest [1,1] and [10,10] nearest [9,9] — different clusters.
        assert reply["labels"][0] != reply["labels"][1]

    def test_predict_boundary_case(self):
        """A point equidistant from both centroids may go to either cluster."""
        payload = {"X": [[5, 5]], "centroids": [[0, 0], [10, 10]]}
        out, err, rc = run(["predict"], json.dumps(payload))
        assert rc == 0
        reply = json.loads(out)
        assert len(reply["labels"]) == 1
        assert reply["labels"][0] in [0, 1]
class TestKmeansCentroids:
    """Centroid placement."""

    def test_centroids_are_means(self):
        """Final centroids land on the per-cluster means of clean clusters."""
        payload = {
            "X": [[0, 0], [2, 0], [0, 2], [2, 2],
                  [10, 10], [12, 10], [10, 12], [12, 12]],
            "n_clusters": 2,
            "random_state": 42,
        }
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
        centers = json.loads(out)["centroids"]
        # Order of clusters is arbitrary; sort by x to compare deterministically.
        centers.sort(key=lambda point: point[0])
        assert abs(centers[0][0] - 1.0) < 0.5
        assert abs(centers[0][1] - 1.0) < 0.5
        assert abs(centers[1][0] - 11.0) < 0.5
        assert abs(centers[1][1] - 11.0) < 0.5
class TestKmeansInertia:
    """Within-cluster sum of squares."""

    def test_inertia_returned(self):
        """The fit reply always carries a non-negative inertia."""
        payload = {
            "X": [[0, 0], [1, 0], [10, 0], [11, 0]],
            "n_clusters": 2,
            "random_state": 42,
        }
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
        reply = json.loads(out)
        assert "inertia" in reply
        assert reply["inertia"] >= 0

    def test_inertia_zero_perfect_fit(self):
        """Duplicated points sitting exactly on the centroids give ~zero inertia."""
        payload = {
            "X": [[0, 0], [0, 0], [10, 10], [10, 10]],
            "n_clusters": 2,
            "random_state": 42,
        }
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
        assert json.loads(out)["inertia"] < 0.01
class TestKmeansHelp:
    """Help and usage output."""

    def test_help(self):
        """--help exits 0 and mentions the tool and its fit subcommand."""
        out, err, rc = run(["--help"])
        assert rc == 0
        assert "KMeans" in out or "kmeans" in out.lower()
        assert "fit" in out

    def test_subcommand_help(self):
        """Subcommand help exits 0."""
        out, err, rc = run(["fit", "--help"])
        assert rc == 0
class TestKmeansEdgeCases:
    """Error handling and degenerate inputs."""

    def test_invalid_json_fails(self):
        """Malformed stdin exits 1 rather than crashing."""
        out, err, rc = run(["fit"], "not valid json")
        assert rc == 1

    def test_k_greater_than_n_fails(self):
        """Requesting more clusters than samples is rejected."""
        payload = {"X": [[1, 1], [2, 2]], "n_clusters": 5, "random_state": 42}
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 1

    def test_single_point(self):
        """One point and one cluster: the point gets label 0."""
        payload = {"X": [[1, 1]], "n_clusters": 1, "random_state": 42}
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0
        assert json.loads(out)["labels"] == [0]
class TestKmeansParameters:
    """Algorithm parameters."""

    def test_max_iter(self):
        """A custom max_iter is accepted."""
        payload = {
            "X": [[0, 0], [1, 0], [10, 0], [11, 0]],
            "n_clusters": 2,
            "max_iter": 100,
        }
        out, err, rc = run(["fit"], json.dumps(payload))
        assert rc == 0

    def test_random_state(self):
        """The same random_state yields identical labels across two runs."""
        payload = json.dumps({
            "X": [[0, 0], [1, 0], [10, 0], [11, 0]],
            "n_clusters": 2,
            "random_state": 42,
        })
        first, _, rc1 = run(["fit"], payload)
        second, _, rc2 = run(["fit"], payload)
        assert rc1 == 0 and rc2 == 0
        assert json.loads(first)["labels"] == json.loads(second)["labels"]
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/test_kmeans_tool.py (7701 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/test_kmeans_tool.rs (10751 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_kmeans/Cargo.toml (2 dependencies)
⏱️ Parse time: 54ms
📊 Throughput: 138.9 KB/s
⏱️ Total time: 54ms
| true
|
sklearn_kmeans
| 236
| 6
|
[
"lambda",
"context_manager",
"class_definition"
] | 0.783
| null |
example_sklearn_linreg
|
linreg_cli.py
|
#!/usr/bin/env python3
"""LinearRegression CLI - CLI args only (no stdin JSON).
Uses pure CLI arguments to avoid serde_json::Value type coercion issues.
"""
import argparse
def compute_linreg(x1: float, y1: float, x2: float, y2: float) -> str:
    """Fit a line through two points; returns "coef=<m> intercept=<b>"."""
    if x1 == x2:
        # Vertical line: the slope is undefined; report a flat line through y1.
        return f"coef=0.0 intercept={y1}"
    slope = (y2 - y1) / (x2 - x1)
    return f"coef={slope} intercept={y1 - slope * x1}"
def predict_value(x: float, coef: float, intercept: float) -> str:
    """Evaluate y = coef * x + intercept; returns "prediction=<y>"."""
    return f"prediction={coef * x + intercept}"
def main():
    """CLI entry point: fit a line from two points, or predict from coefficients."""
    parser = argparse.ArgumentParser(description="LinearRegression CLI")
    subparsers = parser.add_subparsers(dest="command", help="Available commands", required=True)
    fit_p = subparsers.add_parser("fit", help="Fit linear regression from two points")
    for flag, text in (
        ("--x1", "First x value"),
        ("--y1", "First y value"),
        ("--x2", "Second x value"),
        ("--y2", "Second y value"),
    ):
        fit_p.add_argument(flag, type=float, required=True, help=text)
    pred_p = subparsers.add_parser("predict", help="Predict using coefficients")
    pred_p.add_argument("--x", type=float, required=True, help="X value to predict")
    pred_p.add_argument("--coef", type=float, required=True, help="Coefficient")
    pred_p.add_argument("--intercept", type=float, required=True, help="Intercept")
    args = parser.parse_args()
    if args.command == "fit":
        print(compute_linreg(args.x1, args.y1, args.x2, args.y2))
    elif args.command == "predict":
        print(predict_value(args.x, args.coef, args.intercept))
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_cli.py (2006 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_cli.rs (3240 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 40.0 KB/s
⏱️ Total time: 49ms
| true
|
sklearn_linreg
| 55
| 6
|
[
"stdin_usage"
] | 0.566
| null |
example_sklearn_linreg
|
linreg_flat.py
|
#!/usr/bin/env python3
"""LinearRegression CLI - flat structure (no subcommands).
Uses the same pattern as example_simple/trivial_cli.py for depyler compatibility.
"""
import argparse
def main():
    """Main entry point.

    Single flat command with a --mode switch instead of subparsers (keeps the
    argparse surface simple for depyler transpilation, per the module
    docstring). fit: derive slope/intercept from two points; predict:
    evaluate y = coef * x + intercept.
    """
    parser = argparse.ArgumentParser(description="LinearRegression CLI")
    parser.add_argument("--mode", type=str, required=True, choices=["fit", "predict"], help="Mode")
    parser.add_argument("--x1", type=float, default=0.0, help="First x value")
    parser.add_argument("--y1", type=float, default=0.0, help="First y value")
    parser.add_argument("--x2", type=float, default=0.0, help="Second x value")
    parser.add_argument("--y2", type=float, default=0.0, help="Second y value")
    parser.add_argument("--x", type=float, default=0.0, help="X value to predict")
    parser.add_argument("--coef", type=float, default=0.0, help="Coefficient")
    parser.add_argument("--intercept", type=float, default=0.0, help="Intercept")
    args = parser.parse_args()
    if args.mode == "fit":
        x1 = args.x1
        y1 = args.y1
        x2 = args.x2
        y2 = args.y2
        if x1 == x2:
            # Vertical line: slope is undefined; fall back to a flat line at y1.
            coef = 0.0
            intercept = y1
        else:
            coef = (y2 - y1) / (x2 - x1)
            intercept = y1 - coef * x1
        print(f"coef={coef} intercept={intercept}")
    elif args.mode == "predict":
        x = args.x
        coef = args.coef
        intercept = args.intercept
        y = coef * x + intercept
        print(f"prediction={y}")
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_flat.py (1542 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_flat.rs (2417 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/Cargo.toml (1 dependencies)
⏱️ Parse time: 51ms
📊 Throughput: 29.2 KB/s
⏱️ Total time: 51ms
| true
|
sklearn_linreg
| 45
| 6
|
[] | 0
| null |
example_sklearn_linreg
|
linreg_simple.py
|
#!/usr/bin/env python3
"""LinearRegression CLI - simplified for depyler compatibility."""
import argparse
import json
import sys
def fit_simple(x_vals: list, y_vals: list) -> dict:
    """Simple linear regression y = mx + b using least squares.

    Closed-form single-feature OLS over the parallel lists x_vals/y_vals.
    Returns {"coef": m, "intercept": b}; degenerate inputs fall back to a
    horizontal line (empty input -> zeros, constant x -> mean of y).

    NOTE: index-based while loops are used instead of comprehensions on
    purpose — this file targets the depyler transpiler (see module docstring).
    """
    n = len(x_vals)
    if n == 0:
        return {"coef": 0.0, "intercept": 0.0}
    sum_x = 0.0
    sum_y = 0.0
    sum_xy = 0.0
    sum_xx = 0.0
    i = 0
    while i < n:
        sum_x = sum_x + x_vals[i]
        sum_y = sum_y + y_vals[i]
        sum_xy = sum_xy + x_vals[i] * y_vals[i]
        sum_xx = sum_xx + x_vals[i] * x_vals[i]
        i = i + 1
    # denom is proportional to the variance of x; zero when all x are identical.
    denom = n * sum_xx - sum_x * sum_x
    if denom == 0:
        return {"coef": 0.0, "intercept": sum_y / n}
    coef = (n * sum_xy - sum_x * sum_y) / denom
    intercept = (sum_y - coef * sum_x) / n
    return {"coef": coef, "intercept": intercept}
def predict_simple(x_vals: list, coef: float, intercept: float) -> list:
    """Predict y = coef * x + intercept.

    Returns one prediction per element of x_vals. The while-loop form is kept
    for depyler transpilation (see module docstring).
    """
    result = []
    i = 0
    while i < len(x_vals):
        pred = coef * x_vals[i] + intercept
        result.append(pred)
        i = i + 1
    return result
def main():
    """Main entry point.

    Reads a JSON object on stdin; 'fit' expects {"x": [...], "y": [...]},
    'predict' expects {"x": [...], "coef": m, "intercept": b}. The result is
    printed to stdout as JSON.
    """
    parser = argparse.ArgumentParser(description="LinearRegression CLI")
    parser.add_argument("command", choices=["fit", "predict"], help="Command")
    args = parser.parse_args()
    data = json.load(sys.stdin)
    if args.command == "fit":
        result = fit_simple(data["x"], data["y"])
        print(json.dumps(result))
    elif args.command == "predict":
        preds = predict_simple(data["x"], data["coef"], data["intercept"])
        print(json.dumps({"predictions": preds}))
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_simple.py (1719 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_simple.rs (5033 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/Cargo.toml (3 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 34.5 KB/s
⏱️ Total time: 48ms
| true
|
sklearn_linreg
| 66
| 6
|
[
"stdin_usage"
] | 0.566
| null |
example_sklearn_linreg
|
linreg_tool.py
|
#!/usr/bin/env python3
"""LinearRegression CLI tool.
A CLI for sklearn LinearRegression with fit/predict/score pattern.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Pedregosa et al. (2011) sklearn API design [1]
Usage:
echo '{"X": [[1], [2], [3]], "y": [2, 4, 6]}' | python linreg_tool.py fit
echo '{"X": [[4]], "coef": [2.0], "intercept": 0.0}' | python linreg_tool.py predict
"""
import argparse
import json
import sys
def fit(X: list[list[float]], y: list[float]) -> dict:
    """Fit linear regression using ordinary least squares.

    Uses the normal equation beta = (X^T X)^-1 X^T y for every feature
    count, solved by Gaussian elimination with partial pivoting.

    Args:
        X: Feature matrix (n_samples x n_features).
        y: Targets, one per sample.

    Returns:
        dict with "coef" (length n_features) and "intercept".

    Raises:
        ValueError: on empty input, mismatched lengths, or a (near-)singular
            normal-equation matrix (propagated from solve_linear_system).
    """
    if len(X) == 0 or len(y) == 0:
        raise ValueError("Empty input data")
    if len(X) != len(y):
        raise ValueError(f"Dimension mismatch: X has {len(X)} samples, y has {len(y)}")
    n_samples = len(X)
    n_features = len(X[0])
    # Add bias column (column of 1s) to X
    X_with_bias = [[1.0] + row for row in X]
    # Compute X^T X
    XtX = [[0.0] * (n_features + 1) for _ in range(n_features + 1)]
    for i in range(n_features + 1):
        for j in range(n_features + 1):
            for k in range(n_samples):
                XtX[i][j] += X_with_bias[k][i] * X_with_bias[k][j]
    # Compute X^T y
    Xty = [0.0] * (n_features + 1)
    for i in range(n_features + 1):
        for k in range(n_samples):
            Xty[i] += X_with_bias[k][i] * y[k]
    # Solve using Gaussian elimination with partial pivoting
    # (index 0 of the solution corresponds to the bias column, hence the split).
    coeffs = solve_linear_system(XtX, Xty)
    intercept = coeffs[0]
    coef = coeffs[1:]
    return {"coef": coef, "intercept": intercept}
def solve_linear_system(A: list[list[float]], b: list[float]) -> list[float]:
    """Solve Ax = b via Gaussian elimination with partial pivoting.

    Raises ValueError when a pivot is (nearly) zero, i.e. the matrix is
    singular or badly conditioned.
    """
    n = len(A)
    # Build the augmented matrix [A | b] without mutating the caller's data.
    m = [list(A[i]) + [b[i]] for i in range(n)]
    for col in range(n):
        # Partial pivoting: move the largest-magnitude entry onto the diagonal.
        pivot_row = max(range(col, n), key=lambda r: abs(m[r][col]))
        m[col], m[pivot_row] = m[pivot_row], m[col]
        if abs(m[col][col]) < 1e-12:
            raise ValueError("Matrix is singular or nearly singular")
        # Eliminate everything below the pivot.
        for row in range(col + 1, n):
            ratio = m[row][col] / m[col][col]
            for j in range(col, n + 1):
                m[row][j] -= ratio * m[col][j]
    # Back substitution, bottom row upward.
    x = [0.0] * n
    for i in reversed(range(n)):
        acc = m[i][n]
        for j in range(i + 1, n):
            acc -= m[i][j] * x[j]
        x[i] = acc / m[i][i]
    return x
def predict(X: list[list[float]], coef: list[float], intercept: float) -> list[float]:
    """Predict with the linear model: y_hat[i] = X[i] . coef + intercept."""
    results: list[float] = []
    for sample in X:
        value = intercept
        for idx, feature in enumerate(sample):
            value += feature * coef[idx]
        results.append(value)
    return results
def score(X: list[list[float]], y: list[float], coef: list[float], intercept: float) -> float:
    """Coefficient of determination R^2 = 1 - SS_res / SS_tot.

    SS_res is the sum of squared residuals against the model's predictions;
    SS_tot is the total variance of y around its mean. A constant target
    (SS_tot == 0) scores 1.0 for a perfect fit and 0.0 otherwise.
    """
    preds = predict(X, coef, intercept)
    mean_y = sum(y) / len(y)
    ss_res = sum((y[i] - preds[i]) ** 2 for i in range(len(y)))
    ss_tot = sum((y[i] - mean_y) ** 2 for i in range(len(y)))
    if ss_tot == 0:
        # Degenerate constant target: only a zero-residual fit is "perfect".
        return 1.0 if ss_res == 0 else 0.0
    return 1.0 - ss_res / ss_tot
def cmd_fit(args: argparse.Namespace) -> None:
    """Handle fit subcommand: read {"X", "y"} JSON from stdin, print the model."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    # Both fields are mandatory; report the first one missing.
    for key in ("X", "y"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    try:
        print(json.dumps(fit(payload["X"], payload["y"])))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def cmd_predict(args: argparse.Namespace) -> None:
    """Handle predict subcommand: apply supplied coef/intercept to X from stdin."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "coef", "intercept"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    preds = predict(payload["X"], payload["coef"], payload["intercept"])
    print(json.dumps({"predictions": preds}))
def cmd_score(args: argparse.Namespace) -> None:
    """Handle score subcommand: print R^2 of a supplied model on (X, y)."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "y", "coef", "intercept"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    r2 = score(payload["X"], payload["y"], payload["coef"], payload["intercept"])
    print(json.dumps({"r2": r2}))
def cmd_fit_predict(args: argparse.Namespace) -> None:
    """Handle fit-predict subcommand: train on X_train/y_train, predict X_test."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X_train", "y_train", "X_test"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    try:
        model = fit(payload["X_train"], payload["y_train"])
        preds = predict(payload["X_test"], model["coef"], model["intercept"])
        # Key order matters for the emitted JSON: coef, intercept, predictions.
        print(json.dumps({
            "coef": model["coef"],
            "intercept": model["intercept"],
            "predictions": preds,
        }))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def cmd_fit_score(args: argparse.Namespace) -> None:
    """Handle fit-score subcommand: train on (X, y) and report training R^2."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "y"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    try:
        model = fit(payload["X"], payload["y"])
        r2 = score(payload["X"], payload["y"], model["coef"], model["intercept"])
        print(json.dumps({"coef": model["coef"], "intercept": model["intercept"], "r2": r2}))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def main() -> None:
    """Main entry point: build the subcommand parser and dispatch."""
    parser = argparse.ArgumentParser(
        description="LinearRegression CLI - fit/predict/score pattern (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    # (name, help text, handler) triples for every supported subcommand.
    commands = [
        ("fit", "Fit linear regression model", cmd_fit),
        ("predict", "Predict using fitted model", cmd_predict),
        ("score", "Calculate R-squared score", cmd_score),
        ("fit-predict", "Fit model and predict on test data", cmd_fit_predict),
        ("fit-score", "Fit model and calculate R-squared", cmd_fit_score),
    ]
    for name, help_text, handler in commands:
        sub = subparsers.add_parser(name, help=help_text)
        sub.set_defaults(func=handler)
    args = parser.parse_args()
    if args.command is None:
        # No subcommand given: show usage and exit successfully.
        parser.print_help()
        sys.exit(0)
    args.func(args)


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_tool.py (8658 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/linreg_tool.rs (20919 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/Cargo.toml (3 dependencies)
⏱️ Parse time: 56ms
📊 Throughput: 150.1 KB/s
⏱️ Total time: 56ms
| true
|
sklearn_linreg
| 281
| 6
|
[
"context_manager",
"exception_handling",
"stdin_usage",
"decorator",
"functools"
] | 0.652
| null |
example_sklearn_linreg
|
test_linreg_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn LinearRegression CLI.
Academic Reference: Pedregosa et al. (2011) sklearn API design [1]
Tests the fit/predict/score pattern for linear regression.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "linreg_tool.py"
def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    # Launch the script under test as a subprocess, feeding stdin if given.
    proc = subprocess.run(
        ["python3", str(SCRIPT), *args],
        capture_output=True,
        text=True,
        input=input_data,
    )
    return proc.stdout, proc.stderr, proc.returncode
class TestLinregFit:
    """Test model fitting."""
    # Each case pipes a JSON payload to the CLI via run() and inspects stdout.
    def test_fit_simple_data(self):
        """Test fitting on simple linear data: y = 2x + 1."""
        data = json.dumps({
            "X": [[1], [2], [3], [4], [5]],
            "y": [3, 5, 7, 9, 11]
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "coef" in result
        assert "intercept" in result
        # y = 2x + 1, so coef ~= 2, intercept ~= 1
        assert abs(result["coef"][0] - 2.0) < 0.01
        assert abs(result["intercept"] - 1.0) < 0.01
    def test_fit_multivariate(self):
        """Test fitting on multivariate data: y = 1*x1 + 2*x2 + 3."""
        # Non-collinear data: x1 and x2 are independent
        data = json.dumps({
            "X": [[1, 2], [2, 1], [3, 4], [4, 3], [5, 6]],
            "y": [8, 7, 14, 13, 20]  # y = 1*x1 + 2*x2 + 3
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert len(result["coef"]) == 2
        # Coefficients should be approximately [1, 2]
        assert abs(result["coef"][0] - 1.0) < 0.1
        assert abs(result["coef"][1] - 2.0) < 0.1
    def test_fit_empty_data_fails(self):
        """Test that fitting on empty data fails."""
        data = json.dumps({"X": [], "y": []})
        stdout, stderr, code = run(["fit"], data)
        assert code == 1
        assert "empty" in stderr.lower() or "error" in stderr.lower()
class TestLinregPredict:
    """Test model prediction."""
    # Exercises the "predict" subcommand with externally supplied weights.
    def test_predict_simple(self):
        """Test prediction with provided coefficients."""
        data = json.dumps({
            "X": [[6], [7], [8]],
            "coef": [2.0],
            "intercept": 1.0
        })
        stdout, stderr, code = run(["predict"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "predictions" in result
        # y = 2x + 1: [13, 15, 17]
        assert abs(result["predictions"][0] - 13.0) < 0.01
        assert abs(result["predictions"][1] - 15.0) < 0.01
        assert abs(result["predictions"][2] - 17.0) < 0.01
    def test_predict_multivariate(self):
        """Test prediction with multivariate coefficients."""
        data = json.dumps({
            "X": [[1, 2], [3, 4]],
            "coef": [1.0, 2.0],
            "intercept": 0.5
        })
        stdout, stderr, code = run(["predict"], data)
        assert code == 0
        result = json.loads(stdout)
        # y = 1*x1 + 2*x2 + 0.5: [5.5, 11.5]
        assert abs(result["predictions"][0] - 5.5) < 0.01
        assert abs(result["predictions"][1] - 11.5) < 0.01
class TestLinregScore:
    """Test model scoring (R-squared)."""
    # R^2 = 1 - SS_res/SS_tot; 1.0 means the model explains all variance.
    def test_score_perfect_fit(self):
        """Test R-squared = 1.0 for perfect fit."""
        data = json.dumps({
            "X": [[1], [2], [3], [4], [5]],
            "y": [3, 5, 7, 9, 11],
            "coef": [2.0],
            "intercept": 1.0
        })
        stdout, stderr, code = run(["score"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "r2" in result
        assert abs(result["r2"] - 1.0) < 0.01
    def test_score_imperfect_fit(self):
        """Test R-squared < 1.0 for imperfect fit."""
        data = json.dumps({
            "X": [[1], [2], [3], [4], [5]],
            "y": [3, 5, 8, 9, 11],  # Not perfectly linear
            "coef": [2.0],
            "intercept": 1.0
        })
        stdout, stderr, code = run(["score"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["r2"] < 1.0
        assert result["r2"] > 0.8  # Should still be reasonable
class TestLinregFitPredict:
    """Test combined fit-predict pipeline."""
    # Trains on X_train/y_train, then predicts X_test in one CLI invocation.
    def test_fit_predict_pipeline(self):
        """Test fitting and predicting in one command."""
        data = json.dumps({
            "X_train": [[1], [2], [3], [4], [5]],
            "y_train": [3, 5, 7, 9, 11],
            "X_test": [[6], [7], [8]]
        })
        stdout, stderr, code = run(["fit-predict"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "predictions" in result
        assert "coef" in result
        assert "intercept" in result
        # Predictions for [6], [7], [8] with y = 2x + 1
        assert abs(result["predictions"][0] - 13.0) < 0.01
class TestLinregFitScore:
    """Test combined fit-score pipeline."""
    # Fits and reports R^2 on the same data (training-set goodness of fit).
    def test_fit_score_pipeline(self):
        """Test fitting and scoring in one command."""
        data = json.dumps({
            "X": [[1], [2], [3], [4], [5]],
            "y": [3, 5, 7, 9, 11]
        })
        stdout, stderr, code = run(["fit-score"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "r2" in result
        assert "coef" in result
        assert abs(result["r2"] - 1.0) < 0.01
class TestLinregHelp:
    """Test help and usage messages."""
    # argparse exits 0 on --help and prints usage text to stdout.
    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "LinearRegression" in stdout or "linear" in stdout.lower()
        assert "fit" in stdout
        assert "predict" in stdout
        assert "score" in stdout
    def test_subcommand_help(self):
        """Test subcommand help."""
        stdout, stderr, code = run(["fit", "--help"])
        assert code == 0
        assert "fit" in stdout.lower()
class TestLinregEdgeCases:
    """Test edge cases and error handling."""
    # Malformed input must exit 1 with a diagnostic on stderr, not a traceback.
    def test_invalid_json_fails(self):
        """Test that invalid JSON input fails gracefully."""
        stdout, stderr, code = run(["fit"], "not valid json")
        assert code == 1
        assert "json" in stderr.lower() or "error" in stderr.lower()
    def test_missing_required_field_fails(self):
        """Test that missing required fields fail gracefully."""
        data = json.dumps({"X": [[1], [2], [3]]})  # Missing y
        stdout, stderr, code = run(["fit"], data)
        assert code == 1
    def test_dimension_mismatch_fails(self):
        """Test that dimension mismatch fails gracefully."""
        data = json.dumps({
            "X": [[1], [2], [3]],
            "y": [1, 2]  # Wrong length
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 1
class TestLinregNumericalStability:
    """Test numerical stability with edge cases."""
    # The slope should be recovered regardless of the magnitude of the inputs.
    def test_large_values(self):
        """Test with large values."""
        data = json.dumps({
            "X": [[1e6], [2e6], [3e6]],
            "y": [2e6, 4e6, 6e6]
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["coef"][0] - 2.0) < 0.01
    def test_small_values(self):
        """Test with small values."""
        data = json.dumps({
            "X": [[1e-6], [2e-6], [3e-6]],
            "y": [2e-6, 4e-6, 6e-6]
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["coef"][0] - 2.0) < 0.01
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/test_linreg_tool.py (7750 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/test_linreg_tool.rs (11809 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_linreg/Cargo.toml (2 dependencies)
⏱️ Parse time: 53ms
📊 Throughput: 142.5 KB/s
⏱️ Total time: 53ms
| true
|
sklearn_linreg
| 237
| 6
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_sklearn_logreg
|
logreg_flat.py
|
#!/usr/bin/env python3
"""LogisticRegression CLI - flat structure for depyler compatibility.
Implements binary classification with sigmoid activation.
"""
import argparse
def main():
    """Main entry point.

    Modes:
      sigmoid  -- print sigmoid(--x)
      predict  -- print P(y=1) and the thresholded label for w*x + b

    The sigmoid is computed without math.exp (flat, transpiler-friendly
    style). Fix: e^(-z) is evaluated via a Maclaurin series on the reduced
    argument z/64 followed by six squarings, e^(-z) = (e^(-z/64))^64.
    The previous direct 10-term series diverged badly for |z| > ~4 and
    produced nonsense probabilities (even values outside [0, 1]).
    """
    parser = argparse.ArgumentParser(description="LogisticRegression CLI")
    parser.add_argument(
        "--mode", type=str, required=True, choices=["sigmoid", "predict"], help="Mode"
    )
    parser.add_argument("--x", type=float, default=0.0, help="Input value")
    parser.add_argument("--weight", type=float, default=1.0, help="Weight")
    parser.add_argument("--bias", type=float, default=0.0, help="Bias")
    parser.add_argument("--threshold", type=float, default=0.5, help="Classification threshold")
    args = parser.parse_args()
    if args.mode == "sigmoid":
        x = args.x
        neg_bound = 0.0 - 20.0
        # Clamp: sigmoid saturates to 0/1 well before |x| = 20.
        if x > 20.0:
            result = 1.0
        elif x < neg_bound:
            result = 0.0
        else:
            # Range reduction: e^(-x) = (e^(-x/64))^64.
            # |x/64| < 0.32, so 12 series terms reach ~machine precision.
            r = x / 64.0
            exp_neg = 1.0
            term = 1.0
            i = 1.0
            while i <= 12.0:
                term = term * (-r) / i
                exp_neg = exp_neg + term
                i = i + 1.0
            # Square six times: (e^(-x/64))^(2^6) = e^(-x).
            j = 1.0
            while j <= 6.0:
                exp_neg = exp_neg * exp_neg
                j = j + 1.0
            result = 1.0 / (1.0 + exp_neg)
        print(f"sigmoid={result}")
    elif args.mode == "predict":
        x = args.x
        w = args.weight
        b = args.bias
        threshold = args.threshold
        # Linear combination (logit).
        z = w * x + b
        neg_bound = 0.0 - 20.0
        # Same clamped, range-reduced sigmoid as above.
        if z > 20.0:
            prob = 1.0
        elif z < neg_bound:
            prob = 0.0
        else:
            r = z / 64.0
            exp_neg = 1.0
            term = 1.0
            i = 1.0
            while i <= 12.0:
                term = term * (-r) / i
                exp_neg = exp_neg + term
                i = i + 1.0
            j = 1.0
            while j <= 6.0:
                exp_neg = exp_neg * exp_neg
                j = j + 1.0
            prob = 1.0 / (1.0 + exp_neg)
        # Hard classification against the threshold.
        if prob >= threshold:
            label = 1
        else:
            label = 0
        print(f"prob={prob} label={label}")


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/logreg_flat.py (2175 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/logreg_flat.rs (3425 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/Cargo.toml (1 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 43.2 KB/s
⏱️ Total time: 49ms
| true
|
sklearn_logreg
| 74
| 6
|
[
"context_manager"
] | 0.652
| null |
example_sklearn_logreg
|
logreg_tool.py
|
#!/usr/bin/env python3
"""LogisticRegression CLI tool.
A CLI for sklearn-style LogisticRegression with fit/predict/score pattern.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Pedregosa et al. (2011) sklearn API design [1]
Usage:
echo '{"X": [[1], [2], [8], [9]], "y": [0, 0, 1, 1]}' | python logreg_tool.py fit
echo '{"X": [[5]], "coef": [1.0], "intercept": -5.0}' | python logreg_tool.py predict
"""
import argparse
import json
import math
import sys
def sigmoid(x: float) -> float:
    """Numerically stable logistic function 1 / (1 + e^-x).

    Branches on the sign of x so the exponential argument is never large
    and positive, avoiding overflow for very negative inputs.
    """
    if x < 0:
        z = math.exp(x)
        return z / (1.0 + z)
    return 1.0 / (1.0 + math.exp(-x))
def fit(
    X: list[list[float]],
    y: list[int],
    learning_rate: float = 0.1,
    max_iter: int = 1000,
) -> dict:
    """Fit logistic regression by batch gradient descent.

    Minimizes cross-entropy loss; weights start at zero and are updated
    with the mean gradient over the whole batch each iteration.

    Returns {"coef": [...], "intercept": float}. Raises ValueError on
    empty or mismatched input.
    """
    if not X or not y:
        raise ValueError("Empty input data")
    if len(X) != len(y):
        raise ValueError(f"Dimension mismatch: X has {len(X)} samples, y has {len(y)}")
    n = len(X)
    d = len(X[0])
    weights = [0.0] * d
    bias = 0.0
    for _ in range(max_iter):
        # Accumulate the batch gradient of the cross-entropy loss.
        dw = [0.0] * d
        db = 0.0
        for row, target in zip(X, y):
            z = bias
            for j in range(d):
                z += weights[j] * row[j]
            err = sigmoid(z) - target
            db += err
            for j in range(d):
                dw[j] += err * row[j]
        # One mean-gradient step.
        bias -= learning_rate * db / n
        for j in range(d):
            weights[j] -= learning_rate * dw[j] / n
    return {"coef": weights, "intercept": bias}
def predict_proba(X: list[list[float]], coef: list[float], intercept: float) -> list[float]:
    """Return P(class = 1) for each sample via the logistic link."""
    probs: list[float] = []
    for sample in X:
        z = intercept
        for j, feature in enumerate(sample):
            z += feature * coef[j]
        probs.append(sigmoid(z))
    return probs
def predict(X: list[list[float]], coef: list[float], intercept: float) -> list[int]:
    """Threshold predicted probabilities at 0.5 to get hard 0/1 labels."""
    labels: list[int] = []
    for p in predict_proba(X, coef, intercept):
        labels.append(1 if p >= 0.5 else 0)
    return labels
def score(X: list[list[float]], y: list[int], coef: list[float], intercept: float) -> float:
    """Accuracy: fraction of samples whose predicted label equals y."""
    labels = predict(X, coef, intercept)
    hits = sum(1 for i in range(len(y)) if labels[i] == y[i])
    return hits / len(y)
def cmd_fit(args: argparse.Namespace) -> None:
    """Handle fit subcommand: train from stdin JSON, print the model."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "y"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    # Optional hyperparameters with sklearn-ish defaults.
    lr = payload.get("learning_rate", 0.1)
    iters = payload.get("max_iter", 1000)
    try:
        print(json.dumps(fit(payload["X"], payload["y"], lr, iters)))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def cmd_predict(args: argparse.Namespace) -> None:
    """Handle predict subcommand: hard labels from supplied coef/intercept."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "coef", "intercept"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    labels = predict(payload["X"], payload["coef"], payload["intercept"])
    print(json.dumps({"predictions": labels}))
def cmd_predict_proba(args: argparse.Namespace) -> None:
    """Handle predict-proba subcommand: class-1 probabilities for X."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "coef", "intercept"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    probs = predict_proba(payload["X"], payload["coef"], payload["intercept"])
    print(json.dumps({"probabilities": probs}))
def cmd_score(args: argparse.Namespace) -> None:
    """Handle score subcommand: accuracy of a supplied model on (X, y)."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "y", "coef", "intercept"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    acc = score(payload["X"], payload["y"], payload["coef"], payload["intercept"])
    print(json.dumps({"accuracy": acc}))
def cmd_fit_predict(args: argparse.Namespace) -> None:
    """Handle fit-predict subcommand: train, then classify X_test."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X_train", "y_train", "X_test"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    lr = payload.get("learning_rate", 0.1)
    iters = payload.get("max_iter", 1000)
    try:
        model = fit(payload["X_train"], payload["y_train"], lr, iters)
        labels = predict(payload["X_test"], model["coef"], model["intercept"])
        # Key order matters for the emitted JSON: coef, intercept, predictions.
        print(json.dumps({
            "coef": model["coef"],
            "intercept": model["intercept"],
            "predictions": labels,
        }))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def cmd_fit_score(args: argparse.Namespace) -> None:
    """Handle fit-score subcommand: train on (X, y), report training accuracy."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "y"):
        if key not in payload:
            print(f"Error: Missing required field '{key}'", file=sys.stderr)
            sys.exit(1)
    lr = payload.get("learning_rate", 0.1)
    iters = payload.get("max_iter", 1000)
    try:
        model = fit(payload["X"], payload["y"], lr, iters)
        acc = score(payload["X"], payload["y"], model["coef"], model["intercept"])
        print(json.dumps({
            "coef": model["coef"],
            "intercept": model["intercept"],
            "accuracy": acc,
        }))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def main() -> None:
    """Main entry point: build the subcommand parser and dispatch."""
    parser = argparse.ArgumentParser(
        description="LogisticRegression CLI - fit/predict/score pattern (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    # (name, help text, handler) triples for every supported subcommand.
    commands = [
        ("fit", "Fit logistic regression model", cmd_fit),
        ("predict", "Predict class labels", cmd_predict),
        ("predict-proba", "Predict probabilities", cmd_predict_proba),
        ("score", "Calculate accuracy score", cmd_score),
        ("fit-predict", "Fit model and predict on test data", cmd_fit_predict),
        ("fit-score", "Fit model and calculate accuracy", cmd_fit_score),
    ]
    for name, help_text, handler in commands:
        sub = subparsers.add_parser(name, help=help_text)
        sub.set_defaults(func=handler)
    args = parser.parse_args()
    if args.command is None:
        # No subcommand given: show usage and exit successfully.
        parser.print_help()
        sys.exit(0)
    args.func(args)


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/logreg_tool.py (9013 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/logreg_tool.rs (17496 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/Cargo.toml (3 dependencies)
⏱️ Parse time: 54ms
📊 Throughput: 161.1 KB/s
⏱️ Total time: 54ms
| true
|
sklearn_logreg
| 290
| 6
|
[
"context_manager",
"class_definition",
"exception_handling",
"stdin_usage"
] | 0.652
| null |
example_sklearn_logreg
|
test_logreg_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn LogisticRegression CLI.
Academic Reference: Pedregosa et al. (2011) sklearn API design [1]
Tests the fit/predict/score pattern for binary classification.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "logreg_tool.py"
def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    # Invoke the script under test as a subprocess, feeding stdin if given.
    completed = subprocess.run(
        ["python3", str(SCRIPT), *args],
        capture_output=True,
        text=True,
        input=input_data,
    )
    return completed.stdout, completed.stderr, completed.returncode
class TestLogregFit:
    """Test model fitting."""
    # Gradient descent should separate the two clusters around x = 5.
    def test_fit_simple_binary(self):
        """Test fitting on linearly separable binary data."""
        # Class 0: x < 5, Class 1: x >= 5
        data = json.dumps({
            "X": [[1], [2], [3], [4], [6], [7], [8], [9]],
            "y": [0, 0, 0, 0, 1, 1, 1, 1]
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "coef" in result
        assert "intercept" in result
        assert len(result["coef"]) == 1
    def test_fit_multivariate(self):
        """Test fitting on multivariate binary data."""
        data = json.dumps({
            "X": [[0, 0], [0, 1], [1, 0], [1, 1], [5, 5], [5, 6], [6, 5], [6, 6]],
            "y": [0, 0, 0, 0, 1, 1, 1, 1]
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert len(result["coef"]) == 2
    def test_fit_empty_data_fails(self):
        """Test that fitting on empty data fails."""
        data = json.dumps({"X": [], "y": []})
        stdout, stderr, code = run(["fit"], data)
        assert code == 1
        assert "empty" in stderr.lower() or "error" in stderr.lower()
class TestLogregPredict:
    """Test model prediction."""
    # With coef=1, intercept=-5 the decision boundary sits at x = 5.
    def test_predict_binary(self):
        """Test prediction with provided coefficients."""
        # Sigmoid(coef * x + intercept) > 0.5 => class 1
        data = json.dumps({
            "X": [[1], [10]],
            "coef": [1.0],
            "intercept": -5.0  # Decision boundary at x=5
        })
        stdout, stderr, code = run(["predict"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "predictions" in result
        assert result["predictions"][0] == 0  # x=1 < 5
        assert result["predictions"][1] == 1  # x=10 > 5
    def test_predict_proba(self):
        """Test probability prediction."""
        data = json.dumps({
            "X": [[5]],  # At decision boundary
            "coef": [1.0],
            "intercept": -5.0
        })
        stdout, stderr, code = run(["predict-proba"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "probabilities" in result
        # At boundary, probability should be ~0.5
        assert abs(result["probabilities"][0] - 0.5) < 0.1
class TestLogregScore:
    """Test model scoring (accuracy)."""
    # Accuracy is the fraction of exact label matches.
    def test_score_perfect(self):
        """Test accuracy = 1.0 for perfect predictions."""
        data = json.dumps({
            "X": [[1], [2], [8], [9]],
            "y": [0, 0, 1, 1],
            "coef": [1.0],
            "intercept": -5.0
        })
        stdout, stderr, code = run(["score"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "accuracy" in result
        assert result["accuracy"] == 1.0
    def test_score_imperfect(self):
        """Test accuracy < 1.0 for imperfect predictions."""
        data = json.dumps({
            "X": [[1], [5], [9]],  # x=5 is at boundary
            "y": [0, 0, 1],
            "coef": [1.0],
            "intercept": -5.0
        })
        stdout, stderr, code = run(["score"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["accuracy"] < 1.0
class TestLogregFitPredict:
    """Test combined fit-predict pipeline."""
    # Trains on separable data, then classifies unseen extreme points.
    def test_fit_predict_pipeline(self):
        """Test fitting and predicting in one command."""
        data = json.dumps({
            "X_train": [[1], [2], [3], [7], [8], [9]],
            "y_train": [0, 0, 0, 1, 1, 1],
            "X_test": [[0], [10]]
        })
        stdout, stderr, code = run(["fit-predict"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "predictions" in result
        assert result["predictions"][0] == 0  # x=0 -> class 0
        assert result["predictions"][1] == 1  # x=10 -> class 1
class TestLogregFitScore:
    """Test combined fit-score pipeline."""
    # Training accuracy on separable data should be high.
    def test_fit_score_pipeline(self):
        """Test fitting and scoring in one command."""
        data = json.dumps({
            "X": [[1], [2], [3], [7], [8], [9]],
            "y": [0, 0, 0, 1, 1, 1]
        })
        stdout, stderr, code = run(["fit-score"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "accuracy" in result
        assert result["accuracy"] >= 0.8  # Should be high for separable data
class TestLogregHelp:
    """Test help and usage messages."""
    # argparse exits 0 on --help and prints usage text to stdout.
    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "LogisticRegression" in stdout or "logistic" in stdout.lower()
        assert "fit" in stdout
        assert "predict" in stdout
    def test_subcommand_help(self):
        """Test subcommand help."""
        stdout, stderr, code = run(["fit", "--help"])
        assert code == 0
class TestLogregEdgeCases:
    """Test edge cases and error handling."""
    # Malformed input must exit 1 with a diagnostic on stderr, not a traceback.
    def test_invalid_json_fails(self):
        """Test that invalid JSON input fails gracefully."""
        stdout, stderr, code = run(["fit"], "not valid json")
        assert code == 1
        assert "json" in stderr.lower() or "error" in stderr.lower()
    def test_missing_required_field_fails(self):
        """Test that missing required fields fail gracefully."""
        data = json.dumps({"X": [[1], [2], [3]]})  # Missing y
        stdout, stderr, code = run(["fit"], data)
        assert code == 1
    def test_non_binary_labels_handled(self):
        """Test that non-binary labels are rejected or handled."""
        data = json.dumps({
            "X": [[1], [2], [3]],
            "y": [0, 1, 2]  # 3 classes
        })
        stdout, stderr, code = run(["fit"], data)
        # Should either fail or handle multiclass
        # For simplicity, binary only is acceptable
        if code == 0:
            result = json.loads(stdout)
            assert "coef" in result
class TestLogregParameters:
    """Test learning parameters."""
    # learning_rate / max_iter ride along inside the JSON payload.
    def test_learning_rate(self):
        """Test custom learning rate."""
        data = json.dumps({
            "X": [[1], [2], [8], [9]],
            "y": [0, 0, 1, 1],
            "learning_rate": 0.1,
            "max_iter": 100
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
    def test_max_iterations(self):
        """Test custom max iterations."""
        data = json.dumps({
            "X": [[1], [2], [8], [9]],
            "y": [0, 0, 1, 1],
            "max_iter": 500
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/test_logreg_tool.py (7336 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/test_logreg_tool.rs (11306 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_logreg/Cargo.toml (2 dependencies)
⏱️ Parse time: 56ms
📊 Throughput: 127.2 KB/s
⏱️ Total time: 56ms
| true
|
sklearn_logreg
| 228
| 6
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_sklearn_metrics
|
metrics_tool.py
|
#!/usr/bin/env python3
"""Metrics CLI tool.
A CLI for sklearn-style classification and regression metrics.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Pedregosa et al. (2011) sklearn metrics [1]
Usage:
echo '{"y_true": [0, 1, 1], "y_pred": [0, 1, 0]}' | python metrics_tool.py accuracy
"""
import argparse
import json
import sys
def accuracy_score(y_true: list[int], y_pred: list[int]) -> float:
    """Fraction of positions where the prediction equals the true label."""
    if len(y_true) != len(y_pred):
        raise ValueError("Length mismatch")
    if not y_true:
        raise ValueError("Empty input")
    matches = sum(1 for truth, pred in zip(y_true, y_pred) if truth == pred)
    return matches / len(y_true)
def precision_score(y_true: list[int], y_pred: list[int]) -> float:
    """Precision = TP / (TP + FP); 0.0 when nothing is predicted positive."""
    if len(y_true) != len(y_pred):
        raise ValueError("Length mismatch")
    tp = 0
    fp = 0
    # Single pass: count true and false positives (labels assumed 0/1).
    for truth, pred in zip(y_true, y_pred):
        if truth == 1 and pred == 1:
            tp += 1
        elif truth == 0 and pred == 1:
            fp += 1
    denom = tp + fp
    return tp / denom if denom else 0.0
def recall_score(y_true: list[int], y_pred: list[int]) -> float:
    """Recall = TP / (TP + FN); 0.0 when there are no actual positives."""
    if len(y_true) != len(y_pred):
        raise ValueError("Length mismatch")
    tp = 0
    fn = 0
    # Single pass: count true positives and misses (labels assumed 0/1).
    for truth, pred in zip(y_true, y_pred):
        if truth == 1 and pred == 1:
            tp += 1
        elif truth == 1 and pred == 0:
            fn += 1
    denom = tp + fn
    return tp / denom if denom else 0.0
def f1_score(y_true: list[int], y_pred: list[int]) -> float:
    """F1: harmonic mean of precision and recall (0.0 when both are zero)."""
    precision = precision_score(y_true, y_pred)
    recall = recall_score(y_true, y_pred)
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
def confusion_matrix(y_true: list[int], y_pred: list[int]) -> list[list[int]]:
    """Binary confusion matrix laid out as [[TN, FP], [FN, TP]]."""
    if len(y_true) != len(y_pred):
        raise ValueError("Length mismatch")
    tn = fp = fn = tp = 0
    # One pass over the pairs instead of four separate scans.
    for truth, pred in zip(y_true, y_pred):
        if truth == 0 and pred == 0:
            tn += 1
        elif truth == 0 and pred == 1:
            fp += 1
        elif truth == 1 and pred == 0:
            fn += 1
        elif truth == 1 and pred == 1:
            tp += 1
    return [[tn, fp], [fn, tp]]
def mean_squared_error(y_true: list[float], y_pred: list[float]) -> float:
    """Average of the squared residuals between paired values.

    Raises:
        ValueError: if the inputs differ in length or are empty.
    """
    if len(y_true) != len(y_pred):
        raise ValueError("Length mismatch")
    if not y_true:
        raise ValueError("Empty input")
    return sum((t - p) ** 2 for t, p in zip(y_true, y_pred)) / len(y_true)
def r2_score(y_true: list[float], y_pred: list[float]) -> float:
    """Coefficient of determination: 1 - SS_res / SS_tot.

    Raises:
        ValueError: if the inputs differ in length or are empty.
    """
    if len(y_true) != len(y_pred):
        raise ValueError("Length mismatch")
    if not y_true:
        raise ValueError("Empty input")
    mean_true = sum(y_true) / len(y_true)
    ss_res = sum((t - p) ** 2 for t, p in zip(y_true, y_pred))
    ss_tot = sum((t - mean_true) ** 2 for t in y_true)
    if ss_tot == 0:
        # Constant target: perfect predictions score 1.0, anything else 0.0.
        return 1.0 if ss_res == 0 else 0.0
    return 1.0 - ss_res / ss_tot
def _load_stdin_json() -> dict:
    """Parse a JSON document from stdin, exiting with status 1 on bad input."""
    try:
        return json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)


def _run_pair_metric(name: str, metric) -> None:
    """Read {"y_true", "y_pred"} from stdin and print {name: metric(...)}.

    Exits with status 1 (and an "Error: ..." message on stderr) on invalid
    JSON, missing keys, or metric validation failures — exactly the
    behavior each handler previously duplicated inline.
    """
    data = _load_stdin_json()
    try:
        value = metric(data["y_true"], data["y_pred"])
        print(json.dumps({name: value}))
    except (ValueError, KeyError) as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def cmd_accuracy(args: argparse.Namespace) -> None:
    """Handle accuracy subcommand."""
    _run_pair_metric("accuracy", accuracy_score)


def cmd_precision(args: argparse.Namespace) -> None:
    """Handle precision subcommand."""
    _run_pair_metric("precision", precision_score)


def cmd_recall(args: argparse.Namespace) -> None:
    """Handle recall subcommand."""
    _run_pair_metric("recall", recall_score)


def cmd_f1(args: argparse.Namespace) -> None:
    """Handle f1 subcommand."""
    _run_pair_metric("f1", f1_score)


def cmd_confusion_matrix(args: argparse.Namespace) -> None:
    """Handle confusion-matrix subcommand."""
    _run_pair_metric("matrix", confusion_matrix)


def cmd_mse(args: argparse.Namespace) -> None:
    """Handle mse subcommand."""
    _run_pair_metric("mse", mean_squared_error)


def cmd_r2(args: argparse.Namespace) -> None:
    """Handle r2 subcommand."""
    _run_pair_metric("r2", r2_score)
def main() -> None:
    """Main entry point: register subcommands and dispatch to a handler."""
    parser = argparse.ArgumentParser(
        description="Metrics CLI - classification and regression metrics (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    # (subcommand name, help text, handler) — registered in display order.
    registry = [
        ("accuracy", "Accuracy score", cmd_accuracy),
        ("precision", "Precision score", cmd_precision),
        ("recall", "Recall score", cmd_recall),
        ("f1", "F1 score", cmd_f1),
        ("confusion-matrix", "Confusion matrix", cmd_confusion_matrix),
        ("mse", "Mean squared error", cmd_mse),
        ("r2", "R-squared score", cmd_r2),
    ]
    for name, help_text, handler in registry:
        subparsers.add_parser(name, help=help_text).set_defaults(func=handler)
    args = parser.parse_args()
    if args.command is None:
        # No subcommand given: show usage and exit successfully.
        parser.print_help()
        sys.exit(0)
    args.func(args)


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_metrics/metrics_tool.py (7712 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_metrics/metrics_tool.rs (17038 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_metrics/Cargo.toml (3 dependencies)
⏱️ Parse time: 55ms
📊 Throughput: 134.6 KB/s
⏱️ Total time: 56ms
| true
|
sklearn_metrics
| 234
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_sklearn_metrics
|
test_metrics_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn metrics CLI.
Academic Reference: Pedregosa et al. (2011) sklearn metrics [1]
Tests classification and regression metrics.
"""
import json
import subprocess
from pathlib import Path
# Path to the CLI under test, resolved relative to this test file.
SCRIPT = Path(__file__).parent / "metrics_tool.py"


def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    # Spawn the tool as a real subprocess so argparse, stdin handling,
    # and exit codes are exercised exactly as an end user would hit them.
    result = subprocess.run(
        ["python3", str(SCRIPT)] + args,
        capture_output=True,
        text=True,
        input=input_data,
    )
    return result.stdout, result.stderr, result.returncode


class TestAccuracy:
    """Test accuracy score."""

    def test_accuracy_perfect(self):
        """Test accuracy = 1.0 for perfect predictions."""
        data = json.dumps({
            "y_true": [0, 1, 1, 0],
            "y_pred": [0, 1, 1, 0]
        })
        stdout, stderr, code = run(["accuracy"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["accuracy"] == 1.0

    def test_accuracy_zero(self):
        """Test accuracy = 0.0 for all wrong."""
        data = json.dumps({
            "y_true": [0, 0, 0, 0],
            "y_pred": [1, 1, 1, 1]
        })
        stdout, stderr, code = run(["accuracy"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["accuracy"] == 0.0

    def test_accuracy_partial(self):
        """Test accuracy for partial correctness."""
        data = json.dumps({
            "y_true": [0, 1, 1, 0],
            "y_pred": [0, 1, 0, 0]  # 3/4 correct
        })
        stdout, stderr, code = run(["accuracy"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["accuracy"] - 0.75) < 0.01


class TestPrecision:
    """Test precision score."""

    def test_precision_perfect(self):
        """Test precision = 1.0 for no false positives."""
        data = json.dumps({
            "y_true": [1, 1, 0, 0],
            "y_pred": [1, 1, 0, 0]
        })
        stdout, stderr, code = run(["precision"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["precision"] == 1.0

    def test_precision_with_fp(self):
        """Test precision with false positives."""
        # TP=1, FP=1 -> precision = 1/2
        data = json.dumps({
            "y_true": [1, 0, 0, 0],
            "y_pred": [1, 1, 0, 0]
        })
        stdout, stderr, code = run(["precision"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["precision"] - 0.5) < 0.01


class TestRecall:
    """Test recall score."""

    def test_recall_perfect(self):
        """Test recall = 1.0 for no false negatives."""
        data = json.dumps({
            "y_true": [1, 1, 0, 0],
            "y_pred": [1, 1, 0, 0]
        })
        stdout, stderr, code = run(["recall"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["recall"] == 1.0

    def test_recall_with_fn(self):
        """Test recall with false negatives."""
        # TP=1, FN=1 -> recall = 1/2
        data = json.dumps({
            "y_true": [1, 1, 0, 0],
            "y_pred": [1, 0, 0, 0]
        })
        stdout, stderr, code = run(["recall"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["recall"] - 0.5) < 0.01


class TestF1:
    """Test F1 score."""

    def test_f1_perfect(self):
        """Test F1 = 1.0 for perfect predictions."""
        data = json.dumps({
            "y_true": [1, 1, 0, 0],
            "y_pred": [1, 1, 0, 0]
        })
        stdout, stderr, code = run(["f1"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["f1"] == 1.0

    def test_f1_harmonic_mean(self):
        """Test F1 is harmonic mean of precision and recall."""
        # P=0.5, R=1.0 -> F1 = 2 * 0.5 * 1.0 / (0.5 + 1.0) = 2/3
        data = json.dumps({
            "y_true": [1, 0, 0, 0],
            "y_pred": [1, 1, 0, 0]
        })
        stdout, stderr, code = run(["f1"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["f1"] - 0.667) < 0.01


class TestConfusionMatrix:
    """Test confusion matrix."""

    def test_confusion_matrix_binary(self):
        """Test binary confusion matrix."""
        data = json.dumps({
            "y_true": [0, 0, 1, 1],
            "y_pred": [0, 1, 0, 1]
        })
        stdout, stderr, code = run(["confusion-matrix"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "matrix" in result
        # [[TN, FP], [FN, TP]] = [[1, 1], [1, 1]]
        assert result["matrix"][0][0] == 1  # TN
        assert result["matrix"][0][1] == 1  # FP
        assert result["matrix"][1][0] == 1  # FN
        assert result["matrix"][1][1] == 1  # TP


class TestMSE:
    """Test mean squared error."""

    def test_mse_zero(self):
        """Test MSE = 0 for perfect predictions."""
        data = json.dumps({
            "y_true": [1.0, 2.0, 3.0],
            "y_pred": [1.0, 2.0, 3.0]
        })
        stdout, stderr, code = run(["mse"], data)
        assert code == 0
        result = json.loads(stdout)
        assert result["mse"] == 0.0

    def test_mse_nonzero(self):
        """Test MSE calculation."""
        # MSE = ((1-2)^2 + (2-3)^2 + (3-4)^2) / 3 = 3/3 = 1
        data = json.dumps({
            "y_true": [1.0, 2.0, 3.0],
            "y_pred": [2.0, 3.0, 4.0]
        })
        stdout, stderr, code = run(["mse"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["mse"] - 1.0) < 0.01


class TestR2:
    """Test R-squared score."""

    def test_r2_perfect(self):
        """Test R2 = 1.0 for perfect predictions."""
        data = json.dumps({
            "y_true": [1.0, 2.0, 3.0],
            "y_pred": [1.0, 2.0, 3.0]
        })
        stdout, stderr, code = run(["r2"], data)
        assert code == 0
        result = json.loads(stdout)
        assert abs(result["r2"] - 1.0) < 0.01


class TestHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "accuracy" in stdout.lower()
        assert "precision" in stdout.lower()

    def test_subcommand_help(self):
        """Test subcommand help."""
        stdout, stderr, code = run(["accuracy", "--help"])
        assert code == 0


class TestEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        stdout, stderr, code = run(["accuracy"], "not json")
        assert code == 1

    def test_length_mismatch_fails(self):
        """Test length mismatch fails."""
        data = json.dumps({
            "y_true": [0, 1, 1],
            "y_pred": [0, 1]
        })
        stdout, stderr, code = run(["accuracy"], data)
        assert code == 1
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_metrics/test_metrics_tool.py (6973 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_metrics/test_metrics_tool.rs (10617 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_metrics/Cargo.toml (2 dependencies)
⏱️ Parse time: 55ms
📊 Throughput: 121.6 KB/s
⏱️ Total time: 56ms
| true
|
sklearn_metrics
| 237
| 6
|
[
"context_manager",
"class_definition",
"functools"
] | 0.652
| null |
example_sklearn_pca
|
pca_flat.py
|
#!/usr/bin/env python3
"""PCA CLI - flat structure for depyler compatibility.
Simple 2D PCA on 4 data points.
"""
import argparse
def main():
    """Main entry point.

    Flat (no-helper) 2D PCA on exactly four points, written this way for
    depyler transpilation. Computes the 2x2 covariance matrix, its
    eigenvalues in closed form, and either projects the points onto the
    first principal component (--mode transform) or reports the
    explained-variance ratios (--mode variance).
    """
    parser = argparse.ArgumentParser(description="PCA CLI")
    parser.add_argument(
        "--mode", type=str, required=True, choices=["transform", "variance"], help="Mode"
    )
    # 4 2D data points (x, y pairs)
    parser.add_argument("--x0", type=float, default=1.0, help="Point 0 x")
    parser.add_argument("--y0", type=float, default=2.0, help="Point 0 y")
    parser.add_argument("--x1", type=float, default=2.0, help="Point 1 x")
    parser.add_argument("--y1", type=float, default=3.0, help="Point 1 y")
    parser.add_argument("--x2", type=float, default=3.0, help="Point 2 x")
    parser.add_argument("--y2", type=float, default=4.0, help="Point 2 y")
    parser.add_argument("--x3", type=float, default=4.0, help="Point 3 x")
    parser.add_argument("--y3", type=float, default=5.0, help="Point 3 y")
    args = parser.parse_args()
    x0 = args.x0
    y0 = args.y0
    x1 = args.x1
    y1 = args.y1
    x2 = args.x2
    y2 = args.y2
    x3 = args.x3
    y3 = args.y3
    # Center the data
    mean_x = (x0 + x1 + x2 + x3) / 4.0
    mean_y = (y0 + y1 + y2 + y3) / 4.0
    cx0 = x0 - mean_x
    cy0 = y0 - mean_y
    cx1 = x1 - mean_x
    cy1 = y1 - mean_y
    cx2 = x2 - mean_x
    cy2 = y2 - mean_y
    cx3 = x3 - mean_x
    cy3 = y3 - mean_y
    # Covariance matrix (2x2)
    # cov_xx = sum(cx^2) / n
    # cov_xy = sum(cx*cy) / n
    # cov_yy = sum(cy^2) / n
    cov_xx = (cx0 * cx0 + cx1 * cx1 + cx2 * cx2 + cx3 * cx3) / 4.0
    cov_xy = (cx0 * cy0 + cx1 * cy1 + cx2 * cy2 + cx3 * cy3) / 4.0
    cov_yy = (cy0 * cy0 + cy1 * cy1 + cy2 * cy2 + cy3 * cy3) / 4.0
    # Eigenvalues of 2x2 symmetric matrix:
    # trace = cov_xx + cov_yy
    # det = cov_xx * cov_yy - cov_xy * cov_xy
    # eigenvalues = (trace +- sqrt(trace^2 - 4*det)) / 2
    trace = cov_xx + cov_yy
    det = cov_xx * cov_yy - cov_xy * cov_xy
    # disc is (lambda1 - lambda2)^2, which is non-negative for a real
    # symmetric 2x2 matrix, so the disc <= 0.0 path only sees disc == 0.0
    # (up to rounding).
    disc = trace * trace - 4.0 * det
    # Simple sqrt approximation via Newton-Raphson
    # (math.sqrt avoided for transpiler compatibility; 10 iterations from
    # the disc/2 starting guess).
    sqrt_disc = disc
    if disc > 0.0:
        sqrt_disc = disc / 2.0
        i = 0.0
        while i < 10.0:
            sqrt_disc = (sqrt_disc + disc / sqrt_disc) / 2.0
            i = i + 1.0
    lambda1 = (trace + sqrt_disc) / 2.0
    lambda2 = (trace - sqrt_disc) / 2.0
    # Total variance
    total_var = lambda1 + lambda2
    if args.mode == "transform":
        # Project onto first principal component
        # For simplicity, use the direction (1, 1) normalized
        # PC1 direction approximation
        # Eigenvector of the dominant eigenvalue: (cov_xy, lambda1 - cov_xx).
        # Falls back to the x-axis when cov_xy is (near) zero, i.e. the
        # covariance matrix is (near) diagonal.
        if cov_xy > 0.0001:
            pc1_x = cov_xy
            pc1_y = lambda1 - cov_xx
        else:
            pc1_x = 1.0
            pc1_y = 0.0
        # Normalize
        # Newton-Raphson sqrt again; mag > 0 on both branches above, so the
        # iteration never divides by zero.
        mag = pc1_x * pc1_x + pc1_y * pc1_y
        mag_sqrt = mag / 2.0
        j = 0.0
        while j < 10.0:
            mag_sqrt = (mag_sqrt + mag / mag_sqrt) / 2.0
            j = j + 1.0
        if mag_sqrt > 0.0:
            pc1_x = pc1_x / mag_sqrt
            pc1_y = pc1_y / mag_sqrt
        # Project
        # Scalar projection of each centered point onto the unit PC1.
        proj0 = cx0 * pc1_x + cy0 * pc1_y
        proj1 = cx1 * pc1_x + cy1 * pc1_y
        proj2 = cx2 * pc1_x + cy2 * pc1_y
        proj3 = cx3 * pc1_x + cy3 * pc1_y
        print(f"proj0={proj0} proj1={proj1} proj2={proj2} proj3={proj3}")
    elif args.mode == "variance":
        # Output explained variance ratio
        if total_var > 0.0:
            var_ratio1 = lambda1 / total_var
            var_ratio2 = lambda2 / total_var
        else:
            var_ratio1 = 0.0
            var_ratio2 = 0.0
        print(f"var_ratio1={var_ratio1} var_ratio2={var_ratio2}")


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/pca_flat.py (3716 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/pca_flat.rs (6139 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 74.1 KB/s
⏱️ Total time: 49ms
| true
|
sklearn_pca
| 122
| 6
|
[] | 0
| null |
example_sklearn_pca
|
pca_tool.py
|
#!/usr/bin/env python3
"""PCA CLI tool.
A CLI for sklearn-style PCA dimensionality reduction.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: van der Maaten & Hinton (2008) [10]
Usage:
echo '{"X": [[1,2,3], [2,4,6]], "n_components": 2}' | python pca_tool.py fit-transform
"""
import argparse
import json
import math
import sys
def mean_center(X: list[list[float]]) -> tuple[list[list[float]], list[float]]:
    """Subtract the per-feature mean from every row.

    Returns:
        (centered rows, per-feature mean vector).
    """
    n_rows = len(X)
    n_cols = len(X[0])
    # Column means, accumulated in row order.
    col_means = []
    for j in range(n_cols):
        total = 0.0
        for row in X:
            total += row[j]
        col_means.append(total / n_rows)
    centered = [[value - col_means[j] for j, value in enumerate(row)] for row in X]
    return centered, col_means
def covariance_matrix(X: list[list[float]]) -> list[list[float]]:
    """Covariance matrix of already-centered data (divides by n-1 for n > 1)."""
    n_samples = len(X)
    n_features = len(X[0])
    denom = n_samples - 1 if n_samples > 1 else 1
    cov = []
    for i in range(n_features):
        row = []
        for j in range(n_features):
            acc = 0.0
            for sample in X:
                acc += sample[i] * sample[j]
            row.append(acc / denom)
        cov.append(row)
    return cov
def power_iteration(matrix: list[list[float]], num_iter: int = 100) -> tuple[list[float], float]:
    """Dominant eigenpair of a square matrix via power iteration.

    Returns:
        (unit eigenvector, eigenvalue estimated by the Rayleigh quotient).
    """
    n = len(matrix)
    # Deterministic start: uniform unit vector.
    vec = [1.0 / math.sqrt(n) for _ in range(n)]
    for _ in range(num_iter):
        product = [sum(row[j] * vec[j] for j in range(n)) for row in matrix]
        norm = math.sqrt(sum(v * v for v in product))
        if norm < 1e-10:
            break  # Vector (numerically) vanished; stop iterating.
        vec = [v / norm for v in product]
    # Rayleigh quotient v^T (M v) for the converged unit vector.
    mv = [sum(matrix[i][j] * vec[j] for j in range(n)) for i in range(n)]
    eigenvalue = sum(vec[i] * mv[i] for i in range(n))
    return vec, eigenvalue
def deflate_matrix(
    matrix: list[list[float]], eigenvec: list[float], eigenval: float
) -> list[list[float]]:
    """Return matrix - eigenval * v v^T (Hotelling deflation).

    Removes the found eigenpair so the next power iteration converges to
    the following component. The input matrix is not modified.
    """
    n = len(matrix)
    return [
        [matrix[i][j] - eigenval * eigenvec[i] * eigenvec[j] for j in range(n)]
        for i in range(n)
    ]
def fit(X: list[list[float]], n_components: int = None) -> dict:
    """Fit PCA, returning components, data mean, and variance ratios.

    Args:
        X: data matrix, one sample per row.
        n_components: number of components; defaults to min(n_samples, n_features).

    Raises:
        ValueError: on empty input or n_components > n_features.
    """
    if not X:
        raise ValueError("Empty input data")
    n_samples = len(X)
    n_features = len(X[0])
    if n_components is None:
        n_components = min(n_samples, n_features)
    if n_components > n_features:
        raise ValueError(f"n_components ({n_components}) > n_features ({n_features})")
    X_centered, mean = mean_center(X)
    # Repeatedly extract the dominant eigenpair, deflating after each one.
    working = covariance_matrix(X_centered)
    components = []
    eigenvalues = []
    for _ in range(n_components):
        eigenvec, eigenval = power_iteration(working)
        components.append(eigenvec)
        eigenvalues.append(max(eigenval, 0))  # clamp tiny negative estimates
        working = deflate_matrix(working, eigenvec, eigenval)
    total = sum(eigenvalues)
    denominator = total if total > 0 else 1
    explained_variance_ratio = [value / denominator for value in eigenvalues]
    return {
        "components": components,
        "mean": mean,
        "explained_variance_ratio": explained_variance_ratio,
    }
def transform(
    X: list[list[float]], components: list[list[float]], mean: list[float]
) -> list[list[float]]:
    """Center rows of X and project them onto the given components."""
    n_features = len(X[0])
    projected = []
    for row in X:
        centered = [row[j] - mean[j] for j in range(n_features)]
        # One scalar projection per principal component.
        coords = [
            sum(centered[j] * comp[j] for j in range(n_features))
            for comp in components
        ]
        projected.append(coords)
    return projected
def _read_stdin_json() -> dict:
    """Parse JSON from stdin, exiting with status 1 on malformed input."""
    try:
        return json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)


def _require(data: dict, fields: list[str]) -> None:
    """Exit with status 1 if any required field is missing from *data*."""
    for field in fields:
        if field not in data:
            print(f"Error: Missing '{field}'", file=sys.stderr)
            sys.exit(1)


def cmd_fit(args: argparse.Namespace) -> None:
    """Handle fit subcommand: fit PCA on {"X"} and print the model."""
    data = _read_stdin_json()
    _require(data, ["X"])
    n_components = data.get("n_components", None)
    try:
        result = fit(data["X"], n_components)
        print(json.dumps(result))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def cmd_transform(args: argparse.Namespace) -> None:
    """Handle transform subcommand: project {"X"} with a fitted model."""
    data = _read_stdin_json()
    _require(data, ["X", "components", "mean"])
    X_transformed = transform(data["X"], data["components"], data["mean"])
    print(json.dumps({"X_transformed": X_transformed}))


def cmd_fit_transform(args: argparse.Namespace) -> None:
    """Handle fit-transform subcommand: fit on {"X"} and project it."""
    data = _read_stdin_json()
    _require(data, ["X"])
    n_components = data.get("n_components", None)
    try:
        fit_result = fit(data["X"], n_components)
        X_transformed = transform(data["X"], fit_result["components"], fit_result["mean"])
        result = {
            "X_transformed": X_transformed,
            "components": fit_result["components"],
            "mean": fit_result["mean"],
            "explained_variance_ratio": fit_result["explained_variance_ratio"],
        }
        print(json.dumps(result))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def main() -> None:
    """Main entry point: dispatch subcommands, print help when none given."""
    parser = argparse.ArgumentParser(
        description="PCA CLI - dimensionality reduction (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    subparsers.add_parser("fit", help="Fit PCA model").set_defaults(func=cmd_fit)
    subparsers.add_parser("transform", help="Transform data").set_defaults(func=cmd_transform)
    subparsers.add_parser("fit-transform", help="Fit and transform").set_defaults(
        func=cmd_fit_transform
    )
    args = parser.parse_args()
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)


if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/pca_tool.py (7367 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/pca_tool.rs (17365 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/Cargo.toml (3 dependencies)
⏱️ Parse time: 56ms
📊 Throughput: 127.5 KB/s
⏱️ Total time: 56ms
| true
|
sklearn_pca
| 253
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_sklearn_pca
|
test_pca_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn PCA CLI.
Academic Reference: van der Maaten & Hinton (2008) Dimensionality reduction [10]
Tests Principal Component Analysis for dimensionality reduction.
"""
import json
import subprocess
from pathlib import Path
# Path to the CLI under test, resolved relative to this test file.
SCRIPT = Path(__file__).parent / "pca_tool.py"


def run(args, input_data=None):
    """Run the CLI and return (stdout, stderr, returncode)."""
    # Spawn the tool as a real subprocess so argparse, stdin handling,
    # and exit codes are exercised exactly as an end user would hit them.
    result = subprocess.run(
        ["python3", str(SCRIPT)] + args,
        capture_output=True,
        text=True,
        input=input_data,
    )
    return result.stdout, result.stderr, result.returncode


class TestPcaFit:
    """Test PCA fitting."""

    def test_fit_reduces_dimensions(self):
        """Test that PCA reduces to specified n_components."""
        data = json.dumps({
            "X": [[1, 2, 3], [2, 4, 6], [3, 6, 9], [4, 8, 12]],
            "n_components": 2
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "components" in result
        assert len(result["components"]) == 2  # 2 principal components

    def test_fit_default_components(self):
        """Test default n_components equals n_features."""
        data = json.dumps({
            "X": [[1, 2], [2, 4], [3, 6], [4, 8]]
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert len(result["components"]) == 2

    def test_fit_empty_fails(self):
        """Test that fitting on empty data fails."""
        data = json.dumps({"X": [], "n_components": 2})
        stdout, stderr, code = run(["fit"], data)
        assert code == 1


class TestPcaTransform:
    """Test PCA transformation."""

    def test_transform_reduces_dim(self):
        """Test that transform reduces dimensionality."""
        data = json.dumps({
            "X": [[1, 2, 3], [2, 4, 6]],
            "components": [[0.577, 0.577, 0.577], [0.707, -0.707, 0]],
            "mean": [1.5, 3.0, 4.5]
        })
        stdout, stderr, code = run(["transform"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "X_transformed" in result
        assert len(result["X_transformed"][0]) == 2  # Reduced to 2D


class TestPcaFitTransform:
    """Test combined fit-transform."""

    def test_fit_transform(self):
        """Test fit_transform returns transformed data."""
        data = json.dumps({
            "X": [[1, 2], [2, 4], [3, 6], [4, 8]],
            "n_components": 1
        })
        stdout, stderr, code = run(["fit-transform"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "X_transformed" in result
        assert len(result["X_transformed"][0]) == 1  # Reduced to 1D
        assert "components" in result


class TestPcaExplainedVariance:
    """Test explained variance ratio."""

    def test_explained_variance_returned(self):
        """Test that explained variance ratio is returned."""
        data = json.dumps({
            "X": [[1, 2], [2, 4], [3, 6], [4, 8]],
            "n_components": 2
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        assert "explained_variance_ratio" in result
        assert len(result["explained_variance_ratio"]) == 2
        # Sum should be <= 1.0
        assert sum(result["explained_variance_ratio"]) <= 1.01

    def test_first_component_captures_most(self):
        """Test that first PC captures most variance for correlated data."""
        # Highly correlated data - first PC should capture most variance
        data = json.dumps({
            "X": [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]],
            "n_components": 2
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 0
        result = json.loads(stdout)
        # First component should capture nearly all variance
        assert result["explained_variance_ratio"][0] > 0.9


class TestPcaHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        stdout, stderr, code = run(["--help"])
        assert code == 0
        assert "PCA" in stdout or "pca" in stdout.lower()

    def test_subcommand_help(self):
        """Test subcommand help."""
        stdout, stderr, code = run(["fit", "--help"])
        assert code == 0


class TestPcaEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        stdout, stderr, code = run(["fit"], "not json")
        assert code == 1

    def test_n_components_too_large_fails(self):
        """Test n_components > n_features fails."""
        data = json.dumps({
            "X": [[1, 2], [3, 4]],
            "n_components": 5
        })
        stdout, stderr, code = run(["fit"], data)
        assert code == 1
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/test_pca_tool.py (4901 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/test_pca_tool.rs (7443 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_pca/Cargo.toml (2 dependencies)
⏱️ Parse time: 50ms
📊 Throughput: 95.5 KB/s
⏱️ Total time: 50ms
| true
|
sklearn_pca
| 153
| 6
|
[
"class_definition",
"functools"
] | 0.612
| null |
example_sklearn_rf
|
rf_tool.py
|
#!/usr/bin/env python3
"""RandomForestClassifier CLI tool.
A CLI for sklearn-style Random Forest classification.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Breiman (2001) Random Forests [8]
Usage:
echo '{"X": [[0], [1], [2], [3]], "y": [0, 0, 1, 1], "n_estimators": 10}' | python rf_tool.py fit
"""
import argparse
import json
import random
import sys
from typing import Any
def gini_impurity(y: list[int]) -> float:
    """GINI impurity: 1 - sum over classes of p_c^2 (0.0 for empty input)."""
    if not y:
        return 0.0
    n = len(y)
    impurity = 1.0
    for label in set(y):
        p = y.count(label) / n
        impurity -= p * p
    return impurity
def bootstrap_sample(X: list[list[float]], y: list[int]) -> tuple:
    """Draw len(X) rows with replacement; returns (X_boot, y_boot)."""
    n = len(X)
    # One randint call per drawn row, matching the global RNG stream.
    picks = [random.randint(0, n - 1) for _ in range(n)]
    return [X[i] for i in picks], [y[i] for i in picks]
def best_split(X: list[list[float]], y: list[int], max_features: int) -> tuple:
    """Find best split using GINI criterion with feature subsampling.

    Returns:
        (feature index, threshold, left row indices, right row indices),
        or (None, None, None, None) when there are no samples or no
        feature admits a non-degenerate split.
    """
    n_samples = len(X)
    n_features = len(X[0]) if n_samples > 0 else 0
    if n_samples == 0:
        return None, None, None, None
    best_gini = float("inf")
    best_feature = None
    best_threshold = None
    best_left_idx = None
    best_right_idx = None
    # Random feature subset
    # NOTE: consumes global RNG state; callers seed it via fit(random_state=...).
    all_features = list(range(n_features))
    features_to_check = random.sample(all_features, min(max_features, n_features))
    for feature in features_to_check:
        values = sorted({X[i][feature] for i in range(n_samples)})
        if len(values) < 2:
            continue  # Constant feature: nothing to split on.
        # Candidate thresholds: midpoints between consecutive distinct values.
        thresholds = [(values[i] + values[i + 1]) / 2 for i in range(len(values) - 1)]
        for threshold in thresholds:
            left_idx = [i for i in range(n_samples) if X[i][feature] <= threshold]
            right_idx = [i for i in range(n_samples) if X[i][feature] > threshold]
            if len(left_idx) == 0 or len(right_idx) == 0:
                continue
            left_y = [y[i] for i in left_idx]
            right_y = [y[i] for i in right_idx]
            # Sample-weighted child impurity; strict '<' means ties keep
            # the first candidate encountered.
            gini = (
                len(left_y) * gini_impurity(left_y) + len(right_y) * gini_impurity(right_y)
            ) / n_samples
            if gini < best_gini:
                best_gini = gini
                best_feature = feature
                best_threshold = threshold
                best_left_idx = left_idx
                best_right_idx = right_idx
    return best_feature, best_threshold, best_left_idx, best_right_idx
def build_tree(
    X: list[list[float]], y: list[int], depth: int, max_depth: int, max_features: int
) -> dict[str, Any]:
    """Recursively build a decision tree; leaves are {"class": label} dicts."""

    def majority_leaf(labels: list[int]) -> dict[str, Any]:
        # Most frequent label, ties broken by first occurrence; 0 if empty.
        tally = {}
        for label in labels:
            tally[label] = tally.get(label, 0) + 1
        if not tally:
            return {"class": 0}
        return {"class": max(tally.keys(), key=lambda k: tally[k])}

    # Stop on purity, depth limit, or too few samples to split.
    if len(set(y)) == 1 or depth >= max_depth or len(y) < 2:
        return majority_leaf(y)
    feature, threshold, left_idx, right_idx = best_split(X, y, max_features)
    if feature is None:
        return majority_leaf(y)
    # Build left before right so the RNG stream matches the original order.
    left_child = build_tree(
        [X[i] for i in left_idx],
        [y[i] for i in left_idx],
        depth + 1,
        max_depth,
        max_features,
    )
    right_child = build_tree(
        [X[i] for i in right_idx],
        [y[i] for i in right_idx],
        depth + 1,
        max_depth,
        max_features,
    )
    return {
        "feature": feature,
        "threshold": threshold,
        "left": left_child,
        "right": right_child,
    }
def predict_single_tree(tree: dict[str, Any], x: list[float]) -> int:
    """Walk the tree for sample *x* until a leaf class is reached."""
    node = tree
    while "class" not in node:
        # Internal node: go left on feature value <= threshold.
        branch = "left" if x[node["feature"]] <= node["threshold"] else "right"
        node = node[branch]
    return node["class"]
def fit(
    X: list[list[float]],
    y: list[int],
    n_estimators: int = 10,
    max_depth: int = 10,
    random_state: int = None,
) -> dict[str, Any]:
    """Train a random forest: one tree per bootstrap sample.

    Args:
        X: feature rows.
        y: class labels, one per row.
        n_estimators: number of trees.
        max_depth: depth limit per tree.
        random_state: optional seed for the global RNG.

    Raises:
        ValueError: if X or y is empty.
    """
    if not X or not y:
        raise ValueError("Empty input data")
    if random_state is not None:
        random.seed(random_state)
    # sqrt(n_features) features considered per split, at least one.
    max_features = max(1, int(len(X[0]) ** 0.5))
    forest = []
    for _ in range(n_estimators):
        X_boot, y_boot = bootstrap_sample(X, y)
        forest.append(build_tree(X_boot, y_boot, 0, max_depth, max_features))
    return {"trees": forest}
def predict(X: list[list[float]], trees: list[dict[str, Any]]) -> list[int]:
    """Majority vote across all trees for each sample in X."""
    results = []
    for sample in X:
        votes = {}
        for tree in trees:
            label = predict_single_tree(tree, sample)
            votes[label] = votes.get(label, 0) + 1
        # Ties resolve to the label first inserted (dict preserves order).
        results.append(max(votes, key=votes.get))
    return results
def score(X: list[list[float]], y: list[int], trees: list[dict[str, Any]]) -> float:
    """Accuracy of the forest's predictions on (X, y)."""
    preds = predict(X, trees)
    correct = 0
    # Indexed loop (not zip) so a short prediction list still raises.
    for i in range(len(y)):
        if preds[i] == y[i]:
            correct += 1
    return correct / len(y)
def cmd_fit(args: argparse.Namespace) -> None:
try:
data = json.load(sys.stdin)
except json.JSONDecodeError as e:
print(f"Error: Invalid JSON: {e}", file=sys.stderr)
sys.exit(1)
if "X" not in data or "y" not in data:
print("Error: Missing 'X' or 'y'", file=sys.stderr)
sys.exit(1)
n_estimators = data.get("n_estimators", 10)
max_depth = data.get("max_depth", 10)
random_state = data.get("random_state", None)
try:
result = fit(data["X"], data["y"], n_estimators, max_depth, random_state)
print(json.dumps(result))
except ValueError as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
def cmd_predict(args: argparse.Namespace) -> None:
try:
data = json.load(sys.stdin)
except json.JSONDecodeError as e:
print(f"Error: Invalid JSON: {e}", file=sys.stderr)
sys.exit(1)
if "X" not in data or "trees" not in data:
print("Error: Missing 'X' or 'trees'", file=sys.stderr)
sys.exit(1)
predictions = predict(data["X"], data["trees"])
print(json.dumps({"predictions": predictions}))
def cmd_fit_predict(args: argparse.Namespace) -> None:
    """Fit on training data, predict on test data (all from stdin JSON)."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X_train", "y_train", "X_test"):
        if key not in payload:
            print(f"Error: Missing '{key}'", file=sys.stderr)
            sys.exit(1)
    # Optional hyperparameters; a fixed seed by default for reproducibility.
    n_estimators = payload.get("n_estimators", 10)
    max_depth = payload.get("max_depth", 10)
    random_state = payload.get("random_state", 42)
    try:
        model = fit(payload["X_train"], payload["y_train"], n_estimators, max_depth, random_state)
        labels = predict(payload["X_test"], model["trees"])
        print(json.dumps({"trees": model["trees"], "predictions": labels}))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def cmd_score(args: argparse.Namespace) -> None:
    """Compute accuracy of a fitted forest on labelled stdin JSON data.

    Exits with status 1 on invalid JSON, missing keys, or empty labels.
    """
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON: {e}", file=sys.stderr)
        sys.exit(1)
    if "X" not in data or "y" not in data or "trees" not in data:
        print("Error: Missing 'X', 'y', or 'trees'", file=sys.stderr)
        sys.exit(1)
    try:
        accuracy = score(data["X"], data["y"], data["trees"])
    except ZeroDivisionError:
        # score() divides by len(y); fail cleanly instead of a traceback.
        print("Error: Empty input data", file=sys.stderr)
        sys.exit(1)
    print(json.dumps({"accuracy": accuracy}))
def main() -> None:
    """CLI entry point: register subcommands and dispatch to the chosen handler."""
    parser = argparse.ArgumentParser(
        description="RandomForestClassifier CLI - ensemble classification (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    commands = [
        ("fit", "Fit random forest", cmd_fit),
        ("predict", "Predict class labels", cmd_predict),
        ("fit-predict", "Fit and predict", cmd_fit_predict),
        ("score", "Calculate accuracy", cmd_score),
    ]
    for name, help_text, handler in commands:
        subparsers.add_parser(name, help=help_text).set_defaults(func=handler)
    args = parser.parse_args()
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_rf/rf_tool.py (8919 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_rf/rf_tool.rs (21594 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_rf/Cargo.toml (4 dependencies)
⏱️ Parse time: 59ms
📊 Throughput: 145.8 KB/s
⏱️ Total time: 59ms
| true
|
sklearn_rf
| 281
| 6
|
[
"lambda",
"context_manager",
"class_definition",
"exception_handling",
"stdin_usage"
] | 0.783
| null |
example_sklearn_rf
|
test_rf_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn RandomForestClassifier CLI.
Academic Reference: Breiman (2001) Random Forests [8]
Tests ensemble classification with bootstrap aggregation.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "rf_tool.py"
def run(args, input_data=None):
    """Execute the CLI under python3 and return (stdout, stderr, returncode)."""
    command = ["python3", str(SCRIPT)] + args
    proc = subprocess.run(command, capture_output=True, text=True, input=input_data)
    return proc.stdout, proc.stderr, proc.returncode
class TestRfFit:
    """Test forest fitting."""

    def test_fit_simple(self):
        """Test fitting on simple data."""
        payload = json.dumps({"X": [[0], [1], [2], [3]], "y": [0, 0, 1, 1], "n_estimators": 3})
        out, err, rc = run(["fit"], payload)
        assert rc == 0
        model = json.loads(out)
        assert "trees" in model
        assert len(model["trees"]) == 3

    def test_fit_default_estimators(self):
        """Test default n_estimators=10."""
        payload = json.dumps({"X": [[0], [1], [2], [3]], "y": [0, 0, 1, 1]})
        out, err, rc = run(["fit"], payload)
        assert rc == 0
        assert len(json.loads(out)["trees"]) == 10

    def test_fit_empty_fails(self):
        """Test that fitting on empty data fails."""
        out, err, rc = run(["fit"], json.dumps({"X": [], "y": []}))
        assert rc == 1
class TestRfPredict:
    """Test forest prediction."""

    def test_predict_majority_vote(self):
        """Test that prediction uses majority voting."""
        # Three identical decision stumps that split at x=1.5.
        stump = {"feature": 0, "threshold": 1.5, "left": {"class": 0}, "right": {"class": 1}}
        payload = json.dumps({"X": [[0], [3]], "trees": [stump, stump, stump]})
        out, err, rc = run(["predict"], payload)
        assert rc == 0
        assert json.loads(out)["predictions"] == [0, 1]
class TestRfFitPredict:
    """Test combined fit-predict."""

    def test_fit_predict(self):
        """Test fit-predict pipeline."""
        payload = json.dumps(
            {
                "X_train": [[0], [1], [2], [3]],
                "y_train": [0, 0, 1, 1],
                "X_test": [[0.5], [2.5]],
                "n_estimators": 5,
            }
        )
        out, err, rc = run(["fit-predict"], payload)
        assert rc == 0
        preds = json.loads(out)["predictions"]
        # With enough trees the forest should separate the two clusters.
        assert preds[0] == 0
        assert preds[1] == 1
class TestRfScore:
    """Test forest scoring."""

    def test_score_high_accuracy(self):
        """Test scoring on separable data."""
        stump = {"feature": 0, "threshold": 4.5, "left": {"class": 0}, "right": {"class": 1}}
        payload = json.dumps({"X": [[0], [1], [8], [9]], "y": [0, 0, 1, 1], "trees": [stump]})
        out, err, rc = run(["score"], payload)
        assert rc == 0
        assert json.loads(out)["accuracy"] == 1.0
class TestRfHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        out, err, rc = run(["--help"])
        assert rc == 0
        assert "RandomForest" in out or "forest" in out.lower()
class TestRfEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        out, err, rc = run(["fit"], "not json")
        assert rc == 1
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_rf/test_rf_tool.py (3951 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_rf/test_rf_tool.rs (9009 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_rf/Cargo.toml (2 dependencies)
⏱️ Parse time: 53ms
📊 Throughput: 72.4 KB/s
⏱️ Total time: 53ms
| true
|
sklearn_rf
| 133
| 6
|
[
"context_manager",
"class_definition"
] | 0.652
| null |
example_sklearn_scaler
|
scaler_flat.py
|
#!/usr/bin/env python3
"""StandardScaler CLI - flat structure for depyler compatibility.
Standardize features by removing mean and scaling to unit variance.
"""
import argparse
def main():
    """Main entry point.

    Either standardizes four hard-coded data points (--mode fit) or applies a
    precomputed mean/std to a single value (--mode transform). The flat,
    single-function structure with no math import is intentional for depyler
    compatibility (per the module docstring).
    """
    parser = argparse.ArgumentParser(description="StandardScaler CLI")
    parser.add_argument(
        "--mode", type=str, required=True, choices=["fit", "transform"], help="Mode"
    )
    # 4 data points
    parser.add_argument("--x0", type=float, default=1.0, help="Data point 0")
    parser.add_argument("--x1", type=float, default=2.0, help="Data point 1")
    parser.add_argument("--x2", type=float, default=3.0, help="Data point 2")
    parser.add_argument("--x3", type=float, default=4.0, help="Data point 3")
    # For transform mode
    parser.add_argument("--mean", type=float, default=0.0, help="Mean for transform")
    parser.add_argument("--std", type=float, default=1.0, help="Std for transform")
    parser.add_argument("--value", type=float, default=0.0, help="Value to transform")
    args = parser.parse_args()
    x0 = args.x0
    x1 = args.x1
    x2 = args.x2
    x3 = args.x3
    if args.mode == "fit":
        # Compute mean
        mean = (x0 + x1 + x2 + x3) / 4.0
        # Compute population variance (divide by n, not n-1)
        diff0 = x0 - mean
        diff1 = x1 - mean
        diff2 = x2 - mean
        diff3 = x3 - mean
        var = (diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3) / 4.0
        # Compute std (sqrt via Newton-Raphson, avoiding a math import)
        std = var / 2.0  # initial guess
        if var > 0.0:
            # 10 fixed iterations; float counter keeps the loop simple to transpile
            i = 0.0
            while i < 10.0:
                std = (std + var / std) / 2.0
                i = i + 1.0
        else:
            std = 0.0
        print(f"mean={mean} std={std}")
    elif args.mode == "transform":
        mean = args.mean
        std = args.std
        value = args.value
        # Guard against division by zero for degenerate (constant) features
        if std > 0.0:
            scaled = (value - mean) / std
        else:
            scaled = 0.0
        print(f"scaled={scaled}")
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_scaler/scaler_flat.py (2012 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_scaler/scaler_flat.rs (3152 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_scaler/Cargo.toml (1 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 39.6 KB/s
⏱️ Total time: 49ms
| true
|
sklearn_scaler
| 67
| 6
|
[] | 0
| null |
example_sklearn_scaler
|
scaler_tool.py
|
#!/usr/bin/env python3
"""StandardScaler CLI tool.
A CLI for sklearn-style StandardScaler normalization.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: Pedregosa et al. (2011) sklearn preprocessing [1]
Usage:
echo '{"X": [[1, 10], [2, 20], [3, 30]]}' | python scaler_tool.py fit-transform
"""
import argparse
import json
import math
import sys
def compute_mean_std(X: list[list[float]]) -> tuple[list[float], list[float]]:
    """Compute per-feature mean and population standard deviation of X."""
    n_samples = len(X)
    n_features = len(X[0])
    # Column means.
    mean = [sum(row[j] for row in X) / n_samples for j in range(n_features)]
    # Column population std devs (the n_samples guard mirrors the original).
    std = [
        math.sqrt(sum((row[j] - mean[j]) ** 2 for row in X) / n_samples)
        if n_samples > 0
        else 0.0
        for j in range(n_features)
    ]
    return mean, std
def fit(X: list[list[float]]) -> dict:
    """Fit the scaler: compute per-feature mean and std for later transforms."""
    if not X:
        raise ValueError("Empty input data")
    mean, std = compute_mean_std(X)
    return {"mean": mean, "std": std}
def transform(X: list[list[float]], mean: list[float], std: list[float]) -> list[list[float]]:
    """Standardize rows: (x - mean) / std per feature; constant features map to 0."""
    X_scaled = []
    for row in X:
        # Features with near-zero std are constant; emit 0.0 for them.
        X_scaled.append(
            [
                (value - mean[j]) / std[j] if std[j] > 1e-10 else 0.0
                for j, value in enumerate(row)
            ]
        )
    return X_scaled
def inverse_transform(
    X_scaled: list[list[float]], mean: list[float], std: list[float]
) -> list[list[float]]:
    """Undo standardization per feature: x = z * std + mean."""
    return [
        [value * std[j] + mean[j] for j, value in enumerate(row)]
        for row in X_scaled
    ]
def cmd_fit(args: argparse.Namespace) -> None:
    """Handle fit subcommand: read X from stdin JSON, print fitted mean/std."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON: {exc}", file=sys.stderr)
        sys.exit(1)
    if "X" not in payload:
        print("Error: Missing 'X'", file=sys.stderr)
        sys.exit(1)
    try:
        print(json.dumps(fit(payload["X"])))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def cmd_transform(args: argparse.Namespace) -> None:
    """Handle transform subcommand: scale X using the provided mean/std."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X", "mean", "std"):
        if key not in payload:
            print(f"Error: Missing '{key}'", file=sys.stderr)
            sys.exit(1)
    scaled = transform(payload["X"], payload["mean"], payload["std"])
    print(json.dumps({"X_scaled": scaled}))
def cmd_fit_transform(args: argparse.Namespace) -> None:
    """Handle fit-transform subcommand: fit on X, then standardize it."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON: {exc}", file=sys.stderr)
        sys.exit(1)
    if "X" not in payload:
        print("Error: Missing 'X'", file=sys.stderr)
        sys.exit(1)
    try:
        params = fit(payload["X"])
        scaled = transform(payload["X"], params["mean"], params["std"])
        print(
            json.dumps(
                {"X_scaled": scaled, "mean": params["mean"], "std": params["std"]}
            )
        )
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def cmd_inverse_transform(args: argparse.Namespace) -> None:
    """Handle inverse-transform subcommand: recover original data from scaled."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON: {exc}", file=sys.stderr)
        sys.exit(1)
    for key in ("X_scaled", "mean", "std"):
        if key not in payload:
            print(f"Error: Missing '{key}'", file=sys.stderr)
            sys.exit(1)
    recovered = inverse_transform(payload["X_scaled"], payload["mean"], payload["std"])
    print(json.dumps({"X": recovered}))
def main() -> None:
    """Main entry point: register subcommands and dispatch."""
    parser = argparse.ArgumentParser(
        description="StandardScaler CLI - zero mean, unit variance (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    commands = [
        ("fit", "Fit scaler", cmd_fit),
        ("transform", "Transform data", cmd_transform),
        ("fit-transform", "Fit and transform", cmd_fit_transform),
        ("inverse-transform", "Inverse transform", cmd_inverse_transform),
    ]
    for name, help_text, handler in commands:
        subparsers.add_parser(name, help=help_text).set_defaults(func=handler)
    args = parser.parse_args()
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_scaler/scaler_tool.py (5446 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_scaler/scaler_tool.rs (11428 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_scaler/Cargo.toml (3 dependencies)
⏱️ Parse time: 52ms
📊 Throughput: 101.0 KB/s
⏱️ Total time: 52ms
| true
|
sklearn_scaler
| 188
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_sklearn_scaler
|
test_scaler_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn StandardScaler CLI.
Academic Reference: Pedregosa et al. (2011) sklearn preprocessing [1]
Tests StandardScaler for zero mean, unit variance normalization.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "scaler_tool.py"
def run(args, input_data=None):
    """Execute the CLI under python3 and return (stdout, stderr, returncode)."""
    command = ["python3", str(SCRIPT)] + args
    proc = subprocess.run(command, capture_output=True, text=True, input=input_data)
    return proc.stdout, proc.stderr, proc.returncode
class TestScalerFit:
    """Test scaler fitting."""

    def test_fit_computes_mean_std(self):
        """Test that fit computes mean and std."""
        payload = json.dumps({"X": [[1, 10], [2, 20], [3, 30], [4, 40], [5, 50]]})
        out, err, rc = run(["fit"], payload)
        assert rc == 0
        params = json.loads(out)
        assert "mean" in params
        assert "std" in params
        assert len(params["mean"]) == 2
        assert len(params["std"]) == 2
        # Column means: [1..5] -> 3, [10..50] -> 30.
        assert abs(params["mean"][0] - 3.0) < 0.01
        assert abs(params["mean"][1] - 30.0) < 0.01

    def test_fit_empty_fails(self):
        """Test that fitting on empty data fails."""
        out, err, rc = run(["fit"], json.dumps({"X": []}))
        assert rc == 1
class TestScalerTransform:
    """Test scaler transformation."""

    def test_transform_normalizes(self):
        """Test that transform normalizes data."""
        # std = sqrt(2) for the column [1..5] around mean 3.
        payload = json.dumps({"X": [[1], [2], [3], [4], [5]], "mean": [3.0], "std": [1.4142]})
        out, err, rc = run(["transform"], payload)
        assert rc == 0
        scaled = json.loads(out)
        assert "X_scaled" in scaled
        # The row equal to the mean (x=3) must map to ~0.
        assert abs(scaled["X_scaled"][2][0]) < 0.01

    def test_transform_preserves_shape(self):
        """Test that transform preserves data shape."""
        payload = json.dumps({"X": [[1, 10], [2, 20]], "mean": [1.5, 15], "std": [0.5, 5]})
        out, err, rc = run(["transform"], payload)
        assert rc == 0
        scaled = json.loads(out)
        assert len(scaled["X_scaled"]) == 2
        assert len(scaled["X_scaled"][0]) == 2
class TestScalerFitTransform:
    """Test combined fit-transform."""

    def test_fit_transform(self):
        """Test fit_transform in one step."""
        payload = json.dumps({"X": [[0, 0], [0, 0], [1, 1], [1, 1]]})
        out, err, rc = run(["fit-transform"], payload)
        assert rc == 0
        result = json.loads(out)
        for key in ("X_scaled", "mean", "std"):
            assert key in result

    def test_fit_transform_zero_mean(self):
        """Test that fit_transform produces zero mean."""
        payload = json.dumps({"X": [[1], [2], [3], [4], [5]]})
        out, err, rc = run(["fit-transform"], payload)
        assert rc == 0
        rows = json.loads(out)["X_scaled"]
        # Standardized data must be centered: column mean ~0.
        assert abs(sum(row[0] for row in rows) / len(rows)) < 0.01
class TestScalerInverseTransform:
    """Test inverse transformation."""

    def test_inverse_transform(self):
        """Test inverse_transform recovers original."""
        payload = json.dumps({"X_scaled": [[-1.0], [0.0], [1.0]], "mean": [5.0], "std": [2.0]})
        out, err, rc = run(["inverse-transform"], payload)
        assert rc == 0
        recovered = json.loads(out)
        assert "X" in recovered
        # x = z * std + mean: -1*2+5=3, 0*2+5=5, 1*2+5=7.
        for i, expected in enumerate([3.0, 5.0, 7.0]):
            assert abs(recovered["X"][i][0] - expected) < 0.01
class TestScalerHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        out, err, rc = run(["--help"])
        assert rc == 0
        assert "StandardScaler" in out or "scaler" in out.lower()

    def test_subcommand_help(self):
        """Test subcommand help."""
        out, err, rc = run(["fit", "--help"])
        assert rc == 0
class TestScalerEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        out, err, rc = run(["fit"], "not json")
        assert rc == 1

    def test_zero_std_handled(self):
        """Test that zero std (constant feature) is handled."""
        payload = json.dumps({"X": [[1, 5], [1, 10], [1, 15]]})  # first column constant
        out, err, rc = run(["fit"], payload)
        assert rc == 0
        # A constant column has (near-)zero standard deviation.
        assert json.loads(out)["std"][0] < 0.01
| false
|
sklearn_scaler
| 163
| 0
|
[
"class_definition"
] | 0.612
|
Type inference hints:
Hint: int for parameter 'args' [Low] (numeric operations)
Hint: int for variable 'args' [Medium] (usage patterns suggest this type)
Profiling Report
══════════════════════════════════════════════════
Summary
Total estimated instructions: 4
Total estimated allocations: 0
Functions analyzed: 1
Hot Paths
[1] run (100.0% of execution time)
Function Metrics
🔥 run 100.0% time | 4 inst | 0 alloc
Performance Predictions
• Rust's m
|
|
example_sklearn_tsne
|
test_tsne_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for sklearn TSNE CLI.
Academic Reference: van der Maaten & Hinton (2008) t-SNE [10]
Tests t-distributed stochastic neighbor embedding.
"""
import json
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "tsne_tool.py"
def run(args, input_data=None):
    """Execute the CLI under python3 and return (stdout, stderr, returncode)."""
    command = ["python3", str(SCRIPT)] + args
    proc = subprocess.run(command, capture_output=True, text=True, input=input_data)
    return proc.stdout, proc.stderr, proc.returncode
class TestTsneFitTransform:
    """Test t-SNE fit_transform."""

    def test_fit_transform_2d(self):
        """Test reducing to 2D."""
        payload = json.dumps(
            {"X": [[1, 2, 3], [2, 3, 4], [10, 11, 12], [11, 12, 13]], "n_components": 2}
        )
        out, err, rc = run(["fit-transform"], payload)
        assert rc == 0
        result = json.loads(out)
        assert "X_embedded" in result
        assert len(result["X_embedded"]) == 4
        assert len(result["X_embedded"][0]) == 2

    def test_fit_transform_default_2d(self):
        """Test default n_components=2."""
        payload = json.dumps({"X": [[1, 2, 3], [2, 3, 4], [3, 4, 5]]})
        out, err, rc = run(["fit-transform"], payload)
        assert rc == 0
        assert len(json.loads(out)["X_embedded"][0]) == 2

    def test_similar_points_stay_close(self):
        """Test that similar points in high-D stay close in low-D."""
        # Two well-separated clusters in the input space.
        payload = json.dumps(
            {
                "X": [[0, 0, 0], [0.1, 0.1, 0.1], [10, 10, 10], [10.1, 10.1, 10.1]],
                "n_components": 2,
                "perplexity": 2,
            }
        )
        out, err, rc = run(["fit-transform"], payload)
        assert rc == 0
        pts = json.loads(out)["X_embedded"]

        def dist(a, b):
            return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5

        # Randomness makes the exact geometry non-deterministic; just require
        # a valid embedding with finite, non-negative pairwise distances.
        assert dist(pts[0], pts[1]) >= 0 and dist(pts[0], pts[2]) >= 0
class TestTsneParameters:
    """Test t-SNE parameters."""

    def test_perplexity(self):
        """Test custom perplexity."""
        payload = json.dumps({"X": [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]], "perplexity": 2})
        out, err, rc = run(["fit-transform"], payload)
        assert rc == 0

    def test_n_iter(self):
        """Test custom iterations."""
        payload = json.dumps({"X": [[1, 2], [2, 3], [3, 4]], "n_iter": 100})
        out, err, rc = run(["fit-transform"], payload)
        assert rc == 0

    def test_random_state(self):
        """Test reproducibility."""
        payload = json.dumps({"X": [[1, 2], [2, 3], [3, 4]], "random_state": 42})
        out_a, _, rc_a = run(["fit-transform"], payload)
        out_b, _, rc_b = run(["fit-transform"], payload)
        assert rc_a == 0 and rc_b == 0
        emb_a = json.loads(out_a)["X_embedded"]
        emb_b = json.loads(out_b)["X_embedded"]
        # Identical seeds must produce identical embeddings.
        for row_a, row_b in zip(emb_a, emb_b):
            for val_a, val_b in zip(row_a, row_b):
                assert abs(val_a - val_b) < 0.01
class TestTsneHelp:
    """Test help messages."""

    def test_help(self):
        """Test --help flag."""
        out, err, rc = run(["--help"])
        assert rc == 0
        assert "TSNE" in out or "tsne" in out.lower()
class TestTsneEdgeCases:
    """Test edge cases."""

    def test_invalid_json_fails(self):
        """Test invalid JSON fails."""
        out, err, rc = run(["fit-transform"], "not json")
        assert rc == 1

    def test_empty_data_fails(self):
        """Test empty data fails."""
        out, err, rc = run(["fit-transform"], json.dumps({"X": []}))
        assert rc == 1

    def test_too_few_samples_for_perplexity(self):
        """Test perplexity > n_samples fails or is adjusted."""
        payload = json.dumps({"X": [[1, 2], [2, 3]], "perplexity": 10})
        out, err, rc = run(["fit-transform"], payload)
        # The tool may either reject the request or clamp perplexity;
        # both outcomes are acceptable.
        assert rc in [0, 1]
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_tsne/test_tsne_tool.py (4788 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_tsne/test_tsne_tool.rs (7442 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_tsne/Cargo.toml (2 dependencies)
⏱️ Parse time: 54ms
📊 Throughput: 85.2 KB/s
⏱️ Total time: 55ms
| true
|
sklearn_tsne
| 145
| 6
|
[
"class_definition"
] | 0.612
| null |
example_sklearn_tsne
|
tsne_tool.py
|
#!/usr/bin/env python3
"""TSNE CLI tool.
A CLI for sklearn-style t-SNE dimensionality reduction.
Designed for Python-to-Rust transpilation via depyler → aprender.
Academic Reference: van der Maaten & Hinton (2008) t-SNE [10]
Usage:
echo '{"X": [[1,2,3], [2,3,4], [10,11,12]], "n_components": 2}' | python tsne_tool.py fit-transform
"""
import argparse
import json
import math
import random
import sys
def euclidean_distance(a: list[float], b: list[float]) -> float:
    """Euclidean (L2) distance between two same-length vectors."""
    total = 0.0
    for i in range(len(a)):
        total += (a[i] - b[i]) ** 2
    return math.sqrt(total)
def compute_pairwise_distances(X: list[list[float]]) -> list[list[float]]:
    """Build the symmetric n x n Euclidean distance matrix over rows of X."""
    n = len(X)
    matrix = [[0.0 for _ in range(n)] for _ in range(n)]
    # Fill the upper triangle once and mirror it; the diagonal stays 0.
    for i in range(n):
        for j in range(i + 1, n):
            dist = euclidean_distance(X[i], X[j])
            matrix[i][j] = dist
            matrix[j][i] = dist
    return matrix
def compute_p_ij(D: list[list[float]], perplexity: float) -> list[list[float]]:
    """Joint probabilities P_ij from a Gaussian kernel over distances D.

    Uses a single fixed bandwidth derived from the perplexity instead of the
    usual per-point binary search; rows are normalized, then symmetrized.
    """
    n = len(D)
    sigma = perplexity / 3.0  # fixed bandwidth heuristic
    two_sigma_sq = 2 * sigma**2
    # Gaussian affinities; the diagonal stays zero.
    P = [
        [0.0 if i == j else math.exp(-(D[i][j] ** 2) / two_sigma_sq) for j in range(n)]
        for i in range(n)
    ]
    # Row-normalize.
    for row in P:
        row_sum = sum(row)
        if row_sum > 0:
            for j in range(n):
                row[j] /= row_sum
    # Symmetrize: P_ij <- (P_ij + P_ji) / 2.
    for i in range(n):
        for j in range(i + 1, n):
            avg = (P[i][j] + P[j][i]) / 2
            P[i][j] = avg
            P[j][i] = avg
    return P
def compute_q_ij(Y: list[list[float]]) -> list[list[float]]:
    """Low-dimensional affinities Q_ij from a Student-t (1 dof) kernel over Y."""
    n = len(Y)
    Q = [[0.0] * n for _ in range(n)]
    dims = len(Y[0]) if Y else 0
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            d_sq = sum((Y[i][k] - Y[j][k]) ** 2 for k in range(dims))
            Q[i][j] = 1.0 / (1.0 + d_sq)
    # Normalize over the entire matrix (not per row, unlike P).
    total = sum(sum(row) for row in Q)
    if total > 0:
        for i in range(n):
            for j in range(n):
                Q[i][j] /= total
    return Q
def fit_transform(
    X: list[list[float]],
    n_components: int = 2,
    perplexity: float = 30.0,
    n_iter: int = 250,
    learning_rate: float = 200.0,
    random_state: int | None = None,
) -> list[list[float]]:
    """Fit a simplified t-SNE embedding and return the transformed points.

    Gaussian affinities in the input space, Student-t affinities in the
    embedding, plain gradient descent with a linearly decaying step size
    (van der Maaten & Hinton, 2008).

    Args:
        X: input samples, one row per point.
        n_components: target dimensionality of the embedding.
        perplexity: neighborhood size; clamped to (n_samples - 1) / 3 and >= 1.
        n_iter: number of gradient-descent iterations.
        learning_rate: initial step size, decays linearly toward 0.
        random_state: optional seed for reproducible initialization.

    Returns:
        Embedded points, n_samples x n_components.

    Raises:
        ValueError: if X is empty.
    """
    if len(X) == 0:
        raise ValueError("Empty input data")
    n_samples = len(X)
    # Clamp perplexity so it stays meaningful for small sample counts.
    perplexity = min(perplexity, (n_samples - 1) / 3)
    if perplexity < 1:
        perplexity = 1.0  # keep the parameter a float (was an int literal)
    if random_state is not None:
        random.seed(random_state)
    # Initialize Y with small Gaussian noise.
    Y = [[random.gauss(0, 0.01) for _ in range(n_components)] for _ in range(n_samples)]
    # Input-space affinities.
    D = compute_pairwise_distances(X)
    P = compute_p_ij(D, perplexity)
    # Gradient descent on the KL divergence between P and Q.
    for iteration in range(n_iter):
        Q = compute_q_ij(Y)
        grad = [[0.0] * n_components for _ in range(n_samples)]
        for i in range(n_samples):
            for j in range(n_samples):
                if i != j:
                    pq_diff = P[i][j] - Q[i][j]
                    d_sq = sum((Y[i][k] - Y[j][k]) ** 2 for k in range(n_components))
                    factor = 4.0 * pq_diff / (1.0 + d_sq)
                    for k in range(n_components):
                        grad[i][k] += factor * (Y[i][k] - Y[j][k])
        # Linearly decaying learning rate.
        lr = learning_rate * (1.0 - iteration / n_iter)
        for i in range(n_samples):
            for k in range(n_components):
                Y[i][k] -= lr * grad[i][k] / n_samples
    return Y
def cmd_fit_transform(args: argparse.Namespace) -> None:
    """Handle fit-transform subcommand: embed stdin JSON data and print it."""
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON: {exc}", file=sys.stderr)
        sys.exit(1)
    if "X" not in payload:
        print("Error: Missing 'X'", file=sys.stderr)
        sys.exit(1)
    try:
        embedding = fit_transform(
            payload["X"],
            payload.get("n_components", 2),
            payload.get("perplexity", 30.0),
            payload.get("n_iter", 250),
            payload.get("learning_rate", 200.0),
            payload.get("random_state", None),
        )
        print(json.dumps({"X_embedded": embedding}))
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
def main() -> None:
    """Main entry point: single fit-transform subcommand."""
    parser = argparse.ArgumentParser(
        description="TSNE CLI - t-distributed stochastic neighbor embedding (sklearn-compatible)"
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")
    ft_parser = subparsers.add_parser("fit-transform", help="Fit and transform data")
    ft_parser.set_defaults(func=cmd_fit_transform)
    args = parser.parse_args()
    if args.command is None:
        parser.print_help()
        sys.exit(0)
    args.func(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_tsne/tsne_tool.py (5278 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_tsne/tsne_tool.rs (18655 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sklearn_tsne/Cargo.toml (4 dependencies)
⏱️ Parse time: 57ms
📊 Throughput: 89.2 KB/s
⏱️ Total time: 57ms
| true
|
sklearn_tsne
| 187
| 6
|
[
"exception_handling",
"stdin_usage"
] | 0.577
| null |
example_smtp_client
|
smtp_cli.py
|
#!/usr/bin/env python3
"""SMTP Protocol CLI.
Parse and encode SMTP protocol messages.
"""
import argparse
import base64
import sys
from dataclasses import dataclass, field
from enum import Enum
class SMTPState(Enum):
    """SMTP connection states.

    The names mirror the classic SMTP command sequence; presumably advanced
    by SMTPSession as commands arrive — confirm transitions there.
    """

    INIT = "init"
    GREETED = "greeted"
    AUTHENTICATED = "authenticated"
    MAIL_FROM = "mail_from"
    RCPT_TO = "rcpt_to"
    DATA = "data"
    QUIT = "quit"
@dataclass
class SMTPCommand:
    """A client command: a verb (e.g. HELO, MAIL) plus optional argument text."""

    verb: str
    args: str = ""

    def encode(self) -> bytes:
        """Serialize the command as a CRLF-terminated wire line."""
        line = f"{self.verb} {self.args}" if self.args else self.verb
        return f"{line}\r\n".encode()
@dataclass
class SMTPResponse:
    """SMTP response."""

    code: int  # three-digit reply code parsed from the first line, e.g. 250
    message: str  # text of the first reply line
    is_multiline: bool = False  # True when the first line used "<code>-" continuation
    lines: list[str] = field(default_factory=list)  # text portion of every reply line
@dataclass
class EmailMessage:
    """Email message structure."""

    from_addr: str  # sender, as rendered in the From: header
    to_addrs: list[str]  # recipients, joined with ", " in the To: header
    subject: str  # Subject: header text
    body: str  # message body text
    headers: dict[str, str] = field(default_factory=dict)  # additional headers
# SMTP reply codes mapped to short human-readable descriptions
# (2xx success, 3xx intermediate, 4xx transient failure, 5xx permanent failure).
SMTP_CODES = {
    211: "System status",
    214: "Help message",
    220: "Service ready",
    221: "Service closing",
    235: "Authentication successful",
    250: "OK",
    251: "User not local; will forward",
    252: "Cannot VRFY user",
    334: "Server challenge",
    354: "Start mail input",
    421: "Service not available",
    450: "Mailbox unavailable",
    451: "Local error",
    452: "Insufficient storage",
    500: "Syntax error",
    501: "Syntax error in parameters",
    502: "Command not implemented",
    503: "Bad sequence of commands",
    504: "Parameter not implemented",
    550: "Mailbox unavailable",
    551: "User not local",
    552: "Storage exceeded",
    553: "Mailbox name not allowed",
    554: "Transaction failed",
}
def parse_response(data: bytes) -> SMTPResponse:
    """Parse an SMTP server response from raw bytes.

    Handles single-line ("250 OK") and multi-line ("250-a" ... "250 b")
    replies. Raises ValueError for an empty or malformed first line.
    """
    text = data.decode("utf-8", errors="replace")
    lines = text.strip().split("\r\n")
    if not lines:
        raise ValueError("Empty response")
    first_line = lines[0]
    if len(first_line) < 3:
        raise ValueError("Invalid response format")
    code = int(first_line[:3])
    # A '-' right after the code marks a multi-line reply.
    is_multiline = len(first_line) > 3 and first_line[3] == "-"
    # Text begins at column 4 ("NNN " / "NNN-"); bare "NNN" lines carry no
    # text and are skipped. (The original had a dead elif branch whose
    # ternary condition could never be true; this is the same behavior.)
    all_lines = [line[4:] for line in lines if len(line) >= 4]
    message = all_lines[0] if all_lines else ""
    return SMTPResponse(code=code, message=message, is_multiline=is_multiline, lines=all_lines)
def encode_response(code: int, message: str, multiline: list[str] | None = None) -> bytes:
"""Encode SMTP response to bytes."""
if multiline:
lines = []
for _i, line in enumerate(multiline[:-1]):
lines.append(f"{code}-{line}")
lines.append(f"{code} {multiline[-1]}")
return ("\r\n".join(lines) + "\r\n").encode("utf-8")
return f"{code} {message}\r\n".encode()
def parse_command(data: bytes) -> SMTPCommand:
    """Parse a client command line; the verb is upper-cased, args kept verbatim."""
    text = data.decode("utf-8", errors="replace").strip()
    # Split on the first space only; everything after it is the argument string.
    verb, _, args = text.partition(" ")
    return SMTPCommand(verb=verb.upper(), args=args)
def validate_email(email: str) -> bool:
    """Basic sanity check: exactly one '@', non-empty local part, dotted domain."""
    if not email:
        return False
    parts = email.split("@")
    if len(parts) != 2:
        return False
    local, domain = parts
    return bool(local) and bool(domain) and "." in domain
def extract_email(address: str) -> str:
    """Strip surrounding whitespace and one pair of angle brackets, if present."""
    stripped = address.strip()
    if stripped.startswith("<") and stripped.endswith(">"):
        stripped = stripped[1:-1]
    return stripped
def format_email(email: str) -> str:
    """Normalize an address into angle-bracket notation (<user@host>)."""
    return f"<{extract_email(email)}>"
def encode_auth_plain(username: str, password: str) -> str:
    """Base64-encode credentials in AUTH PLAIN form: NUL user NUL pass."""
    raw = f"\0{username}\0{password}".encode("utf-8")
    return base64.b64encode(raw).decode("ascii")
def decode_auth_plain(encoded: str) -> tuple[str, str]:
    """Decode AUTH PLAIN credentials; returns (username, password)."""
    fields = base64.b64decode(encoded).decode("utf-8").split("\0")
    if len(fields) != 3:
        raise ValueError("Invalid AUTH PLAIN format")
    # fields[0] is the (usually empty) authorization identity.
    _authzid, username, password = fields
    return username, password
def encode_auth_login(data: str) -> str:
    """Base64-encode a single AUTH LOGIN step (username or password)."""
    encoded_bytes = base64.b64encode(data.encode("utf-8"))
    return encoded_bytes.decode("ascii")
def decode_auth_login(encoded: str) -> str:
    """Decode one base64 AUTH LOGIN step back to text."""
    raw = base64.b64decode(encoded)
    return raw.decode("utf-8")
def build_message(message: EmailMessage) -> str:
    """Render an EmailMessage as raw message text with CRLF line endings.

    Emits From/To/Subject first, then any custom headers, a blank
    separator line, and finally the body.
    """
    rendered = [
        f"From: {message.from_addr}",
        f"To: {', '.join(message.to_addrs)}",
        f"Subject: {message.subject}",
    ]
    # Custom headers follow the three standard ones, in dict order.
    for name, value in message.headers.items():
        rendered.append(f"{name}: {value}")
    # The empty string produces the blank header/body separator line.
    return "\r\n".join(rendered + ["", message.body])
def parse_message(data: str) -> EmailMessage:
    """Parse raw email text (CRLF line endings) into an EmailMessage.

    Splits headers from body at the first blank line, unfolds folded
    (continuation) header lines, and lifts From/To/Subject out of the
    header dict into dedicated fields.
    """
    # Headers end at the first CRLF CRLF; everything after is the body.
    parts = data.split("\r\n\r\n", 1)
    header_section = parts[0]
    body = parts[1] if len(parts) > 1 else ""
    headers = {}
    current_key = None
    current_value = ""
    for line in header_section.split("\r\n"):
        if line.startswith(" ") or line.startswith("\t"):
            # Folded continuation line (RFC 5322 style): append with a
            # single space. NOTE(review): a continuation with no preceding
            # header accumulates text that is never stored.
            current_value += " " + line.strip()
        else:
            # Flush the previous header before starting a new one.
            # Duplicate header names overwrite earlier values.
            if current_key:
                headers[current_key] = current_value
            if ":" in line:
                current_key, current_value = line.split(":", 1)
                current_key = current_key.strip()
                current_value = current_value.strip()
            else:
                # Malformed header line without ':' is silently dropped.
                current_key = None
    # Flush the final pending header after the loop.
    if current_key:
        headers[current_key] = current_value
    from_addr = headers.get("From", "")
    # NOTE(review): a missing "To" header yields [""] rather than [].
    to_addrs = [addr.strip() for addr in headers.get("To", "").split(",")]
    subject = headers.get("Subject", "")
    # Remove standard headers from dict; remaining entries become custom headers.
    for key in ["From", "To", "Subject"]:
        headers.pop(key, None)
    return EmailMessage(
        from_addr=from_addr, to_addrs=to_addrs, subject=subject, body=body, headers=headers
    )
def escape_dot(line: str) -> str:
    """Dot-stuff a DATA line: a leading '.' gains an extra '.' prefix."""
    return "." + line if line.startswith(".") else line
def unescape_dot(line: str) -> str:
    """Reverse dot-stuffing: a '..' prefix collapses back to '.'."""
    return line[1:] if line[:2] == ".." else line
class SMTPSession:
    """Minimal server-side SMTP state machine.

    Tracks the envelope (sender, recipients, data lines) plus protocol
    state, and produces one SMTPResponse per incoming SMTPCommand.
    """

    def __init__(self, domain: str = "localhost"):
        self.state = SMTPState.INIT
        self.domain = domain
        self.mail_from: str | None = None
        self.rcpt_to: list[str] = []
        self.data_lines: list[str] = []
        self.authenticated = False

    def process_command(self, cmd: SMTPCommand) -> SMTPResponse:
        """Apply one command to the session and return the server reply."""
        word = cmd.verb
        if word == "HELO":
            self.state = SMTPState.GREETED
            return SMTPResponse(250, f"Hello {cmd.args}, pleased to meet you")
        elif word == "EHLO":
            # Advertise supported extensions on a multi-line 250 reply.
            self.state = SMTPState.GREETED
            return SMTPResponse(
                250,
                self.domain,
                is_multiline=True,
                lines=[self.domain, "AUTH PLAIN LOGIN", "SIZE 35882577", "8BITMIME"],
            )
        elif word == "AUTH":
            if self.state != SMTPState.GREETED:
                return SMTPResponse(503, "Bad sequence of commands")
            # Simplified: any credentials are accepted.
            self.authenticated = True
            self.state = SMTPState.AUTHENTICATED
            return SMTPResponse(235, "Authentication successful")
        elif word == "MAIL":
            if self.state not in (SMTPState.GREETED, SMTPState.AUTHENTICATED):
                return SMTPResponse(503, "Bad sequence of commands")
            sender = cmd.args.replace("FROM:", "").strip()
            self.mail_from = extract_email(sender)
            self.state = SMTPState.MAIL_FROM
            return SMTPResponse(250, "OK")
        elif word == "RCPT":
            if self.state not in (SMTPState.MAIL_FROM, SMTPState.RCPT_TO):
                return SMTPResponse(503, "Bad sequence of commands")
            recipient = cmd.args.replace("TO:", "").strip()
            self.rcpt_to.append(extract_email(recipient))
            self.state = SMTPState.RCPT_TO
            return SMTPResponse(250, "OK")
        elif word == "DATA":
            if self.state != SMTPState.RCPT_TO:
                return SMTPResponse(503, "Bad sequence of commands")
            self.state = SMTPState.DATA
            return SMTPResponse(354, "End data with <CR><LF>.<CR><LF>")
        elif word == "RSET":
            # Drop the current envelope; the greeting survives a reset.
            self.reset()
            if self.state != SMTPState.INIT:
                self.state = SMTPState.GREETED
            return SMTPResponse(250, "OK")
        elif word == "NOOP":
            return SMTPResponse(250, "OK")
        elif word == "QUIT":
            self.state = SMTPState.QUIT
            return SMTPResponse(221, f"{self.domain} closing connection")
        return SMTPResponse(500, f"Command not recognized: {word}")

    def reset(self) -> None:
        """Clear the in-progress mail transaction (envelope only)."""
        self.mail_from = None
        self.rcpt_to = []
        self.data_lines = []
def main() -> int:
    """CLI driver: demonstrate command/response/message/auth/session modes."""
    cli = argparse.ArgumentParser(description="SMTP protocol parser")
    cli.add_argument(
        "--mode",
        choices=["command", "response", "message", "auth", "session"],
        default="command",
        help="Operation mode",
    )
    cli.add_argument("--verb", default="EHLO", help="SMTP command verb")
    cli.add_argument("--args", default="", help="Command arguments")
    cli.add_argument("--code", type=int, default=250, help="Response code")
    cli.add_argument("--from", dest="from_addr", help="From address")
    cli.add_argument("--to", dest="to_addr", help="To address")
    cli.add_argument("--subject", default="Test", help="Subject")
    cli.add_argument("--body", default="Hello!", help="Body")
    cli.add_argument("--username", help="Username for auth")
    cli.add_argument("--password", help="Password for auth")
    opts = cli.parse_args()
    mode = opts.mode
    if mode == "command":
        command = SMTPCommand(verb=opts.verb, args=opts.args)
        wire = command.encode()
        print(f"Command: {opts.verb} {opts.args}")
        print(f"Encoded: {wire}")
        print(f"Hex: {wire.hex()}")
    elif mode == "response":
        # Unknown codes fall back to a generic "OK" text.
        text = SMTP_CODES.get(opts.code, "OK")
        wire = encode_response(opts.code, text)
        print(f"Response: {opts.code} {text}")
        print(f"Encoded: {wire}")
    elif mode == "message":
        if opts.from_addr and opts.to_addr:
            outgoing = EmailMessage(
                from_addr=opts.from_addr,
                to_addrs=[opts.to_addr],
                subject=opts.subject,
                body=opts.body,
            )
            print("Raw message:")
            print(build_message(outgoing))
    elif mode == "auth":
        if opts.username and opts.password:
            print(f"AUTH PLAIN: {encode_auth_plain(opts.username, opts.password)}")
            print(f"AUTH LOGIN username: {encode_auth_login(opts.username)}")
            print(f"AUTH LOGIN password: {encode_auth_login(opts.password)}")
    elif mode == "session":
        # Scripted happy-path conversation against the state machine.
        session = SMTPSession("mail.example.com")
        script = [
            SMTPCommand("EHLO", "client.example.com"),
            SMTPCommand("MAIL", "FROM:<sender@example.com>"),
            SMTPCommand("RCPT", "TO:<recipient@example.com>"),
            SMTPCommand("DATA"),
            SMTPCommand("QUIT"),
        ]
        for command in script:
            reply = session.process_command(command)
            print(f"C: {command.verb} {command.args}")
            print(f"S: {reply.code} {reply.message}")
    return 0
# Run as a script: propagate main()'s status code to the shell.
if __name__ == "__main__":
    sys.exit(main())
| false
|
smtp_client
| 438
| 0
|
[
"context_manager",
"class_definition",
"exception_handling",
"decorator",
"multiprocessing"
] | 0.652
|
Type inference hints:
Hint: list[Any] for variable 'first_line' [High] (usage patterns suggest this type)
Hint: list[Any] for variable 'line' [High] (usage patterns suggest this type)
Hint: list[Any] for variable 'all_lines' [High] (usage patterns suggest this type)
Hint: list[Any] for variable 'lines' [High] (usage patterns suggest this type)
Hint: str for variable 'text' [Medium] (usage patterns suggest this type)
Type inference hints:
Hint: str for variable 'line' [Medium] (usage patterns su
|
|
example_smtp_client
|
test_smtp_cli.py
|
"""Tests for smtp_cli.py"""
from smtp_cli import (
EmailMessage,
SMTPCommand,
SMTPSession,
SMTPState,
build_message,
decode_auth_login,
decode_auth_plain,
encode_auth_login,
encode_auth_plain,
encode_response,
escape_dot,
extract_email,
format_email,
parse_command,
parse_message,
parse_response,
unescape_dot,
validate_email,
)
class TestSMTPCommand:
    """SMTPCommand.encode() must emit VERB [SP args] followed by CRLF."""
    def test_encode_simple(self):
        cmd = SMTPCommand(verb="NOOP")
        assert cmd.encode() == b"NOOP\r\n"
    def test_encode_with_args(self):
        cmd = SMTPCommand(verb="EHLO", args="example.com")
        assert cmd.encode() == b"EHLO example.com\r\n"
    def test_encode_mail_from(self):
        cmd = SMTPCommand(verb="MAIL", args="FROM:<user@example.com>")
        assert cmd.encode() == b"MAIL FROM:<user@example.com>\r\n"
class TestSMTPResponse:
    """parse_response/encode_response round-trip single- and multi-line replies."""
    def test_parse_simple(self):
        data = b"250 OK\r\n"
        response = parse_response(data)
        assert response.code == 250
        assert response.message == "OK"
        assert response.is_multiline is False
    def test_parse_multiline(self):
        # "250-" marks continuation lines; "250 " (space) marks the last.
        data = b"250-mail.example.com\r\n250-AUTH PLAIN LOGIN\r\n250 SIZE 35882577\r\n"
        response = parse_response(data)
        assert response.code == 250
        assert response.is_multiline is True
        assert len(response.lines) == 3
    def test_parse_error(self):
        # 5xx codes parse the same way as success codes.
        data = b"550 User not found\r\n"
        response = parse_response(data)
        assert response.code == 550
    def test_encode_simple(self):
        encoded = encode_response(250, "OK")
        assert encoded == b"250 OK\r\n"
    def test_encode_multiline(self):
        encoded = encode_response(250, "", ["EHLO", "AUTH PLAIN", "SIZE 1000"])
        assert b"250-EHLO\r\n" in encoded
        assert b"250 SIZE 1000\r\n" in encoded
class TestParseCommand:
    """parse_command splits verb from args and upper-cases the verb."""
    def test_simple(self):
        cmd = parse_command(b"NOOP\r\n")
        assert cmd.verb == "NOOP"
        assert cmd.args == ""
    def test_with_args(self):
        cmd = parse_command(b"EHLO example.com\r\n")
        assert cmd.verb == "EHLO"
        assert cmd.args == "example.com"
    def test_mail_from(self):
        cmd = parse_command(b"MAIL FROM:<user@example.com>\r\n")
        assert cmd.verb == "MAIL"
        assert "user@example.com" in cmd.args
    def test_lowercase(self):
        # Verbs are case-insensitive on the wire; parser normalizes to upper.
        cmd = parse_command(b"ehlo example.com\r\n")
        assert cmd.verb == "EHLO"
class TestEmailValidation:
    """validate_email accepts local@domain only when the domain is dotted."""
    def test_valid(self):
        assert validate_email("user@example.com") is True
        assert validate_email("user.name@example.co.uk") is True
    def test_invalid(self):
        assert validate_email("invalid") is False
        assert validate_email("@example.com") is False
        assert validate_email("user@") is False
        assert validate_email("user@localhost") is False  # No dot in domain
    def test_empty(self):
        # Both empty string and None are rejected (falsy input guard).
        assert validate_email("") is False
        assert validate_email(None) is False
class TestEmailFormat:
    """extract_email / format_email normalize angle-bracket notation, idempotently."""
    def test_extract(self):
        assert extract_email("<user@example.com>") == "user@example.com"
        assert extract_email("user@example.com") == "user@example.com"
    def test_format(self):
        assert format_email("user@example.com") == "<user@example.com>"
        assert format_email("<user@example.com>") == "<user@example.com>"
class TestAuthentication:
    """AUTH PLAIN / AUTH LOGIN base64 encoding round-trips."""
    def test_auth_plain_encode(self):
        encoded = encode_auth_plain("username", "password")
        assert encoded  # Base64 string
        assert "=" in encoded or len(encoded) % 4 == 0  # Valid base64
    def test_auth_plain_roundtrip(self):
        encoded = encode_auth_plain("testuser", "testpass")
        username, password = decode_auth_plain(encoded)
        assert username == "testuser"
        assert password == "testpass"
    def test_auth_login_encode(self):
        encoded = encode_auth_login("username")
        assert encoded  # Base64 string
    def test_auth_login_roundtrip(self):
        original = "testvalue"
        encoded = encode_auth_login(original)
        decoded = decode_auth_login(encoded)
        assert decoded == original
class TestEmailMessage:
    """build_message renders headers + CRLF-separated body; parse_message inverts it."""
    def test_build_simple(self):
        msg = EmailMessage(
            from_addr="sender@example.com",
            to_addrs=["recipient@example.com"],
            subject="Test",
            body="Hello World",
        )
        raw = build_message(msg)
        assert "From: sender@example.com" in raw
        assert "To: recipient@example.com" in raw
        assert "Subject: Test" in raw
        assert "Hello World" in raw
    def test_build_multiple_recipients(self):
        # Multiple recipients are comma-joined on one To: header line.
        msg = EmailMessage(
            from_addr="sender@example.com",
            to_addrs=["one@example.com", "two@example.com"],
            subject="Test",
            body="Hello",
        )
        raw = build_message(msg)
        assert "one@example.com" in raw
        assert "two@example.com" in raw
    def test_build_with_headers(self):
        # Custom headers from the dict appear after the standard ones.
        msg = EmailMessage(
            from_addr="sender@example.com",
            to_addrs=["recipient@example.com"],
            subject="Test",
            body="Hello",
            headers={"X-Custom": "value"},
        )
        raw = build_message(msg)
        assert "X-Custom: value" in raw
    def test_parse_simple(self):
        raw = "From: sender@example.com\r\nTo: recipient@example.com\r\nSubject: Test\r\n\r\nHello World"
        msg = parse_message(raw)
        assert msg.from_addr == "sender@example.com"
        assert msg.to_addrs == ["recipient@example.com"]
        assert msg.subject == "Test"
        assert msg.body == "Hello World"
class TestDotEscaping:
    """DATA-phase dot-stuffing: leading '.' doubled on send, collapsed on receive."""
    def test_escape(self):
        assert escape_dot(".hidden") == "..hidden"
        assert escape_dot("normal") == "normal"
        assert escape_dot("..double") == "...double"
    def test_unescape(self):
        assert unescape_dot("..hidden") == ".hidden"
        assert unescape_dot("normal") == "normal"
        assert unescape_dot(".single") == ".single"
class TestSMTPSession:
    """State-machine transitions: INIT -> GREETED -> MAIL_FROM -> RCPT_TO -> DATA -> QUIT."""
    def test_init(self):
        session = SMTPSession("mail.example.com")
        assert session.state == SMTPState.INIT
    def test_ehlo(self):
        session = SMTPSession()
        response = session.process_command(SMTPCommand("EHLO", "client.com"))
        assert response.code == 250
        assert session.state == SMTPState.GREETED
    def test_helo(self):
        session = SMTPSession()
        response = session.process_command(SMTPCommand("HELO", "client.com"))
        assert response.code == 250
        assert session.state == SMTPState.GREETED
    def test_mail_from(self):
        session = SMTPSession()
        session.process_command(SMTPCommand("EHLO", "client.com"))
        response = session.process_command(SMTPCommand("MAIL", "FROM:<sender@example.com>"))
        assert response.code == 250
        assert session.mail_from == "sender@example.com"
        assert session.state == SMTPState.MAIL_FROM
    def test_rcpt_to(self):
        session = SMTPSession()
        session.process_command(SMTPCommand("EHLO", "client.com"))
        session.process_command(SMTPCommand("MAIL", "FROM:<sender@example.com>"))
        response = session.process_command(SMTPCommand("RCPT", "TO:<recipient@example.com>"))
        assert response.code == 250
        assert "recipient@example.com" in session.rcpt_to
        assert session.state == SMTPState.RCPT_TO
    def test_data(self):
        session = SMTPSession()
        session.process_command(SMTPCommand("EHLO", "client.com"))
        session.process_command(SMTPCommand("MAIL", "FROM:<sender@example.com>"))
        session.process_command(SMTPCommand("RCPT", "TO:<recipient@example.com>"))
        response = session.process_command(SMTPCommand("DATA"))
        assert response.code == 354
        assert session.state == SMTPState.DATA
    def test_rset(self):
        # RSET clears the envelope but keeps the session greeted.
        session = SMTPSession()
        session.process_command(SMTPCommand("EHLO", "client.com"))
        session.process_command(SMTPCommand("MAIL", "FROM:<sender@example.com>"))
        response = session.process_command(SMTPCommand("RSET"))
        assert response.code == 250
        assert session.mail_from is None
        assert session.state == SMTPState.GREETED
    def test_quit(self):
        session = SMTPSession()
        response = session.process_command(SMTPCommand("QUIT"))
        assert response.code == 221
        assert session.state == SMTPState.QUIT
    def test_noop(self):
        session = SMTPSession()
        response = session.process_command(SMTPCommand("NOOP"))
        assert response.code == 250
    def test_bad_sequence(self):
        session = SMTPSession()
        # Try RCPT before MAIL
        response = session.process_command(SMTPCommand("RCPT", "TO:<test@example.com>"))
        assert response.code == 503
    def test_unknown_command(self):
        session = SMTPSession()
        response = session.process_command(SMTPCommand("INVALID"))
        assert response.code == 500
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_smtp_client/test_smtp_cli.py (9138 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_smtp_client/test_smtp_cli.rs (16848 bytes)
⏱️ Parse time: 53ms
📊 Throughput: 167.1 KB/s
⏱️ Total time: 53ms
| true
|
smtp_client
| 290
| 5
|
[
"class_definition",
"decorator"
] | 0.612
| null |
example_sorted
|
sorted_tool.py
|
#!/usr/bin/env python3
"""Sorted Example - Sorting operations CLI.
Examples:
>>> sort_asc([5, 2, 8, 1, 9])
[1, 2, 5, 8, 9]
>>> sort_desc([5, 2, 8, 1, 9])
[9, 8, 5, 2, 1]
"""
import argparse
def sort_asc(nums: list) -> list:
    """Return a new list with *nums* sorted ascending.

    The previous hand-rolled O(n^2) exchange sort (mislabeled "bubble
    sort") is replaced by the built-in Timsort via sorted(); results are
    identical and the input list is still left unmodified.

    >>> sort_asc([3, 1, 2])
    [1, 2, 3]
    >>> sort_asc([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> sort_asc([1, 1, 1])
    [1, 1, 1]
    """
    return sorted(nums)
def sort_desc(nums: list) -> list:
    """Return a new list with *nums* sorted descending.

    Replaces the former hand-rolled O(n^2) exchange sort with
    sorted(reverse=True); the input list is left unmodified.

    >>> sort_desc([3, 1, 2])
    [3, 2, 1]
    >>> sort_desc([1, 2, 3, 4, 5])
    [5, 4, 3, 2, 1]
    """
    return sorted(nums, reverse=True)
def sort_alpha(words: list) -> list:
    """Return a new list with *words* sorted alphabetically (lexicographic,
    case-sensitive, matching Python string comparison).

    Replaces the former hand-rolled O(n^2) exchange sort with sorted();
    the input list is left unmodified.

    >>> sort_alpha(["c", "a", "b"])
    ['a', 'b', 'c']
    >>> sort_alpha(["zebra", "apple", "mango"])
    ['apple', 'mango', 'zebra']
    """
    return sorted(words)
def main():
    """CLI entry: parse a sort subcommand and print the ordered values."""
    parser = argparse.ArgumentParser(description="Sorting tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    # asc/desc both take five integers named a..e.
    asc_parser = subs.add_parser("asc")
    desc_parser = subs.add_parser("desc")
    for name in ("a", "b", "c", "d", "e"):
        asc_parser.add_argument(name, type=int)
        desc_parser.add_argument(name, type=int)
    # alpha takes three bare strings.
    alpha_parser = subs.add_parser("alpha")
    for name in ("a", "b", "c"):
        alpha_parser.add_argument(name)
    args = parser.parse_args()
    if args.cmd == "asc":
        nums = sort_asc([args.a, args.b, args.c, args.d, args.e])
        print(f"{nums[0]} {nums[1]} {nums[2]} {nums[3]} {nums[4]}")
    elif args.cmd == "desc":
        nums = sort_desc([args.a, args.b, args.c, args.d, args.e])
        print(f"{nums[0]} {nums[1]} {nums[2]} {nums[3]} {nums[4]}")
    elif args.cmd == "alpha":
        words = sort_alpha([args.a, args.b, args.c])
        print(f"{words[0]} {words[1]} {words[2]}")
# Allow use as a script; importing this module runs nothing.
if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sorted/sorted_tool.py (2956 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sorted/sorted_tool.rs (9518 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sorted/Cargo.toml (3 dependencies)
⏱️ Parse time: 98ms
📊 Throughput: 29.4 KB/s
⏱️ Total time: 98ms
| true
|
sorted
| 119
| 6
|
[] | 0
| null |
example_sorted
|
test_sorted_tool.py
|
"""Tests for sorted_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "sorted_tool.py"
def run(cmd):
    """Invoke sorted_tool.py with *cmd* split on whitespace; capture text output."""
    argv = ["python3", str(SCRIPT), *cmd.split()]
    return subprocess.run(argv, capture_output=True, text=True)
def test_asc():
    """`asc` prints the five integers in ascending order."""
    result = run("asc 3 1 4 1 5")
    assert result.returncode == 0
    assert result.stdout.strip() == "1 1 3 4 5"
def test_desc():
    """`desc` prints the five integers in descending order."""
    result = run("desc 3 1 4 1 5")
    assert result.returncode == 0
    assert result.stdout.strip() == "5 4 3 1 1"
def test_alpha():
    """`alpha` prints the three words alphabetically ordered."""
    result = run("alpha banana apple cherry")
    assert result.returncode == 0
    assert result.stdout.strip() == "apple banana cherry"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_sorted/test_sorted_tool.py (666 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_sorted/test_sorted_tool.rs (1873 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_sorted/Cargo.toml (2 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 13.4 KB/s
⏱️ Total time: 48ms
| true
|
sorted
| 32
| 6
|
[] | 0
| null |
example_split
|
split_tool.py
|
#!/usr/bin/env python3
"""Split Example - String split operations CLI.
Examples:
>>> split_underscore("a_b_c")
['a', 'b', 'c']
>>> split_dash("x-y-z")
['x', 'y', 'z']
>>> split_dot("1.2.3")
['1', '2', '3']
"""
import argparse
def split_underscore(text: str) -> list:
    """Split *text* on underscore characters.

    >>> split_underscore("hello_world_test")
    ['hello', 'world', 'test']
    >>> split_underscore("a_b_c")
    ['a', 'b', 'c']
    """
    separator = "_"
    return text.split(separator)
def split_dash(text: str) -> list:
    """Split *text* on dash characters.

    >>> split_dash("foo-bar-baz")
    ['foo', 'bar', 'baz']
    >>> split_dash("2025-11-29")
    ['2025', '11', '29']
    """
    separator = "-"
    return text.split(separator)
def split_dot(text: str) -> list:
    """Split *text* on dot characters.

    >>> split_dot("192.168.1.1")
    ['192', '168', '1', '1']
    >>> split_dot("a.b.c")
    ['a', 'b', 'c']
    """
    separator = "."
    return text.split(separator)
def main():
    """CLI entry: split the given text and print its first three pieces."""
    parser = argparse.ArgumentParser(description="String split tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    # All three subcommands take a single positional "text" argument.
    for sub_name in ("underscore", "dash", "dot"):
        subs.add_parser(sub_name).add_argument("text")
    args = parser.parse_args()
    if args.cmd == "underscore":
        pieces = split_underscore(args.text)
        print(f"{pieces[0]} {pieces[1]} {pieces[2]}")
    elif args.cmd == "dash":
        pieces = split_dash(args.text)
        print(f"{pieces[0]} {pieces[1]} {pieces[2]}")
    elif args.cmd == "dot":
        pieces = split_dot(args.text)
        print(f"{pieces[0]} {pieces[1]} {pieces[2]}")
# Allow use as a script; importing this module runs nothing.
if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_split/split_tool.py (1713 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_split/split_tool.rs (3858 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_split/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 34.6 KB/s
⏱️ Total time: 48ms
| true
|
split
| 73
| 6
|
[] | 0
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.