[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "codewraith"
version = "0.1.0"
description = "Module-to-Spec Transformer: generates technical specifications from Python source code"
readme = "README.md"
requires-python = ">=3.10"
license = "MIT"
dependencies = [
    "pydantic>=2.0.0",
]

[project.optional-dependencies]
# Heavy ML stack: training, inference, and structured generation.
ml = [
    "accelerate>=0.27.0",
    "bitsandbytes>=0.43.0",
    "datasets>=2.18.0",
    "dspy>=2.5.0",
    "lark>=1.1.0",
    "outlines>=1.2.0",
    "peft>=0.10.0",
    "torch>=2.1.0",
    "transformers>=4.40.0",
    "trl>=0.8.0",
    # NOTE(review): only unpinned dependency in this group — consider a
    # version floor; unsloth releases frequently and can break the trainer.
    "unsloth",
    "vllm>=0.8.0",
]
# Demo/serving app: UI plus retrieval backend.
app = [
    "chromadb>=0.5.0",
    "gradio>=4.0.0",
    "sentence-transformers>=3.0.0",
]
# Convenience extra pulling in every optional group (self-referential extra).
all = [
    "codewraith[ml,app]",
]
dev = [
    "pytest>=8.0.0",
    "pytest-cov>=5.0.0",
    "ruff>=0.3.0",
]

[tool.ruff]
target-version = "py310"
line-length = 100
extend-exclude = ["data/"]

[tool.ruff.lint]
select = ["E", "F", "I", "W"]

[tool.ruff.lint.per-file-ignores]
# Long embedded prompt strings exceed the line limit.
"src/codewraith/verifier/judge.py" = ["E501"]

[tool.pytest.ini_options]
testpaths = ["tests"]

[tool.coverage.run]
source = ["src"]
# Modules excluded from coverage because they need external services or
# hardware unavailable in CI.
omit = [
    # Requires Ollama running
    "src/codewraith/teacher/generator.py",
    "src/codewraith/teacher/optimize.py",
    # Requires HF datasets streaming
    "src/codewraith/teacher/collect.py",
    # Requires GPU / unsloth
    "src/codewraith/student/trainer.py",
    "src/codewraith/student/evaluate.py",
    # Requires Ollama for LLM judge calls
    "src/codewraith/verifier/judge.py",
    # App (requires gradio + loaded model)
    "src/codewraith/app/main.py",
    "src/codewraith/app/retriever.py",
    "src/codewraith/app/repo_analyzer.py",
]