Ranjit Behera
committed on
Commit
·
c876830
1
Parent(s):
354e581
Fix: Default to regex-only mode for instant usage
Browse files
v1.0.2:
- use_llm defaults to False (no 5GB model download)
- Package works instantly out of the box
- Users can enable LLM with ExtractionConfig(use_llm=True)
- pyproject.toml +10 -3
- src/finee/schema.py +1 -1
pyproject.toml
CHANGED
|
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
|
|
| 4 |
|
| 5 |
[project]
|
| 6 |
name = "finee"
|
| 7 |
-
version = "1.0.1"
|
| 8 |
description = "Extract structured financial entities from Indian banking messages"
|
| 9 |
readme = "README.md"
|
| 10 |
license = "MIT"
|
|
@@ -91,12 +91,19 @@ Issues = "https://github.com/Ranjit0034/llm-mail-trainer/issues"
|
|
| 91 |
[project.scripts]
|
| 92 |
finee = "finee.cli:main"
|
| 93 |
|
| 94 |
-
[tool.hatch.build]
|
| 95 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
|
| 97 |
[tool.hatch.build.targets.wheel]
|
| 98 |
packages = ["src/finee"]
|
| 99 |
|
|
|
|
|
|
|
|
|
|
| 100 |
[tool.pytest.ini_options]
|
| 101 |
testpaths = ["tests"]
|
| 102 |
python_files = ["test_*.py"]
|
|
|
|
| 4 |
|
| 5 |
[project]
|
| 6 |
name = "finee"
|
| 7 |
+
version = "1.0.2"
|
| 8 |
description = "Extract structured financial entities from Indian banking messages"
|
| 9 |
readme = "README.md"
|
| 10 |
license = "MIT"
|
|
|
|
| 91 |
[project.scripts]
|
| 92 |
finee = "finee.cli:main"
|
| 93 |
|
| 94 |
+
[tool.hatch.build.targets.sdist]
|
| 95 |
+
include = [
|
| 96 |
+
"/src/finee",
|
| 97 |
+
"/README.md",
|
| 98 |
+
"/LICENSE",
|
| 99 |
+
]
|
| 100 |
|
| 101 |
[tool.hatch.build.targets.wheel]
|
| 102 |
packages = ["src/finee"]
|
| 103 |
|
| 104 |
+
[tool.hatch.build.targets.wheel.sources]
|
| 105 |
+
"src" = ""
|
| 106 |
+
|
| 107 |
[tool.pytest.ini_options]
|
| 108 |
testpaths = ["tests"]
|
| 109 |
python_files = ["test_*.py"]
|
src/finee/schema.py
CHANGED
|
@@ -158,7 +158,7 @@ class ExtractionConfig:
|
|
| 158 |
cache_max_size: int = 1000
|
| 159 |
|
| 160 |
# LLM settings
|
| 161 |
-
use_llm: bool = True
|
| 162 |
llm_timeout_seconds: float = 10.0
|
| 163 |
llm_max_tokens: int = 200
|
| 164 |
llm_temperature: float = 0.1
|
|
|
|
| 158 |
cache_max_size: int = 1000
|
| 159 |
|
| 160 |
# LLM settings
|
| 161 |
+
use_llm: bool = False # Set to True to enable LLM (requires model download)
|
| 162 |
llm_timeout_seconds: float = 10.0
|
| 163 |
llm_max_tokens: int = 200
|
| 164 |
llm_temperature: float = 0.1
|