Kunitomi commited on
Commit
196c526
·
verified ·
1 Parent(s): 6945ed6

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +590 -0
  2. .ipynb_checkpoints/README-checkpoint.md +258 -0
  3. .ipynb_checkpoints/inference_demo-checkpoint.ipynb +0 -0
  4. README.md +170 -0
  5. config.json +79 -0
  6. data/Costa_Rica_Red_Honey_Don_Oscar-green-prediction.png +3 -0
  7. data/Costa_Rica_Red_Honey_Don_Oscar-green.png +3 -0
  8. data/Costa_Rica_Zarcero_Monte_Brisas-roasted.png +3 -0
  9. inference_demo.ipynb +0 -0
  10. maskrcnn_coffeebeans_v1.safetensors +3 -0
  11. predict_beans.py +269 -0
  12. requirements.txt +8 -0
  13. src/bean_vision.egg-info/PKG-INFO +125 -0
  14. src/bean_vision.egg-info/SOURCES.txt +39 -0
  15. src/bean_vision.egg-info/dependency_links.txt +1 -0
  16. src/bean_vision.egg-info/entry_points.txt +4 -0
  17. src/bean_vision.egg-info/requires.txt +30 -0
  18. src/bean_vision.egg-info/top_level.txt +1 -0
  19. src/bean_vision/__init__.py +65 -0
  20. src/bean_vision/__pycache__/__init__.cpython-313.pyc +0 -0
  21. src/bean_vision/__pycache__/config.cpython-313.pyc +0 -0
  22. src/bean_vision/config.py +575 -0
  23. src/bean_vision/data/__init__.py +6 -0
  24. src/bean_vision/data/__pycache__/__init__.cpython-313.pyc +0 -0
  25. src/bean_vision/data/__pycache__/dataset.cpython-313.pyc +0 -0
  26. src/bean_vision/data/dataset.py +244 -0
  27. src/bean_vision/data/transforms.py +30 -0
  28. src/bean_vision/evaluation/__init__.py +8 -0
  29. src/bean_vision/evaluation/__pycache__/__init__.cpython-313.pyc +0 -0
  30. src/bean_vision/evaluation/__pycache__/metrics.cpython-313.pyc +0 -0
  31. src/bean_vision/evaluation/evaluator.py +460 -0
  32. src/bean_vision/evaluation/metrics.py +423 -0
  33. src/bean_vision/export/__init__.py +12 -0
  34. src/bean_vision/export/__pycache__/__init__.cpython-313.pyc +0 -0
  35. src/bean_vision/export/__pycache__/coco_exporter.cpython-313.pyc +0 -0
  36. src/bean_vision/export/__pycache__/labelme_exporter.cpython-313.pyc +0 -0
  37. src/bean_vision/export/__pycache__/polygon_utils.cpython-313.pyc +0 -0
  38. src/bean_vision/export/coco_exporter.py +163 -0
  39. src/bean_vision/export/labelme_exporter.py +157 -0
  40. src/bean_vision/export/polygon_utils.py +108 -0
  41. src/bean_vision/inference/__init__.py +7 -0
  42. src/bean_vision/inference/__pycache__/__init__.cpython-313.pyc +0 -0
  43. src/bean_vision/inference/__pycache__/mask_converter.cpython-313.pyc +0 -0
  44. src/bean_vision/inference/__pycache__/postprocessing.cpython-313.pyc +0 -0
  45. src/bean_vision/inference/__pycache__/predictor.cpython-313.pyc +0 -0
  46. src/bean_vision/inference/mask_converter.py +326 -0
  47. src/bean_vision/inference/postprocessing.py +362 -0
  48. src/bean_vision/inference/predictor.py +369 -0
  49. src/bean_vision/models/__init__.py +110 -0
  50. src/bean_vision/models/__pycache__/__init__.cpython-313.pyc +0 -0
.gitattributes CHANGED
@@ -33,3 +33,593 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ data/Costa_Rica_Red_Honey_Don_Oscar-green-prediction.png filter=lfs diff=lfs merge=lfs -text
37
+ data/Costa_Rica_Red_Honey_Don_Oscar-green.png filter=lfs diff=lfs merge=lfs -text
38
+ data/Costa_Rica_Zarcero_Monte_Brisas-roasted.png filter=lfs diff=lfs merge=lfs -text
39
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libavif.16.3.0.dylib filter=lfs diff=lfs merge=lfs -text
40
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libbrotlicommon.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
41
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libbrotlidec.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
42
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libfreetype.6.dylib filter=lfs diff=lfs merge=lfs -text
43
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libharfbuzz.0.dylib filter=lfs diff=lfs merge=lfs -text
44
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libjpeg.62.4.0.dylib filter=lfs diff=lfs merge=lfs -text
45
+ venv/lib/python3.13/site-packages/PIL/.dylibs/liblcms2.2.dylib filter=lfs diff=lfs merge=lfs -text
46
+ venv/lib/python3.13/site-packages/PIL/.dylibs/liblzma.5.dylib filter=lfs diff=lfs merge=lfs -text
47
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libopenjp2.2.5.3.dylib filter=lfs diff=lfs merge=lfs -text
48
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libpng16.16.dylib filter=lfs diff=lfs merge=lfs -text
49
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libtiff.6.dylib filter=lfs diff=lfs merge=lfs -text
50
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libwebp.7.dylib filter=lfs diff=lfs merge=lfs -text
51
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libwebpmux.3.dylib filter=lfs diff=lfs merge=lfs -text
52
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libxcb.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
53
+ venv/lib/python3.13/site-packages/PIL/.dylibs/libz.1.3.1.zlib-ng.dylib filter=lfs diff=lfs merge=lfs -text
54
+ venv/lib/python3.13/site-packages/PIL/__pycache__/Image.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
55
+ venv/lib/python3.13/site-packages/PIL/__pycache__/TiffImagePlugin.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
56
+ venv/lib/python3.13/site-packages/PIL/_imaging.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
57
+ venv/lib/python3.13/site-packages/PIL/_imagingft.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
58
+ venv/lib/python3.13/site-packages/__pycache__/typing_extensions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
59
+ venv/lib/python3.13/site-packages/contourpy/_contourpy.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
60
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libSvtAv1Enc.3.0.2.dylib filter=lfs diff=lfs merge=lfs -text
61
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libX11.6.dylib filter=lfs diff=lfs merge=lfs -text
62
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libaom.3.12.1.dylib filter=lfs diff=lfs merge=lfs -text
63
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libaribb24.0.dylib filter=lfs diff=lfs merge=lfs -text
64
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libavcodec.61.19.101.dylib filter=lfs diff=lfs merge=lfs -text
65
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libavformat.61.7.100.dylib filter=lfs diff=lfs merge=lfs -text
66
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libavif.16.3.0.dylib filter=lfs diff=lfs merge=lfs -text
67
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libavutil.59.39.100.dylib filter=lfs diff=lfs merge=lfs -text
68
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libbluray.2.dylib filter=lfs diff=lfs merge=lfs -text
69
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libbrotlicommon.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
70
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libbrotlidec.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
71
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libbrotlienc.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
72
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libcrypto.3.dylib filter=lfs diff=lfs merge=lfs -text
73
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libdav1d.7.dylib filter=lfs diff=lfs merge=lfs -text
74
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libfontconfig.1.dylib filter=lfs diff=lfs merge=lfs -text
75
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libfreetype.6.dylib filter=lfs diff=lfs merge=lfs -text
76
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libgmp.10.dylib filter=lfs diff=lfs merge=lfs -text
77
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libgnutls.30.dylib filter=lfs diff=lfs merge=lfs -text
78
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libhogweed.6.10.dylib filter=lfs diff=lfs merge=lfs -text
79
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libidn2.0.dylib filter=lfs diff=lfs merge=lfs -text
80
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libintl.8.dylib filter=lfs diff=lfs merge=lfs -text
81
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libjxl.0.11.1.dylib filter=lfs diff=lfs merge=lfs -text
82
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libjxl_cms.0.11.1.dylib filter=lfs diff=lfs merge=lfs -text
83
+ venv/lib/python3.13/site-packages/cv2/.dylibs/liblcms2.2.dylib filter=lfs diff=lfs merge=lfs -text
84
+ venv/lib/python3.13/site-packages/cv2/.dylibs/liblzma.5.dylib filter=lfs diff=lfs merge=lfs -text
85
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libmbedcrypto.3.6.3.dylib filter=lfs diff=lfs merge=lfs -text
86
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libmp3lame.0.dylib filter=lfs diff=lfs merge=lfs -text
87
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libnettle.8.10.dylib filter=lfs diff=lfs merge=lfs -text
88
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libopencore-amrnb.0.dylib filter=lfs diff=lfs merge=lfs -text
89
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libopencore-amrwb.0.dylib filter=lfs diff=lfs merge=lfs -text
90
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libopenjp2.2.5.3.dylib filter=lfs diff=lfs merge=lfs -text
91
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libopus.0.dylib filter=lfs diff=lfs merge=lfs -text
92
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libp11-kit.0.dylib filter=lfs diff=lfs merge=lfs -text
93
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libpng16.16.dylib filter=lfs diff=lfs merge=lfs -text
94
+ venv/lib/python3.13/site-packages/cv2/.dylibs/librav1e.0.8.0.dylib filter=lfs diff=lfs merge=lfs -text
95
+ venv/lib/python3.13/site-packages/cv2/.dylibs/librist.4.dylib filter=lfs diff=lfs merge=lfs -text
96
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libsodium.26.dylib filter=lfs diff=lfs merge=lfs -text
97
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libsoxr.0.1.2.dylib filter=lfs diff=lfs merge=lfs -text
98
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libspeex.1.dylib filter=lfs diff=lfs merge=lfs -text
99
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libsrt.1.5.4.dylib filter=lfs diff=lfs merge=lfs -text
100
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libssh.4.10.1.dylib filter=lfs diff=lfs merge=lfs -text
101
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libssl.3.dylib filter=lfs diff=lfs merge=lfs -text
102
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libswresample.5.3.100.dylib filter=lfs diff=lfs merge=lfs -text
103
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libswscale.8.3.100.dylib filter=lfs diff=lfs merge=lfs -text
104
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libtasn1.6.dylib filter=lfs diff=lfs merge=lfs -text
105
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libtheoradec.1.dylib filter=lfs diff=lfs merge=lfs -text
106
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libtheoraenc.1.dylib filter=lfs diff=lfs merge=lfs -text
107
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libunistring.5.dylib filter=lfs diff=lfs merge=lfs -text
108
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libvmaf.3.dylib filter=lfs diff=lfs merge=lfs -text
109
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libvorbis.0.dylib filter=lfs diff=lfs merge=lfs -text
110
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libvorbisenc.2.dylib filter=lfs diff=lfs merge=lfs -text
111
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libvpx.11.dylib filter=lfs diff=lfs merge=lfs -text
112
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libwebp.7.1.10.dylib filter=lfs diff=lfs merge=lfs -text
113
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libx264.164.dylib filter=lfs diff=lfs merge=lfs -text
114
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libx265.215.dylib filter=lfs diff=lfs merge=lfs -text
115
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libxcb.1.1.0.dylib filter=lfs diff=lfs merge=lfs -text
116
+ venv/lib/python3.13/site-packages/cv2/.dylibs/libzmq.5.dylib filter=lfs diff=lfs merge=lfs -text
117
+ venv/lib/python3.13/site-packages/cv2/cv2.abi3.so filter=lfs diff=lfs merge=lfs -text
118
+ venv/lib/python3.13/site-packages/fontTools/__pycache__/agl.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
119
+ venv/lib/python3.13/site-packages/fontTools/cffLib/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
120
+ venv/lib/python3.13/site-packages/fontTools/cu2qu/cu2qu.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
121
+ venv/lib/python3.13/site-packages/fontTools/designspaceLib/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
122
+ venv/lib/python3.13/site-packages/fontTools/feaLib/__pycache__/ast.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
123
+ venv/lib/python3.13/site-packages/fontTools/feaLib/__pycache__/parser.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
124
+ venv/lib/python3.13/site-packages/fontTools/feaLib/lexer.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
125
+ venv/lib/python3.13/site-packages/fontTools/misc/bezierTools.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
126
+ venv/lib/python3.13/site-packages/fontTools/otlLib/__pycache__/builder.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
127
+ venv/lib/python3.13/site-packages/fontTools/pens/momentsPen.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
128
+ venv/lib/python3.13/site-packages/fontTools/qu2cu/qu2cu.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
129
+ venv/lib/python3.13/site-packages/fontTools/subset/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
130
+ venv/lib/python3.13/site-packages/fontTools/ttLib/tables/__pycache__/otConverters.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
131
+ venv/lib/python3.13/site-packages/fontTools/ttLib/tables/__pycache__/otTables.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
132
+ venv/lib/python3.13/site-packages/fontTools/varLib/iup.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
133
+ venv/lib/python3.13/site-packages/functorch/_C.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
134
+ venv/lib/python3.13/site-packages/jinja2/__pycache__/compiler.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
135
+ venv/lib/python3.13/site-packages/kiwisolver/_cext.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
136
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/_cm_listed.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
137
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/_mathtext.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
138
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/axis.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
139
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/backend_bases.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
140
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/collections.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
141
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/colors.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
142
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/figure.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
143
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/patches.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
144
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/pyplot.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
145
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/ticker.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
146
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/transforms.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
147
+ venv/lib/python3.13/site-packages/matplotlib/__pycache__/widgets.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
148
+ venv/lib/python3.13/site-packages/matplotlib/_c_internal_utils.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
149
+ venv/lib/python3.13/site-packages/matplotlib/_image.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
150
+ venv/lib/python3.13/site-packages/matplotlib/_path.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
151
+ venv/lib/python3.13/site-packages/matplotlib/_qhull.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
152
+ venv/lib/python3.13/site-packages/matplotlib/_tri.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
153
+ venv/lib/python3.13/site-packages/matplotlib/axes/__pycache__/_axes.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
154
+ venv/lib/python3.13/site-packages/matplotlib/axes/__pycache__/_base.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
155
+ venv/lib/python3.13/site-packages/matplotlib/backends/__pycache__/backend_pdf.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
156
+ venv/lib/python3.13/site-packages/matplotlib/backends/_backend_agg.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
157
+ venv/lib/python3.13/site-packages/matplotlib/backends/_macosx.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
158
+ venv/lib/python3.13/site-packages/matplotlib/backends/_tkagg.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
159
+ venv/lib/python3.13/site-packages/matplotlib/ft2font.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
160
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans-Bold.ttf filter=lfs diff=lfs merge=lfs -text
161
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans-BoldOblique.ttf filter=lfs diff=lfs merge=lfs -text
162
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans-Oblique.ttf filter=lfs diff=lfs merge=lfs -text
163
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans.ttf filter=lfs diff=lfs merge=lfs -text
164
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Bold.ttf filter=lfs diff=lfs merge=lfs -text
165
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-BoldOblique.ttf filter=lfs diff=lfs merge=lfs -text
166
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Oblique.ttf filter=lfs diff=lfs merge=lfs -text
167
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono.ttf filter=lfs diff=lfs merge=lfs -text
168
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif-Bold.ttf filter=lfs diff=lfs merge=lfs -text
169
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif-BoldItalic.ttf filter=lfs diff=lfs merge=lfs -text
170
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif-Italic.ttf filter=lfs diff=lfs merge=lfs -text
171
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif.ttf filter=lfs diff=lfs merge=lfs -text
172
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneral.ttf filter=lfs diff=lfs merge=lfs -text
173
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneralBol.ttf filter=lfs diff=lfs merge=lfs -text
174
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneralBolIta.ttf filter=lfs diff=lfs merge=lfs -text
175
+ venv/lib/python3.13/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneralItalic.ttf filter=lfs diff=lfs merge=lfs -text
176
+ venv/lib/python3.13/site-packages/matplotlib/tests/__pycache__/test_axes.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
177
+ venv/lib/python3.13/site-packages/matplotlib/tests/__pycache__/test_figure.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
178
+ venv/lib/python3.13/site-packages/matplotlib/tests/__pycache__/test_image.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
179
+ venv/lib/python3.13/site-packages/matplotlib/tests/__pycache__/test_ticker.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
180
+ venv/lib/python3.13/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
181
+ venv/lib/python3.13/site-packages/mpl_toolkits/mplot3d/tests/__pycache__/test_axes3d.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
182
+ venv/lib/python3.13/site-packages/mpmath/__pycache__/function_docs.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
183
+ venv/lib/python3.13/site-packages/mpmath/tests/__pycache__/test_fp.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
184
+ venv/lib/python3.13/site-packages/mpmath/tests/__pycache__/test_functions2.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
185
+ venv/lib/python3.13/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
186
+ venv/lib/python3.13/site-packages/networkx/drawing/tests/baseline/test_display_complex.png filter=lfs diff=lfs merge=lfs -text
187
+ venv/lib/python3.13/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
188
+ venv/lib/python3.13/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
189
+ venv/lib/python3.13/site-packages/numpy/_core/_multiarray_tests.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
190
+ venv/lib/python3.13/site-packages/numpy/_core/_multiarray_umath.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
191
+ venv/lib/python3.13/site-packages/numpy/_core/_simd.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
192
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_datetime.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
193
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_dtype.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
194
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_multiarray.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
195
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_nditer.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
196
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_numeric.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
197
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
198
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_ufunc.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
199
+ venv/lib/python3.13/site-packages/numpy/_core/tests/__pycache__/test_umath.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
200
+ venv/lib/python3.13/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
201
+ venv/lib/python3.13/site-packages/numpy/fft/_pocketfft_umath.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
202
+ venv/lib/python3.13/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
203
+ venv/lib/python3.13/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
204
+ venv/lib/python3.13/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
205
+ venv/lib/python3.13/site-packages/numpy/linalg/__pycache__/_linalg.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
206
+ venv/lib/python3.13/site-packages/numpy/linalg/_umath_linalg.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
207
+ venv/lib/python3.13/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
208
+ venv/lib/python3.13/site-packages/numpy/ma/__pycache__/core.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
209
+ venv/lib/python3.13/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
210
+ venv/lib/python3.13/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
211
+ venv/lib/python3.13/site-packages/numpy/random/_bounded_integers.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
212
+ venv/lib/python3.13/site-packages/numpy/random/_common.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
213
+ venv/lib/python3.13/site-packages/numpy/random/_generator.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
214
+ venv/lib/python3.13/site-packages/numpy/random/_mt19937.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
215
+ venv/lib/python3.13/site-packages/numpy/random/_pcg64.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
216
+ venv/lib/python3.13/site-packages/numpy/random/_philox.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
217
+ venv/lib/python3.13/site-packages/numpy/random/bit_generator.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
218
+ venv/lib/python3.13/site-packages/numpy/random/mtrand.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
219
+ venv/lib/python3.13/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
220
+ venv/lib/python3.13/site-packages/numpy/random/tests/__pycache__/test_random.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
221
+ venv/lib/python3.13/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
222
+ venv/lib/python3.13/site-packages/numpy/testing/_private/__pycache__/utils.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
223
+ venv/lib/python3.13/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
224
+ venv/lib/python3.13/site-packages/pandas/_libs/algos.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
225
+ venv/lib/python3.13/site-packages/pandas/_libs/arrays.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
226
+ venv/lib/python3.13/site-packages/pandas/_libs/groupby.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
227
+ venv/lib/python3.13/site-packages/pandas/_libs/hashing.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
228
+ venv/lib/python3.13/site-packages/pandas/_libs/hashtable.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
229
+ venv/lib/python3.13/site-packages/pandas/_libs/index.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
230
+ venv/lib/python3.13/site-packages/pandas/_libs/internals.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
231
+ venv/lib/python3.13/site-packages/pandas/_libs/interval.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
232
+ venv/lib/python3.13/site-packages/pandas/_libs/join.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
233
+ venv/lib/python3.13/site-packages/pandas/_libs/lib.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
234
+ venv/lib/python3.13/site-packages/pandas/_libs/missing.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
235
+ venv/lib/python3.13/site-packages/pandas/_libs/ops.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
236
+ venv/lib/python3.13/site-packages/pandas/_libs/parsers.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
237
+ venv/lib/python3.13/site-packages/pandas/_libs/reshape.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
238
+ venv/lib/python3.13/site-packages/pandas/_libs/sas.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
239
+ venv/lib/python3.13/site-packages/pandas/_libs/sparse.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
240
+ venv/lib/python3.13/site-packages/pandas/_libs/testing.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
241
+ venv/lib/python3.13/site-packages/pandas/_libs/tslib.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
242
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/conversion.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
243
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/dtypes.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
244
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/fields.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
245
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/nattype.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
246
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/np_datetime.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
247
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/offsets.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
248
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/parsing.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
249
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/period.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
250
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/strptime.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
251
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/timedeltas.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
252
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/timestamps.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
253
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/timezones.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
254
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/tzconversion.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
255
+ venv/lib/python3.13/site-packages/pandas/_libs/tslibs/vectorized.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
256
+ venv/lib/python3.13/site-packages/pandas/_libs/window/aggregations.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
257
+ venv/lib/python3.13/site-packages/pandas/_libs/window/indexers.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
258
+ venv/lib/python3.13/site-packages/pandas/_libs/writers.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
259
+ venv/lib/python3.13/site-packages/pandas/core/__pycache__/frame.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
260
+ venv/lib/python3.13/site-packages/pandas/core/__pycache__/generic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
261
+ venv/lib/python3.13/site-packages/pandas/core/__pycache__/indexing.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
262
+ venv/lib/python3.13/site-packages/pandas/core/__pycache__/series.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
263
+ venv/lib/python3.13/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
264
+ venv/lib/python3.13/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
265
+ venv/lib/python3.13/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
266
+ venv/lib/python3.13/site-packages/pandas/core/indexes/__pycache__/base.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
267
+ venv/lib/python3.13/site-packages/pandas/core/indexes/__pycache__/multi.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
268
+ venv/lib/python3.13/site-packages/pandas/core/reshape/__pycache__/merge.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
269
+ venv/lib/python3.13/site-packages/pandas/core/strings/__pycache__/accessor.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
270
+ venv/lib/python3.13/site-packages/pandas/io/__pycache__/pytables.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
271
+ venv/lib/python3.13/site-packages/pandas/io/__pycache__/sql.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
272
+ venv/lib/python3.13/site-packages/pandas/io/__pycache__/stata.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
273
+ venv/lib/python3.13/site-packages/pandas/io/formats/__pycache__/style.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
274
+ venv/lib/python3.13/site-packages/pandas/tests/__pycache__/test_algos.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
275
+ venv/lib/python3.13/site-packages/pandas/tests/arithmetic/__pycache__/test_datetime64.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
276
+ venv/lib/python3.13/site-packages/pandas/tests/arithmetic/__pycache__/test_timedelta64.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
277
+ venv/lib/python3.13/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
278
+ venv/lib/python3.13/site-packages/pandas/tests/copy_view/__pycache__/test_methods.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
279
+ venv/lib/python3.13/site-packages/pandas/tests/dtypes/__pycache__/test_inference.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
280
+ venv/lib/python3.13/site-packages/pandas/tests/extension/__pycache__/test_arrow.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
281
+ venv/lib/python3.13/site-packages/pandas/tests/frame/__pycache__/test_arithmetic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
282
+ venv/lib/python3.13/site-packages/pandas/tests/frame/__pycache__/test_constructors.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
283
+ venv/lib/python3.13/site-packages/pandas/tests/frame/__pycache__/test_reductions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
284
+ venv/lib/python3.13/site-packages/pandas/tests/frame/__pycache__/test_stack_unstack.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
285
+ venv/lib/python3.13/site-packages/pandas/tests/frame/indexing/__pycache__/test_indexing.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
286
+ venv/lib/python3.13/site-packages/pandas/tests/groupby/__pycache__/test_groupby.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
287
+ venv/lib/python3.13/site-packages/pandas/tests/indexing/__pycache__/test_loc.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
288
+ venv/lib/python3.13/site-packages/pandas/tests/io/__pycache__/test_sql.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
289
+ venv/lib/python3.13/site-packages/pandas/tests/io/__pycache__/test_stata.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
290
+ venv/lib/python3.13/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
291
+ venv/lib/python3.13/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
292
+ venv/lib/python3.13/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
293
+ venv/lib/python3.13/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
294
+ venv/lib/python3.13/site-packages/pandas/tests/resample/__pycache__/test_datetime_index.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
295
+ venv/lib/python3.13/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
296
+ venv/lib/python3.13/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
297
+ venv/lib/python3.13/site-packages/pandas/tests/series/__pycache__/test_constructors.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
298
+ venv/lib/python3.13/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
299
+ venv/lib/python3.13/site-packages/pip/_vendor/distlib/t64-arm.exe filter=lfs diff=lfs merge=lfs -text
300
+ venv/lib/python3.13/site-packages/pip/_vendor/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text
301
+ venv/lib/python3.13/site-packages/pip/_vendor/distlib/w64-arm.exe filter=lfs diff=lfs merge=lfs -text
302
+ venv/lib/python3.13/site-packages/pip/_vendor/distlib/w64.exe filter=lfs diff=lfs merge=lfs -text
303
+ venv/lib/python3.13/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
304
+ venv/lib/python3.13/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
305
+ venv/lib/python3.13/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
306
+ venv/lib/python3.13/site-packages/pip/_vendor/rich/__pycache__/console.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
307
+ venv/lib/python3.13/site-packages/pkg_resources/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
308
+ venv/lib/python3.13/site-packages/pyparsing/__pycache__/core.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
309
+ venv/lib/python3.13/site-packages/safetensors/_safetensors_rust.abi3.so filter=lfs diff=lfs merge=lfs -text
310
+ venv/lib/python3.13/site-packages/scipy/.dylibs/libgcc_s.1.1.dylib filter=lfs diff=lfs merge=lfs -text
311
+ venv/lib/python3.13/site-packages/scipy/.dylibs/libgfortran.5.dylib filter=lfs diff=lfs merge=lfs -text
312
+ venv/lib/python3.13/site-packages/scipy/.dylibs/libquadmath.0.dylib filter=lfs diff=lfs merge=lfs -text
313
+ venv/lib/python3.13/site-packages/scipy/_cyutility.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
314
+ venv/lib/python3.13/site-packages/scipy/_lib/_ccallback_c.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
315
+ venv/lib/python3.13/site-packages/scipy/_lib/_uarray/_uarray.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
316
+ venv/lib/python3.13/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
317
+ venv/lib/python3.13/site-packages/scipy/cluster/_hierarchy.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
318
+ venv/lib/python3.13/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
319
+ venv/lib/python3.13/site-packages/scipy/cluster/_vq.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
320
+ venv/lib/python3.13/site-packages/scipy/constants/__pycache__/_codata.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
321
+ venv/lib/python3.13/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
322
+ venv/lib/python3.13/site-packages/scipy/fftpack/convolve.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
323
+ venv/lib/python3.13/site-packages/scipy/integrate/__pycache__/_lebedev.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
324
+ venv/lib/python3.13/site-packages/scipy/integrate/_dop.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
325
+ venv/lib/python3.13/site-packages/scipy/integrate/_lsoda.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
326
+ venv/lib/python3.13/site-packages/scipy/integrate/_odepack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
327
+ venv/lib/python3.13/site-packages/scipy/integrate/_quadpack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
328
+ venv/lib/python3.13/site-packages/scipy/integrate/_test_odeint_banded.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
329
+ venv/lib/python3.13/site-packages/scipy/integrate/_vode.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
330
+ venv/lib/python3.13/site-packages/scipy/interpolate/_dfitpack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
331
+ venv/lib/python3.13/site-packages/scipy/interpolate/_fitpack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
332
+ venv/lib/python3.13/site-packages/scipy/interpolate/_interpnd.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
333
+ venv/lib/python3.13/site-packages/scipy/interpolate/_ppoly.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
334
+ venv/lib/python3.13/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
335
+ venv/lib/python3.13/site-packages/scipy/interpolate/_rgi_cython.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
336
+ venv/lib/python3.13/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
337
+ venv/lib/python3.13/site-packages/scipy/interpolate/tests/__pycache__/test_interpolate.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
338
+ venv/lib/python3.13/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
339
+ venv/lib/python3.13/site-packages/scipy/io/matlab/_mio5_utils.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
340
+ venv/lib/python3.13/site-packages/scipy/io/matlab/_streams.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
341
+ venv/lib/python3.13/site-packages/scipy/linalg/_cythonized_array_utils.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
342
+ venv/lib/python3.13/site-packages/scipy/linalg/_decomp_interpolative.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
343
+ venv/lib/python3.13/site-packages/scipy/linalg/_decomp_lu_cython.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
344
+ venv/lib/python3.13/site-packages/scipy/linalg/_decomp_update.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
345
+ venv/lib/python3.13/site-packages/scipy/linalg/_fblas.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
346
+ venv/lib/python3.13/site-packages/scipy/linalg/_flapack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
347
+ venv/lib/python3.13/site-packages/scipy/linalg/_linalg_pythran.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
348
+ venv/lib/python3.13/site-packages/scipy/linalg/_matfuncs_expm.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
349
+ venv/lib/python3.13/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
350
+ venv/lib/python3.13/site-packages/scipy/linalg/_solve_toeplitz.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
351
+ venv/lib/python3.13/site-packages/scipy/linalg/cython_blas.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
352
+ venv/lib/python3.13/site-packages/scipy/linalg/cython_lapack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
353
+ venv/lib/python3.13/site-packages/scipy/linalg/tests/__pycache__/test_basic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
354
+ venv/lib/python3.13/site-packages/scipy/linalg/tests/__pycache__/test_decomp.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
355
+ venv/lib/python3.13/site-packages/scipy/linalg/tests/__pycache__/test_decomp_update.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
356
+ venv/lib/python3.13/site-packages/scipy/linalg/tests/__pycache__/test_lapack.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
357
+ venv/lib/python3.13/site-packages/scipy/ndimage/_nd_image.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
358
+ venv/lib/python3.13/site-packages/scipy/ndimage/_ni_label.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
359
+ venv/lib/python3.13/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
360
+ venv/lib/python3.13/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
361
+ venv/lib/python3.13/site-packages/scipy/odr/__odrpack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
362
+ venv/lib/python3.13/site-packages/scipy/optimize/__pycache__/_optimize.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
363
+ venv/lib/python3.13/site-packages/scipy/optimize/_bglu_dense.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
364
+ venv/lib/python3.13/site-packages/scipy/optimize/_highspy/_core.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
365
+ venv/lib/python3.13/site-packages/scipy/optimize/_highspy/_highs_options.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
366
+ venv/lib/python3.13/site-packages/scipy/optimize/_minpack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
367
+ venv/lib/python3.13/site-packages/scipy/optimize/_moduleTNC.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
368
+ venv/lib/python3.13/site-packages/scipy/optimize/_pava_pybind.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
369
+ venv/lib/python3.13/site-packages/scipy/optimize/_trlib/_trlib.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
370
+ venv/lib/python3.13/site-packages/scipy/optimize/cython_optimize/_zeros.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
371
+ venv/lib/python3.13/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
372
+ venv/lib/python3.13/site-packages/scipy/optimize/tests/__pycache__/test_optimize.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
373
+ venv/lib/python3.13/site-packages/scipy/signal/__pycache__/_filter_design.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
374
+ venv/lib/python3.13/site-packages/scipy/signal/__pycache__/_ltisys.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
375
+ venv/lib/python3.13/site-packages/scipy/signal/__pycache__/_short_time_fft.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
376
+ venv/lib/python3.13/site-packages/scipy/signal/__pycache__/_signaltools.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
377
+ venv/lib/python3.13/site-packages/scipy/signal/_peak_finding_utils.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
378
+ venv/lib/python3.13/site-packages/scipy/signal/_sigtools.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
379
+ venv/lib/python3.13/site-packages/scipy/signal/_sosfilt.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
380
+ venv/lib/python3.13/site-packages/scipy/signal/_upfirdn_apply.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
381
+ venv/lib/python3.13/site-packages/scipy/signal/tests/__pycache__/test_filter_design.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
382
+ venv/lib/python3.13/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
383
+ venv/lib/python3.13/site-packages/scipy/signal/tests/__pycache__/test_spectral.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
384
+ venv/lib/python3.13/site-packages/scipy/sparse/_csparsetools.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
385
+ venv/lib/python3.13/site-packages/scipy/sparse/_sparsetools.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
386
+ venv/lib/python3.13/site-packages/scipy/sparse/csgraph/_flow.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
387
+ venv/lib/python3.13/site-packages/scipy/sparse/csgraph/_matching.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
388
+ venv/lib/python3.13/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
389
+ venv/lib/python3.13/site-packages/scipy/sparse/csgraph/_reordering.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
390
+ venv/lib/python3.13/site-packages/scipy/sparse/csgraph/_shortest_path.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
391
+ venv/lib/python3.13/site-packages/scipy/sparse/csgraph/_tools.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
392
+ venv/lib/python3.13/site-packages/scipy/sparse/csgraph/_traversal.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
393
+ venv/lib/python3.13/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
394
+ venv/lib/python3.13/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
395
+ venv/lib/python3.13/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
396
+ venv/lib/python3.13/site-packages/scipy/sparse/linalg/_propack/_dpropack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
397
+ venv/lib/python3.13/site-packages/scipy/sparse/linalg/_propack/_spropack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
398
+ venv/lib/python3.13/site-packages/scipy/sparse/linalg/_propack/_zpropack.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
399
+ venv/lib/python3.13/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
400
+ venv/lib/python3.13/site-packages/scipy/spatial/__pycache__/distance.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
401
+ venv/lib/python3.13/site-packages/scipy/spatial/_ckdtree.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
402
+ venv/lib/python3.13/site-packages/scipy/spatial/_distance_pybind.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
403
+ venv/lib/python3.13/site-packages/scipy/spatial/_distance_wrap.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
404
+ venv/lib/python3.13/site-packages/scipy/spatial/_hausdorff.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
405
+ venv/lib/python3.13/site-packages/scipy/spatial/_qhull.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
406
+ venv/lib/python3.13/site-packages/scipy/spatial/tests/__pycache__/test_distance.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
407
+ venv/lib/python3.13/site-packages/scipy/spatial/transform/_rigid_transform.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
408
+ venv/lib/python3.13/site-packages/scipy/spatial/transform/_rotation.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
409
+ venv/lib/python3.13/site-packages/scipy/spatial/transform/tests/__pycache__/test_rotation.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
410
+ venv/lib/python3.13/site-packages/scipy/special/__pycache__/_add_newdocs.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
411
+ venv/lib/python3.13/site-packages/scipy/special/__pycache__/_basic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
412
+ venv/lib/python3.13/site-packages/scipy/special/_ellip_harm_2.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
413
+ venv/lib/python3.13/site-packages/scipy/special/_gufuncs.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
414
+ venv/lib/python3.13/site-packages/scipy/special/_specfun.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
415
+ venv/lib/python3.13/site-packages/scipy/special/_special_ufuncs.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
416
+ venv/lib/python3.13/site-packages/scipy/special/_test_internal.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
417
+ venv/lib/python3.13/site-packages/scipy/special/_ufuncs.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
418
+ venv/lib/python3.13/site-packages/scipy/special/_ufuncs_cxx.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
419
+ venv/lib/python3.13/site-packages/scipy/special/cython_special.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
420
+ venv/lib/python3.13/site-packages/scipy/special/tests/__pycache__/test_basic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
421
+ venv/lib/python3.13/site-packages/scipy/special/tests/__pycache__/test_legendre.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
422
+ venv/lib/python3.13/site-packages/scipy/special/tests/__pycache__/test_mpmath.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
423
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
424
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
425
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_distribution_infrastructure.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
426
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_morestats.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
427
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
428
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_multivariate.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
429
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_qmc.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
430
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_resampling.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
431
+ venv/lib/python3.13/site-packages/scipy/stats/__pycache__/_stats_py.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
432
+ venv/lib/python3.13/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
433
+ venv/lib/python3.13/site-packages/scipy/stats/_biasedurn.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
434
+ venv/lib/python3.13/site-packages/scipy/stats/_qmc_cy.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
435
+ venv/lib/python3.13/site-packages/scipy/stats/_qmvnt_cy.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
436
+ venv/lib/python3.13/site-packages/scipy/stats/_rcont/rcont.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
437
+ venv/lib/python3.13/site-packages/scipy/stats/_sobol.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
438
+ venv/lib/python3.13/site-packages/scipy/stats/_stats.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
439
+ venv/lib/python3.13/site-packages/scipy/stats/_stats_pythran.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
440
+ venv/lib/python3.13/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
441
+ venv/lib/python3.13/site-packages/scipy/stats/tests/__pycache__/test_continuous.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
442
+ venv/lib/python3.13/site-packages/scipy/stats/tests/__pycache__/test_distributions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
443
+ venv/lib/python3.13/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
444
+ venv/lib/python3.13/site-packages/scipy/stats/tests/__pycache__/test_mstats_basic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
445
+ venv/lib/python3.13/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
446
+ venv/lib/python3.13/site-packages/scipy/stats/tests/__pycache__/test_resampling.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
447
+ venv/lib/python3.13/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
448
+ venv/lib/python3.13/site-packages/seaborn/__pycache__/categorical.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
449
+ venv/lib/python3.13/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
450
+ venv/lib/python3.13/site-packages/setuptools/_vendor/backports/tarfile/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
451
+ venv/lib/python3.13/site-packages/setuptools/_vendor/inflect/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
452
+ venv/lib/python3.13/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
453
+ venv/lib/python3.13/site-packages/setuptools/config/_validate_pyproject/__pycache__/fastjsonschema_validations.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
454
+ venv/lib/python3.13/site-packages/sympy/assumptions/tests/__pycache__/test_query.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
455
+ venv/lib/python3.13/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
456
+ venv/lib/python3.13/site-packages/sympy/concrete/tests/__pycache__/test_sums_products.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
457
+ venv/lib/python3.13/site-packages/sympy/core/__pycache__/expr.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
458
+ venv/lib/python3.13/site-packages/sympy/core/__pycache__/function.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
459
+ venv/lib/python3.13/site-packages/sympy/core/__pycache__/numbers.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
460
+ venv/lib/python3.13/site-packages/sympy/core/tests/__pycache__/test_args.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
461
+ venv/lib/python3.13/site-packages/sympy/core/tests/__pycache__/test_arit.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
462
+ venv/lib/python3.13/site-packages/sympy/core/tests/__pycache__/test_expr.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
463
+ venv/lib/python3.13/site-packages/sympy/core/tests/__pycache__/test_function.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
464
+ venv/lib/python3.13/site-packages/sympy/core/tests/__pycache__/test_numbers.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
465
+ venv/lib/python3.13/site-packages/sympy/core/tests/__pycache__/test_relational.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
466
+ venv/lib/python3.13/site-packages/sympy/crypto/__pycache__/crypto.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
467
+ venv/lib/python3.13/site-packages/sympy/functions/combinatorial/__pycache__/numbers.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
468
+ venv/lib/python3.13/site-packages/sympy/functions/combinatorial/tests/__pycache__/test_comb_numbers.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
469
+ venv/lib/python3.13/site-packages/sympy/functions/elementary/__pycache__/hyperbolic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
470
+ venv/lib/python3.13/site-packages/sympy/functions/elementary/__pycache__/trigonometric.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
471
+ venv/lib/python3.13/site-packages/sympy/functions/elementary/tests/__pycache__/test_hyperbolic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
472
+ venv/lib/python3.13/site-packages/sympy/functions/elementary/tests/__pycache__/test_piecewise.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
473
+ venv/lib/python3.13/site-packages/sympy/functions/elementary/tests/__pycache__/test_trigonometric.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
474
+ venv/lib/python3.13/site-packages/sympy/functions/special/__pycache__/error_functions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
475
+ venv/lib/python3.13/site-packages/sympy/functions/special/tests/__pycache__/test_bessel.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
476
+ venv/lib/python3.13/site-packages/sympy/functions/special/tests/__pycache__/test_error_functions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
477
+ venv/lib/python3.13/site-packages/sympy/holonomic/__pycache__/holonomic.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
478
+ venv/lib/python3.13/site-packages/sympy/integrals/__pycache__/laplace.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
479
+ venv/lib/python3.13/site-packages/sympy/integrals/__pycache__/manualintegrate.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
480
+ venv/lib/python3.13/site-packages/sympy/integrals/__pycache__/meijerint.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
481
+ venv/lib/python3.13/site-packages/sympy/integrals/tests/__pycache__/test_integrals.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
482
+ venv/lib/python3.13/site-packages/sympy/logic/__pycache__/boolalg.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
483
+ venv/lib/python3.13/site-packages/sympy/logic/tests/__pycache__/test_boolalg.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
484
+ venv/lib/python3.13/site-packages/sympy/matrices/__pycache__/common.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
485
+ venv/lib/python3.13/site-packages/sympy/matrices/__pycache__/matrixbase.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
486
+ venv/lib/python3.13/site-packages/sympy/matrices/tests/__pycache__/test_matrices.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
487
+ venv/lib/python3.13/site-packages/sympy/matrices/tests/__pycache__/test_matrixbase.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
488
+ venv/lib/python3.13/site-packages/sympy/parsing/autolev/__pycache__/_listener_autolev_antlr.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
489
+ venv/lib/python3.13/site-packages/sympy/parsing/autolev/_antlr/__pycache__/autolevparser.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
490
+ venv/lib/python3.13/site-packages/sympy/parsing/latex/_antlr/__pycache__/latexparser.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
491
+ venv/lib/python3.13/site-packages/sympy/parsing/tests/__pycache__/test_c_parser.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
492
+ venv/lib/python3.13/site-packages/sympy/physics/__pycache__/secondquant.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
493
+ venv/lib/python3.13/site-packages/sympy/physics/biomechanics/tests/__pycache__/test_curve.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
494
+ venv/lib/python3.13/site-packages/sympy/physics/continuum_mechanics/__pycache__/beam.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
495
+ venv/lib/python3.13/site-packages/sympy/physics/control/__pycache__/lti.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
496
+ venv/lib/python3.13/site-packages/sympy/physics/control/tests/__pycache__/test_lti.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
497
+ venv/lib/python3.13/site-packages/sympy/physics/mechanics/tests/__pycache__/test_joint.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
498
+ venv/lib/python3.13/site-packages/sympy/physics/quantum/__pycache__/spin.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
499
+ venv/lib/python3.13/site-packages/sympy/physics/quantum/tests/__pycache__/test_spin.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
500
+ venv/lib/python3.13/site-packages/sympy/plotting/__pycache__/series.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
501
+ venv/lib/python3.13/site-packages/sympy/plotting/tests/__pycache__/test_series.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
502
+ venv/lib/python3.13/site-packages/sympy/polys/__pycache__/compatibility.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
503
+ venv/lib/python3.13/site-packages/sympy/polys/__pycache__/polyclasses.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
504
+ venv/lib/python3.13/site-packages/sympy/polys/__pycache__/polyquinticconst.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
505
+ venv/lib/python3.13/site-packages/sympy/polys/__pycache__/polytools.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
506
+ venv/lib/python3.13/site-packages/sympy/polys/__pycache__/rings.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
507
+ venv/lib/python3.13/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
508
+ venv/lib/python3.13/site-packages/sympy/polys/domains/tests/__pycache__/test_domains.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
509
+ venv/lib/python3.13/site-packages/sympy/polys/matrices/__pycache__/domainmatrix.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
510
+ venv/lib/python3.13/site-packages/sympy/polys/matrices/tests/__pycache__/test_domainmatrix.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
511
+ venv/lib/python3.13/site-packages/sympy/polys/numberfields/__pycache__/resolvent_lookup.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
512
+ venv/lib/python3.13/site-packages/sympy/polys/tests/__pycache__/test_polytools.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
513
+ venv/lib/python3.13/site-packages/sympy/polys/tests/__pycache__/test_rings.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
514
+ venv/lib/python3.13/site-packages/sympy/printing/__pycache__/latex.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
515
+ venv/lib/python3.13/site-packages/sympy/printing/__pycache__/mathml.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
516
+ venv/lib/python3.13/site-packages/sympy/printing/pretty/__pycache__/pretty.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
517
+ venv/lib/python3.13/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
518
+ venv/lib/python3.13/site-packages/sympy/printing/tests/__pycache__/test_latex.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
519
+ venv/lib/python3.13/site-packages/sympy/printing/tests/__pycache__/test_mathml.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
520
+ venv/lib/python3.13/site-packages/sympy/series/tests/__pycache__/test_limits.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
521
+ venv/lib/python3.13/site-packages/sympy/sets/__pycache__/sets.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
522
+ venv/lib/python3.13/site-packages/sympy/sets/tests/__pycache__/test_fancysets.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
523
+ venv/lib/python3.13/site-packages/sympy/sets/tests/__pycache__/test_sets.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
524
+ venv/lib/python3.13/site-packages/sympy/simplify/__pycache__/hyperexpand.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
525
+ venv/lib/python3.13/site-packages/sympy/solvers/__pycache__/solvers.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
526
+ venv/lib/python3.13/site-packages/sympy/solvers/__pycache__/solveset.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
527
+ venv/lib/python3.13/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
528
+ venv/lib/python3.13/site-packages/sympy/solvers/ode/__pycache__/ode.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
529
+ venv/lib/python3.13/site-packages/sympy/solvers/ode/__pycache__/single.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
530
+ venv/lib/python3.13/site-packages/sympy/solvers/ode/tests/__pycache__/test_ode.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
531
+ venv/lib/python3.13/site-packages/sympy/solvers/ode/tests/__pycache__/test_single.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
532
+ venv/lib/python3.13/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
533
+ venv/lib/python3.13/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
534
+ venv/lib/python3.13/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
535
+ venv/lib/python3.13/site-packages/sympy/stats/__pycache__/crv_types.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
536
+ venv/lib/python3.13/site-packages/sympy/stats/__pycache__/stochastic_process_types.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
537
+ venv/lib/python3.13/site-packages/sympy/stats/tests/__pycache__/test_continuous_rv.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
538
+ venv/lib/python3.13/site-packages/sympy/tensor/__pycache__/tensor.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
539
+ venv/lib/python3.13/site-packages/sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
540
+ venv/lib/python3.13/site-packages/sympy/tensor/tests/__pycache__/test_tensor.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
541
+ venv/lib/python3.13/site-packages/sympy/utilities/__pycache__/iterables.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
542
+ venv/lib/python3.13/site-packages/sympy/utilities/tests/__pycache__/test_lambdify.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
543
+ venv/lib/python3.13/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
544
+ venv/lib/python3.13/site-packages/torch/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
545
+ venv/lib/python3.13/site-packages/torch/__pycache__/_meta_registrations.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
546
+ venv/lib/python3.13/site-packages/torch/__pycache__/_tensor_docs.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
547
+ venv/lib/python3.13/site-packages/torch/__pycache__/_torch_docs.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
548
+ venv/lib/python3.13/site-packages/torch/__pycache__/overrides.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
549
+ venv/lib/python3.13/site-packages/torch/_decomp/__pycache__/decompositions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
550
+ venv/lib/python3.13/site-packages/torch/_dynamo/__pycache__/guards.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
551
+ venv/lib/python3.13/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
552
+ venv/lib/python3.13/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
553
+ venv/lib/python3.13/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
554
+ venv/lib/python3.13/site-packages/torch/_dynamo/__pycache__/utils.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
555
+ venv/lib/python3.13/site-packages/torch/_dynamo/variables/__pycache__/builder.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
556
+ venv/lib/python3.13/site-packages/torch/_dynamo/variables/__pycache__/builtin.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
557
+ venv/lib/python3.13/site-packages/torch/_dynamo/variables/__pycache__/functions.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
558
+ venv/lib/python3.13/site-packages/torch/_dynamo/variables/__pycache__/higher_order_ops.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
559
+ venv/lib/python3.13/site-packages/torch/_export/serde/__pycache__/serialize.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
560
+ venv/lib/python3.13/site-packages/torch/_functorch/__pycache__/partitioners.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
561
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/codecache.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
562
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/compile_fx.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
563
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/cudagraph_trees.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
564
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/graph.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
565
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/ir.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
566
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/lowering.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
567
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/pattern_matcher.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
568
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/scheduler.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
569
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/select_algorithm.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
570
+ venv/lib/python3.13/site-packages/torch/_inductor/__pycache__/utils.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
571
+ venv/lib/python3.13/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
572
+ venv/lib/python3.13/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
573
+ venv/lib/python3.13/site-packages/torch/_inductor/codegen/__pycache__/cpp_wrapper_cpu.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
574
+ venv/lib/python3.13/site-packages/torch/_inductor/codegen/__pycache__/simd.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
575
+ venv/lib/python3.13/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
576
+ venv/lib/python3.13/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
577
+ venv/lib/python3.13/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
578
+ venv/lib/python3.13/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
579
+ venv/lib/python3.13/site-packages/torch/_inductor/kernel/__pycache__/flex_attention.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
580
+ venv/lib/python3.13/site-packages/torch/_inductor/runtime/__pycache__/triton_heuristics.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
581
+ venv/lib/python3.13/site-packages/torch/_refs/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
582
+ venv/lib/python3.13/site-packages/torch/_subclasses/__pycache__/fake_tensor.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
583
+ venv/lib/python3.13/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
584
+ venv/lib/python3.13/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
585
+ venv/lib/python3.13/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
586
+ venv/lib/python3.13/site-packages/torch/distributed/__pycache__/distributed_c10d.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
587
+ venv/lib/python3.13/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
588
+ venv/lib/python3.13/site-packages/torch/distributed/pipelining/__pycache__/schedules.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
589
+ venv/lib/python3.13/site-packages/torch/fx/experimental/__pycache__/proxy_tensor.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
590
+ venv/lib/python3.13/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
591
+ venv/lib/python3.13/site-packages/torch/lib/libc10.dylib filter=lfs diff=lfs merge=lfs -text
592
+ venv/lib/python3.13/site-packages/torch/lib/libomp.dylib filter=lfs diff=lfs merge=lfs -text
593
+ venv/lib/python3.13/site-packages/torch/lib/libtorch_cpu.dylib filter=lfs diff=lfs merge=lfs -text
594
+ venv/lib/python3.13/site-packages/torch/lib/libtorch_python.dylib filter=lfs diff=lfs merge=lfs -text
595
+ venv/lib/python3.13/site-packages/torch/linalg/__pycache__/__init__.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
596
+ venv/lib/python3.13/site-packages/torch/nested/_internal/__pycache__/ops.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
597
+ venv/lib/python3.13/site-packages/torch/nn/__pycache__/functional.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
598
+ venv/lib/python3.13/site-packages/torch/nn/modules/__pycache__/module.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
599
+ venv/lib/python3.13/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
600
+ venv/lib/python3.13/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
601
+ venv/lib/python3.13/site-packages/torch/sparse/__pycache__/_triton_ops_meta.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
602
+ venv/lib/python3.13/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
603
+ venv/lib/python3.13/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
604
+ venv/lib/python3.13/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
605
+ venv/lib/python3.13/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
606
+ venv/lib/python3.13/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
607
+ venv/lib/python3.13/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
608
+ venv/lib/python3.13/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
609
+ venv/lib/python3.13/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_test.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
610
+ venv/lib/python3.13/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
611
+ venv/lib/python3.13/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
612
+ venv/lib/python3.13/site-packages/torch/utils/__pycache__/cpp_extension.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
613
+ venv/lib/python3.13/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
614
+ venv/lib/python3.13/site-packages/torchgen/__pycache__/gen.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
615
+ venv/lib/python3.13/site-packages/torchgen/__pycache__/model.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
616
+ venv/lib/python3.13/site-packages/torchvision/.dylibs/libc++.1.0.dylib filter=lfs diff=lfs merge=lfs -text
617
+ venv/lib/python3.13/site-packages/torchvision/.dylibs/libjpeg.8.2.2.dylib filter=lfs diff=lfs merge=lfs -text
618
+ venv/lib/python3.13/site-packages/torchvision/.dylibs/libpng16.16.dylib filter=lfs diff=lfs merge=lfs -text
619
+ venv/lib/python3.13/site-packages/torchvision/.dylibs/libwebp.7.1.8.dylib filter=lfs diff=lfs merge=lfs -text
620
+ venv/lib/python3.13/site-packages/torchvision/.dylibs/libz.1.2.13.dylib filter=lfs diff=lfs merge=lfs -text
621
+ venv/lib/python3.13/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
622
+ venv/lib/python3.13/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
623
+ venv/lib/python3.13/site-packages/torchvision/transforms/__pycache__/transforms.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
624
+ venv/lib/python3.13/site-packages/torchvision/transforms/v2/functional/__pycache__/_geometry.cpython-313.pyc filter=lfs diff=lfs merge=lfs -text
625
+ venv/lib/python3.13/site-packages/yaml/_yaml.cpython-313-darwin.so filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ tags:
4
+ - vision
5
+ - image-segmentation
6
+ - instance-segmentation
7
+ - object-detection
8
+ - pytorch
9
+ - mask-rcnn
10
+ - coffee
11
+ - quality-control
12
+ datasets:
13
+ - custom
14
+ metrics:
15
+ - iou
16
+ - precision
17
+ - recall
18
+ - mAP
19
+ model-index:
20
+ - name: coffee-bean-maskrcnn
21
+ results:
22
+ - task:
23
+ type: instance-segmentation
24
+ name: Instance Segmentation
25
+ dataset:
26
+ name: Coffee Bean Dataset
27
+ type: custom
28
+ metrics:
29
+ - type: precision
30
+ value: 99.92
31
+ name: Precision
32
+ - type: recall
33
+ value: 96.71
34
+ name: Recall
35
+ - type: iou
36
+ value: 90.93
37
+ name: Average IoU
38
+ - type: detection-rate
39
+ value: 96.71
40
+ name: Detection Rate
41
+ library_name: pytorch
42
+ pipeline_tag: image-segmentation
43
+ ---
44
+
45
+ # Coffee Bean Detection - Fine-tuned Mask R-CNN
46
+
47
+ ## Model Details
48
+
49
+ ### Model Description
50
+
51
+ This is a fine-tuned Mask R-CNN model specialized for detecting and segmenting individual coffee beans in images. The model performs instance segmentation, providing precise pixel-level masks and bounding boxes for each detected coffee bean.
52
+
53
+ - **Developed by:** Mark Kunitomi
54
+ - **Model type:** Instance Segmentation (Mask R-CNN)
55
+ - **Architecture:** ResNet-50 FPN backbone
56
+ - **Language:** Not applicable
57
+ - **License:** MIT
58
+
59
+ ### Model Sources
60
+
61
+ - **Repository:** [GitHub - Bean Vision](https://github.com/Markkunitomi/bean-vision)
62
+ - **Paper:** Based on [Mask R-CNN](https://arxiv.org/abs/1703.06870)
63
+
64
+ ## Uses
65
+
66
+ ### Direct Use
67
+
68
+ This model is designed for:
69
+
70
+ - Bean size and shape analysis for roasting optimization
71
+ - Research in agricultural computer vision
72
+ - Educational purposes in computer vision applications
73
+
74
+ ### Downstream Use
75
+
76
+ The model can be fine-tuned for:
77
+ - Specific coffee varieties or roast levels
78
+ - Other small object detection tasks
79
+ - Agricultural produce sorting and grading
80
+ - Industrial quality inspection tasks
81
+
82
+ ### Out-of-Scope Use
83
+
84
+ This model is not suitable for:
85
+ - Medical imaging or diagnosis
86
+ - Security or surveillance applications
87
+ - Real-time applications requiring < 100ms latency
88
+ - Detection of objects other than coffee beans without fine-tuning
89
+
90
+ ## Bias, Risks, and Limitations
91
+
92
+ ### Limitations
93
+ - Trained specifically on coffee beans; limited generalization to other objects
94
+ - Best performance on top-down views of beans on flat surfaces
95
+ - May struggle with heavily occluded or stacked beans
96
+ - Performance varies with extreme roast levels (very light or charred beans)
97
+
98
+ ### Recommendations
99
+ - Use confidence threshold of 0.5 or higher for production applications
100
+ - Validate on your specific bean varieties before deployment
101
+ - Consider additional fine-tuning for specialized use cases
102
+ - Implement human verification for critical applications
103
+
104
+ ## Example Results
105
+
106
+ ![Coffee Bean Detection Example](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/coffee-beans-detection-example.png)
107
+
108
+ *Example showing detection and segmentation of coffee beans with confidence scores. The model accurately identifies individual beans even when they are touching or partially overlapping.*
109
+
110
+ ## How to Get Started with the Model
111
+
112
+ ### Quick Start with Jupyter Notebook
113
+
114
+ A complete inference notebook (`inference_demo.ipynb`) is included with this model. It provides:
115
+ - Step-by-step setup instructions
116
+ - Complete BeanPredictor class implementation
117
+ - Visualization tools
118
+ - Batch processing capabilities
119
+ - Performance analysis utilities
120
+
121
+ ### Basic Usage
122
+
123
+ ```python
124
+ import torch
125
+ from torchvision.models.detection import maskrcnn_resnet50_fpn
126
+ from torchvision.transforms import functional as F
127
+ from PIL import Image
128
+ import numpy as np
129
+
130
+ # Load the model
131
+ model = maskrcnn_resnet50_fpn(num_classes=2) # background + bean
132
+ state_dict = torch.load('maskrcnn_coffeebeans_v1.safetensors', map_location='cpu')
133
+ model.load_state_dict(state_dict)
134
+ model.eval()
135
+
136
+ # Prepare image
137
+ image = Image.open('your_coffee_beans.jpg').convert('RGB')
138
+ image_tensor = F.to_tensor(image).unsqueeze(0)
139
+
140
+ # Run inference
141
+ with torch.no_grad():
142
+ predictions = model(image_tensor)[0]
143
+
144
+ # Filter predictions by confidence
145
+ threshold = 0.5
146
+ mask = predictions['scores'] > threshold
147
+ filtered_predictions = {
148
+ 'boxes': predictions['boxes'][mask],
149
+ 'labels': predictions['labels'][mask],
150
+ 'scores': predictions['scores'][mask],
151
+ 'masks': predictions['masks'][mask]
152
+ }
153
+
154
+ print(f"Detected {len(filtered_predictions['boxes'])} coffee beans")
155
+ ```
156
+
157
+ ## Training Details
158
+
159
+ ### Training Data
160
+
161
+ The model was trained on a custom dataset of coffee bean images:
162
+ - 128 training images with detailed COCO-format annotations
163
+ - Multiple coffee varieties and roast levels
164
+ - Various lighting conditions and backgrounds
165
+ - Manually annotated polygon masks for each bean
166
+ - Data augmentation: rotation, scaling, color jittering, horizontal/vertical flips
167
+
168
+ ### Training Procedure
169
+
170
+ #### Base Model
171
+ This model was fine-tuned from `model_128img_25ep_25-09-13.pth`, which was itself trained on 128 coffee bean images for 25 epochs. The base model provided a strong foundation for coffee bean detection, and this fine-tuned version specifically optimizes mask quality and detection accuracy.
172
+
173
+ #### Preprocessing
174
+ - Images resized to maintain aspect ratio
175
+ - Normalization with ImageNet statistics
176
+ - Random augmentations during training
177
+
178
+ #### Training Hyperparameters
179
+ - **Training regime:** Mixed precision (fp16/fp32)
180
+ - **Epochs:** 10 (best checkpoint at epoch 9)
181
+ - **Batch size:** 2
182
+ - **Optimizer:** AdamW
183
+ - **Learning rates:**
184
+ - Base: 1e-4
185
+ - Mask head: 5e-4 (higher for mask optimization)
186
+ - Backbone: 1e-5 (lower for stability)
187
+ - **Scheduler:** CosineAnnealingWarmRestarts (T_0=3, T_mult=2)
188
+ - **Weight decay:** 1e-4
189
+ - **Gradient clipping:** 1.0
190
+ - **Mask loss weight:** 2.0 (emphasized for better segmentation)
191
+
192
+ ### Evaluation
193
+
194
+ #### Testing Data
195
+ Evaluated on a held-out validation set:
196
+ - 4,952 ground truth bean instances
197
+ - Diverse bean arrangements and densities
198
+ - Various roast levels and lighting conditions
199
+
200
+ #### Metrics
201
+
202
+ | Metric | Value |
203
+ |--------|-------|
204
+ | **Precision** | 99.92% |
205
+ | **Recall** | 96.71% |
206
+ | **Average IoU** | 90.93% |
207
+ | **Detection Rate** | 96.71% |
208
+ | **Average Confidence** | 99.82% |
209
+ | **Mask Loss** | 0.1333 |
210
+ | **Validation Loss** | 0.2464 |
211
+
212
+ ## Environmental Impact
213
+
214
+ Carbon emissions were not tracked for this training. The model was trained on a single GPU for approximately 2 hours.
215
+
216
+ ## Technical Specifications
217
+
218
+ ### Model Architecture and Objective
219
+
220
+ - **Architecture:** Mask R-CNN with ResNet-50 Feature Pyramid Network
221
+ - **Input:** RGB images (any size)
222
+ - **Output:** Instance masks, bounding boxes, class labels, confidence scores
223
+ - **Objective:** Minimize combined classification, box regression, and mask segmentation losses
224
+ - **Model Size:** 176.1 MB (SafeTensors format)
225
+
226
+ ### Compute Infrastructure
227
+
228
+ #### Hardware
229
+ - GPU: Single GPU training
230
+ - Training time: ~2 hours for fine-tuning
231
+
232
+ #### Software
233
+ - PyTorch 2.0+
234
+ - TorchVision 0.15+
235
+ - CUDA 11.8+
236
+ - Python 3.8+
237
+
238
+ ## Citation
239
+
240
+ **BibTeX:**
241
+ ```bibtex
242
+ @misc{coffee_bean_maskrcnn_2025,
243
+ author = {Bean Vision Team},
244
+ title = {Coffee Bean Detection - Fine-tuned Mask R-CNN},
245
+ year = {2025},
246
+ publisher = {Hugging Face},
247
+ journal = {Hugging Face Model Hub},
248
+ howpublished = {\url{https://huggingface.co/[your-username]/coffee-bean-maskrcnn}}
249
+ }
250
+ ```
251
+
252
+ ## Model Card Authors
253
+
254
+ Bean Vision Team
255
+
256
+ ## Model Card Contact
257
+
258
+ Please open an issue on the [GitHub repository](https://github.com/Markkunitomi/bean-vision) for questions or feedback.
.ipynb_checkpoints/inference_demo-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
README.md CHANGED
@@ -1,3 +1,173 @@
1
  ---
2
  license: apache-2.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: apache-2.0
3
+ tags:
4
+ - vision
5
+ - image-segmentation
6
+ - instance-segmentation
7
+ - object-detection
8
+ - pytorch
9
+ - mask-rcnn
10
+ - coffee
11
+ datasets:
12
+ - custom
13
+ metrics:
14
+ - iou
15
+ - precision
16
+ - recall
17
+ - mAP
18
+ model-index:
19
+ - name: coffee-bean-maskrcnn
20
+ results:
21
+ - task:
22
+ type: instance-segmentation
23
+ name: Instance Segmentation
24
+ dataset:
25
+ name: Coffee Bean Dataset
26
+ type: custom
27
+ metrics:
28
+ - type: precision
29
+ value: 99.92
30
+ name: Precision
31
+ - type: recall
32
+ value: 96.71
33
+ name: Recall
34
+ - type: iou
35
+ value: 90.93
36
+ name: Average IoU
37
+ - type: detection-rate
38
+ value: 96.71
39
+ name: Detection Rate
40
+ library_name: pytorch
41
+ pipeline_tag: image-segmentation
42
  ---
43
+
44
+ # Coffee Bean Detection - Fine-tuned Mask R-CNN
45
+
46
+ ## Model Details
47
+
48
+ ### Model Description
49
+
50
+ This is a fine-tuned Mask R-CNN model specialized for detecting and segmenting individual coffee beans in images. The model performs instance segmentation, providing precise pixel-level masks and bounding boxes for each detected coffee bean.
51
+
52
+ - **Developed by:** Mark Kunitomi
53
+ - **Model type:** Instance Segmentation (Mask R-CNN)
54
+ - **Architecture:** ResNet-50 FPN backbone
55
+ - **License:** apache-2.0
56
+
57
+
58
+
59
+
60
+ ## Example Results
61
+
62
+ <div align="center">
63
+ <img src="./data/Costa_Rica_Red_Honey_Don_Oscar-green.png" alt="Original Green Beans" width="45%" style="margin-right: 10px;"/>
64
+ <img src="./data/Costa_Rica_Red_Honey_Don_Oscar-green-prediction.png" alt="Detection Results" width="45%"/>
65
+ </div>
66
+
67
+ *Left: Original green coffee beans image. Right: Detection and segmentation results showing individual bean masks with confidence scores.*
68
+
69
+ ## How to Get Started with the Model
70
+
71
+ ### Quick Start with Jupyter Notebook
72
+
73
+ A complete inference notebook (`inference_demo.ipynb`) is included with this model.
74
+
75
+ ### Command Line Interface
76
+
77
+ For quick inference, use the included `predict_beans.py` script:
78
+
79
+ ```bash
80
+ # Install dependencies
81
+ pip install -r requirements.txt
82
+
83
+ # Basic inference on a single image
84
+ python predict_beans.py --model maskrcnn_coffeebeans_v1.safetensors --images your_image.jpg
85
+
86
+ # Process multiple images with custom settings
87
+ python predict_beans.py --model maskrcnn_coffeebeans_v1.safetensors \
88
+ --images *.jpg \
89
+ --confidence 0.5 \
90
+ --nms_threshold 0.3 \
91
+ --smooth_polygons \
92
+ --filter_edge_beans \
93
+ --output_dir results \
94
+ --export_format coco
95
+
96
+ # For all available options
97
+ python predict_beans.py --help
98
+ ```
99
+
100
+ **Key CLI Options:**
101
+ - `--confidence`: Detection confidence threshold (default: 0.5)
102
+ - `--nms_threshold`: Non-maximum suppression threshold (default: 0.3)
103
+ - `--smooth_polygons`: Apply polygon smoothing for cleaner masks
104
+ - `--filter_edge_beans`: Remove beans touching image edges
105
+ - `--export_format`: Output format (json, coco, labelme, all)
106
+
107
+
108
+
109
+ ### Recommendations
110
+ - Use confidence threshold of 0.5 or higher for production applications
111
+ - Validate on your specific bean varieties before deployment
112
+ - Consider additional fine-tuning for specialized use cases
113
+ - Implement human verification for critical applications
114
+
115
+ ## Training Details
116
+
117
+ ### Training Data
118
+
119
+ The model was trained on a custom dataset of coffee bean images:
120
+ - 128 training images with detailed COCO-format annotations
121
+ - Multiple coffee varieties and roast levels
122
+ - Various lighting conditions and backgrounds
123
+ - Manually annotated polygon masks for each bean
124
+ - Data augmentation: rotation, scaling, color jittering, horizontal/vertical flips
125
+
126
+ #### Preprocessing
127
+ - Images resized to maintain aspect ratio
128
+ - Normalization with ImageNet statistics
129
+ - Random augmentations during training
130
+
131
+
132
+ ### Evaluation
133
+
134
+ #### Testing Data
135
+ Evaluated on a held-out validation set:
136
+ - 4,952 ground truth bean instances
137
+ - Diverse bean arrangements and densities
138
+ - Various roast levels and lighting conditions
139
+
140
+ #### Metrics
141
+
142
+ | Metric | Value |
143
+ |--------|-------|
144
+ | **Precision** | 99.92% |
145
+ | **Recall** | 96.71% |
146
+ | **Average IoU** | 90.93% |
147
+ | **Detection Rate** | 96.71% |
148
+ | **Average Confidence** | 99.82% |
149
+ | **Mask Loss** | 0.1333 |
150
+ | **Validation Loss** | 0.2464 |
151
+
152
+
153
+ ## Technical Specifications
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ - **Architecture:** Mask R-CNN with ResNet-50 Feature Pyramid Network
158
+ - **Input:** RGB images (any size)
159
+ - **Output:** Instance masks, bounding boxes, class labels, confidence scores
160
+ - **Objective:** Minimize combined classification, box regression, and mask segmentation losses
161
+ - **Model Size:** 176.1 MB (SafeTensors format)
162
+
163
+ ### Compute Infrastructure
164
+
165
+ #### Hardware
166
+ - CPU: Mac Mini M2 with 8GB RAM
167
+ - Training time: ~2 hours for fine-tuning
168
+
169
+ #### Software
170
+ - PyTorch 2.0+
171
+ - TorchVision 0.15+
172
+ - CUDA 11.8+
173
+ - Python 3.8+
config.json ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": ["MaskRCNN"],
3
+ "model_type": "maskrcnn_resnet50_fpn",
4
+ "framework": "pytorch",
5
+ "task": "instance-segmentation",
6
+ "num_classes": 2,
7
+ "backbone": "resnet50",
8
+ "fpn": true,
9
+ "pretrained": false,
10
+ "model_config": {
11
+ "box_score_thresh": 0.5,
12
+ "box_nms_thresh": 0.5,
13
+ "box_detections_per_img": 1000,
14
+ "box_fg_iou_thresh": 0.5,
15
+ "box_bg_iou_thresh": 0.5,
16
+ "box_batch_size_per_image": 512,
17
+ "box_positive_fraction": 0.25,
18
+ "bbox_reg_weights": null,
19
+ "rpn_anchor_generator": {
20
+ "sizes": [[32], [64], [128], [256], [512]],
21
+ "aspect_ratios": [[0.5, 1.0, 2.0]]
22
+ },
23
+ "rpn_pre_nms_top_n_train": 2000,
24
+ "rpn_pre_nms_top_n_test": 1000,
25
+ "rpn_post_nms_top_n_train": 2000,
26
+ "rpn_post_nms_top_n_test": 1000,
27
+ "rpn_nms_thresh": 0.7,
28
+ "rpn_fg_iou_thresh": 0.7,
29
+ "rpn_bg_iou_thresh": 0.3,
30
+ "rpn_batch_size_per_image": 256,
31
+ "rpn_positive_fraction": 0.5
32
+ },
33
+ "training_config": {
34
+ "base_model": "model_128img_25ep_25-09-13.pth",
35
+ "epochs": 10,
36
+ "best_epoch": 9,
37
+ "batch_size": 2,
38
+ "learning_rate": 0.0001,
39
+ "learning_rate_mask": 0.0005,
40
+ "learning_rate_backbone": 0.00001,
41
+ "optimizer": "AdamW",
42
+ "scheduler": "CosineAnnealingWarmRestarts",
43
+ "weight_decay": 0.0001,
44
+ "gradient_clipping": 1.0,
45
+ "mask_loss_weight": 2.0
46
+ },
47
+ "performance_metrics": {
48
+ "precision": 0.9991654496140204,
49
+ "recall": 0.9670840064620355,
50
+ "detection_rate": 0.9670840064620355,
51
+ "avg_iou": 0.909326434135437,
52
+ "avg_confidence": 0.9982164825598233,
53
+ "best_mask_loss": 0.13330359986195198,
54
+ "val_loss": 0.24638879069915184
55
+ },
56
+ "dataset_info": {
57
+ "train_images": 128,
58
+ "val_ground_truth_instances": 4952,
59
+ "val_detected_instances": 4789,
60
+ "annotation_format": "COCO"
61
+ },
62
+ "input_specs": {
63
+ "image_format": "RGB",
64
+ "normalization": "ImageNet",
65
+ "min_size": null,
66
+ "max_size": null
67
+ },
68
+ "output_specs": {
69
+ "boxes": "Bounding boxes [N, 4] in xyxy format",
70
+ "labels": "Class labels [N] (0=background, 1=bean)",
71
+ "scores": "Confidence scores [N] range 0-1",
72
+ "masks": "Binary masks [N, 1, H, W]"
73
+ },
74
+ "model_size_mb": 176.1,
75
+ "format": "safetensors",
76
+ "version": "1.0.0",
77
+ "created_date": "2025-09-21",
78
+ "license": "MIT"
79
+ }
data/Costa_Rica_Red_Honey_Don_Oscar-green-prediction.png ADDED

Git LFS Details

  • SHA256: 02b0dcd0b078d0eb5c637165872bc0a3306acc654079bc85f8fee7dca144cf49
  • Pointer size: 132 Bytes
  • Size of remote file: 6.5 MB
data/Costa_Rica_Red_Honey_Don_Oscar-green.png ADDED

Git LFS Details

  • SHA256: e3a5142de33d011debf2828a49c614eb8c14fedce53fc4483ea25a2383a58369
  • Pointer size: 132 Bytes
  • Size of remote file: 7.95 MB
data/Costa_Rica_Zarcero_Monte_Brisas-roasted.png ADDED

Git LFS Details

  • SHA256: 58bfc58ec9bfee8b350d99cf3fee7bf4ee2fc394707fa4a5c228119ca03b65b1
  • Pointer size: 132 Bytes
  • Size of remote file: 8.94 MB
inference_demo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
maskrcnn_coffeebeans_v1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dec1c434c6f985e20b83dfa737db0618cfb0a73a3e3957261e2439993c09c36f
3
+ size 176146452
predict_beans.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Bean detection prediction script
4
+ """
5
+
6
+ # Standard library imports
7
+ import argparse
8
+ import json
9
+ import sys
10
+ import time
11
+ from pathlib import Path
12
+ from typing import Any, Dict, List
13
+
14
+ # Add src to path
15
+ sys.path.insert(0, str(Path(__file__).parent / "src"))
16
+
17
+ # Local imports
18
+ from bean_vision.export import COCOExporter, LabelMeExporter
19
+ from bean_vision.inference import BeanPredictor
20
+ from bean_vision.visualization.detection_viz import DetectionVisualizer
21
+
22
+ # ANSI escape codes for terminal formatting
23
+ BOLD = '\033[1m'
24
+ RESET = '\033[0m'
25
+
26
+
27
def main():
    """CLI entry point: detect beans in one or more images with MaskR-CNN.

    Parses command-line options, runs inference per image, optionally writes
    visualizations and exports (JSON / COCO / LabelMe) into the output
    directory, and prints a human-readable summary.
    """
    parser = argparse.ArgumentParser(description='Bean detection using trained MaskR-CNN')

    # Model and input arguments
    parser.add_argument('--model', type=str, required=True,
                        help='Path to trained model checkpoint')
    parser.add_argument('--images', nargs='+', required=True,
                        help='Input image paths (can use wildcards)')

    # Detection parameters
    parser.add_argument('--confidence', '--threshold', type=float, default=0.5,
                        dest='confidence',
                        help='Confidence threshold for detections')
    parser.add_argument('--max_detections', type=int, default=500,
                        help='Maximum detections per image')
    parser.add_argument('--mask_threshold', type=float, default=0.5,
                        help='Threshold for mask binarization')
    parser.add_argument('--device', type=str, default='cpu',
                        help='Device to use (cpu or cuda)')

    # NMS parameters
    parser.add_argument('--apply_nms', action='store_true', default=True,
                        help='Apply Non-Maximum Suppression to remove overlapping detections (default: True)')
    parser.add_argument('--no_nms', dest='apply_nms', action='store_false',
                        help='Disable NMS')
    parser.add_argument('--nms_type', choices=['box', 'mask'], default='box',
                        help='Type of NMS to apply (default: box - faster, mask - more accurate)')
    parser.add_argument('--nms_threshold', type=float, default=0.3,
                        help='IoU threshold for NMS (lower = more aggressive)')

    # Edge and size filtering
    parser.add_argument('--filter_edge_beans', action='store_true', default=True,
                        help='Filter out partial beans at image edges (default: True)')
    parser.add_argument('--no_edge_filter', dest='filter_edge_beans', action='store_false',
                        help='Disable edge bean filtering')
    parser.add_argument('--edge_threshold', type=int, default=10,
                        help='Pixel distance from edge to consider for filtering')
    parser.add_argument('--min_bean_area', type=float, default=500,
                        help='Minimum bean area in pixels')
    parser.add_argument('--max_bean_area', type=float, default=30000,
                        help='Maximum bean area in pixels')

    # Output options
    parser.add_argument('--output_dir', type=str, default='results',
                        help='Directory to save outputs (default: results)')
    parser.add_argument('--visualize', action='store_true', default=True,
                        help='Create visualization images (default: True)')
    parser.add_argument('--no_visualize', dest='visualize', action='store_false',
                        help='Disable visualization')
    parser.add_argument('--vis_type', choices=['masks', 'polygons', 'both'],
                        default='both', help='Visualization type (default: both)')
    parser.add_argument('--export_format', choices=['json', 'coco', 'labelme', 'all'],
                        default='json', help='Export format for predictions (default: json)')
    parser.add_argument('--include_polygons', action='store_true', default=True,
                        help='Convert masks to polygons (default: True)')

    # Polygon smoothing options
    parser.add_argument('--smooth_polygons', action='store_true', default=True,
                        help='Apply smoothing to polygons to reduce jaggedness (default: True)')
    parser.add_argument('--no_smooth', dest='smooth_polygons', action='store_false',
                        help='Disable polygon smoothing')
    parser.add_argument('--smoothing_factor', type=float, default=0.1,
                        help='Smoothing factor (0.0-1.0, 0=no smoothing, 1=maximum smoothing, default: 0.1)')

    # Legacy compatibility
    parser.add_argument('--save_json', action='store_true',
                        help='Save predictions as JSON (legacy, use --export_format json)')

    args = parser.parse_args()

    # Handle legacy save_json flag.
    # BUGFIX: the old check was `if args.save_json and not args.export_format`,
    # which was dead code because --export_format always has a value (default
    # 'json'). Now --save_json guarantees JSON output even when another
    # export format was selected.
    if args.save_json and args.export_format not in ('json', 'all'):
        args.export_format = 'all'

    # Create output directory if needed
    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
    else:
        output_dir = None

    # Print header
    print('\n' + '='*80)
    print(f'{BOLD}BEAN DETECTION{RESET}')
    print('='*80)

    # Initialize predictor
    print(f'\n{BOLD}Model:{RESET} {Path(args.model).name}')

    predictor = BeanPredictor(
        model_path=Path(args.model),
        device=args.device,
        max_detections=args.max_detections,
        confidence_threshold=args.confidence,
        mask_threshold=args.mask_threshold,
        nms_threshold=args.nms_threshold,
        # BUGFIX: was `args.smooth_polygons or args.smoothing_factor > 0`,
        # which forced smoothing on even with --no_smooth because the default
        # smoothing_factor (0.1) is > 0.
        smooth_polygons=args.smooth_polygons,
        smoothing_factor=args.smoothing_factor,
        apply_nms=args.apply_nms,
        nms_type=args.nms_type,
        filter_edge_beans=args.filter_edge_beans,
        edge_threshold=args.edge_threshold,
        min_bean_area=args.min_bean_area,
        max_bean_area=args.max_bean_area
    )

    # Initialize visualizer if needed
    if args.visualize:
        visualizer = DetectionVisualizer(confidence_threshold=args.confidence)

    # Initialize exporters if needed
    coco_exporter = COCOExporter("bean_predictions") if args.export_format in ['coco', 'all'] else None
    labelme_exporter = LabelMeExporter() if args.export_format in ['labelme', 'all'] else None

    # Process images
    all_results = []
    total_beans = 0
    total_time = 0

    # Print processing header
    if len(args.images) > 1:
        print(f'\n{BOLD}Processing {len(args.images)} images...{RESET}')
    else:
        print(f'\n{BOLD}Processing image...{RESET}')

    for image_path in args.images:
        image_path = Path(image_path)

        if not image_path.exists():
            print(f"  [!] {image_path.name}: not found")
            continue

        # Run prediction - always include polygons for better analysis
        result = predictor.predict(
            image_path,
            return_polygons=True,  # Always return polygons
            return_masks=True
        )

        # Print results
        if len(args.images) == 1:
            print(f'\n{BOLD}Results:{RESET}')
            print(f'  Image: {image_path.name}')
            print(f'  Beans detected: {result["bean_count"]}')
            print(f'  Inference time: {result["inference_time"]:.2f}s')
        else:
            print(f'  {image_path.name}: {result["bean_count"]} beans ({result["inference_time"]:.1f}s)')

        total_beans += result['bean_count']
        total_time += result['inference_time']

        # Visualize if requested (silent)
        if args.visualize and output_dir:
            if args.vis_type in ['masks', 'both']:
                # Use legacy naming for backward compatibility
                mask_vis_path = output_dir / f"{image_path.stem}_prediction.png"
                visualizer.visualize_masks_with_confidence(
                    image_path,
                    result,
                    mask_vis_path,
                    mask_threshold=args.mask_threshold
                )

            if args.vis_type in ['polygons', 'both'] and 'polygons' in result:
                poly_vis_path = output_dir / f"{image_path.stem}_poly_vis.png"
                visualizer.visualize_polygons(
                    image_path,
                    result,
                    poly_vis_path
                )

        # Add to exporters
        if coco_exporter:
            img_id = coco_exporter.add_image(
                image_path,
                result['image_size'][0],
                result['image_size'][1]
            )
            coco_exporter.add_predictions(result, img_id)

        if labelme_exporter and output_dir:
            labelme_path = output_dir / f"{image_path.stem}_labelme.json"
            labelme_exporter.save(image_path, result, labelme_path)
            # Silent save

        # Store result (without tensor data for JSON export)
        json_result = {
            'image_path': result['image_path'],
            'image_size': result['image_size'],
            'inference_time': result['inference_time'],
            'bean_count': result['bean_count'],
            'confidence_threshold': result['confidence_threshold'],
            'total_detections': result['total_detections'],
            'filtered_detections': result['filtered_detections'],
            'predictions': {
                'boxes': result['boxes'],
                'scores': result['scores'],
                'labels': result['labels']
            }
        }

        # Rename for backward compatibility
        json_result['inference_time_seconds'] = json_result.pop('inference_time')

        if 'polygons' in result:
            # Keep polygons in their original format for proper COCO export
            # The format is: List[List[List[Tuple[float, float]]]]
            # Each detection has a list of polygons (usually just one)
            json_result['predictions']['polygons'] = result['polygons']

        all_results.append(json_result)

    # Save exports (silent)
    if output_dir:
        if coco_exporter:
            coco_path = output_dir / "predictions_coco.json"
            coco_exporter.save(coco_path)

        if args.export_format in ['json', 'all']:
            json_path = output_dir / "predictions.json"
            with open(json_path, 'w') as f:
                json.dump(all_results, f, indent=2)

    # Print summary
    if len(all_results) > 0:
        if len(all_results) > 1:
            print(f'\n{BOLD}Summary:{RESET}')
            avg_beans = total_beans / len(all_results)
            print(f'  Total images: {len(all_results)}')
            print(f'  Total beans: {total_beans}')
            print(f'  Average per image: {avg_beans:.0f}')
            print(f'  Total time: {total_time:.1f}s')

    # Show output directory
    if output_dir:
        print(f'\n{BOLD}Output directory:{RESET} {output_dir}/')

    print('\n' + '='*80)
    print()  # Add final newline
266
+
267
+
268
# Script entry point: run the CLI when executed directly.
if __name__ == "__main__":
    main()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ torch>=2.0.0
2
+ torchvision>=0.15.0
3
+ pillow>=9.0.0
4
+ numpy>=1.21.0
5
+ opencv-python>=4.5.0
6
+ matplotlib>=3.5.0
7
+ safetensors>=0.3.0
8
+ tqdm>=4.65.0
src/bean_vision.egg-info/PKG-INFO ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: bean-vision
3
+ Version: 1.0.0
4
+ Summary: Production-grade coffee bean segmentation with MaskR-CNN
5
+ Author: Bean Vision Team
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/Markkunitomi/bean-vision
8
+ Project-URL: Repository, https://github.com/Markkunitomi/bean-vision
9
+ Project-URL: Documentation, https://github.com/Markkunitomi/bean-vision#readme
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Operating System :: OS Independent
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.8
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Classifier: Topic :: Scientific/Engineering :: Image Processing
21
+ Requires-Python: >=3.8
22
+ Description-Content-Type: text/markdown
23
+ Requires-Dist: torch>=2.0.0
24
+ Requires-Dist: torchvision>=0.15.0
25
+ Requires-Dist: opencv-python>=4.8.0
26
+ Requires-Dist: Pillow>=10.0.0
27
+ Requires-Dist: numpy>=1.24.0
28
+ Requires-Dist: scipy>=1.10.0
29
+ Requires-Dist: pycocotools>=2.0.6
30
+ Requires-Dist: albumentations>=1.3.0
31
+ Requires-Dist: pandas>=2.0.0
32
+ Requires-Dist: matplotlib>=3.7.0
33
+ Requires-Dist: seaborn>=0.12.0
34
+ Requires-Dist: scikit-image>=0.21.0
35
+ Requires-Dist: tqdm>=4.65.0
36
+ Requires-Dist: tensorboard>=2.13.0
37
+ Requires-Dist: jupyter>=1.0.0
38
+ Requires-Dist: notebook>=7.0.0
39
+ Requires-Dist: labelme>=5.3.0
40
+ Provides-Extra: api
41
+ Requires-Dist: fastapi>=0.100.0; extra == "api"
42
+ Requires-Dist: uvicorn[standard]>=0.20.0; extra == "api"
43
+ Requires-Dist: python-multipart>=0.0.6; extra == "api"
44
+ Provides-Extra: dev
45
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
46
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
47
+ Requires-Dist: black>=22.0.0; extra == "dev"
48
+ Requires-Dist: isort>=5.0.0; extra == "dev"
49
+ Requires-Dist: flake8>=5.0.0; extra == "dev"
50
+ Requires-Dist: mypy>=1.0.0; extra == "dev"
51
+
52
+ # Bean Vision
53
+
54
+ Coffee bean detection and segmentation using Mask R-CNN.
55
+
56
+ ## Installation
57
+
58
+ ```bash
59
+ git clone https://github.com/Markkunitomi/bean-vision.git
60
+ cd bean-vision
61
+
62
+ python -m venv .venv
63
+ source .venv/bin/activate
64
+
65
+ uv pip install -e .
66
+ ```
67
+
68
+ ## Usage
69
+
70
+ ### Command Line
71
+
72
+ ```bash
73
+ # Basic prediction
74
+ python predict_beans.py models/beans_101img_20ep.pth data/sample.png
75
+
76
+ # With options
77
+ python predict_beans.py models/beans_101img_20ep.pth data/sample.png \
78
+ --smooth_polygons --filter_edge_beans \
79
+ --threshold 0.5 --nms_threshold 0.5
80
+ ```
81
+
82
+ ### Python API
83
+
84
+ ```python
85
+ from src.bean_vision.inference import BeanPredictor
86
+
87
+ predictor = BeanPredictor(model_path="models/beans_101img_20ep.pth")
88
+ results = predictor.predict("image.jpg", threshold=0.5)
89
+ predictor.visualize(results, "output.png")
90
+ ```
91
+
92
+ ## Training
93
+
94
+ ```bash
95
+ python scripts/train.py \
96
+ --dataset_path coco_datasets/dataset_101_images/ \
97
+ --epochs 20 \
98
+ --batch_size 2 \
99
+ --output models/my_model.pth
100
+ ```
101
+
102
+ ## Models
103
+
104
+ | Model | Training Images | mAP@0.5 |
105
+ |-------|----------------|---------|
106
+ | beans_101img_20ep.pth | 101 | 98.9% |
107
+ | beans_60img_20ep.pth | 60 | 94.7% |
108
+ | beans_30img_20ep.pth | 30 | 89.1% |
109
+ | beans_10img_20ep.pth | 10 | 78.3% |
110
+
111
+ ## Dataset Preparation
112
+
113
+ 1. Annotate with LabelMe:
114
+ ```bash
115
+ labelme data/ --labels bean
116
+ ```
117
+
118
+ 2. Convert to COCO:
119
+ ```bash
120
+ labelme2coco data/ coco_datasets/dataset/ --labels bean
121
+ ```
122
+
123
+ ## License
124
+
125
+ MIT
src/bean_vision.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ README.md
2
+ pyproject.toml
3
+ src/bean_vision/__init__.py
4
+ src/bean_vision/config.py
5
+ src/bean_vision.egg-info/PKG-INFO
6
+ src/bean_vision.egg-info/SOURCES.txt
7
+ src/bean_vision.egg-info/dependency_links.txt
8
+ src/bean_vision.egg-info/entry_points.txt
9
+ src/bean_vision.egg-info/requires.txt
10
+ src/bean_vision.egg-info/top_level.txt
11
+ src/bean_vision/data/__init__.py
12
+ src/bean_vision/data/dataset.py
13
+ src/bean_vision/data/transforms.py
14
+ src/bean_vision/evaluation/__init__.py
15
+ src/bean_vision/evaluation/evaluator.py
16
+ src/bean_vision/evaluation/metrics.py
17
+ src/bean_vision/export/__init__.py
18
+ src/bean_vision/export/coco_exporter.py
19
+ src/bean_vision/export/labelme_exporter.py
20
+ src/bean_vision/export/polygon_utils.py
21
+ src/bean_vision/inference/__init__.py
22
+ src/bean_vision/inference/mask_converter.py
23
+ src/bean_vision/inference/postprocessing.py
24
+ src/bean_vision/inference/predictor.py
25
+ src/bean_vision/models/__init__.py
26
+ src/bean_vision/models/model.py
27
+ src/bean_vision/training/__init__.py
28
+ src/bean_vision/training/logging.py
29
+ src/bean_vision/training/metrics.py
30
+ src/bean_vision/training/timing.py
31
+ src/bean_vision/training/trainer.py
32
+ src/bean_vision/utils/__init__.py
33
+ src/bean_vision/utils/io.py
34
+ src/bean_vision/utils/logging.py
35
+ src/bean_vision/utils/misc.py
36
+ src/bean_vision/visualization/__init__.py
37
+ src/bean_vision/visualization/detection_viz.py
38
+ tests/test_predict_beans.py
39
+ tests/test_train_beans.py
src/bean_vision.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
src/bean_vision.egg-info/entry_points.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ [console_scripts]
2
+ bean-api = bean_vision.cli.api:main
3
+ bean-predict = bean_vision.cli.predict:main
4
+ bean-train = bean_vision.cli.train:main
src/bean_vision.egg-info/requires.txt ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch>=2.0.0
2
+ torchvision>=0.15.0
3
+ opencv-python>=4.8.0
4
+ Pillow>=10.0.0
5
+ numpy>=1.24.0
6
+ scipy>=1.10.0
7
+ pycocotools>=2.0.6
8
+ albumentations>=1.3.0
9
+ pandas>=2.0.0
10
+ matplotlib>=3.7.0
11
+ seaborn>=0.12.0
12
+ scikit-image>=0.21.0
13
+ tqdm>=4.65.0
14
+ tensorboard>=2.13.0
15
+ jupyter>=1.0.0
16
+ notebook>=7.0.0
17
+ labelme>=5.3.0
18
+
19
+ [api]
20
+ fastapi>=0.100.0
21
+ uvicorn[standard]>=0.20.0
22
+ python-multipart>=0.0.6
23
+
24
+ [dev]
25
+ pytest>=7.0.0
26
+ pytest-cov>=4.0.0
27
+ black>=22.0.0
28
+ isort>=5.0.0
29
+ flake8>=5.0.0
30
+ mypy>=1.0.0
src/bean_vision.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ bean_vision
src/bean_vision/__init__.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Bean Vision - Production-grade coffee bean segmentation."""
2
+
3
+ __version__ = "2.0.0"
4
+ __author__ = "Bean Vision Team"
5
+
6
+ # Import main components for easier access
7
+ # Note: Using safe imports to avoid breaking if modules are missing
8
+ try:
9
+ from .models.model import BeanModel
10
+ BeanDetector = BeanModel # Alias for compatibility
11
+ create_model = None # This function might not exist in the restored version
12
+ load_model_checkpoint = None # This function might not exist in the restored version
13
+ except ImportError:
14
+ BeanModel = None
15
+ BeanDetector = None
16
+ create_model = None
17
+ load_model_checkpoint = None
18
+
19
+ try:
20
+ from .data.dataset import BeanDataset
21
+ from .data.transforms import get_train_transforms, get_val_transforms
22
+ except ImportError:
23
+ BeanDataset = None
24
+ get_train_transforms = None
25
+ get_val_transforms = None
26
+ try:
27
+ from .inference.predictor import BeanPredictor
28
+ except ImportError:
29
+ BeanPredictor = None
30
+
31
+ try:
32
+ from .evaluation.evaluator import BeanEvaluator
33
+ except ImportError:
34
+ BeanEvaluator = None
35
+
36
+ try:
37
+ from .export import COCOExporter, LabelMeExporter
38
+ except ImportError:
39
+ COCOExporter = None
40
+ LabelMeExporter = None
41
+
42
+ __all__ = [
43
+ # Models
44
+ 'BeanDetector',
45
+ 'create_model',
46
+ 'load_model_checkpoint',
47
+
48
+ # Data
49
+ 'BeanDataset',
50
+ 'get_train_transforms',
51
+ 'get_val_transforms',
52
+
53
+ # Inference
54
+ 'BeanPredictor',
55
+
56
+ # Evaluation
57
+ 'BeanEvaluator',
58
+
59
+ # Export
60
+ 'COCOExporter',
61
+ 'LabelMeExporter',
62
+
63
+ # Version
64
+ '__version__'
65
+ ]
src/bean_vision/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (1.29 kB). View file
 
src/bean_vision/__pycache__/config.cpython-313.pyc ADDED
Binary file (27.7 kB). View file
 
src/bean_vision/config.py ADDED
@@ -0,0 +1,575 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Production-grade configuration management system for bean color analysis.
4
+
5
+ Supports environment-specific configurations, validation, and hot-reloading.
6
+ """
7
+
8
+ import os
9
+ import logging
10
+ from pathlib import Path
11
+ from typing import Dict, Any, Optional, Union, List
12
+ from dataclasses import dataclass, field
13
+ import yaml
14
+ from functools import lru_cache
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
@dataclass
class ModelConfig:
    """Model configuration parameters."""
    num_classes: int      # number of classes; validate() requires >= 2 (background + beans)
    checkpoint_path: str  # path to the trained model weights
    device: str           # torch device string, e.g. "cpu" or "cuda"
25
+
26
+
27
@dataclass
class ImageConfig:
    """Image processing configuration."""
    resize_width: int           # target width in pixels; validate() requires > 0
    resize_height: int          # target height in pixels; validate() requires > 0
    imagenet_mean: List[float]  # per-channel normalization mean (exactly 3 values)
    imagenet_std: List[float]   # per-channel normalization std (exactly 3 values)
34
+
35
+
36
@dataclass
class TrainingConfig:
    """Training configuration parameters."""
    epochs: int                    # number of training epochs; validate() requires > 0
    batch_size: int                # images per batch; validate() requires > 0
    learning_rate: float           # base learning rate; validate() requires > 0
    weight_decay: float            # optimizer weight decay
    momentum: float                # optimizer momentum term
    lr_scheduler_step: int         # LR scheduler step interval
    lr_scheduler_gamma: float      # multiplicative LR decay factor
    gradient_clip_max_norm: float  # max gradient norm for clipping
    backbone_freeze_epochs: int    # epochs to keep the backbone frozen at the start
48
+
49
+
50
@dataclass
class AugmentationConfig:
    """Data augmentation configuration.

    Probabilities are in [0, 1]; limits presumably follow albumentations
    conventions (the package depends on albumentations) -- confirm against
    the transform pipeline.
    """
    random_rotate90_prob: float
    rotate_limit: int
    rotate_prob: float
    horizontal_flip_prob: float
    vertical_flip_prob: float
    brightness_limit: float
    contrast_limit: float
    brightness_contrast_prob: float
61
+
62
+
63
@dataclass
class InferenceConfig:
    """Inference configuration parameters."""
    confidence_threshold: float  # minimum detection score; validate() requires [0, 1]
    nms_threshold: float         # NMS IoU threshold; validate() requires [0, 1]
    min_contour_area: int        # smallest contour area in pixels kept -- confirm units with the consumer
69
+
70
+
71
@dataclass
class PathsConfig:
    """File and directory paths."""
    data_dir: str    # root directory of input data; validate() requires non-empty
    coco_json: str   # path to the COCO annotation file
    log_dir: str     # directory for log output; validate() requires non-empty
    output_dir: str  # directory for generated artifacts; validate() requires non-empty
78
+
79
+
80
@dataclass
class LoggingConfig:
    """Logging configuration."""
    level: str             # logging level name, e.g. "INFO"
    format: str            # log record format string
    file_logging: bool     # whether to write logs to a file
    console_logging: bool  # whether to write logs to the console
87
+
88
+
89
@dataclass
class APIConfig:
    """API configuration parameters (used only when serving the HTTP API)."""
    # Service metadata
    title: str
    description: str
    version: str
    # Server binding / runtime
    host: str
    port: int
    debug: bool
    reload: bool
    workers: int
    # Request limits
    max_file_size_mb: int
    max_batch_size: int
    request_timeout_seconds: int
    # Rate limiting
    rate_limit_enabled: bool
    rate_limit_requests: int
    rate_limit_window_minutes: int
    # CORS
    cors_enabled: bool
    cors_origins: List[str]
    cors_methods: List[str]
    cors_headers: List[str]
    # Authentication
    auth_enabled: bool
    api_key_header: str
    admin_api_key: str
    # Response shaping
    include_model_info: bool
    include_processing_time: bool
    default_return_polygons: bool
    # Response caching
    cache_enabled: bool
    cache_ttl_seconds: int
    cache_max_size: int
119
+
120
+
121
@dataclass
class BeanVisionConfig:
    """Main configuration class for Bean Vision.

    Aggregates all sub-configurations. Build one via :meth:`from_yaml`
    and check invariants with :meth:`validate`.
    """
    model: ModelConfig
    image: ImageConfig
    training: TrainingConfig
    augmentation: AugmentationConfig
    inference: InferenceConfig
    paths: PathsConfig
    logging: LoggingConfig
    api: Optional[APIConfig] = None  # optional: only needed when serving the API

    @classmethod
    def from_yaml(cls, config_path: str) -> 'BeanVisionConfig':
        """Load configuration from a YAML file.

        Args:
            config_path: Path to the YAML configuration file.

        Returns:
            A fully populated BeanVisionConfig.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValueError: If the YAML is malformed or required sections/keys
                are missing.
        """
        config_path = Path(config_path)

        if not config_path.exists():
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        try:
            with open(config_path, 'r') as f:
                config_dict = yaml.safe_load(f)
        except yaml.YAMLError as e:
            # Chain the original parser error for easier debugging.
            raise ValueError(f"Invalid YAML configuration: {e}") from e

        try:
            # API config is optional
            api_config = None
            if 'api' in config_dict:
                api_config = APIConfig(**config_dict['api'])

            return cls(
                model=ModelConfig(**config_dict['model']),
                image=ImageConfig(**config_dict['image']),
                training=TrainingConfig(**config_dict['training']),
                augmentation=AugmentationConfig(**config_dict['augmentation']),
                inference=InferenceConfig(**config_dict['inference']),
                paths=PathsConfig(**config_dict['paths']),
                logging=LoggingConfig(**config_dict['logging']),
                api=api_config
            )
        except (KeyError, TypeError) as e:
            # Chain so the offending key/section is visible in the traceback.
            raise ValueError(f"Invalid configuration structure: {e}") from e

    def validate(self) -> None:
        """Validate configuration values.

        Raises:
            ValueError: On the first invariant violation found.
        """
        # Validate model config: at least background + one foreground class.
        if self.model.num_classes < 2:
            raise ValueError("num_classes must be >= 2")

        # Validate image config
        if self.image.resize_width <= 0 or self.image.resize_height <= 0:
            raise ValueError("Image dimensions must be positive")

        if len(self.image.imagenet_mean) != 3 or len(self.image.imagenet_std) != 3:
            raise ValueError("ImageNet mean and std must have 3 values")

        # Validate training config
        if self.training.epochs <= 0:
            raise ValueError("epochs must be positive")
        if self.training.batch_size <= 0:
            raise ValueError("batch_size must be positive")
        if self.training.learning_rate <= 0:
            raise ValueError("learning_rate must be positive")

        # Validate inference config
        if not 0 <= self.inference.confidence_threshold <= 1:
            raise ValueError("confidence_threshold must be between 0 and 1")
        if not 0 <= self.inference.nms_threshold <= 1:
            raise ValueError("nms_threshold must be between 0 and 1")

        # Validate paths (coco_json is intentionally not checked here)
        for path_attr in ['data_dir', 'log_dir', 'output_dir']:
            path = getattr(self.paths, path_attr)
            if not path or not isinstance(path, str):
                raise ValueError(f"{path_attr} must be a non-empty string")
198
+
199
+
200
@dataclass
class AnalysisThresholds:
    """Analysis threshold configuration.

    Upper bounds used when judging bean color uniformity. Field names
    suggest Delta-E values are CIEDE2000 color differences and L is CIELAB
    lightness -- confirm against the analysis code that consumes these.
    """
    median_delta_e2000_max: float = 3.0
    p95_delta_e2000_max: float = 5.0
    std_L_max: float = 2.0
    center_edge_L_max: float = 2.0
    cv_L_max: float = 8.0
    p95_pairwise_delta_e2000_max: float = 6.0
    tolerance_percent_min: float = 90.0  # a lower bound (minimum), unlike the *_max fields
    defect_delta_e2000_max: float = 10.0
    defect_std_L_max: float = 5.0
212
+
213
+
214
@dataclass
class PerformanceConfig:
    """Performance optimization configuration."""
    # Multiprocessing
    multiprocessing_enabled: bool = True
    max_workers: Optional[int] = None  # None -> let the consumer pick a default
    chunk_size: int = 10
    # Caching
    lab_conversion_cache_size: int = 1000
    enable_lru_cache: bool = True
    # Vectorization
    use_numpy_vectorized: bool = True
    # KD-tree settings
    kdtree_enabled: bool = True
    kdtree_leaf_size: int = 30
    kdtree_sample_size: int = 1000
226
+
227
+
228
@dataclass
class UIConfig:
    """User interface configuration."""
    verbosity: str = "normal"  # quiet, normal, verbose, debug
    show_progress_bars: bool = True
    update_frequency: int = 10  # progress update interval -- units defined by the consumer
    use_color: bool = True      # colored terminal output
    show_warnings: bool = True
    max_error_lines: int = 5    # truncate error output after this many lines
237
+
238
+
239
@dataclass
class BusinessConfig:
    """Business metrics configuration."""
    cost_analysis_enabled: bool = True
    # Per-bean/labor costs: presumably USD (cf. batch_value_usd) -- confirm.
    cost_per_defective_bean: float = 0.05
    cost_per_uneven_bean: float = 0.02
    labor_cost_per_hour: float = 25.0
    baseline_defect_rate: float = 0.15  # fraction in [0, 1]
    target_defect_rate: float = 0.05    # fraction in [0, 1]
    batch_value_usd: float = 500.0
249
+
250
+
251
@dataclass
class CalibrationProfile:
    """Color calibration profile."""
    name: str
    description: str
    illuminant: str    # illuminant identifier (e.g. "D65"?) -- confirm against profiles file
    viewing_angle: int # observer angle in degrees (typically 2 or 10) -- confirm
    background_gray: int
    lab_tolerance_factor: float = 1.0      # multiplier applied to Lab tolerances
    delta_e_threshold_factor: float = 1.0  # multiplier applied to Delta-E thresholds
    color_correction_matrix: Optional[List[List[float]]] = None  # optional CCM; shape not enforced here
262
+
263
+
264
+ class ConfigurationManager:
265
+ """
266
+ Production-grade configuration manager with validation and environment support.
267
+ """
268
+
269
    def __init__(self, config_dir: Optional[Union[str, Path]] = None, environment: Optional[str] = None):
        """
        Initialize configuration manager.

        Args:
            config_dir: Directory containing configuration files.
                Defaults to the repository-level "config" directory
                (three levels up from this module).
            environment: Environment name (dev, staging, prod).
                Defaults to $BEAN_VISION_ENV, or "development" if unset.
        """
        self.config_dir = Path(config_dir) if config_dir else Path(__file__).parent.parent.parent / "config"
        self.environment = environment or os.getenv("BEAN_VISION_ENV", "development")

        # Configuration cache
        self._config_cache: Dict[str, Any] = {}   # parsed configs keyed by section name
        self._file_mtimes: Dict[str, float] = {}  # last-seen mtime per config filename

        # Load main configurations
        self._load_all_configs()
286
+
287
    def _load_all_configs(self) -> None:
        """Load every configuration file and apply environment overrides.

        Populates ``self._config_cache`` with the "analysis" and
        "calibration" sections, then deep-merges environment-specific
        overrides into the analysis config. Re-raises any failure after
        logging it.
        """
        try:
            # Load main analysis config
            self._config_cache["analysis"] = self._load_yaml_config("analysis_config.yaml")

            # Load calibration profiles
            self._config_cache["calibration"] = self._load_yaml_config("calibration_profiles.yaml")

            # Apply environment-specific overrides
            self._apply_environment_overrides()

            logger.info(f"Loaded configuration for environment: {self.environment}")

        except Exception as e:
            logger.error(f"Failed to load configuration: {e}")
            raise
304
+
305
+ def _load_yaml_config(self, filename: str) -> Dict[str, Any]:
306
+ """Load a YAML configuration file with caching."""
307
+ filepath = self.config_dir / filename
308
+
309
+ if not filepath.exists():
310
+ raise FileNotFoundError(f"Configuration file not found: {filepath}")
311
+
312
+ # Check if file has been modified
313
+ mtime = filepath.stat().st_mtime
314
+ if filename in self._file_mtimes and self._file_mtimes[filename] == mtime:
315
+ return self._config_cache.get(filename.replace(".yaml", ""), {})
316
+
317
+ # Load the file
318
+ with open(filepath, 'r', encoding='utf-8') as f:
319
+ config = yaml.safe_load(f)
320
+
321
+ self._file_mtimes[filename] = mtime
322
+ logger.debug(f"Loaded configuration from {filepath}")
323
+
324
+ return config
325
+
326
    def _apply_environment_overrides(self) -> None:
        """Apply environment-specific configuration overrides.

        Looks up ``environments.<self.environment>`` inside the analysis
        config and deep-merges it over the base analysis settings in place.
        """
        analysis_config = self._config_cache.get("analysis", {})
        env_overrides = analysis_config.get("environments", {}).get(self.environment, {})

        if env_overrides:
            self._deep_merge_dict(analysis_config, env_overrides)
            # NOTE: len() counts only top-level override keys, not nested ones.
            logger.info(f"Applied {len(env_overrides)} environment overrides for {self.environment}")
334
+
335
+ def _deep_merge_dict(self, base: Dict[str, Any], override: Dict[str, Any]) -> None:
336
+ """Deep merge override dictionary into base dictionary."""
337
+ for key, value in override.items():
338
+ if key in base and isinstance(base[key], dict) and isinstance(value, dict):
339
+ self._deep_merge_dict(base[key], value)
340
+ else:
341
+ base[key] = value
342
+
343
@lru_cache(maxsize=128)
def get_analysis_thresholds(self, bean_type: str) -> AnalysisThresholds:
    """Return the threshold bundle configured for *bean_type*.

    Args:
        bean_type: Either ``"roasted"`` or ``"green"``.

    Raises:
        ValueError: For any other bean type.

    NOTE(review): lru_cache on an instance method keys on ``self`` and keeps
    the manager alive for the cache's lifetime; tolerable here because the
    manager is a process-wide singleton and reload_config() clears the cache.
    """
    section = self._config_cache["analysis"]["analysis"]

    threshold_keys = {"roasted": "roasted_thresholds", "green": "green_thresholds"}
    if bean_type not in threshold_keys:
        raise ValueError(f"Unknown bean type: {bean_type}")

    return AnalysisThresholds(**section[threshold_keys[bean_type]])
356
+
357
@lru_cache(maxsize=32)
def get_performance_config(self) -> PerformanceConfig:
    """Build a PerformanceConfig from the performance section of the config."""
    perf = self._config_cache["analysis"]["performance"]
    mp = perf["multiprocessing"]
    caching = perf["caching"]
    vectorization = perf["vectorization"]
    kdtree = perf["kdtree"]

    return PerformanceConfig(
        multiprocessing_enabled=mp["enabled"],
        max_workers=mp["max_workers"],
        chunk_size=mp["chunk_size"],
        lab_conversion_cache_size=caching["lab_conversion_cache_size"],
        enable_lru_cache=caching["enable_lru_cache"],
        use_numpy_vectorized=vectorization["use_numpy_vectorized"],
        kdtree_enabled=kdtree["enabled"],
        kdtree_leaf_size=kdtree["leaf_size"],
        kdtree_sample_size=kdtree["sample_size"],
    )
373
+
374
@lru_cache(maxsize=32)
def get_ui_config(self) -> UIConfig:
    """Build a UIConfig from the ui section of the analysis configuration."""
    ui = self._config_cache["analysis"]["ui"]
    progress = ui["progress"]
    terminal = ui["terminal"]

    return UIConfig(
        verbosity=ui["verbosity"],
        show_progress_bars=progress["show_progress_bars"],
        update_frequency=progress["update_frequency"],
        use_color=terminal["use_color"],
        show_warnings=terminal["show_warnings"],
        max_error_lines=terminal["max_error_lines"],
    )
387
+
388
@lru_cache(maxsize=32)
def get_business_config(self) -> BusinessConfig:
    """Build a BusinessConfig from the business section of the configuration."""
    business = self._config_cache["analysis"]["business"]
    cost = business["cost_analysis"]
    roi = business["roi"]

    return BusinessConfig(
        cost_analysis_enabled=cost["enabled"],
        cost_per_defective_bean=cost["cost_per_defective_bean"],
        cost_per_uneven_bean=cost["cost_per_uneven_bean"],
        labor_cost_per_hour=cost["labor_cost_per_hour"],
        baseline_defect_rate=roi["baseline_defect_rate"],
        target_defect_rate=roi["target_defect_rate"],
        batch_value_usd=roi["batch_value_usd"],
    )
402
+
403
def get_calibration_profile(self, profile_name: str) -> CalibrationProfile:
    """Return the named calibration profile.

    Args:
        profile_name: Key of the profile in the calibration config.

    Raises:
        ValueError: If the profile is not defined (message lists known names).
    """
    profiles = self._config_cache["calibration"]["profiles"]

    if profile_name not in profiles:
        available = list(profiles.keys())
        raise ValueError(f"Unknown calibration profile: {profile_name}. Available: {available}")

    data = profiles[profile_name]
    # Optional sub-sections default to neutral adjustment factors.
    adjustments = data.get("analysis_adjustments", {})

    return CalibrationProfile(
        name=data["name"],
        description=data["description"],
        illuminant=data["illuminant"],
        viewing_angle=data["viewing_angle"],
        background_gray=data["background_gray"],
        lab_tolerance_factor=adjustments.get("lab_tolerance_factor", 1.0),
        delta_e_threshold_factor=adjustments.get("delta_e_threshold_factor", 1.0),
        color_correction_matrix=data.get("color_correction", {}).get("matrix"),
    )
423
+
424
def get_available_calibration_profiles(self) -> List[str]:
    """Return the names of every configured calibration profile."""
    profiles = self._config_cache["calibration"]["profiles"]
    return list(profiles)
427
+
428
def get_colorchecker_patches(self) -> Dict[str, List[float]]:
    """Return X-Rite ColorChecker reference patch values (name -> channel values)."""
    calibration = self._config_cache["calibration"]
    return calibration["colorchecker"]["reference_patches"]
431
+
432
def get_config_value(self, path: str, default: Any = None) -> Any:
    """
    Get configuration value by dot-notation path.

    Args:
        path: Dot-notation path (e.g., "analysis.roasted_thresholds.std_L_max")
        default: Default value if path not found

    Returns:
        Configuration value, or ``default`` when the path is missing or
        traverses through a non-mapping value.
    """
    current: Any = self._config_cache

    try:
        for key in path.split("."):
            current = current[key]
    except (KeyError, TypeError):
        # BUGFIX: TypeError is raised when an intermediate value is a
        # scalar or list (not indexable by a string key); treat it the
        # same as a missing path instead of letting it propagate.
        logger.warning(f"Configuration path not found: {path}")
        return default
    return current
453
+
454
def reload_config(self) -> None:
    """Drop all cached configuration and re-read it from disk."""
    logger.info("Reloading configuration...")

    # Invalidate the raw file caches AND the memoized accessor results;
    # otherwise stale dataclasses would survive the reload.
    self._config_cache.clear()
    self._file_mtimes.clear()
    for accessor in (
        self.get_analysis_thresholds,
        self.get_performance_config,
        self.get_ui_config,
        self.get_business_config,
    ):
        accessor.cache_clear()

    self._load_all_configs()
468
+
469
def validate_config(self) -> List[str]:
    """
    Validate configuration and return list of issues.

    Returns:
        List of validation error messages (empty when everything checks out).
    """
    issues: List[str] = []

    try:
        analysis_config = self._config_cache.get("analysis", {})
        if not analysis_config:
            issues.append("Missing analysis configuration")

        # Every top-level section the rest of the system expects must exist.
        for section in ("analysis", "performance", "ui", "logging", "output"):
            if section not in analysis_config:
                issues.append(f"Missing required section: {section}")

        # At least one calibration profile must be defined.
        if not self._config_cache.get("calibration", {}).get("profiles"):
            issues.append("No calibration profiles defined")

        # Per-bean-type threshold sanity checks.
        for bean_type in ("roasted", "green"):
            try:
                thresholds = self.get_analysis_thresholds(bean_type)
                if not 0 <= thresholds.tolerance_percent_min <= 100:
                    issues.append(f"{bean_type} tolerance_percent_min must be 0-100")
            except Exception as e:
                issues.append(f"Invalid {bean_type} thresholds: {e}")

        # Performance settings sanity checks.
        try:
            if self.get_performance_config().chunk_size <= 0:
                issues.append("Performance chunk_size must be positive")
        except Exception as e:
            issues.append(f"Invalid performance config: {e}")

    except Exception as e:
        issues.append(f"Configuration validation error: {e}")

    return issues
516
+
517
def to_dict(self) -> Dict[str, Any]:
    """Export entire configuration as dictionary."""
    snapshot: Dict[str, Any] = {
        "environment": self.environment,
        "config_dir": str(self.config_dir),
    }
    snapshot["analysis"] = self._config_cache.get("analysis", {})
    snapshot["calibration"] = self._config_cache.get("calibration", {})
    return snapshot
525
+
526
+
527
+ # Global configuration instance
528
+ _config_manager: Optional[ConfigurationManager] = None
529
+
530
+
531
def get_config_manager() -> ConfigurationManager:
    """Return the process-wide ConfigurationManager, creating it on first use.

    Lazily instantiates the module-level singleton with default settings;
    call initialize_config() first to use a custom config dir/environment.
    """
    global _config_manager
    if _config_manager is None:
        _config_manager = ConfigurationManager()
    return _config_manager
537
+
538
+
539
def initialize_config(config_dir: Optional[Union[str, Path]] = None,
                      environment: Optional[str] = None) -> None:
    """(Re)create the global configuration manager.

    Args:
        config_dir: Directory containing the YAML config files; the
            manager's default is used when None.
        environment: Deployment environment name; the manager's default
            is used when None.
    """
    # FIX: annotations were implicitly Optional (`= None` without Optional),
    # which PEP 484 disallows; made explicit.
    global _config_manager
    _config_manager = ConfigurationManager(config_dir, environment)
543
+
544
+
545
+ # Convenience functions
546
def get_analysis_thresholds(bean_type: str) -> AnalysisThresholds:
    """Get analysis thresholds for *bean_type* via the global config manager."""
    manager = get_config_manager()
    return manager.get_analysis_thresholds(bean_type)
549
+
550
+
551
def get_performance_config() -> PerformanceConfig:
    """Get performance configuration from the global config manager."""
    manager = get_config_manager()
    return manager.get_performance_config()
554
+
555
+
556
def get_ui_config() -> UIConfig:
    """Get UI configuration from the global config manager."""
    manager = get_config_manager()
    return manager.get_ui_config()
559
+
560
+
561
def get_business_config() -> BusinessConfig:
    """Get business configuration from the global config manager."""
    manager = get_config_manager()
    return manager.get_business_config()
564
+
565
+
566
def get_calibration_profile(profile_name: str) -> CalibrationProfile:
    """Get calibration profile by name via the global config manager."""
    manager = get_config_manager()
    return manager.get_calibration_profile(profile_name)
569
+
570
+
571
def load_config(config_path: str = "config.yaml") -> BeanVisionConfig:
    """Load and validate configuration.

    Args:
        config_path: Path to the YAML file understood by BeanVisionConfig.

    Returns:
        The validated BeanVisionConfig instance.
    """
    cfg = BeanVisionConfig.from_yaml(config_path)
    cfg.validate()
    return cfg
src/bean_vision/data/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """Data handling modules for Bean Vision."""
2
+
3
+ from .dataset import BeanDataset
4
+ from .transforms import get_transforms
5
+
6
+ __all__ = ["BeanDataset", "get_transforms"]
src/bean_vision/data/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (373 Bytes). View file
 
src/bean_vision/data/__pycache__/dataset.cpython-313.pyc ADDED
Binary file (15.6 kB). View file
 
src/bean_vision/data/dataset.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ """Dataset module for Bean Vision project."""
3
+
4
+ import gzip
5
+ import os
6
+ import tempfile
7
+ from pathlib import Path
8
+ from typing import List, Optional, Tuple
9
+
10
+ import albumentations as A
11
+ import cv2
12
+ import numpy as np
13
+ import torch
14
+ from pycocotools.coco import COCO
15
+ from torch.utils.data import Dataset
16
+
17
+ from bean_vision.config import BeanVisionConfig
18
+ from bean_vision.utils.logging import get_logger
19
+ from bean_vision.utils.misc import ValidationError
20
+ from bean_vision.utils.paths import safe_load_image, validate_coco_json
21
+ from typing import Dict, Any, Tuple
22
+
23
+ # Type aliases
24
+ DatasetItem = Tuple[torch.Tensor, Dict[str, Any]]
25
+ ImageArray = np.ndarray
26
+ TensorDict = Dict[str, torch.Tensor]
27
+
28
+
29
class BeanDataset(Dataset):
    """COCO-format dataset with polygon-to-mask conversion and transforms.

    Each item is an ImageNet-normalized CHW float tensor plus a Mask R-CNN
    style target dict (boxes, labels, masks, image_id, area, iscrowd).
    Items that fail to load are replaced by an empty dummy item so a long
    training run is not killed by one corrupt file.
    """

    def __init__(self,
                 coco_json: str,
                 data_dir: str,
                 config: BeanVisionConfig,
                 transforms: Optional[A.Compose] = None,
                 is_train: bool = True) -> None:
        """Create the dataset.

        Args:
            coco_json: Path to a COCO annotation file (plain or gzipped).
            data_dir: Root directory containing the image files.
            config: Project configuration (image sizes, normalization, ...).
            transforms: Optional Albumentations pipeline applied per item.
            is_train: Whether the dataset is used for training.

        Raises:
            FileNotFoundError: If *data_dir* does not exist.
            ValidationError: If the COCO annotations cannot be loaded.
        """
        self.logger = get_logger(self.__class__.__name__)
        self.config = config
        self.transforms = transforms
        self.is_train = is_train

        # Validate inputs
        coco_path = validate_coco_json(coco_json)
        self.data_dir = Path(data_dir)

        if not self.data_dir.exists():
            raise FileNotFoundError(f"Data directory not found: {self.data_dir}")

        # Load COCO data
        try:
            self.coco = self._load_coco_data(coco_path)
            self.image_ids = list(self.coco.imgs.keys())
            self.logger.info(f"Loaded {len(self.image_ids)} images from {coco_path}")
        except Exception as e:
            raise ValidationError(f"Failed to load COCO data: {e}")

    def _load_coco_data(self, coco_path: Path) -> COCO:
        """Load COCO data, transparently decompressing ``.gz`` files."""
        if coco_path.suffix == '.gz':
            self.logger.info("Loading compressed COCO file")
            # pycocotools only reads from a real path, so stage the
            # decompressed JSON in a temp file and remove it afterwards.
            with gzip.open(coco_path, 'rt') as f_in:
                with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f_out:
                    f_out.write(f_in.read())
                    temp_json = f_out.name

            try:
                coco = COCO(temp_json)
                return coco
            finally:
                os.unlink(temp_json)
        else:
            return COCO(str(coco_path))

    def __len__(self) -> int:
        return len(self.image_ids)

    def __getitem__(self, idx: int) -> DatasetItem:
        """Get dataset item; degrades to an empty dummy item on any error."""
        try:
            return self._get_item_safe(idx)
        except Exception as e:
            self.logger.error(f"Error loading item {idx}: {e}")
            return self._get_dummy_item()

    def _get_item_safe(self, idx: int) -> DatasetItem:
        """Load image + annotations for *idx* and assemble model inputs."""
        img_id = self.image_ids[idx]
        img_info = self.coco.imgs[img_id]

        # Load image with fallback
        image, original_size = self._load_image(img_info)

        # Get annotations
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        anns = self.coco.loadAnns(ann_ids)

        # Process masks and boxes
        masks, boxes, labels = self._process_annotations(anns, img_info)

        if self.transforms:
            image, masks, boxes, labels = self._apply_transforms(
                image, masks, boxes, labels, img_info
            )
        else:
            # BUGFIX: previously the raw Python lists flowed straight into
            # the target dict on this path, so target['boxes'] etc. were
            # lists instead of tensors.  Tensorize for a consistent contract.
            masks, boxes, labels = self._annotations_to_tensors(masks, boxes, labels, image)

        # Convert to tensors
        image_tensor = self._image_to_tensor(image)
        target = self._create_target_dict(masks, boxes, labels, img_id, anns)

        return image_tensor, target

    def _annotations_to_tensors(self, masks: List[np.ndarray], boxes: List[List[float]],
                                labels: List[int], image: ImageArray):
        """Convert annotation lists to tensors (no-transform code path)."""
        h, w = image.shape[0], image.shape[1]
        masks_t = (torch.as_tensor(np.stack(masks), dtype=torch.uint8)
                   if masks else torch.zeros((0, h, w), dtype=torch.uint8))
        boxes_t = (torch.as_tensor(boxes, dtype=torch.float32)
                   if boxes else torch.zeros((0, 4), dtype=torch.float32))
        labels_t = (torch.as_tensor(labels, dtype=torch.int64)
                    if labels else torch.zeros((0,), dtype=torch.int64))
        return masks_t, boxes_t, labels_t

    def _load_image(self, img_info: dict) -> Tuple[ImageArray, Tuple[int, int]]:
        """Load image with proper error handling."""
        file_name = img_info['file_name']
        # Annotations sometimes store paths relative to the repo root;
        # strip the leading "data/" so the path resolves under data_dir.
        if file_name.startswith('data/'):
            file_name = file_name[5:]

        img_path = self.data_dir / file_name
        return safe_load_image(img_path)

    def _process_annotations(self, anns: List[dict], img_info: dict) -> Tuple[List[np.ndarray], List[List[float]], List[int]]:
        """Process COCO annotations into masks, boxes, and labels.

        Annotations whose polygons rasterize to an empty mask are dropped.
        """
        masks = []
        boxes = []
        labels = []

        for ann in anns:
            mask = self._polygon_to_mask(ann['segmentation'], img_info['height'], img_info['width'])
            if mask.sum() == 0:
                continue

            masks.append(mask)
            # COCO bbox is (x, y, w, h); the model expects (x1, y1, x2, y2).
            x, y, w, h = ann['bbox']
            boxes.append([x, y, x + w, y + h])
            labels.append(1)  # Bean class

        return masks, boxes, labels

    def _polygon_to_mask(self, segmentation: List[List[float]], height: int, width: int) -> np.ndarray:
        """Convert polygon segmentation to binary mask."""
        mask = np.zeros((height, width), dtype=np.uint8)

        if isinstance(segmentation, list):
            for poly in segmentation:
                if len(poly) >= 6:  # at least 3 (x, y) vertices
                    poly_array = np.array(poly).reshape(-1, 2)
                    cv2.fillPoly(mask, [poly_array.astype(np.int32)], 1)

        return mask

    def _apply_transforms(self, image: ImageArray, masks: List[np.ndarray],
                          boxes: List[List[float]], labels: List[int],
                          img_info: dict) -> Tuple[ImageArray, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Apply augmentation transforms to image and masks.

        Boxes are recomputed from the transformed masks rather than
        transformed directly, because geometric augmentations (rotations,
        flips) can change the tight bounding box of each instance.
        """
        if len(masks) == 0:
            transformed = self.transforms(image=image)
            image = transformed['image']
            return (image,
                    torch.zeros((0, image.shape[0], image.shape[1]), dtype=torch.uint8),
                    torch.zeros((0, 4), dtype=torch.float32),
                    torch.zeros((0,), dtype=torch.int64))

        # Apply transforms to image and masks together so they stay aligned.
        transform_with_masks = A.Compose(
            self.transforms.transforms,
            additional_targets={f'mask{i}': 'mask' for i in range(len(masks))}
        )

        transform_input = {'image': image}
        for i, mask in enumerate(masks):
            transform_input[f'mask{i}'] = mask.astype(np.uint8)

        transformed = transform_with_masks(**transform_input)
        image = transformed['image']

        # Extract transformed masks
        transformed_masks = [transformed[f'mask{i}'] for i in range(len(masks))]

        # Update bounding boxes based on transformed masks
        updated_boxes, updated_labels, updated_masks = self._update_boxes_from_masks(
            transformed_masks, labels
        )

        # Convert to tensors (empty tensors keep shapes model-compatible)
        masks_tensor = torch.as_tensor(np.stack(updated_masks), dtype=torch.uint8) if updated_masks else torch.zeros((0, image.shape[0], image.shape[1]), dtype=torch.uint8)
        boxes_tensor = torch.as_tensor(updated_boxes, dtype=torch.float32) if updated_boxes else torch.zeros((0, 4), dtype=torch.float32)
        labels_tensor = torch.as_tensor(updated_labels, dtype=torch.int64) if updated_labels else torch.zeros((0,), dtype=torch.int64)

        return image, masks_tensor, boxes_tensor, labels_tensor

    def _update_boxes_from_masks(self, masks: List[np.ndarray],
                                 labels: List[int]) -> Tuple[List[List[float]], List[int], List[np.ndarray]]:
        """Update bounding boxes based on mask contours after transformation.

        Instances whose largest contour falls below the configured minimum
        area are dropped entirely (mask, box and label).
        """
        updated_boxes = []
        updated_labels = []
        updated_masks = []

        for i, mask in enumerate(masks):
            contours, _ = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            if len(contours) > 0:
                largest_contour = max(contours, key=cv2.contourArea)
                if cv2.contourArea(largest_contour) > self.config.inference.min_contour_area:
                    x, y, w, h = cv2.boundingRect(largest_contour)
                    updated_boxes.append([x, y, x + w, y + h])
                    updated_labels.append(labels[i] if i < len(labels) else 1)
                    updated_masks.append(mask)

        return updated_boxes, updated_labels, updated_masks

    def _image_to_tensor(self, image: ImageArray) -> torch.Tensor:
        """Convert an HWC uint8 image to a normalized CHW float tensor."""
        image_tensor = torch.tensor(image, dtype=torch.float32).permute(2, 0, 1) / 255.0
        # ImageNet channel statistics from the project configuration.
        mean = torch.tensor(self.config.image.imagenet_mean).view(3, 1, 1)
        std = torch.tensor(self.config.image.imagenet_std).view(3, 1, 1)
        return (image_tensor - mean) / std

    def _create_target_dict(self, masks: torch.Tensor, boxes: torch.Tensor,
                            labels: torch.Tensor, img_id: int, anns: List[dict]) -> TensorDict:
        """Create target dictionary for model training."""
        return {
            'boxes': boxes,
            'labels': labels,
            'masks': masks,
            'image_id': torch.tensor([img_id]),
            'area': torch.tensor([ann['area'] for ann in anns]) if anns else torch.tensor([]),
            'iscrowd': torch.tensor([ann['iscrowd'] for ann in anns]) if anns else torch.tensor([])
        }

    def _get_dummy_item(self) -> DatasetItem:
        """Create an empty, correctly-shaped item for error cases."""
        dummy_image = torch.zeros((3, self.config.image.resize_height, self.config.image.resize_width))
        dummy_target = {
            'boxes': torch.zeros((0, 4), dtype=torch.float32),
            'labels': torch.zeros((0,), dtype=torch.int64),
            'masks': torch.zeros((0, self.config.image.resize_height, self.config.image.resize_width), dtype=torch.uint8),
            'image_id': torch.tensor([0]),
            'area': torch.tensor([]),
            'iscrowd': torch.tensor([])
        }
        return dummy_image, dummy_target
243
+
244
+
src/bean_vision/data/transforms.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Image transforms for Bean Vision dataset."""
2
+
3
+ import albumentations as A
4
+ from bean_vision.config import BeanVisionConfig
5
+
6
+
7
def get_transforms(config: BeanVisionConfig, is_train: bool = True) -> A.Compose:
    """Get Albumentations transforms pipeline from configuration.

    Evaluation only resizes; training additionally applies flips, rotations
    and photometric jitter, all parameterized by the augmentation config.
    """
    resize = A.Resize(config.image.resize_height, config.image.resize_width)
    if not is_train:
        return A.Compose([resize])

    aug = config.augmentation
    return A.Compose([
        resize,
        A.RandomRotate90(p=aug.random_rotate90_prob),
        A.Rotate(limit=aug.rotate_limit, p=aug.rotate_prob),
        A.HorizontalFlip(p=aug.horizontal_flip_prob),
        A.VerticalFlip(p=aug.vertical_flip_prob),
        A.RandomBrightnessContrast(
            brightness_limit=aug.brightness_limit,
            contrast_limit=aug.contrast_limit,
            p=aug.brightness_contrast_prob,
        ),
    ])
+ ])
26
+
27
+
28
def collate_fn(batch):
    """Custom collate function for DataLoader.

    Transposes a list of (image, target) pairs into a pair of parallel
    tuples, leaving variable-sized targets unbatched.
    """
    transposed = zip(*batch)
    return tuple(transposed)
src/bean_vision/evaluation/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """Evaluation modules for bean detection and segmentation."""
2
+
3
+ from .metrics import BeanMetrics, DetectionMetrics, SegmentationMetrics, SizeBasedMetrics
4
+ from .evaluator import BeanEvaluator
5
+ from .report_generator import ReportGenerator
6
+
7
+ __all__ = ['BeanMetrics', 'DetectionMetrics', 'SegmentationMetrics', 'SizeBasedMetrics',
8
+ 'BeanEvaluator', 'ReportGenerator']
src/bean_vision/evaluation/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (556 Bytes). View file
 
src/bean_vision/evaluation/__pycache__/metrics.cpython-313.pyc ADDED
Binary file (21.9 kB). View file
 
src/bean_vision/evaluation/evaluator.py ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Main evaluation pipeline for bean detection models."""
2
+
3
+ import json
4
+ import time
5
+ from pathlib import Path
6
+ from typing import Dict, List, Optional, Union
7
+ import numpy as np
8
+ import torch
9
+ from torch.utils.data import DataLoader
10
+
11
+ from .metrics import BeanMetrics, DetectionMetrics, SegmentationMetrics, SizeBasedMetrics
12
+
13
+
14
+ class BeanEvaluator:
15
+ """Main evaluator for bean detection and segmentation models."""
16
+
17
def __init__(self, model: torch.nn.Module, device: torch.device,
             confidence_threshold: float = 0.5,
             iou_thresholds: Optional[List[float]] = None):
    """Initialize evaluator.

    Args:
        model: PyTorch model to evaluate
        device: Device to run evaluation on
        confidence_threshold: Minimum confidence for valid detection
        iou_thresholds: IoU thresholds for mAP calculation; None lets
            BeanMetrics pick its own defaults
    """
    # FIX: iou_thresholds was implicitly Optional (`List[float] = None`),
    # which PEP 484 disallows; made the annotation explicit.
    self.model = model
    self.device = device
    self.confidence_threshold = confidence_threshold
    self.metrics_calculator = BeanMetrics(
        confidence_threshold=confidence_threshold,
        iou_thresholds=iou_thresholds
    )
35
+
36
def evaluate_detection(self, dataloader: DataLoader,
                       save_predictions: bool = False,
                       output_dir: Optional[Path] = None) -> DetectionMetrics:
    """Evaluate detection performance on dataset.

    Args:
        dataloader: DataLoader for evaluation dataset
        save_predictions: Whether to save predictions to file
        output_dir: Directory to save results

    Returns:
        DetectionMetrics with computed metrics
    """
    self.model.eval()
    predictions: List[Dict] = []
    ground_truths: List[Dict] = []
    n_batches = len(dataloader)

    print(f"Evaluating detection on {n_batches} batches...")

    with torch.no_grad():
        for batch_idx, (images, targets) in enumerate(dataloader):
            print(f"Processing batch {batch_idx+1}/{n_batches}")

            device_images = [img.to(self.device) for img in images]
            outputs = self.model(device_images)

            # Collect per-image predictions (moved to CPU) and ground truth.
            for output, target in zip(outputs, targets):
                predictions.append({
                    'boxes': output['boxes'].cpu(),
                    'scores': output['scores'].cpu(),
                    'labels': output['labels'].cpu(),
                })
                ground_truths.append({
                    'boxes': target['boxes'],
                    'labels': target['labels'],
                })

    print("Computing detection metrics...")
    detection_metrics = self.metrics_calculator.compute_detection_metrics(
        predictions, ground_truths
    )

    if save_predictions and output_dir:
        self._save_predictions(predictions, ground_truths, output_dir)

    return detection_metrics
92
+
93
def evaluate_segmentation(self, dataloader: DataLoader,
                          output_dir: Optional[Path] = None) -> SegmentationMetrics:
    """Evaluate segmentation performance on dataset.

    Args:
        dataloader: DataLoader for evaluation dataset
        output_dir: Directory to save results

    Returns:
        SegmentationMetrics with computed metrics
    """
    self.model.eval()
    predictions: List[Dict] = []
    ground_truths: List[Dict] = []
    n_batches = len(dataloader)

    print(f"Evaluating segmentation on {n_batches} batches...")

    with torch.no_grad():
        for batch_idx, (images, targets) in enumerate(dataloader):
            print(f"Processing batch {batch_idx+1}/{n_batches}")

            outputs = self.model([img.to(self.device) for img in images])

            for output, target in zip(outputs, targets):
                pred_masks = output.get('masks', torch.tensor([]))
                if len(pred_masks) > 0:
                    # Binarize soft mask probabilities at 0.5.
                    pred_masks = (pred_masks > 0.5).float()

                predictions.append({'masks': pred_masks.cpu()})
                ground_truths.append({'masks': target.get('masks', torch.tensor([]))})

    print("Computing segmentation metrics...")
    return self.metrics_calculator.compute_segmentation_metrics(
        predictions, ground_truths
    )
138
+
139
def evaluate_by_size(self, dataloader: DataLoader) -> SizeBasedMetrics:
    """Evaluate performance by object size categories.

    Args:
        dataloader: DataLoader for evaluation dataset

    Returns:
        SizeBasedMetrics with size-specific performance
    """
    self.model.eval()
    predictions: List[Dict] = []
    ground_truths: List[Dict] = []

    print("Evaluating performance by object size...")

    with torch.no_grad():
        for images, targets in dataloader:
            outputs = self.model([img.to(self.device) for img in images])

            for output, target in zip(outputs, targets):
                predictions.append({
                    'boxes': output['boxes'].cpu(),
                    'scores': output['scores'].cpu(),
                    'labels': output['labels'].cpu(),
                })
                ground_truths.append({
                    'boxes': target['boxes'],
                    'labels': target['labels'],
                })

    return self.metrics_calculator.compute_size_based_metrics(
        predictions, ground_truths
    )
179
+
180
def full_evaluation(self, dataloader: DataLoader,
                    output_dir: Optional[Path] = None,
                    save_predictions: bool = True) -> Dict[str, Union[DetectionMetrics, SegmentationMetrics, SizeBasedMetrics]]:
    """Run complete evaluation suite.

    Args:
        dataloader: DataLoader for evaluation dataset
        output_dir: Directory to save results
        save_predictions: Whether to save predictions

    Returns:
        Dictionary with all computed metrics plus total wall-clock time.
    """
    if output_dir:
        output_dir.mkdir(parents=True, exist_ok=True)

    print("Starting full evaluation...")
    start_time = time.time()

    # Run the three evaluation passes in order and time the whole suite.
    results = {
        'detection': self.evaluate_detection(
            dataloader, save_predictions=save_predictions, output_dir=output_dir
        ),
        'segmentation': self.evaluate_segmentation(dataloader, output_dir),
        'size_based': self.evaluate_by_size(dataloader),
    }
    results['evaluation_time'] = time.time() - start_time

    if output_dir:
        self._save_results(results, output_dir)

    print(f"Evaluation completed in {results['evaluation_time']:.2f} seconds")
    return results
225
+
226
def compare_models(self, model_paths: List[Path], dataloader: DataLoader,
                   output_dir: Path) -> Dict[str, Dict]:
    """Compare multiple models on same dataset.

    Args:
        model_paths: List of paths to model checkpoints
        dataloader: DataLoader for evaluation dataset
        output_dir: Directory to save comparison results

    Returns:
        Dictionary with results for each model, keyed by checkpoint stem
    """
    import copy  # local import: only needed for the weight snapshot

    output_dir.mkdir(parents=True, exist_ok=True)
    comparison_results: Dict[str, Dict] = {}

    # BUGFIX: load_state_dict mutates the model *in place*, so keeping a
    # reference to self.model was not enough to restore it afterwards --
    # snapshot the weights themselves and restore them when done.
    original_state = copy.deepcopy(self.model.state_dict())

    try:
        for model_path in model_paths:
            print(f"Evaluating model: {model_path.name}")

            # NOTE(review): torch.load on untrusted checkpoints executes
            # arbitrary pickled code; consider weights_only=True where the
            # installed torch version supports it.
            checkpoint = torch.load(model_path, map_location=self.device)
            if 'model_state_dict' in checkpoint:
                self.model.load_state_dict(checkpoint['model_state_dict'])
            else:
                self.model.load_state_dict(checkpoint)

            comparison_results[model_path.stem] = self.full_evaluation(
                dataloader,
                output_dir=output_dir / model_path.stem,
                save_predictions=False
            )
    finally:
        # Restore the evaluator's original weights even if evaluation fails.
        self.model.load_state_dict(original_state)

    # Save comparison results
    self._save_model_comparison(comparison_results, output_dir)

    return comparison_results
269
+
270
def confidence_analysis(self, dataloader: DataLoader,
                        thresholds: Optional[List[float]] = None) -> Dict[str, List[float]]:
    """Analyze performance across different confidence thresholds.

    Args:
        dataloader: DataLoader for evaluation dataset
        thresholds: List of confidence thresholds to test (defaults to
            0.1 .. 0.9 in steps of 0.1)

    Returns:
        Dictionary with precision/recall/F1/AP@50 at each threshold
    """
    # FIX: annotation was implicitly Optional (`List[float] = None`).
    if thresholds is None:
        thresholds = np.arange(0.1, 1.0, 0.1).tolist()

    print("Analyzing performance across confidence thresholds...")

    # Run inference once; re-score the same predictions at each threshold.
    all_predictions: List[Dict] = []
    all_ground_truths: List[Dict] = []

    self.model.eval()
    with torch.no_grad():
        for images, targets in dataloader:
            images = [img.to(self.device) for img in images]
            outputs = self.model(images)

            for output, target in zip(outputs, targets):
                all_predictions.append({
                    'boxes': output['boxes'].cpu(),
                    'scores': output['scores'].cpu(),
                    'labels': output['labels'].cpu()
                })
                all_ground_truths.append({
                    'boxes': target['boxes'],
                    'labels': target['labels']
                })

    results: Dict[str, List[float]] = {
        'thresholds': thresholds,
        'precision': [],
        'recall': [],
        'f1_score': [],
        'ap_50': []
    }

    original_threshold = self.metrics_calculator.confidence_threshold
    try:
        for threshold in thresholds:
            print(f"Evaluating at threshold {threshold:.2f}")
            self.metrics_calculator.confidence_threshold = threshold

            metrics = self.metrics_calculator.compute_detection_metrics(
                all_predictions, all_ground_truths
            )

            results['precision'].append(metrics.precision)
            results['recall'].append(metrics.recall)
            results['f1_score'].append(metrics.f1_score)
            results['ap_50'].append(metrics.ap_50)
    finally:
        # FIX: restore the configured threshold even when metric
        # computation raises; previously an exception left the shared
        # metrics calculator at the last tested threshold.
        self.metrics_calculator.confidence_threshold = original_threshold

    return results
339
+
340
+ def _save_predictions(self, predictions: List[Dict], ground_truths: List[Dict],
341
+ output_dir: Path) -> None:
342
+ """Save predictions and ground truth to JSON file."""
343
+ output_file = output_dir / 'predictions.json'
344
+
345
+ # Convert tensors to lists for JSON serialization
346
+ serializable_preds = []
347
+ for pred in predictions:
348
+ pred_dict = {}
349
+ for k, v in pred.items():
350
+ if torch.is_tensor(v):
351
+ pred_dict[k] = v.tolist()
352
+ else:
353
+ pred_dict[k] = v
354
+ serializable_preds.append(pred_dict)
355
+
356
+ serializable_gts = []
357
+ for gt in ground_truths:
358
+ gt_dict = {}
359
+ for k, v in gt.items():
360
+ if torch.is_tensor(v):
361
+ gt_dict[k] = v.tolist()
362
+ else:
363
+ gt_dict[k] = v
364
+ serializable_gts.append(gt_dict)
365
+
366
+ with open(output_file, 'w') as f:
367
+ json.dump({
368
+ 'predictions': serializable_preds,
369
+ 'ground_truths': serializable_gts
370
+ }, f, indent=2)
371
+
372
+ print(f"Predictions saved to {output_file}")
373
+
374
+ def _save_results(self, results: Dict, output_dir: Path) -> None:
375
+ """Save evaluation results to JSON file."""
376
+ output_file = output_dir / 'evaluation_results.json'
377
+
378
+ # Convert dataclasses to dictionaries
379
+ serializable_results = {}
380
+ for key, value in results.items():
381
+ if hasattr(value, '__dict__'):
382
+ serializable_results[key] = value.__dict__
383
+ else:
384
+ serializable_results[key] = value
385
+
386
+ with open(output_file, 'w') as f:
387
+ json.dump(serializable_results, f, indent=2, default=str)
388
+
389
+ print(f"Results saved to {output_file}")
390
+
391
+ def _save_model_comparison(self, comparison_results: Dict, output_dir: Path) -> None:
392
+ """Save model comparison results."""
393
+ output_file = output_dir / 'model_comparison.json'
394
+
395
+ # Convert dataclasses to dictionaries
396
+ serializable_results = {}
397
+ for model_name, results in comparison_results.items():
398
+ serializable_results[model_name] = {}
399
+ for key, value in results.items():
400
+ if hasattr(value, '__dict__'):
401
+ serializable_results[model_name][key] = value.__dict__
402
+ else:
403
+ serializable_results[model_name][key] = value
404
+
405
+ with open(output_file, 'w') as f:
406
+ json.dump(serializable_results, f, indent=2, default=str)
407
+
408
+ print(f"Model comparison saved to {output_file}")
409
+
410
+ def print_metrics_summary(self, metrics: Union[DetectionMetrics, Dict]) -> None:
411
+ """Print a formatted summary of metrics."""
412
+ if isinstance(metrics, dict):
413
+ detection_metrics = metrics.get('detection')
414
+ if detection_metrics:
415
+ self._print_detection_metrics(detection_metrics)
416
+
417
+ segmentation_metrics = metrics.get('segmentation')
418
+ if segmentation_metrics:
419
+ self._print_segmentation_metrics(segmentation_metrics)
420
+
421
+ size_metrics = metrics.get('size_based')
422
+ if size_metrics:
423
+ self._print_size_metrics(size_metrics)
424
+ elif isinstance(metrics, DetectionMetrics):
425
+ self._print_detection_metrics(metrics)
426
+
427
+ def _print_detection_metrics(self, metrics: DetectionMetrics) -> None:
428
+ """Print detection metrics in formatted table."""
429
+ print("\n" + "="*60)
430
+ print("DETECTION METRICS SUMMARY")
431
+ print("="*60)
432
+ print(f"{'Metric':<25} {'Value':<15}")
433
+ print("-"*40)
434
+ print(f"{'mAP@0.5':<25} {metrics.ap_50:<15.4f}")
435
+ print(f"{'mAP@0.75':<25} {metrics.ap_75:<15.4f}")
436
+ print(f"{'mAP@0.5:0.95':<25} {metrics.ap_50_95:<15.4f}")
437
+ print(f"{'Precision':<25} {metrics.precision:<15.4f}")
438
+ print(f"{'Recall':<25} {metrics.recall:<15.4f}")
439
+ print(f"{'F1 Score':<25} {metrics.f1_score:<15.4f}")
440
+ print(f"{'Mean Confidence':<25} {metrics.confidence_mean:<15.4f}")
441
+ print(f"{'Mean IoU':<25} {metrics.iou_mean:<15.4f}")
442
+ print(f"{'Total Detections':<25} {metrics.total_detections:<15}")
443
+ print(f"{'Valid Detections':<25} {metrics.valid_detections:<15}")
444
+ print(f"{'False Positives':<25} {metrics.false_positives:<15}")
445
+ print(f"{'False Negatives':<25} {metrics.false_negatives:<15}")
446
+
447
+ def _print_segmentation_metrics(self, metrics: SegmentationMetrics) -> None:
448
+ """Print segmentation metrics."""
449
+ print(f"\nSegmentation AP@0.5: {metrics.mask_ap_50:.4f}")
450
+ print(f"Dice Coefficient: {metrics.dice_coefficient:.4f}")
451
+ print(f"Jaccard Index: {metrics.jaccard_index:.4f}")
452
+ print(f"Pixel Accuracy: {metrics.pixel_accuracy:.4f}")
453
+
454
+ def _print_size_metrics(self, metrics: SizeBasedMetrics) -> None:
455
+ """Print size-based metrics."""
456
+ print(f"\nSize-based Performance:")
457
+ print(f"Small objects AP: {metrics.small_ap:.4f}")
458
+ print(f"Medium objects AP: {metrics.medium_ap:.4f}")
459
+ print(f"Large objects AP: {metrics.large_ap:.4f}")
460
+ print(f"Size distribution: {metrics.size_distribution}")
src/bean_vision/evaluation/metrics.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Core metrics for bean detection and segmentation evaluation."""
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Dict, List, Tuple, Optional
5
+ import numpy as np
6
+ import torch
7
+ from pycocotools.coco import COCO
8
+ from pycocotools.cocoeval import COCOeval
9
+ import pycocotools.mask as mask_utils
10
+ import warnings
11
+ from sklearn.metrics import precision_recall_curve, average_precision_score
12
+ # Suppress sklearn warnings about no positive class during early training
13
+ warnings.filterwarnings('ignore', category=UserWarning, module='sklearn.metrics._ranking')
14
+
15
+
16
@dataclass
class DetectionMetrics:
    """Detection performance metrics.

    Returned by BeanMetrics.compute_detection_metrics; counts are
    accumulated over the whole evaluation set.
    """
    ap_50: float  # Average Precision at IoU 0.5
    ap_75: float  # Average Precision at IoU 0.75
    ap_50_95: float  # Average Precision at IoU 0.5:0.95
    precision: float  # Overall precision
    recall: float  # Overall recall
    f1_score: float  # F1 score
    confidence_mean: float  # Mean confidence score (over threshold-filtered detections)
    confidence_std: float  # Confidence standard deviation
    total_detections: int  # Total number of detections (before confidence filtering)
    valid_detections: int  # Detections above the confidence threshold
    false_positives: int  # Number of false positives
    false_negatives: int  # Number of false negatives
    iou_mean: float  # Mean IoU of matched detections
    iou_std: float  # IoU standard deviation
33
+
34
+
35
@dataclass
class SegmentationMetrics:
    """Segmentation performance metrics.

    Returned by BeanMetrics.compute_segmentation_metrics.
    """
    mask_ap_50: float  # Mask Average Precision at IoU 0.5
    mask_ap_75: float  # Mask Average Precision at IoU 0.75
    mask_ap_50_95: float  # Mask Average Precision at IoU 0.5:0.95
    dice_coefficient: float  # Dice coefficient for masks
    jaccard_index: float  # Jaccard index (IoU) for masks
    pixel_accuracy: float  # Pixel-wise accuracy
    mask_quality_mean: float  # Mean mask quality score (mean of dice/jaccard/pixel-acc)
    mask_quality_std: float  # Mask quality standard deviation
46
+
47
+
48
@dataclass
class SizeBasedMetrics:
    """Performance metrics broken down by object size category.

    Returned by BeanMetrics.compute_size_based_metrics.
    """
    small_ap: float  # AP for small objects
    medium_ap: float  # AP for medium objects
    large_ap: float  # AP for large objects
    size_distribution: Dict[str, int]  # Ground-truth count by size category
55
+
56
+
57
class BeanMetrics:
    """Comprehensive metrics calculator for bean detection."""

    def __init__(self, confidence_threshold: float = 0.5,
                 iou_thresholds: Optional[List[float]] = None,
                 size_thresholds: Tuple[int, int] = (32, 96)):
        """Initialize metrics calculator.

        Args:
            confidence_threshold: Minimum confidence for valid detection
            iou_thresholds: IoU thresholds for mAP calculation
                (defaults to 0.5:0.95 in 0.05 steps)
            size_thresholds: (small_max, medium_max) area thresholds
        """
        self.confidence_threshold = confidence_threshold
        self.iou_thresholds = iou_thresholds or [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
        self.size_thresholds = size_thresholds

    def compute_detection_metrics(self, predictions: List[Dict],
                                 ground_truths: List[Dict]) -> DetectionMetrics:
        """Compute detection metrics comparing predictions to ground truth.

        Args:
            predictions: List of prediction dicts with 'boxes', 'scores', 'labels'
            ground_truths: List of GT dicts with 'boxes', 'labels'

        Returns:
            DetectionMetrics object with computed metrics
        """
        all_pred_boxes_filtered = []  # Boxes after confidence filtering
        all_pred_scores_filtered = []  # Scores after confidence filtering
        all_pred_boxes_raw = []  # ALL boxes before filtering
        all_pred_scores_raw = []  # ALL scores before filtering
        all_gt_boxes = []
        ious = []  # NOTE(review): never populated or read — dead accumulator
        matched_ious = []

        tp_count = 0
        fp_count = 0
        fn_count = 0

        for pred, gt in zip(predictions, ground_truths):
            pred_boxes = pred['boxes'].cpu().numpy() if torch.is_tensor(pred['boxes']) else np.array(pred['boxes'])
            pred_scores = pred['scores'].cpu().numpy() if torch.is_tensor(pred['scores']) else np.array(pred['scores'])
            gt_boxes = gt['boxes'].cpu().numpy() if torch.is_tensor(gt['boxes']) else np.array(gt['boxes'])

            # Store ALL raw predictions (before filtering)
            all_pred_boxes_raw.extend(pred_boxes)
            all_pred_scores_raw.extend(pred_scores)

            # Debug: Log raw predictions before filtering
            # NOTE(review): per-image prints in library code are noisy on
            # large datasets — consider switching to the logging module.
            if len(pred_boxes) > 0:
                print(f" Raw predictions: {len(pred_boxes)} boxes, scores range [{pred_scores.min():.4f}, {pred_scores.max():.4f}]")

            # Filter predictions by confidence
            valid_mask = pred_scores >= self.confidence_threshold
            pred_boxes_valid = pred_boxes[valid_mask]
            pred_scores_valid = pred_scores[valid_mask]

            all_pred_boxes_filtered.extend(pred_boxes_valid)
            all_pred_scores_filtered.extend(pred_scores_valid)
            all_gt_boxes.extend(gt_boxes)

            # Compute IoU matrix and match predictions to ground truth
            if len(pred_boxes_valid) > 0 and len(gt_boxes) > 0:
                iou_matrix = self._compute_iou_matrix(pred_boxes_valid, gt_boxes)
                matches = self._match_predictions(iou_matrix, iou_threshold=0.5)

                for pred_idx, gt_idx in matches:
                    if iou_matrix[pred_idx, gt_idx] >= 0.5:
                        tp_count += 1
                        matched_ious.append(iou_matrix[pred_idx, gt_idx])
                    else:
                        # NOTE(review): _match_predictions already drops pairs
                        # below the 0.5 threshold, so this branch appears
                        # unreachable.
                        fp_count += 1

                # Unmatched predictions are false positives
                unmatched_preds = len(pred_boxes_valid) - len(matches)
                fp_count += unmatched_preds

                # Unmatched ground truths are false negatives
                matched_gt_indices = set(gt_idx for _, gt_idx in matches)
                fn_count += len(gt_boxes) - len(matched_gt_indices)
            else:
                # Handle edge cases explicitly
                if len(pred_boxes_valid) == 0 and len(gt_boxes) > 0:
                    # No predictions but ground truth exists = all false negatives
                    fn_count += len(gt_boxes)
                elif len(pred_boxes_valid) > 0 and len(gt_boxes) == 0:
                    # Predictions but no ground truth = all false positives
                    fp_count += len(pred_boxes_valid)
                # If both are 0, nothing to count

        # Debug print for false negatives
        if len(all_pred_boxes_filtered) == 0 and len(all_gt_boxes) > 0:
            print(f" DEBUG: No valid predictions (after filtering), {len(all_gt_boxes)} GT boxes, FN={fn_count}")
            print(f" DEBUG: Total raw predictions before filtering: {len(all_pred_boxes_raw)}")

        # Compute overall metrics
        # max(..., 1) guards against division by zero when there are no
        # predictions and/or no ground truths.
        precision = tp_count / max(tp_count + fp_count, 1)
        recall = tp_count / max(tp_count + fn_count, 1)
        f1_score = 2 * precision * recall / max(precision + recall, 1e-8)

        # Confidence statistics (from filtered scores)
        all_pred_scores_np = np.array(all_pred_scores_filtered) if all_pred_scores_filtered else np.array([0.0])
        confidence_mean = float(np.mean(all_pred_scores_np))
        confidence_std = float(np.std(all_pred_scores_np))

        # IoU statistics
        matched_ious = np.array(matched_ious) if matched_ious else np.array([0.0])
        iou_mean = float(np.mean(matched_ious))
        iou_std = float(np.std(matched_ious))

        # Compute AP at different IoU thresholds
        ap_50 = self._compute_average_precision(predictions, ground_truths, iou_threshold=0.5)
        ap_75 = self._compute_average_precision(predictions, ground_truths, iou_threshold=0.75)
        ap_50_95 = np.mean([
            self._compute_average_precision(predictions, ground_truths, iou_threshold=iou_thresh)
            for iou_thresh in self.iou_thresholds
        ])

        return DetectionMetrics(
            ap_50=ap_50,
            ap_75=ap_75,
            ap_50_95=ap_50_95,
            precision=precision,
            recall=recall,
            f1_score=f1_score,
            confidence_mean=confidence_mean,
            confidence_std=confidence_std,
            total_detections=len(all_pred_boxes_raw),  # ALL predictions before filtering
            valid_detections=len(all_pred_boxes_filtered),  # Only predictions passing threshold
            false_positives=fp_count,
            false_negatives=fn_count,
            iou_mean=iou_mean,
            iou_std=iou_std
        )

    def compute_segmentation_metrics(self, predictions: List[Dict],
                                    ground_truths: List[Dict]) -> SegmentationMetrics:
        """Compute segmentation metrics for masks.

        Each predicted mask is greedily matched to the GT mask giving the
        best Dice score (no one-to-one assignment is enforced).

        Args:
            predictions: List with 'masks' key containing binary masks
            ground_truths: List with 'masks' key containing binary masks

        Returns:
            SegmentationMetrics object
        """
        all_dice_scores = []
        all_jaccard_scores = []
        all_pixel_accuracies = []
        all_mask_qualities = []

        for pred, gt in zip(predictions, ground_truths):
            pred_masks = pred.get('masks', [])
            gt_masks = gt.get('masks', [])

            if len(pred_masks) == 0 or len(gt_masks) == 0:
                continue

            # Convert to numpy if needed
            if torch.is_tensor(pred_masks):
                pred_masks = pred_masks.cpu().numpy()
            if torch.is_tensor(gt_masks):
                gt_masks = gt_masks.cpu().numpy()

            # Match masks based on IoU and compute metrics
            for pred_mask in pred_masks:
                best_dice = 0.0
                best_jaccard = 0.0
                best_pixel_acc = 0.0

                for gt_mask in gt_masks:
                    dice = self._compute_dice_coefficient(pred_mask, gt_mask)
                    jaccard = self._compute_jaccard_index(pred_mask, gt_mask)
                    pixel_acc = self._compute_pixel_accuracy(pred_mask, gt_mask)

                    # Dice decides the best GT; jaccard/pixel-acc follow it.
                    if dice > best_dice:
                        best_dice = dice
                        best_jaccard = jaccard
                        best_pixel_acc = pixel_acc

                all_dice_scores.append(best_dice)
                all_jaccard_scores.append(best_jaccard)
                all_pixel_accuracies.append(best_pixel_acc)

                # Mask quality score (combination of metrics)
                mask_quality = (best_dice + best_jaccard + best_pixel_acc) / 3
                all_mask_qualities.append(mask_quality)

        # Compute mask AP at different thresholds
        # NOTE(review): _compute_mask_average_precision is a stub returning
        # 0.0, so all mask AP fields below are currently placeholders.
        mask_ap_50 = self._compute_mask_average_precision(predictions, ground_truths, iou_threshold=0.5)
        mask_ap_75 = self._compute_mask_average_precision(predictions, ground_truths, iou_threshold=0.75)
        mask_ap_50_95 = np.mean([
            self._compute_mask_average_precision(predictions, ground_truths, iou_threshold=iou_thresh)
            for iou_thresh in self.iou_thresholds
        ])

        return SegmentationMetrics(
            mask_ap_50=mask_ap_50,
            mask_ap_75=mask_ap_75,
            mask_ap_50_95=mask_ap_50_95,
            dice_coefficient=float(np.mean(all_dice_scores)) if all_dice_scores else 0.0,
            jaccard_index=float(np.mean(all_jaccard_scores)) if all_jaccard_scores else 0.0,
            pixel_accuracy=float(np.mean(all_pixel_accuracies)) if all_pixel_accuracies else 0.0,
            mask_quality_mean=float(np.mean(all_mask_qualities)) if all_mask_qualities else 0.0,
            mask_quality_std=float(np.std(all_mask_qualities)) if all_mask_qualities else 0.0
        )

    def compute_size_based_metrics(self, predictions: List[Dict],
                                  ground_truths: List[Dict]) -> SizeBasedMetrics:
        """Compute metrics based on object size categories.

        Boxes are bucketed by area against self.size_thresholds and AP is
        computed per bucket.

        NOTE(review): bucketing produces one single-object dict per box, so
        the per-size pred/gt lists passed to _compute_average_precision are
        no longer aligned per image — verify this is intended.
        """
        small_preds, small_gts = [], []
        medium_preds, medium_gts = [], []
        large_preds, large_gts = [], []
        size_counts = {'small': 0, 'medium': 0, 'large': 0}

        for pred, gt in zip(predictions, ground_truths):
            pred_boxes = pred['boxes'].cpu().numpy() if torch.is_tensor(pred['boxes']) else pred['boxes']
            gt_boxes = gt['boxes'].cpu().numpy() if torch.is_tensor(gt['boxes']) else gt['boxes']

            # Categorize predictions by size
            pred_areas = self._compute_box_areas(pred_boxes)
            for i, area in enumerate(pred_areas):
                if area < self.size_thresholds[0]:
                    small_preds.append({k: v[i:i+1] for k, v in pred.items()})
                elif area < self.size_thresholds[1]:
                    medium_preds.append({k: v[i:i+1] for k, v in pred.items()})
                else:
                    large_preds.append({k: v[i:i+1] for k, v in pred.items()})

            # Categorize ground truths by size
            gt_areas = self._compute_box_areas(gt_boxes)
            for i, area in enumerate(gt_areas):
                if area < self.size_thresholds[0]:
                    small_gts.append({k: v[i:i+1] for k, v in gt.items()})
                    size_counts['small'] += 1
                elif area < self.size_thresholds[1]:
                    medium_gts.append({k: v[i:i+1] for k, v in gt.items()})
                    size_counts['medium'] += 1
                else:
                    large_gts.append({k: v[i:i+1] for k, v in gt.items()})
                    size_counts['large'] += 1

        # Compute AP for each size category
        small_ap = self._compute_average_precision(small_preds, small_gts) if small_gts else 0.0
        medium_ap = self._compute_average_precision(medium_preds, medium_gts) if medium_gts else 0.0
        large_ap = self._compute_average_precision(large_preds, large_gts) if large_gts else 0.0

        return SizeBasedMetrics(
            small_ap=small_ap,
            medium_ap=medium_ap,
            large_ap=large_ap,
            size_distribution=size_counts
        )

    def _compute_iou_matrix(self, boxes1: np.ndarray, boxes2: np.ndarray) -> np.ndarray:
        """Compute the pairwise IoU matrix between two sets of xyxy boxes.

        Returns an array of shape (len(boxes1), len(boxes2)).
        """
        def box_area(boxes):
            return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

        area1 = box_area(boxes1)
        area2 = box_area(boxes2)

        # Compute intersection (broadcast boxes1 against boxes2)
        inter_x1 = np.maximum(boxes1[:, None, 0], boxes2[:, 0])
        inter_y1 = np.maximum(boxes1[:, None, 1], boxes2[:, 1])
        inter_x2 = np.minimum(boxes1[:, None, 2], boxes2[:, 2])
        inter_y2 = np.minimum(boxes1[:, None, 3], boxes2[:, 3])

        inter_area = np.maximum(0, inter_x2 - inter_x1) * np.maximum(0, inter_y2 - inter_y1)
        union_area = area1[:, None] + area2 - inter_area

        # Epsilon guards against zero-area unions (degenerate boxes).
        return inter_area / np.maximum(union_area, 1e-8)

    def _match_predictions(self, iou_matrix: np.ndarray,
                          iou_threshold: float = 0.5) -> List[Tuple[int, int]]:
        """Match predictions to ground truth using the Hungarian algorithm.

        Returns (pred_idx, gt_idx) pairs whose IoU is >= iou_threshold.
        """
        from scipy.optimize import linear_sum_assignment

        # Use negative IoU for minimization
        cost_matrix = 1 - iou_matrix
        pred_indices, gt_indices = linear_sum_assignment(cost_matrix)

        # Filter matches by IoU threshold
        matches = []
        for pred_idx, gt_idx in zip(pred_indices, gt_indices):
            if iou_matrix[pred_idx, gt_idx] >= iou_threshold:
                matches.append((pred_idx, gt_idx))

        return matches

    def _compute_average_precision(self, predictions: List[Dict],
                                  ground_truths: List[Dict],
                                  iou_threshold: float = 0.5) -> float:
        """Compute average precision at the given IoU threshold.

        Labels every prediction as TP (1) or FP (0) via Hungarian matching
        against GT boxes, then feeds scores/labels to sklearn's
        average_precision_score.
        """
        if not predictions or not ground_truths:
            return 0.0

        all_scores = []
        all_labels = []

        for pred, gt in zip(predictions, ground_truths):
            pred_boxes = pred['boxes'].cpu().numpy() if torch.is_tensor(pred['boxes']) else pred['boxes']
            pred_scores = pred['scores'].cpu().numpy() if torch.is_tensor(pred['scores']) else pred['scores']
            gt_boxes = gt['boxes'].cpu().numpy() if torch.is_tensor(gt['boxes']) else gt['boxes']

            if len(pred_boxes) == 0:
                continue

            # Compute IoU and determine true/false positives
            if len(gt_boxes) > 0:
                iou_matrix = self._compute_iou_matrix(pred_boxes, gt_boxes)
                matches = self._match_predictions(iou_matrix, iou_threshold)
                matched_pred_indices = set(pred_idx for pred_idx, _ in matches)

                for i, score in enumerate(pred_scores):
                    all_scores.append(score)
                    all_labels.append(1 if i in matched_pred_indices else 0)
            else:
                # No ground truth, all predictions are false positives
                all_scores.extend(pred_scores)
                all_labels.extend([0] * len(pred_scores))

        if not all_scores:
            return 0.0

        # Compute precision-recall curve and average precision
        try:
            ap = average_precision_score(all_labels, all_scores)
            return float(ap)
        except ValueError:
            # sklearn raises when labels contain a single class (e.g. early
            # in training, when every prediction is a false positive).
            return 0.0

    def _compute_mask_average_precision(self, predictions: List[Dict],
                                       ground_truths: List[Dict],
                                       iou_threshold: float = 0.5) -> float:
        """Compute average precision for masks.

        NOTE(review): unimplemented placeholder — always returns 0.0.
        """
        # Similar to box AP but using mask IoU
        return 0.0  # Placeholder - implement if needed

    def _compute_dice_coefficient(self, mask1: np.ndarray, mask2: np.ndarray) -> float:
        """Compute the Dice coefficient between two binary masks."""
        # Convert to boolean arrays to ensure compatibility
        mask1_bool = mask1.astype(bool)
        mask2_bool = mask2.astype(bool)
        intersection = np.sum(mask1_bool & mask2_bool)
        union = np.sum(mask1_bool) + np.sum(mask2_bool)
        return 2.0 * intersection / max(union, 1e-8)

    def _compute_jaccard_index(self, mask1: np.ndarray, mask2: np.ndarray) -> float:
        """Compute the Jaccard index (IoU) between two binary masks."""
        # Convert to boolean arrays to ensure compatibility
        mask1_bool = mask1.astype(bool)
        mask2_bool = mask2.astype(bool)
        intersection = np.sum(mask1_bool & mask2_bool)
        union = np.sum(mask1_bool | mask2_bool)
        return intersection / max(union, 1e-8)

    def _compute_pixel_accuracy(self, mask1: np.ndarray, mask2: np.ndarray) -> float:
        """Compute pixel-wise accuracy between two equally-shaped binary masks."""
        correct_pixels = np.sum(mask1 == mask2)
        total_pixels = mask1.size
        return correct_pixels / total_pixels

    def _compute_box_areas(self, boxes: np.ndarray) -> np.ndarray:
        """Compute areas of xyxy bounding boxes."""
        return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
src/bean_vision/export/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Export utilities for detection results."""
2
+
3
+ from .coco_exporter import COCOExporter
4
+ from .labelme_exporter import LabelMeExporter
5
+ from .polygon_utils import simplify_polygon, polygon_area
6
+
7
+ __all__ = [
8
+ 'COCOExporter',
9
+ 'LabelMeExporter',
10
+ 'simplify_polygon',
11
+ 'polygon_area'
12
+ ]
src/bean_vision/export/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (476 Bytes). View file
 
src/bean_vision/export/__pycache__/coco_exporter.cpython-313.pyc ADDED
Binary file (5.76 kB). View file
 
src/bean_vision/export/__pycache__/labelme_exporter.cpython-313.pyc ADDED
Binary file (5.31 kB). View file
 
src/bean_vision/export/__pycache__/polygon_utils.cpython-313.pyc ADDED
Binary file (4.14 kB). View file
 
src/bean_vision/export/coco_exporter.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Export predictions to COCO format."""
2
+
3
+ import json
4
+ from pathlib import Path
5
+ from typing import Dict, List, Any, Optional
6
+ from datetime import datetime
7
+
8
+ from .polygon_utils import polygon_area, polygon_bbox
9
+
10
+
11
class COCOExporter:
    """Accumulate detection results and write them out in COCO format."""

    def __init__(self, dataset_name: str = "bean_detections"):
        """Initialize COCO exporter.

        Args:
            dataset_name: Name of the dataset (stored in the COCO "info" block)
        """
        self.dataset_name = dataset_name
        self.coco_data = self._initialize_coco_structure()
        # COCO IDs are 1-based and strictly increasing.
        self.annotation_id = 1
        self.image_id = 1

    def _initialize_coco_structure(self) -> Dict[str, Any]:
        """Build the empty top-level COCO document."""
        now = datetime.now()
        return {
            "info": {
                "description": self.dataset_name,
                "version": "1.0",
                "year": now.year,
                "date_created": now.isoformat(),
            },
            "categories": [
                {"id": 1, "name": "bean", "supercategory": "object"}
            ],
            "images": [],
            "annotations": [],
        }

    def add_image(
        self,
        image_path: Path,
        width: int,
        height: int,
        image_id: Optional[int] = None
    ) -> int:
        """Register an image in the COCO dataset.

        Args:
            image_path: Path to image file (only the basename is stored)
            width: Image width in pixels
            height: Image height in pixels
            image_id: Optional explicit image ID; auto-assigned when omitted

        Returns:
            The ID the image was recorded under.
        """
        if image_id is None:
            image_id = self.image_id
            self.image_id += 1

        self.coco_data["images"].append({
            "id": image_id,
            "file_name": image_path.name,
            "width": width,
            "height": height,
        })
        return image_id

    def add_predictions(
        self,
        predictions: Dict[str, Any],
        image_id: int
    ) -> int:
        """Convert one image's predictions into COCO annotations.

        Polygon predictions take precedence; plain boxes are used only when
        no 'polygons' key is present.

        Args:
            predictions: Prediction results ('polygons' or 'boxes', optional 'scores')
            image_id: ID of the image the predictions belong to

        Returns:
            Number of annotations added.
        """
        num_added = 0

        if 'polygons' in predictions:
            polygon_groups = predictions['polygons']
            scores = predictions.get('scores', [1.0] * len(polygon_groups))

            for group, score in zip(polygon_groups, scores):
                for poly in group:
                    if len(poly) < 3:
                        continue  # fewer than 3 points is not a valid polygon

                    # COCO stores segmentations as flat [x0, y0, x1, y1, ...]
                    flat = [coord for point in poly for coord in point]
                    x1, y1, x2, y2 = polygon_bbox(poly)

                    self.coco_data["annotations"].append({
                        "id": self.annotation_id,
                        "image_id": image_id,
                        "category_id": 1,  # bean
                        "segmentation": [flat],
                        "area": polygon_area(poly),
                        "bbox": [x1, y1, x2 - x1, y2 - y1],
                        "iscrowd": 0,
                        "score": float(score),
                    })
                    self.annotation_id += 1
                    num_added += 1

        elif 'boxes' in predictions:
            boxes = predictions['boxes']
            scores = predictions.get('scores', [1.0] * len(boxes))

            for (x1, y1, x2, y2), score in zip(boxes, scores):
                self.coco_data["annotations"].append({
                    "id": self.annotation_id,
                    "image_id": image_id,
                    "category_id": 1,
                    "bbox": [x1, y1, x2 - x1, y2 - y1],
                    "area": (x2 - x1) * (y2 - y1),
                    "iscrowd": 0,
                    "score": float(score),
                })
                self.annotation_id += 1
                num_added += 1

        return num_added

    def save(self, output_path: Path):
        """Write the accumulated COCO document to a JSON file.

        Args:
            output_path: Path to save JSON file
        """
        with open(output_path, 'w') as f:
            json.dump(self.coco_data, f, indent=2)
        print(f"COCO format saved to {output_path}")

    def get_statistics(self) -> Dict[str, int]:
        """Get dataset statistics.

        Returns:
            Dictionary with image/annotation/category counts.
        """
        data = self.coco_data
        return {
            "num_images": len(data["images"]),
            "num_annotations": len(data["annotations"]),
            "num_categories": len(data["categories"]),
        }
src/bean_vision/export/labelme_exporter.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Export predictions to LabelMe format."""
2
+
3
+ import json
4
+ import base64
5
+ from pathlib import Path
6
+ from typing import Dict, List, Any, Optional
7
+ from PIL import Image
8
+ import io
9
+
10
+
11
class LabelMeExporter:
    """Export detection results to LabelMe format."""

    def __init__(self, version: str = "5.0.1"):
        """Initialize LabelMe exporter.

        Args:
            version: LabelMe version string
        """
        self.version = version

    def create_labelme_annotation(
        self,
        image_path: Path,
        predictions: Dict[str, Any],
        include_image_data: bool = False
    ) -> Dict[str, Any]:
        """Create LabelMe format annotation.

        Args:
            image_path: Path to image file
            predictions: Prediction results with polygons (or boxes)
            include_image_data: Whether to include base64 image data

        Returns:
            LabelMe format dictionary
        """
        # Read dimensions and (if requested) re-encode the image while the
        # file is still open. BUGFIX: the encoding previously ran after the
        # `with` block, where PIL has already closed the image, so
        # include_image_data=True raised "I/O operation on closed image".
        image_data = None
        with Image.open(image_path) as img:
            width, height = img.size
            if include_image_data:
                buffer = io.BytesIO()
                img.save(buffer, format=img.format or 'PNG')
                image_data = base64.b64encode(buffer.getvalue()).decode('utf-8')

        # Create LabelMe structure
        labelme_data = {
            "version": self.version,
            "flags": {},
            "shapes": [],
            "imagePath": str(image_path.name),
            "imageData": image_data,
            "imageHeight": height,
            "imageWidth": width
        }

        # Polygon shapes take precedence; boxes are exported as rectangles
        # only when no 'polygons' key is present.
        if 'polygons' in predictions:
            polygons = predictions['polygons']
            scores = predictions.get('scores', [1.0] * len(polygons))

            for polygon_list, score in zip(polygons, scores):
                for polygon in polygon_list:
                    if len(polygon) >= 3:  # skip degenerate polygons
                        shape = {
                            "label": "bean",
                            "points": polygon,
                            "group_id": None,
                            "shape_type": "polygon",
                            "flags": {},
                            "attributes": {
                                "confidence": float(score)
                            }
                        }
                        labelme_data["shapes"].append(shape)

        elif 'boxes' in predictions:
            boxes = predictions['boxes']
            scores = predictions.get('scores', [1.0] * len(boxes))

            for box, score in zip(boxes, scores):
                x1, y1, x2, y2 = box

                shape = {
                    "label": "bean",
                    "points": [[x1, y1], [x2, y2]],
                    "group_id": None,
                    "shape_type": "rectangle",
                    "flags": {},
                    "attributes": {
                        "confidence": float(score)
                    }
                }
                labelme_data["shapes"].append(shape)

        return labelme_data

    def save(
        self,
        image_path: Path,
        predictions: Dict[str, Any],
        output_path: Path,
        include_image_data: bool = False
    ):
        """Save LabelMe annotation to JSON file.

        Args:
            image_path: Path to image file
            predictions: Prediction results
            output_path: Path to save JSON file
            include_image_data: Whether to include base64 image data
        """
        labelme_data = self.create_labelme_annotation(
            image_path,
            predictions,
            include_image_data
        )

        with open(output_path, 'w') as f:
            json.dump(labelme_data, f, indent=2)

        print(f"LabelMe format saved to {output_path}")

    def save_batch(
        self,
        results: List[Dict[str, Any]],
        output_dir: Path,
        include_image_data: bool = False
    ):
        """Save batch of predictions to LabelMe format.

        Args:
            results: List of prediction results, each carrying an
                'image_path' key (entries containing an 'error' key are skipped)
            output_dir: Directory to save annotations
            include_image_data: Whether to include base64 image data
        """
        output_dir.mkdir(parents=True, exist_ok=True)

        for result in results:
            if 'error' in result:
                continue  # failed predictions carry no exportable shapes

            image_path = Path(result['image_path'])
            output_path = output_dir / f"{image_path.stem}.json"

            self.save(
                image_path,
                result,
                output_path,
                include_image_data
            )

        print(f"Saved {len(results)} LabelMe annotations to {output_dir}")
src/bean_vision/export/polygon_utils.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Polygon utility functions."""
2
+
3
+ from typing import List, Tuple
4
+ import numpy as np
5
+ import cv2
6
+
7
+
8
def simplify_polygon(
    polygon: List[Tuple[float, float]],
    epsilon_percent: float = 1.0
) -> List[Tuple[float, float]]:
    """Reduce a polygon's vertex count with the Douglas-Peucker algorithm.

    Args:
        polygon: Sequence of (x, y) vertices
        epsilon_percent: Tolerance expressed as a percentage of the perimeter

    Returns:
        Simplified polygon as a list of (x, y) tuples
    """
    if len(polygon) < 3:
        return polygon

    contour = np.array(polygon, dtype=np.float32)

    # Tolerance scales with the perimeter so simplification strength is
    # independent of polygon size.
    tolerance = cv2.arcLength(contour, True) * epsilon_percent / 100

    reduced = cv2.approxPolyDP(contour, tolerance, True)
    return [(float(pt[0][0]), float(pt[0][1])) for pt in reduced]
36
+
37
+
38
def polygon_area(polygon: List[Tuple[float, float]]) -> float:
    """Compute the enclosed area of a polygon via the shoelace formula.

    Args:
        polygon: Sequence of (x, y) vertices

    Returns:
        Absolute enclosed area; 0.0 for fewer than three vertices
    """
    vertex_count = len(polygon)
    if vertex_count < 3:
        return 0.0

    # Shoelace formula: sum of cross products of consecutive vertices,
    # wrapping from the last vertex back to the first.
    cross_sum = sum(
        polygon[i][0] * polygon[(i + 1) % vertex_count][1]
        - polygon[(i + 1) % vertex_count][0] * polygon[i][1]
        for i in range(vertex_count)
    )
    return abs(cross_sum) / 2.0
60
+
61
+
62
def polygon_centroid(polygon: List[Tuple[float, float]]) -> Tuple[float, float]:
    """Compute the vertex-average centroid of a polygon.

    Args:
        polygon: Sequence of (x, y) vertices

    Returns:
        Centroid (x, y); (0.0, 0.0) for an empty polygon
    """
    if not polygon:
        return (0.0, 0.0)

    count = len(polygon)
    mean_x = sum(x for x, _ in polygon) / count
    mean_y = sum(y for _, y in polygon) / count
    return (mean_x, mean_y)
78
+
79
+
80
def polygon_bbox(polygon: List[Tuple[float, float]]) -> Tuple[float, float, float, float]:
    """Compute the axis-aligned bounding box of a polygon.

    Args:
        polygon: Sequence of (x, y) vertices

    Returns:
        Bounding box (x1, y1, x2, y2); all zeros for an empty polygon
    """
    if not polygon:
        return (0.0, 0.0, 0.0, 0.0)

    xs, ys = zip(*polygon)
    return (min(xs), min(ys), max(xs), max(ys))
96
+
97
+
98
def validate_polygon(polygon: List[Tuple[float, float]], min_points: int = 3) -> bool:
    """Check that a polygon has enough vertices and a non-zero area.

    Args:
        polygon: Sequence of (x, y) vertices
        min_points: Minimum number of vertices required

    Returns:
        True when the polygon meets both requirements
    """
    if len(polygon) < min_points:
        return False
    # Degenerate (collinear) polygons have zero area and are rejected.
    return polygon_area(polygon) > 0
src/bean_vision/inference/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ """Bean detection inference module."""
2
+
3
+ from .predictor import BeanPredictor
4
+ from .mask_converter import MaskConverter
5
+ from .postprocessing import PostProcessor
6
+
7
+ __all__ = ['BeanPredictor', 'MaskConverter', 'PostProcessor']
src/bean_vision/inference/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (433 Bytes). View file
 
src/bean_vision/inference/__pycache__/mask_converter.cpython-313.pyc ADDED
Binary file (13.3 kB). View file
 
src/bean_vision/inference/__pycache__/postprocessing.cpython-313.pyc ADDED
Binary file (13.3 kB). View file
 
src/bean_vision/inference/__pycache__/predictor.cpython-313.pyc ADDED
Binary file (12.4 kB). View file
 
src/bean_vision/inference/mask_converter.py ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Convert binary masks to polygon representations."""
2
+
3
+ from typing import List, Tuple, Optional, Dict, Any
4
+ import numpy as np
5
+ import cv2
6
+ import torch
7
+ from scipy import interpolate
8
+
9
+
10
class MaskConverter:
    """Convert binary segmentation masks to polygon coordinates."""

    def __init__(
        self,
        simplify_epsilon: float = 0.5,
        min_area: float = 100.0,
        approximation_method: str = 'douglas_peucker',
        use_detailed_contours: bool = True,
        smooth_polygons: bool = False,
        smoothing_factor: float = 0.0
    ):
        """Initialize mask converter.

        Args:
            simplify_epsilon: Epsilon for polygon simplification (higher = fewer points)
            min_area: Minimum contour area to keep
            approximation_method: Method for polygon approximation
            use_detailed_contours: Use CHAIN_APPROX_NONE for more detailed contours
            smooth_polygons: Apply spline smoothing to polygons
            smoothing_factor: Smoothing factor for spline interpolation (0-1, 0=no smoothing)
        """
        self.simplify_epsilon = simplify_epsilon
        self.min_area = min_area
        self.approximation_method = approximation_method
        self.use_detailed_contours = use_detailed_contours
        self.smooth_polygons = smooth_polygons
        self.smoothing_factor = smoothing_factor

    def smooth_polygon_spline(
        self,
        polygon: List[Tuple[float, float]],
        smoothing_factor: Optional[float] = None
    ) -> List[Tuple[float, float]]:
        """Smooth a polygon using spline interpolation optimized for elliptical shapes.

        Args:
            polygon: List of (x, y) coordinates
            smoothing_factor: Smoothing factor (0-1, higher = smoother);
                falls back to the instance default when None

        Returns:
            Smoothed polygon coordinates (the input polygon unchanged when it is
            too small to smooth or smoothing is disabled/fails)
        """
        if smoothing_factor is None:
            smoothing_factor = self.smoothing_factor

        # Need at least 4 points for a cubic spline; 0 disables smoothing.
        if len(polygon) < 4 or smoothing_factor <= 0:
            return polygon

        # Extract x and y coordinates
        points = np.array(polygon)
        x = points[:, 0]
        y = points[:, 1]

        # Close the polygon by repeating the first point at the end
        x = np.append(x, x[0])
        y = np.append(y, y[0])

        try:
            # Determine the spline smoothing parameter from the factor;
            # tuned for bean-like elliptical shapes.
            if smoothing_factor >= 0.5:
                # Strong smoothing for elliptical shapes
                s = max(0, len(x) * (1 - smoothing_factor * 1.5))
            else:
                # Gentle smoothing to preserve details
                s = len(x) * (1 - smoothing_factor)

            # Periodic spline so the closed contour stays closed
            tck, _ = interpolate.splprep([x, y], s=s, per=True, k=min(3, len(x) - 1))

            # Generate smooth points - fewer points give cleaner ellipses
            if smoothing_factor >= 0.5:
                num_points = max(20, min(60, int(len(polygon) * 0.8)))
            else:
                num_points = max(len(polygon), int(len(polygon) * 1.2))

            t_smooth = np.linspace(0, 1, num_points, endpoint=False)
            x_smooth, y_smooth = interpolate.splev(t_smooth, tck)

            # Additional radial smoothing toward an elliptical outline for
            # very high smoothing factors.
            # (An unused `from sklearn.covariance import EllipticEnvelope`
            # used to live here; it silently disabled this whole refinement
            # whenever scikit-learn was not installed, so it was removed.)
            if smoothing_factor >= 0.7:
                try:
                    center_x, center_y = np.mean(x_smooth), np.mean(y_smooth)

                    # Work in polar coordinates around the centroid and
                    # low-pass filter the radii to suppress high-frequency bumps.
                    angles = np.arctan2(y_smooth - center_y, x_smooth - center_x)
                    distances = np.sqrt((x_smooth - center_x)**2 + (y_smooth - center_y)**2)

                    window_size = max(3, int(len(distances) * 0.1))
                    if window_size % 2 == 0:
                        window_size += 1
                    smoothed_distances = np.convolve(distances,
                                                     np.ones(window_size) / window_size,
                                                     mode='same')

                    # Blend original with smoothed radii: 0 at factor 0.7, 1 at 1.0
                    blend_factor = (smoothing_factor - 0.7) / 0.3
                    final_distances = distances * (1 - blend_factor * 0.5) + smoothed_distances * blend_factor * 0.5

                    # Reconstruct Cartesian points from the blended radii
                    x_smooth = center_x + final_distances * np.cos(angles)
                    y_smooth = center_y + final_distances * np.sin(angles)
                except Exception:
                    pass  # Keep the spline-smoothed version if ellipse refinement fails

            # Combine back into polygon format
            smooth_polygon = [(float(x_smooth[i]), float(y_smooth[i]))
                              for i in range(len(x_smooth))]

            return smooth_polygon

        except Exception:
            # If smoothing fails, return original polygon
            return polygon

    def mask_to_polygon(
        self,
        mask: np.ndarray,
        threshold: float = 0.5
    ) -> List[List[Tuple[float, float]]]:
        """Convert a binary mask to polygon coordinates.

        Args:
            mask: Binary mask array (H, W) or (1, H, W)
            threshold: Threshold for binarizing the mask

        Returns:
            List of polygons, each polygon is a list of (x, y) tuples
        """
        # Handle different mask dimensions
        if mask.ndim == 3:
            mask = mask[0]

        # Threshold the mask
        binary_mask = (mask > threshold).astype(np.uint8)

        # Apply morphological operations to smooth mask boundaries
        if self.smooth_polygons and self.smoothing_factor > 0:
            # Use elliptical kernel for bean-like shapes
            if self.smoothing_factor > 0.5:
                # Strong smoothing - larger elliptical kernel
                kernel_size = int(5 + 4 * self.smoothing_factor)
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
                # Close small gaps then open to smooth boundaries
                binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_CLOSE, kernel, iterations=1)
                binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_OPEN, kernel, iterations=1)
                # Additional Gaussian blur for ultra-smooth boundaries
                if self.smoothing_factor > 0.7:
                    binary_mask = cv2.GaussianBlur(binary_mask, (5, 5), 1.0)
                    binary_mask = (binary_mask > 0.5).astype(np.uint8)
            else:
                # Light smoothing - smaller kernel
                kernel_size = int(3 + 2 * self.smoothing_factor)
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
                binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_CLOSE, kernel, iterations=1)
        elif self.use_detailed_contours:
            # Minimal smoothing for detailed contours
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
            binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_CLOSE, kernel, iterations=1)

        # Find contours - use CHAIN_APPROX_NONE for detailed contours
        contour_mode = cv2.CHAIN_APPROX_NONE if self.use_detailed_contours else cv2.CHAIN_APPROX_SIMPLE
        contours, _ = cv2.findContours(
            binary_mask,
            cv2.RETR_EXTERNAL,
            contour_mode
        )

        polygons = []
        for contour in contours:
            # Filter by area
            area = cv2.contourArea(contour)
            if area < self.min_area:
                continue

            # Convert contour to polygon points
            polygon = [(float(point[0][0]), float(point[0][1]))
                       for point in contour.reshape(-1, 1, 2)]

            # Apply smoothing if requested
            if self.smooth_polygons and self.smoothing_factor > 0:
                polygon = self.smooth_polygon_spline(polygon, self.smoothing_factor)

            # Simplify polygon after smoothing if needed
            if self.approximation_method == 'douglas_peucker' and not self.smooth_polygons:
                if self.use_detailed_contours:
                    # Very gentle simplification for detailed contours
                    epsilon = 0.003 * cv2.arcLength(contour, True)
                else:
                    epsilon = self.simplify_epsilon * cv2.arcLength(contour, True) / 100
                simplified = cv2.approxPolyDP(
                    np.array(polygon, dtype=np.float32).reshape(-1, 1, 2),
                    epsilon,
                    True
                )
                polygon = [(float(point[0][0]), float(point[0][1]))
                           for point in simplified]

            # Ensure minimum number of points for a valid polygon
            if len(polygon) >= 3:
                polygons.append(polygon)

        return polygons

    def masks_to_polygons(
        self,
        masks: torch.Tensor,
        threshold: float = 0.5
    ) -> List[List[List[Tuple[float, float]]]]:
        """Convert multiple masks to polygons.

        Args:
            masks: Tensor of masks (N, 1, H, W)
            threshold: Threshold for binarizing masks

        Returns:
            List of polygon lists, one per mask
        """
        masks_np = masks.cpu().numpy()
        all_polygons = []

        for mask in masks_np:
            polygons = self.mask_to_polygon(mask, threshold)
            all_polygons.append(polygons)

        return all_polygons

    def polygon_to_mask(
        self,
        polygon: List[Tuple[float, float]],
        height: int,
        width: int
    ) -> np.ndarray:
        """Convert polygon back to binary mask (for validation).

        Args:
            polygon: List of (x, y) coordinates
            height: Mask height
            width: Mask width

        Returns:
            Binary mask array
        """
        mask = np.zeros((height, width), dtype=np.uint8)

        if len(polygon) >= 3:
            points = np.array(polygon, dtype=np.int32)
            cv2.fillPoly(mask, [points], 1)

        return mask

    def calculate_iou(
        self,
        mask1: np.ndarray,
        mask2: np.ndarray
    ) -> float:
        """Calculate IoU between two masks.

        Args:
            mask1: First binary mask
            mask2: Second binary mask

        Returns:
            IoU score (0.0 when both masks are empty)
        """
        intersection = np.logical_and(mask1, mask2).sum()
        union = np.logical_or(mask1, mask2).sum()

        if union == 0:
            return 0.0

        return float(intersection) / float(union)

    def validate_conversion(
        self,
        original_mask: np.ndarray,
        polygons: List[List[Tuple[float, float]]],
        threshold: float = 0.9
    ) -> Dict[str, Any]:
        """Validate polygon conversion quality.

        Args:
            original_mask: Original binary mask
            polygons: Converted polygons
            threshold: Minimum IoU threshold for valid conversion

        Returns:
            Validation metrics: iou, valid flag, polygon and point counts
        """
        height, width = original_mask.shape[-2:]

        # Reconstruct mask from polygons
        reconstructed = np.zeros((height, width), dtype=np.uint8)
        for polygon in polygons:
            if len(polygon) >= 3:
                points = np.array(polygon, dtype=np.int32)
                cv2.fillPoly(reconstructed, [points], 1)

        # Calculate metrics
        iou = self.calculate_iou(original_mask > 0.5, reconstructed)

        return {
            'iou': iou,
            'valid': iou >= threshold,
            'num_polygons': len(polygons),
            'total_points': sum(len(p) for p in polygons)
        }
src/bean_vision/inference/postprocessing.py ADDED
@@ -0,0 +1,362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Post-processing utilities for predictions."""
2
+
3
+ from typing import Dict, List, Any, Optional
4
+ import torch
5
+ import torchvision.ops as ops
6
+ import numpy as np
7
+
8
+
9
class PostProcessor:
    """Post-process model predictions (confidence filtering, NMS, size/edge filters)."""

    def __init__(
        self,
        confidence_threshold: float = 0.5,
        nms_threshold: float = 0.3,
        max_detections: Optional[int] = None
    ):
        """Initialize post-processor.

        Args:
            confidence_threshold: Minimum confidence score
            nms_threshold: IoU threshold for NMS
            max_detections: Maximum number of detections to keep
        """
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.max_detections = max_detections

    def filter_predictions(
        self,
        predictions: Dict[str, torch.Tensor],
        confidence_threshold: Optional[float] = None
    ) -> Dict[str, torch.Tensor]:
        """Filter predictions by confidence threshold.

        Args:
            predictions: Model predictions dictionary
            confidence_threshold: Override default threshold (0.0 is a valid override)

        Returns:
            Filtered predictions
        """
        # Explicit None check so an override of 0.0 is honored; a bare
        # `x or default` would silently fall back to the instance default.
        if confidence_threshold is None:
            threshold = self.confidence_threshold
        else:
            threshold = confidence_threshold

        # Filter by confidence
        keep_indices = predictions['scores'] >= threshold

        filtered = {
            'boxes': predictions['boxes'][keep_indices],
            'labels': predictions['labels'][keep_indices],
            'scores': predictions['scores'][keep_indices]
        }

        # Include masks if present
        if 'masks' in predictions:
            filtered['masks'] = predictions['masks'][keep_indices]

        return filtered

    def apply_nms(
        self,
        boxes: torch.Tensor,
        scores: torch.Tensor,
        labels: torch.Tensor,
        masks: Optional[torch.Tensor] = None,
        nms_threshold: Optional[float] = None
    ) -> Dict[str, torch.Tensor]:
        """Apply class-wise Non-Maximum Suppression on bounding boxes.

        Args:
            boxes: Bounding boxes tensor
            scores: Confidence scores
            labels: Class labels
            masks: Optional masks tensor
            nms_threshold: Override default NMS threshold (0.0 is a valid override)

        Returns:
            NMS-filtered predictions, sorted by descending score
        """
        threshold = self.nms_threshold if nms_threshold is None else nms_threshold

        # Apply NMS independently per class
        keep_indices = []
        unique_labels = labels.unique()

        for label in unique_labels:
            label_mask = labels == label
            label_boxes = boxes[label_mask]
            label_scores = scores[label_mask]

            if len(label_boxes) > 0:
                # Apply NMS
                keep = ops.nms(label_boxes, label_scores, threshold)

                # Convert back to original indices
                original_indices = torch.where(label_mask)[0]
                keep_indices.extend(original_indices[keep].tolist())

        # dtype=torch.long keeps indexing valid even when nothing survives:
        # torch.tensor([]) defaults to float and cannot be used as an index.
        keep_indices = torch.tensor(keep_indices, dtype=torch.long)
        # Sort by score
        keep_indices = keep_indices[scores[keep_indices].argsort(descending=True)]

        # Limit detections if specified
        if self.max_detections is not None:
            keep_indices = keep_indices[:self.max_detections]

        result = {
            'boxes': boxes[keep_indices],
            'labels': labels[keep_indices],
            'scores': scores[keep_indices]
        }

        if masks is not None:
            result['masks'] = masks[keep_indices]

        return result

    def compute_mask_iou(self, mask1: np.ndarray, mask2: np.ndarray) -> float:
        """Compute IoU between two binary masks (0.0 when both are empty)."""
        intersection = np.logical_and(mask1, mask2).sum()
        union = np.logical_or(mask1, mask2).sum()
        if union == 0:
            return 0.0
        return intersection / union

    def apply_mask_nms(
        self,
        boxes: torch.Tensor,
        scores: torch.Tensor,
        labels: torch.Tensor,
        masks: torch.Tensor,
        iou_threshold: float = 0.3
    ) -> Dict[str, torch.Tensor]:
        """Apply Non-Maximum Suppression based on mask IoU.

        This is more accurate than box NMS for overlapping beans.

        Args:
            boxes: Bounding boxes tensor
            scores: Confidence scores
            labels: Class labels
            masks: Masks tensor
            iou_threshold: IoU threshold for suppression

        Returns:
            NMS-filtered predictions
        """
        if len(masks) == 0:
            return {'boxes': boxes, 'scores': scores, 'labels': labels, 'masks': masks}

        # Limit to top scoring detections for mask NMS (too slow otherwise);
        # detections below the top-K are dropped before suppression.
        MAX_MASK_NMS = 200
        if len(masks) > MAX_MASK_NMS:
            # Keep top scoring detections for mask NMS
            scores_np = scores.cpu().numpy()
            top_indices = np.argsort(scores_np)[-MAX_MASK_NMS:]

            # Apply mask NMS only to top detections
            top_result = self._apply_mask_nms_impl(
                boxes[top_indices],
                scores[top_indices],
                labels[top_indices],
                masks[top_indices],
                iou_threshold
            )
            return top_result
        else:
            return self._apply_mask_nms_impl(boxes, scores, labels, masks, iou_threshold)

    def _apply_mask_nms_impl(
        self,
        boxes: torch.Tensor,
        scores: torch.Tensor,
        labels: torch.Tensor,
        masks: torch.Tensor,
        iou_threshold: float
    ) -> Dict[str, torch.Tensor]:
        """Greedy mask-IoU NMS: keep highest-scoring mask, drop overlapping ones."""
        # Convert to numpy for mask operations
        masks_np = masks.cpu().numpy()
        scores_np = scores.cpu().numpy()

        # Binarize masks
        binary_masks = masks_np[:, 0] > 0.5

        # Sort by scores (highest first)
        order = scores_np.argsort()[::-1]

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)

            if order.size == 1:
                break

            # Compute IoU of current mask with remaining masks
            ious = np.array([
                self.compute_mask_iou(binary_masks[i], binary_masks[j])
                for j in order[1:]
            ])

            # Keep only masks with IoU below threshold
            inds = np.where(ious <= iou_threshold)[0]
            order = order[inds + 1]

        keep_tensor = torch.tensor(keep, dtype=torch.long)

        return {
            'boxes': boxes[keep_tensor],
            'labels': labels[keep_tensor],
            'scores': scores[keep_tensor],
            'masks': masks[keep_tensor]
        }

    def filter_edge_beans(
        self,
        boxes: torch.Tensor,
        masks: torch.Tensor,
        image_shape: tuple,
        edge_threshold: int = 10
    ) -> torch.Tensor:
        """Filter out partial beans at image edges.

        Args:
            boxes: Bounding boxes
            masks: Binary masks
            image_shape: (height, width) of image
            edge_threshold: Distance from edge to consider

        Returns:
            Indices of beans to keep
        """
        h, w = image_shape
        keep = []

        boxes_np = boxes.cpu().numpy()
        masks_np = masks.cpu().numpy()

        for i, box in enumerate(boxes_np):
            x1, y1, x2, y2 = box

            # Check if box touches image edge
            if (x1 <= edge_threshold or y1 <= edge_threshold or
                    x2 >= w - edge_threshold or y2 >= h - edge_threshold):

                # For edge beans, check if mask is substantially cut off
                mask = masks_np[i, 0] > 0.5
                mask_area = mask.sum()

                if mask_area == 0:
                    continue

                # Estimate if bean is complete by checking mask distribution near edges
                edge_mask_top = mask[:edge_threshold, :].sum() if mask.shape[0] > edge_threshold else 0
                edge_mask_bottom = mask[-edge_threshold:, :].sum() if mask.shape[0] > edge_threshold else 0
                edge_mask_left = mask[:, :edge_threshold].sum() if mask.shape[1] > edge_threshold else 0
                edge_mask_right = mask[:, -edge_threshold:].sum() if mask.shape[1] > edge_threshold else 0

                edge_ratio = (edge_mask_top + edge_mask_bottom + edge_mask_left + edge_mask_right) / mask_area

                # Keep if bean appears mostly complete (low edge ratio)
                if edge_ratio < 0.15:
                    keep.append(i)
            else:
                # Keep all non-edge beans
                keep.append(i)

        return torch.tensor(keep, dtype=torch.long)

    def filter_by_size(
        self,
        boxes: torch.Tensor,
        masks: torch.Tensor,
        min_area: float = 500,
        max_area: float = 30000
    ) -> torch.Tensor:
        """Filter detections by mask area to remove noise and anomalies.

        Args:
            boxes: Bounding boxes
            masks: Binary masks
            min_area: Minimum mask area in pixels
            max_area: Maximum mask area in pixels

        Returns:
            Indices of detections to keep
        """
        keep = []
        masks_np = masks.cpu().numpy()

        for i, mask in enumerate(masks_np):
            mask_binary = mask[0] > 0.5
            area = mask_binary.sum()

            if min_area <= area <= max_area:
                keep.append(i)

        return torch.tensor(keep, dtype=torch.long)

    def combine_predictions(
        self,
        predictions_list: List[Dict[str, torch.Tensor]]
    ) -> Dict[str, torch.Tensor]:
        """Combine predictions from multiple models or augmentations.

        Args:
            predictions_list: List of prediction dictionaries

        Returns:
            Combined predictions (deduplicated via box NMS); empty dict for empty input
        """
        if not predictions_list:
            return {}

        # Concatenate all predictions
        combined = {
            'boxes': torch.cat([p['boxes'] for p in predictions_list]),
            'labels': torch.cat([p['labels'] for p in predictions_list]),
            'scores': torch.cat([p['scores'] for p in predictions_list])
        }

        if 'masks' in predictions_list[0]:
            combined['masks'] = torch.cat([p['masks'] for p in predictions_list])

        # Apply NMS to remove duplicates
        return self.apply_nms(
            combined['boxes'],
            combined['scores'],
            combined['labels'],
            combined.get('masks')
        )

    def calculate_metrics(
        self,
        predictions: Dict[str, torch.Tensor],
        ground_truth: Optional[Dict[str, torch.Tensor]] = None
    ) -> Dict[str, float]:
        """Calculate basic metrics for predictions.

        Args:
            predictions: Model predictions
            ground_truth: Optional ground truth for comparison (currently unused)

        Returns:
            Metrics dictionary (detection count, confidence stats, per-class counts)
        """
        metrics = {
            'num_detections': len(predictions['scores']),
            'avg_confidence': predictions['scores'].mean().item() if len(predictions['scores']) > 0 else 0.0,
            'min_confidence': predictions['scores'].min().item() if len(predictions['scores']) > 0 else 0.0,
            'max_confidence': predictions['scores'].max().item() if len(predictions['scores']) > 0 else 0.0
        }

        # Add per-class counts
        if 'labels' in predictions:
            unique_labels = predictions['labels'].unique()
            for label in unique_labels:
                count = (predictions['labels'] == label).sum().item()
                metrics[f'class_{label}_count'] = count

        return metrics
src/bean_vision/inference/predictor.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Main bean detection predictor class."""
2
+
3
+ # Standard library imports
4
+ import time
5
+ from pathlib import Path
6
+ from typing import Any, Dict, List, Optional, Tuple
7
+
8
+ # Third-party imports
9
+ import numpy as np
10
+ import torch
11
+ from PIL import Image
12
+ from torchvision.transforms import functional as F
13
+
14
+ # Local imports
15
+ from ..models import BeanDetector
16
+ from .mask_converter import MaskConverter
17
+ from .postprocessing import PostProcessor
18
+
19
+
20
+ class BeanPredictor:
21
+ """High-level bean detection predictor."""
22
+
23
+ def __init__(
24
+ self,
25
+ model_path: Optional[Path] = None,
26
+ device: str = 'cpu',
27
+ max_detections: int = 500,
28
+ confidence_threshold: float = 0.5,
29
+ mask_threshold: float = 0.5,
30
+ nms_threshold: float = 0.3,
31
+ smooth_polygons: bool = False,
32
+ smoothing_factor: float = 0.0,
33
+ apply_nms: bool = True,
34
+ nms_type: str = 'box',
35
+ filter_edge_beans: bool = True,
36
+ edge_threshold: int = 10,
37
+ min_bean_area: float = 500,
38
+ max_bean_area: float = 30000
39
+ ):
40
+ """Initialize predictor.
41
+
42
+ Args:
43
+ model_path: Path to trained model checkpoint or pre-trained model name
44
+ device: Device to run on ('cpu' or 'cuda')
45
+ max_detections: Maximum detections per image
46
+ confidence_threshold: Minimum confidence for detections
47
+ mask_threshold: Threshold for mask binarization
48
+ nms_threshold: IoU threshold for NMS
49
+ smooth_polygons: Whether to apply polygon smoothing
50
+ smoothing_factor: Smoothing factor (0-1, 0=no smoothing)
51
+ apply_nms: Whether to apply Non-Maximum Suppression
52
+ nms_type: Type of NMS ('box' or 'mask')
53
+ filter_edge_beans: Whether to filter partial beans at edges
54
+ edge_threshold: Pixel distance from edge to check
55
+ min_bean_area: Minimum bean area in pixels
56
+ max_bean_area: Maximum bean area in pixels
57
+ """
58
+ self.device = torch.device(device)
59
+ if device == 'cuda' and not torch.cuda.is_available():
60
+ print("CUDA not available, using CPU")
61
+ self.device = torch.device('cpu')
62
+
63
+ self.max_detections = max_detections
64
+ self.confidence_threshold = confidence_threshold
65
+ self.mask_threshold = mask_threshold
66
+ self.nms_threshold = nms_threshold
67
+ self.apply_nms = apply_nms
68
+ self.nms_type = nms_type
69
+ self.filter_edge_beans = filter_edge_beans
70
+ self.edge_threshold = edge_threshold
71
+ self.min_bean_area = min_bean_area
72
+ self.max_bean_area = max_bean_area
73
+
74
+ # Check if model_path is a pre-trained model name
75
+ pretrained_models = ['maskrcnn_resnet50_fpn', 'maskrcnn_resnet50_fpn_v2']
76
+ use_pretrained = False
77
+
78
+ if model_path and str(model_path) in pretrained_models:
79
+ use_pretrained = True
80
+ print(f"Using pre-trained model: {model_path}")
81
+ # For pre-trained models, use num_classes=91 (COCO classes)
82
+ num_classes = 91
83
+ else:
84
+ # For custom trained models on beans
85
+ num_classes = 2
86
+
87
+ # Initialize model
88
+ self.model = BeanDetector(
89
+ num_classes=num_classes,
90
+ max_detections=max_detections,
91
+ pretrained=use_pretrained
92
+ )
93
+
94
+ # Load checkpoint if provided and not using pre-trained
95
+ if model_path and not use_pretrained:
96
+ self.load_model(model_path)
97
+
98
+ self.model.to(self.device)
99
+ self.model.set_eval_mode()
100
+
101
+ # Initialize converters and processors with optimized settings
102
+ self.mask_converter = MaskConverter(
103
+ simplify_epsilon=0.5,
104
+ use_detailed_contours=True,
105
+ smooth_polygons=smooth_polygons,
106
+ smoothing_factor=smoothing_factor
107
+ )
108
+ self.post_processor = PostProcessor(
109
+ confidence_threshold=confidence_threshold,
110
+ nms_threshold=nms_threshold
111
+ )
112
+
113
def load_model(self, model_path: Path) -> Dict[str, Any]:
    """Load trained model weights from *model_path* into ``self.model``.

    Args:
        model_path: Path to a model checkpoint (.pth or .safetensors).

    Returns:
        Checkpoint metadata (epoch, best loss, config) when the file exists
        and provides it; an empty dict otherwise.
    """
    # Use pathlib instead of os.path: Path(...) accepts both str and Path
    # input and matches the declared parameter type.
    if Path(model_path).exists():
        # Format handling (.pth vs .safetensors) is delegated to the model.
        return self.model.load_checkpoint(model_path, self.device)
    # A missing checkpoint is non-fatal by design: the caller keeps the
    # default-initialized weights (useful for smoke tests / pretrained runs).
    print(f"Model path {model_path} not found, using default initialization")
    return {}
131
+
132
def preprocess_image(self, image: Image.Image, target_size: Tuple[int, int] = (800, 800)) -> torch.Tensor:
    """Convert a PIL image to a model-ready tensor, padding up to *target_size*.

    Images smaller than the target are centered on a gray canvas instead of
    being resized, so bean geometry is never distorted; larger images pass
    through untouched.

    Args:
        image: Input PIL image.
        target_size: Minimum (height, width) the output tensor should have.

    Returns:
        A CHW float tensor with values in [0, 1].
    """
    tensor = F.to_tensor(image)
    _, height, width = tensor.shape
    target_h, target_w = target_size

    # Already large enough in both dimensions: nothing to do.
    if height >= target_h and width >= target_w:
        return tensor

    # Split any deficit evenly so the original content stays centered.
    deficit_h = max(0, target_h - height)
    deficit_w = max(0, target_w - width)
    left = deficit_w // 2
    top = deficit_h // 2
    right = deficit_w - left
    bottom = deficit_h - top

    # fill=0.5 pads with mid-gray, a neutral background for detection.
    return F.pad(tensor, padding=[left, top, right, bottom], fill=0.5)
169
+
170
def predict(
    self,
    image_path: Path,
    return_polygons: bool = True,
    return_masks: bool = True
) -> Dict[str, Any]:
    """Run prediction on a single image.

    Pipeline: load -> pad-preprocess -> forward pass -> undo padding on
    boxes/masks -> confidence filter -> (optional) NMS -> size filter ->
    (optional) edge filter -> count beans (label 1) and assemble results.

    Args:
        image_path: Path to input image
        return_polygons: Whether to return polygon representations
        return_masks: Whether to return binary masks

    Returns:
        Prediction results dictionary: counts, timing, and boxes/scores/
        labels restricted to beans, plus optional 'masks' and 'polygons'.
    """
    # Load image
    image = Image.open(image_path).convert("RGB")
    original_size = image.size  # (width, height)
    image_tensor = self.preprocess_image(image)

    # Track padding offsets for coordinate adjustment
    # (preprocess_image centers small images on a padded canvas, so the
    # same centering arithmetic recovers the offsets here).
    c, padded_h, padded_w = image_tensor.shape
    orig_w, orig_h = original_size
    pad_left = (padded_w - orig_w) // 2 if padded_w > orig_w else 0
    pad_top = (padded_h - orig_h) // 2 if padded_h > orig_h else 0

    # Run inference (timed; reported in the result dict)
    start_time = time.time()
    with torch.no_grad():
        predictions = self.model([image_tensor.to(self.device)])[0]
    inference_time = time.time() - start_time

    # Move to CPU so all downstream filtering/indexing is device-free
    for key in predictions:
        predictions[key] = predictions[key].cpu()

    # Adjust coordinates if padding was applied
    if pad_left > 0 or pad_top > 0:
        # Adjust bounding boxes (in-place shift back into original frame)
        if 'boxes' in predictions:
            predictions['boxes'][:, [0, 2]] -= pad_left  # x coordinates
            predictions['boxes'][:, [1, 3]] -= pad_top  # y coordinates

        # Crop masks to remove padding
        if 'masks' in predictions:
            # Masks shape: [N, 1, H, W]
            masks = predictions['masks']
            if pad_left > 0 or pad_top > 0:
                # Remove padding from masks
                masks = masks[:, :,
                              pad_top:pad_top + orig_h,
                              pad_left:pad_left + orig_w]
            predictions['masks'] = masks

    # Apply post-processing: drop detections below the confidence threshold
    filtered_predictions = self.post_processor.filter_predictions(
        predictions,
        self.confidence_threshold
    )

    # Apply NMS if requested ('mask' type suppresses on mask overlap when
    # masks are available, otherwise fall back to box NMS)
    if self.apply_nms and len(filtered_predictions['scores']) > 0:
        if self.nms_type == 'mask' and 'masks' in filtered_predictions:
            filtered_predictions = self.post_processor.apply_mask_nms(
                filtered_predictions['boxes'],
                filtered_predictions['scores'],
                filtered_predictions['labels'],
                filtered_predictions['masks'],
                iou_threshold=self.nms_threshold
            )
        else:
            filtered_predictions = self.post_processor.apply_nms(
                filtered_predictions['boxes'],
                filtered_predictions['scores'],
                filtered_predictions['labels'],
                filtered_predictions.get('masks'),
                nms_threshold=self.nms_threshold
            )

    # Apply size filtering (drop detections outside min/max bean area)
    if len(filtered_predictions['scores']) > 0 and 'masks' in filtered_predictions:
        size_keep = self.post_processor.filter_by_size(
            filtered_predictions['boxes'],
            filtered_predictions['masks'],
            min_area=self.min_bean_area,
            max_area=self.max_bean_area
        )
        for key in filtered_predictions:
            filtered_predictions[key] = filtered_predictions[key][size_keep]

    # Apply edge filtering if requested (drop beans touching the border)
    if self.filter_edge_beans and len(filtered_predictions['scores']) > 0 and 'masks' in filtered_predictions:
        edge_keep = self.post_processor.filter_edge_beans(
            filtered_predictions['boxes'],
            filtered_predictions['masks'],
            image.size[::-1],  # Convert (width, height) to (height, width)
            edge_threshold=self.edge_threshold
        )
        for key in filtered_predictions:
            filtered_predictions[key] = filtered_predictions[key][edge_keep]

    # Count beans (label 1)
    bean_indices = filtered_predictions['labels'] == 1
    bean_count = bean_indices.sum().item()

    # Prepare results ('total_detections' counts raw model outputs before
    # any filtering; 'filtered_detections' counts survivors of all filters)
    result = {
        'image_path': str(image_path),
        'image_size': image.size,
        'inference_time': inference_time,
        'total_detections': len(predictions['scores']),
        'filtered_detections': len(filtered_predictions['scores']),
        'bean_count': bean_count,
        'confidence_threshold': self.confidence_threshold,
        'boxes': filtered_predictions['boxes'][bean_indices].tolist(),
        'scores': filtered_predictions['scores'][bean_indices].tolist(),
        'labels': filtered_predictions['labels'][bean_indices].tolist()
    }

    # Add masks if requested
    # NOTE(review): assumes 'masks' survives every filter path above when
    # return_masks is True — confirm PostProcessor always returns masks.
    if return_masks:
        result['masks'] = filtered_predictions['masks'][bean_indices]

    # Convert to polygons if requested (only meaningful when masks exist)
    if return_polygons and return_masks:
        bean_masks = filtered_predictions['masks'][bean_indices]
        if len(bean_masks) > 0:
            polygons = self.mask_converter.masks_to_polygons(
                bean_masks,
                self.mask_threshold
            )
            result['polygons'] = polygons

    return result
305
+
306
def predict_batch(
    self,
    image_paths: List[Path],
    batch_size: int = 1,
    return_polygons: bool = True,
    return_masks: bool = True
) -> List[Dict[str, Any]]:
    """Run prediction over many images, one at a time.

    ``batch_size`` only controls how paths are grouped for iteration; each
    image is still processed individually, so results are identical for any
    chunk size (parameter kept for API compatibility).

    Args:
        image_paths: Image files to process.
        batch_size: Chunking granularity (no effect on output).
        return_polygons: Forwarded to :meth:`predict`.
        return_masks: Forwarded to :meth:`predict`.

    Returns:
        One result dict per input path; a failure yields a dict with
        'image_path' and 'error' keys instead of raising.
    """
    outputs: List[Dict[str, Any]] = []

    for path in image_paths:
        try:
            outputs.append(
                self.predict(
                    path,
                    return_polygons=return_polygons,
                    return_masks=return_masks
                )
            )
        except Exception as exc:
            # Keep going on per-image failures; record the error instead.
            print(f"Error processing {path}: {exc}")
            outputs.append({
                'image_path': str(path),
                'error': str(exc)
            })

    return outputs
346
+
347
def update_thresholds(
    self,
    confidence_threshold: Optional[float] = None,
    mask_threshold: Optional[float] = None,
    nms_threshold: Optional[float] = None
):
    """Update detection thresholds in place; ``None`` leaves a value as-is.

    Confidence and NMS thresholds are mirrored onto the post-processor so
    prediction-time filtering stays consistent with this object's state.

    Args:
        confidence_threshold: New confidence threshold
        mask_threshold: New mask threshold
        nms_threshold: New NMS threshold
    """
    updates = (
        ('confidence_threshold', confidence_threshold),
        ('mask_threshold', mask_threshold),
        ('nms_threshold', nms_threshold),
    )
    for name, value in updates:
        if value is None:
            continue
        setattr(self, name, value)
        # The mask threshold is applied at polygon-conversion time only,
        # so it is not mirrored onto the post-processor.
        if name != 'mask_threshold':
            setattr(self.post_processor, name, value)
src/bean_vision/models/__init__.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Model modules for Bean Vision."""
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from torchvision.models.detection import maskrcnn_resnet50_fpn
6
+ import torchvision
7
+ from typing import Optional
8
+ from pathlib import Path
9
+
10
+
11
class BeanDetector(nn.Module):
    """Mask R-CNN based bean detector used by the inference pipeline.

    Wraps torchvision's ``maskrcnn_resnet50_fpn`` with both prediction
    heads resized to ``num_classes`` and RPN/ROI detection budgets raised
    so images densely packed with small beans are not truncated.
    """

    def __init__(self, num_classes: int = 2, max_detections: int = 500, pretrained: bool = False):
        """Build the detector.

        Args:
            num_classes: Output classes including background (2 = background
                + bean; 91 when reusing the COCO-pretrained heads).
            max_detections: Maximum detections kept per image.
            pretrained: If True, initialize from torchvision's COCO weights.
        """
        super().__init__()

        # Enlarged RPN proposal budgets and a permissive box score threshold
        # (0.05): final confidence filtering happens downstream in inference.
        weights = "DEFAULT" if pretrained else None
        self.model = maskrcnn_resnet50_fpn(
            weights=weights,
            rpn_pre_nms_top_n_train=6000,
            rpn_pre_nms_top_n_test=3000,
            rpn_post_nms_top_n_train=4000,
            rpn_post_nms_top_n_test=2000,
            box_detections_per_img=max_detections,
            box_score_thresh=0.05
        )

        # Replace classifier head so its output matches num_classes.
        in_features = self.model.roi_heads.box_predictor.cls_score.in_features
        self.model.roi_heads.box_predictor = torchvision.models.detection.faster_rcnn.FastRCNNPredictor(
            in_features, num_classes
        )

        # Replace mask predictor head likewise.
        in_features_mask = self.model.roi_heads.mask_predictor.conv5_mask.in_channels
        hidden_layer = 256
        self.model.roi_heads.mask_predictor = torchvision.models.detection.mask_rcnn.MaskRCNNPredictor(
            in_features_mask, hidden_layer, num_classes
        )

    def forward(self, images, targets=None):
        """Delegate to the wrapped torchvision model (losses in train mode,
        detection dicts in eval mode)."""
        return self.model(images, targets)

    def set_eval_mode(self):
        """Set model to evaluation mode."""
        self.eval()

    def load_state_dict(self, state_dict, strict=True):
        """Load a state dict saved either from this wrapper or the inner model.

        Fix: the original override returned ``None``; it now returns the
        missing/unexpected-keys result, matching the ``nn.Module``
        ``load_state_dict`` contract (backward compatible — callers that
        ignored the return value are unaffected).
        """
        if any(k.startswith('model.') for k in state_dict.keys()):
            # Keys carry this wrapper's 'model.' prefix: load at this level.
            return super().load_state_dict(state_dict, strict=strict)
        # Bare torchvision-style keys: load into the wrapped model.
        return self.model.load_state_dict(state_dict, strict=strict)

    def load_checkpoint(self, checkpoint_path, device):
        """Load model checkpoint from .pth or .safetensors format.

        Args:
            checkpoint_path: Path to the checkpoint file.
            device: Device tensors are mapped to (used for .pth loading).

        Returns:
            Metadata dict with 'epoch', 'best_loss', 'config' when the
            checkpoint (or a sidecar .json for safetensors) provides it,
            otherwise an empty dict.
        """
        checkpoint_path = Path(checkpoint_path)

        if checkpoint_path.suffix == '.safetensors':
            # Safetensors stores tensors only; metadata lives in a sidecar
            # JSON file next to the weights.
            from safetensors.torch import load_file
            state_dict = load_file(checkpoint_path)
            self.load_state_dict(state_dict)

            metadata_path = checkpoint_path.with_suffix('.json')
            if metadata_path.exists():
                import json
                with open(metadata_path, 'r') as f:
                    metadata = json.load(f)
                return {
                    'epoch': metadata.get('epoch', 0),
                    'best_loss': metadata.get('best_val_loss', 0.0),
                    'config': metadata.get('config', {})
                }
            return {}

        # .pth path. SECURITY NOTE: weights_only=False unpickles arbitrary
        # objects — only load checkpoints from trusted sources.
        checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=False)

        if isinstance(checkpoint, dict):
            # Support the historical checkpoint layouts this project produced.
            if 'model_state_dict' in checkpoint:
                self.load_state_dict(checkpoint['model_state_dict'])
            elif 'state_dict' in checkpoint:
                self.load_state_dict(checkpoint['state_dict'])
            else:
                # Assume the dict is the state dict itself.
                self.load_state_dict(checkpoint)

            return {
                'epoch': checkpoint.get('epoch', 0),
                'best_loss': checkpoint.get('best_loss', 0.0),
                'config': checkpoint.get('config', {})
            }

        # Direct (non-dict) state dict object.
        self.load_state_dict(checkpoint)
        return {}
105
+
106
+
107
# Keep original import for other uses
from .model import BeanModel

# Public API of this package: the training-oriented BeanModel re-export plus
# the inference-compatible BeanDetector wrapper defined above.
__all__ = ["BeanModel", "BeanDetector"]
src/bean_vision/models/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (5.33 kB). View file