afshin-dini commited on
Commit
623606b
·
0 Parent(s):

transfer the repo

Browse files
.bumpversion.cfg ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [bumpversion]
2
+ current_version = 0.2.0
3
+ commit = False
4
+ tag = False
5
+
6
+ [bumpversion:file:pyproject.toml]
7
+ search = version = "{current_version}"
8
+ replace = version = "{new_version}"
9
+
10
+ [bumpversion:file:src/egg_segmentation_size/__init__.py]
11
+ search = __version__ = "{current_version}"
12
+ replace = __version__ = "{new_version}"
13
+
14
+ [bumpversion:file:tests/test_egg_segmentation_size.py]
15
+ search = __version__ == "{current_version}"
16
+ replace = __version__ == "{new_version}"
.gitattributes ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ *.jpg filter=lfs diff=lfs merge=lfs -text
2
+ *.pt filter=lfs diff=lfs merge=lfs -text
3
+ results/sample1.jpg filter=lfs diff=lfs merge=lfs -text
4
+ tests/test_data/sample1.jpg filter=lfs diff=lfs merge=lfs -text
5
+ src/egg_segmentation_size/model/egg_segmentor.pt filter=lfs diff=lfs merge=lfs -text
.github/workflows/check-file-size.yml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Check File Size
2
+
3
+ on:
4
+ pull_request:
5
+ branches: [master] # Runs on PRs to the master branch
6
+ workflow_dispatch: # Allows manual triggering from the Actions tab
7
+
8
+ jobs:
9
+ check-file-size:
10
+ runs-on: ubuntu-latest
11
+ steps:
12
+ - name: Checkout code
13
+ uses: actions/checkout@v4
14
+
15
+ - name: Get list of changed files
16
+ id: changed-files
17
+ uses: tj-actions/changed-files@v39
18
+
19
+ - name: Check file sizes
20
+ run: |
21
+ MAX_SIZE=10485760 # 10MB in bytes
22
+ ERROR=0
23
+ for file in ${{ steps.changed-files.outputs.all_changed_files }}; do
24
+ if [ -f "$file" ]; then
25
+ FILE_SIZE=$(stat -c%s "$file")
26
+ if [ $FILE_SIZE -gt $MAX_SIZE ]; then
27
+ echo "❌ File $file exceeds the 10MB limit (Size: $FILE_SIZE bytes)"
28
+ ERROR=1
29
+ fi
30
+ fi
31
+ done
32
+
33
+ if [ $ERROR -eq 1 ]; then
34
+ echo "❌ Some files are too large! Please use Git LFS for large files."
35
+ exit 1 # Blocks the PR
36
+ fi
37
+
38
+ - name: Success Message
39
+ if: success()
40
+ run: echo "✅ All files are within the size limit."
.github/workflows/main.yml ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync to Hugging Face hub
2
+ on:
3
+ push:
4
+ branches:
5
+ - '**'
6
+
7
+ # to run this workflow manually from the Actions tab
8
+ workflow_dispatch:
9
+
10
+ jobs:
11
+ sync-to-hub:
12
+ runs-on: ubuntu-latest
13
+ steps:
14
+ - uses: actions/checkout@v3
15
+ with:
16
+ fetch-depth: 0
17
+ lfs: true
18
+
19
+ - name: Install Git LFS
20
+ run: |
21
+ git lfs install
22
+
23
+ - name: Configure Git user
24
+ run: |
25
+ git config --global user.name "industoai"
26
+ git config --global user.email "admin@industoai.com"
27
+
28
+ - name: Track .pt files with Git LFS
29
+ run: |
30
+ git lfs track "*.pt"
31
+ git lfs ls-files
32
+
33
+ - name: Add remote
34
+ env:
35
+ HF: ${{ secrets.HG }}
36
+ run: git remote add space https://industoai:$HF@huggingface.co/spaces/industoai/Deep-Egg-Segmentation-and-Sizing
37
+
38
+ - name: Push to hub
39
+ env:
40
+ HF: ${{ secrets.HG }}
41
+ run: |
42
+ git lfs ls-files
43
+ git lfs push --all space
44
+ git push --force space ${{ github.ref }}:main
.gitignore ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Pycharm files
2
+ .idea/
3
+ src/egg_segmentation_size/__pycache__/
4
+
5
+ # poetry
6
+ poetry.lock
7
+
8
+ # data files
9
+ src/egg_segmentation_size/data/
10
+
11
+ # Unit test files
12
+ .coverage
13
+ .coverage.*
14
+ .cache
15
+ .pytest_cache/
16
+ tests/__pycache__/
17
+
18
+ # mypy files
19
+ .mypy_cache/
20
+
21
+ # Probable environments files
22
+ .env
23
+ .venv
24
+ env/
25
+ venv/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # See https://pre-commit.com for more information
2
+ # See https://pre-commit.com/hooks.html for more hooks
3
+ default_language_version:
4
+ python: python3
5
+ repos:
6
+ - repo: https://github.com/pre-commit/pre-commit-hooks
7
+ rev: v4.5.0 # the release, git tag, or commit you want to use
8
+ hooks:
9
+ - id: check-toml
10
+ - id: check-yaml
11
+ - id: check-json
12
+ - id: end-of-file-fixer
13
+ - id: trailing-whitespace
14
+ - id: no-commit-to-branch
15
+ - id: check-executables-have-shebangs
16
+ - id: check-added-large-files
17
+ args: ["--maxkb=2000"]
18
+ - id: check-case-conflict
19
+ - id: check-merge-conflict
20
+ - id: pretty-format-json
21
+ args:
22
+ - --autofix
23
+ - id: check-symlinks
24
+ - id: check-ast
25
+ - id: detect-private-key
26
+ - repo: https://github.com/psf/black
27
+ rev: 24.1.1
28
+ hooks:
29
+ - id: black
30
+ language: python
31
+ - repo: https://github.com/pre-commit/mirrors-mypy
32
+ rev: v1.8.0
33
+ hooks:
34
+ - id: mypy
35
+ language: system
36
+ args: [--strict, --ignore-missing-imports]
37
+ - repo: https://github.com/pycqa/pylint
38
+ rev: v3.0.3
39
+ hooks:
40
+ - id: pylint
41
+ language: system
42
+ - repo: https://github.com/Lucas-C/pre-commit-hooks
43
+ rev: v1.5.4
44
+ hooks:
45
+ - id: forbid-crlf
46
+ - id: remove-crlf
47
+ - id: forbid-tabs
48
+ - id: remove-tabs
49
+ - repo: https://github.com/PyCQA/bandit
50
+ rev: 1.7.7
51
+ hooks:
52
+ - id: bandit
53
+ args: ["--skip=B101"]
54
+ - repo: https://github.com/Lucas-C/pre-commit-hooks-markup
55
+ rev: v1.0.1
56
+ hooks:
57
+ - id: rst-linter
58
+ - repo: https://github.com/Yelp/detect-secrets
59
+ rev: v1.4.0
60
+ hooks:
61
+ - id: detect-secrets
62
+ language: python
63
+ exclude: "poetry.lock"
64
+ # args: ['--baseline', '.secrets.baseline']
65
+ - repo: https://github.com/shellcheck-py/shellcheck-py
66
+ rev: v0.9.0.6
67
+ hooks:
68
+ - id: shellcheck
69
+ args: ["--external-sources"]
70
+ - repo: https://github.com/python-poetry/poetry
71
+ rev: '1.7.1'
72
+ hooks:
73
+ - id: poetry-check
74
+ - id: poetry-lock
75
+ args: ["--no-update"]
Dockerfile ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## syntax=docker/dockerfile:1.1.7-experimental
2
+
3
+ ################
4
+ # Base builder #
5
+ ################
6
+ FROM python:3.10-bookworm as base_build
7
+
8
+ ENV \
9
+ # locale environment variables
10
+ LC_ALL=C.UTF-8 \
11
+ # python environment variables
12
+ PYTHONFAULTHANDLER=1 \
13
+ PYTHONUNBUFFERED=1 \
14
+ PYTHONHASHSEED=random \
15
+ # pip environmental variables
16
+ PIP_NO_CACHE_DIR=off \
17
+ PIP_DISABLE_PIP_VERSION_CHECK=on \
18
+ PIP_DEFAULT_TIMEOUT=100 \
19
+ # poetry version
20
+ POETRY_VERSION=1.5.0
21
+
22
+ # Install requirements
23
+ RUN apt-get update && apt-get install -y \
24
+ curl \
25
+ git \
26
+ bash \
27
+ build-essential \
28
+ libffi-dev \
29
+ libssl-dev \
30
+ tini \
31
+ openssh-client \
32
+ cargo \
33
+ musl-dev \
34
+ && apt-get autoremove -y \
35
+ && rm -rf /var/lib/apt/lists/* \
36
+ # github ssh key setting
37
+ && mkdir -p -m 0700 ~/.ssh && ssh-keyscan github.com | sort > ~/.ssh/known_hosts \
38
+ # Installing poetry and set the PATH
39
+ && curl -sSL https://install.python-poetry.org | python3 - \
40
+ && echo 'export PATH="/root/.local/bin:$PATH"' >>/root/.profile \
41
+ && export PATH="/root/.local/bin:$PATH" \
42
+ && true
43
+ SHELL ["/bin/bash", "-lc"]
44
+
45
+ # Copy poetry lock and pyproject config files to the container
46
+ WORKDIR /pysetup
47
+ COPY ./poetry.lock ./pyproject.toml /pysetup/
48
+ # Install pip/wheel/virtualenv and build the wheels based on the poetry lock
49
+ RUN --mount=type=ssh pip3 install wheel virtualenv poetry-plugin-export \
50
+ && poetry export -f requirements.txt --without-hashes -o /tmp/requirements.txt \
51
+ && pip3 wheel --wheel-dir=/tmp/wheelhouse --trusted-host 172.17.0.1 --find-links=http://172.17.0.1:3141/debian/ -r /tmp/requirements.txt \
52
+ && virtualenv /.venv && source /.venv/bin/activate && echo 'source /.venv/bin/activate' >>/root/.profile \
53
+ && pip3 install --no-deps --trusted-host 172.17.0.1 --find-links=http://172.17.0.1:3141/debian/ --find-links=/tmp/wheelhouse/ /tmp/wheelhouse/*.whl \
54
+ && true
55
+
56
+
57
+ ###########################
58
+ # Production base builder #
59
+ ###########################
60
+ FROM base_build as production_build
61
+ # Copy entrypoint script to the container and src files to the app directory
62
+ COPY ./docker/entrypoint.sh /docker-entrypoint.sh
63
+ COPY . /app/
64
+ WORKDIR /app
65
+ # Build the wheel packages with poetry and add them to the wheelhouse
66
+ RUN --mount=type=ssh source /.venv/bin/activate \
67
+ && poetry build -f wheel --no-interaction --no-ansi \
68
+ && cp dist/*.whl /tmp/wheelhouse \
69
+ && chmod a+x /docker-entrypoint.sh \
70
+ && true
71
+
72
+
73
+
74
+ ########################
75
+ # Production Container #
76
+ ########################
77
+ FROM python:3.10-bookworm as production
78
+ COPY --from=production_build /tmp/wheelhouse /tmp/wheelhouse
79
+ COPY --from=production_build /docker-entrypoint.sh /docker-entrypoint.sh
80
+ WORKDIR /app
81
+ # Install system level deps for running the package and install the wheels we built in the previous step.
82
+ RUN --mount=type=ssh apt-get update && apt-get install -y \
83
+ bash \
84
+ libffi8 \
85
+ libgl1 \
86
+ tini \
87
+ && apt-get autoremove -y \
88
+ && rm -rf /var/lib/apt/lists/* \
89
+ && chmod a+x /docker-entrypoint.sh \
90
+ && WHEELFILE=`echo /tmp/wheelhouse/egg_segmentation_size-*.whl` \
91
+ && pip3 install --trusted-host 172.17.0.1 --find-links=http://172.17.0.1:3141/debian/ --find-links=/tmp/wheelhouse/ "$WHEELFILE"[all] \
92
+ && rm -rf /tmp/wheelhouse/ \
93
+ && true
94
+ ENTRYPOINT ["/usr/bin/tini", "--", "/docker-entrypoint.sh"]
95
+
96
+
97
+
98
+ ############################
99
+ # Development base builder #
100
+ ############################
101
+ FROM base_build as development_build
102
+ # Copy src to app directory
103
+ COPY . /app
104
+ WORKDIR /app
105
+ # Install dependencies from poetry lock
106
+ RUN --mount=type=ssh source /.venv/bin/activate \
107
+ && apt-get update && apt-get install -y libgl1 \
108
+ && export PIP_FIND_LINKS=http://172.17.0.1:3141/debian/ \
109
+ && export PIP_TRUSTED_HOST=172.17.0.1 \
110
+ && pip3 install nvidia-cublas-cu12 nvidia-cusparse-cu12 triton nvidia-nccl-cu12 nvidia-cudnn-cu12 nvidia-cufft-cu12 nvidia-cusolver-cu12 \
111
+ && poetry install --no-interaction --no-ansi \
112
+ && true
113
+
114
+
115
+
116
+ ###################
117
+ # Tests Container #
118
+ ###################
119
+ FROM development_build as test
120
+ RUN --mount=type=ssh source /.venv/bin/activate \
121
+ && chmod a+x docker/*.sh \
122
+ && docker/pre_commit_init.sh \
123
+ && true
124
+ ENTRYPOINT ["/usr/bin/tini", "--", "docker/entrypoint-test.sh"]
125
+
126
+
127
+ #########################
128
+ # Development Container #
129
+ #########################
130
+ FROM development_build as development
131
+ RUN apt-get update && apt-get install -y zsh \
132
+ && sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" \
133
+ && echo "if [ \"\$NO_WHEELHOUSE\" = \"1\" ]" >>/root/.profile \
134
+ && echo "then" >>/root/.profile \
135
+ && echo " echo \"Wheelhouse disabled\"" >>/root/.profile \
136
+ && echo "else">>/root/.profile \
137
+ && echo " export PIP_TRUSTED_HOST=172.17.0.1" >>/root/.profile \
138
+ && echo " export PIP_FIND_LINKS=http://172.17.0.1:3141/debian/" >>/root/.profile \
139
+ && echo "fi" >>/root/.profile \
140
+ && echo "source /root/.profile" >>/root/.zshrc \
141
+ && pip3 install git-up \
142
+ && true
143
+ ENTRYPOINT ["/bin/zsh", "-l"]
README.md ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Deep Egg Segmentation And Sizing
3
+ emoji: 🥚📏
4
+ colorFrom: gray
5
+ colorTo: indigo
6
+ sdk: streamlit
7
+ sdk_version: 1.43.2
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ short_description: Deep model to segment eggs in an egg shell and specify sizes
12
+ ---
13
+ # egg-segmentation-size
14
+ This repo segments the eggs in images and gives an estimation of their volume in cm3 and also area in pixels.
15
+ For segmentation purposes, we used yolo11-seg model which returns the segmentation mask of the eggs in the image.
16
+
17
+ For size estimation, we calculate the area of the mask (in pixel) by using **shoelace** method.
18
+ The volume of the eggs are estimated by knowing the fact that the eggs appear as ellipsoids or circles.
19
+ If the images are taken exactly from above the eggs, eggs would appear as circles and the volume can be calculated by using the formula of the volume of a sphere
20
+ which is `V = 4/3 * pi * r^3` where `r` is the radius. If the eggs appear as ellipsoids, the volume would be `V = 4/3 * pi * r1 * r2^2` where `r1` and `r2` are the major and minor radius of the ellipsoid.
21
+ We can measure those radius by related opencv functions.
22
+
23
+ **NOTE:** Since the radius measurements are in pixel, we should convert them to be used for volume estimation. To do so, we need a scale_factor which depends on the
24
+ camera characteristics. This scale factor can be defined as `scale_factor = DPI / 2.54` where `DPI` is the dots per inch of the camera. For my camera it is `11.61`
25
+ but one can find it easily for other cameras. The result for the test sample are based on my camera specifications.
26
+
27
+ ## Dataset
28
+ A dataset from real application is collected and labeled manually in YOLO-format in order to be used for training and validation purposes.
29
+ This dataset is available [here](https://huggingface.co/datasets/industoai/Egg-Instance-Segmentation).
30
+ The eggs are collected within different shell types and colours (the egg-shell might be transparent plastic, bright/dark carton, etc.) in order
31
+ to make the model robust to different background condition that might be encountered in real applications.
32
+ Right now, two classes exist in this dataset:
33
+ - White eggs
34
+ - Brown eggs
35
+
36
+ The segmented areas of the eggs are also specified in the dataset with polygons.
37
+ We try to make this dataset more diverse and rich by adding more classes and more images in the future.
38
+
39
+
40
+ ### Dataset YOLO-format Structure
41
+ The [dataset](https://huggingface.co/datasets/industoai/Egg-Instance-Segmentation) is prepared in the YOLO format to make it easier to use.
42
+ This dataset is split into training and validation sets. The structure of the dataset to be used in YOLO models are as following:
43
+ ```
44
+ data/
45
+ ├── images/
46
+ │ ├── train/
47
+ │ │ ├── sample1.jpg
48
+ │ │ ├── ...
49
+ │ │ ├── sample49.jpg
50
+ │ ├── val/
51
+ │ │ ├── sample50.jpg
52
+ │ │ ├── sample51.jpg
53
+ ├── labels/
54
+ │ ├── train/
55
+ │ │ ├── sample1.txt
56
+ │ │ ├── ...
57
+ │ │ ├── sample49.txt
58
+ │ ├── val/
59
+ │ │ ├── sample50.txt
60
+ │ │ ├── sample51.txt
61
+ ├── data.yaml
62
+ ├── train.txt
63
+ ├── val.txt
64
+ ```
65
+ It is good to mention that although the number of images are not too much at the moment, there are many different eggs in each image which increases the
66
+ number of samples used in the training and validation stages. The `txt` files in the labels folders contain the polygon boxes around each egg in the images as well as
67
+ the class of egg which is white/brown.
68
+
69
+ The `train.txt` and `val.txt` files contain the path to the images in the training and validation sets.
70
+ The `data.yaml` file contains the class names, the path to the training and validation sets, and also the path to the parent `data` directory.
71
+
72
+ **NOTE:** For training, it is important to change the path to **absolute path** of the main directory where the data is located (In the above tree-structure it should be the absolute path to `data` directory.)
73
+
74
+ ## How to Use Locally
75
+ This repo can be used for fine-tuning the YOLO model to segment eggs and measure eggs volumes and also can be used for testing purposes with the current fine-tuned model.
76
+ In order to use the model for training/testing purposes locally, one can first create a virtual environment and then install the requirements
77
+ by running the `poetry install` command (Install poetry if you do not have it in your system from [here](https://python-poetry.org/docs/#installing-with-pipx).)
78
+
79
+ ### Fine-Tuning
80
+ YOLO model is fine-tuned with the collected dataset. In order to fine-tune the model with other egg classes or repeat the whole process,
81
+ one can clone this repo and download the dataset from [here](https://huggingface.co/datasets/industoai/Egg-Instance-Segmentation) and put in the `src/egg_segmentation_size/data` directory.
82
+ Then for training or fine-tuning the model, one can run the following command:
83
+ ```bash
84
+ egg_segmentation_size -vv train --conf_path src/egg_segmentation_size/data/data.yaml --img_resize 640 --batch_size 16 --epochs 100 --device cuda
85
+ ```
86
+
87
+ ### Inference
88
+ The fine-tuned model can be used for inference purposes. The model is provided in the `src/egg_segmentation_size/model` directory.
89
+ By uploading and using the model, one can segment white/brown eggs and measure their volumes based on the camera scale factor.
90
+ The model can be used with the following command:
91
+ ```bash
92
+ egg_segmentation_size -vv infer --model_path src/egg_segmentation_size/model/egg_segmentor.pt --data_path ./tests/test_data --result_path ./results --scale_factor 11.61
93
+ ```
94
+ It is good to mention that, the `data_path` could be a directory containing images or a single image. The `result_path` is the directory where the results are saved.
95
+
96
+ As an example, white and brown eggs are segmented properly in the following image:
97
+ <p align="center">
98
+ <img width="800" src="./results/sample1.jpg" alt="Egg Segmentation">
99
+ </p>
100
+
101
+
102
+ ## Hugging Face Deployment
103
+ The repository is also deployed in [hugging face](https://huggingface.co/spaces/industoai/Deep-Egg-Segmentation-and-Sizing) in which one can upload images,
104
+ and then the segmented white/brown eggs and their volumes will be shown.
105
+
106
+ It is good to mention that you can also run the demo application locally by running the following command:
107
+ ```shell
108
+ streamlit run app.py
109
+ ```
110
+ and then open the browser and go to the address `http://localhost:8501`.
111
+
112
+
113
+ ## How to load Trained Model from Hugging Face
114
+ The trained model is also uploaded to [hugging face](https://huggingface.co/industoai/Egg-Instance-Segmentation) from which one can use it as following:
115
+ ```shell
116
+ from huggingface_hub import hf_hub_download
117
+ from ultralytics import YOLO
118
+
119
+ model_path = hf_hub_download(repo_id="industoai/Egg-Instance-Segmentation", filename="model/egg_segmentor.pt")
120
+ model = YOLO(model_path)
121
+ result = model("path/to/image")
122
+ ```
123
+ Then, the uploaded model can be used for different purposes.
124
+
125
+
126
+ ## Docker Container
127
+ To run the docker with ssh, do the following first and then, based on your need, select the test, development, or production container:
128
+ ```shell
129
+ export DOCKER_BUILDKIT=1
130
+ export DOCKER_SSHAGENT="-v $SSH_AUTH_SOCK:$SSH_AUTH_SOCK -e SSH_AUTH_SOCK"
131
+ ```
132
+ ### Test Container
133
+ This container is used for testing purposes while it runs the test
134
+ ```shell
135
+ docker build --progress plain --ssh default --target test -t egg_segmentation_docker:test .
136
+ docker run -it --rm -v "$(pwd):/app" $(echo $DOCKER_SSHAGENT) egg_segmentation_docker:test
137
+ ```
138
+
139
+ ### Development Container
140
+ This container can be used for development purposes:
141
+ ```shell
142
+ docker build --progress plain --ssh default --target development -t egg_segmentation_docker:development .
143
+ docker run -it --rm -v "$(pwd):/app" -v /tmp:/tmp $(echo $DOCKER_SSHAGENT) egg_segmentation_docker:development
144
+ ```
145
+
146
+ ### Production Container
147
+ This container can be used for production purposes:
148
+ ```shell
149
+ docker build --progress plain --ssh default --target production -t egg_segmentation_docker:production .
150
+ docker run -it --rm -v "$(pwd):/app" -v /tmp:/tmp $(echo $DOCKER_SSHAGENT) egg_segmentation_docker:production egg_segmentation_size -vv infer --model_path src/egg_segmentation_size/model/egg_segmentor.pt --data_path ./tests/test_data --result_path ./results --scale_factor 11.61
151
+ ```
152
+
153
+
154
+
155
+
156
+ ## How to Develop
157
+ Do the following only once after creating your project:
158
+ - Init the git repo with `git init`.
159
+ - Add files with `git add .`.
160
+ - Then `git commit -m 'initialize the project'`.
161
+ - Add remote url with `git remote add origin REPO_URL`.
162
+ - Then `git branch -M master`.
163
+ - `git push origin master`.
164
+ Then create a branch with `git checkout -b BRANCH_NAME` for further developments.
165
+ - Install poetry if you do not have it in your system from [here](https://python-poetry.org/docs/#installing-with-pipx).
166
+ - Create a virtual env preferably with virtualenv wrapper and `mkvirtualenv -p $(which python3.10) ENVNAME`.
167
+ - Then `git add poetry.lock`.
168
+ - Then `pre-commit install`.
169
+ - For applying changes use `pre-commit run --all-files`.
app.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This is a demo for running the egg segmentation and sizing using streamlit library"""
2
+
3
+ from dataclasses import dataclass, field
4
+ from pathlib import Path
5
+ import tempfile
6
+
7
+ import streamlit as st
8
+ import pandas as pd
9
+ from PIL import Image
10
+
11
+
12
+ from src.egg_segmentation_size.segmentor import EggSegmentorInference
13
+
14
+
15
@dataclass
class DemoEggSegmentationSizing:
    """Class for running the egg segmentation and sizing app using Streamlit."""

    # Path of the image to process; assigned in upload_image() — either a
    # temp copy of the uploaded file or the bundled sample image.
    image: str = field(init=False)
    # Pixel-to-cm conversion factor (factor = DPI / 2.54); user-adjustable in the UI.
    scale_factor: float = field(default=11.61)

    def upload_image(self) -> None:
        """Upload an image from the streamlit page."""
        uploaded_file = st.file_uploader(
            "Upload an image or use the default one...", type=["jpg", "png", "jpeg"]
        )
        if uploaded_file is not None:
            # Persist the upload to disk because inference expects a file
            # path. delete=False keeps the file after the context closes;
            # NOTE(review): the temp file is never cleaned up afterwards.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_file:
                tmp_file.write(uploaded_file.getbuffer())
                self.image = tmp_file.name
        else:
            # Fall back to the bundled sample when nothing was uploaded.
            self.image = "tests/test_data/sample1.jpg"
        st.image(
            Image.open(self.image),
            caption="Original/Uploaded Image",
            use_container_width=True,
        )
        self.scale_factor = st.number_input(
            "Choose the scale factor based on your camera for volume calculation as `factor=DPI/2.54` "
            "(For [this dataset](https://huggingface.co/datasets/afshin-dini/Egg-Instance-Segmentation) is 11.61):",
            value=11.61,
            step=0.01,
        )

    def process_image(self) -> None:
        """Process the image for the egg segmentation and sizing."""
        # Everything runs only when the user clicks the button; Streamlit
        # re-executes the whole script on every interaction.
        if st.button("Segment/Size Eggs"):
            inferer = EggSegmentorInference(
                model_path=Path("./src/egg_segmentation_size/model/egg_segmentor.pt"),
                result_path="",  # empty: results are rendered, not written to disk
                scale_factor=self.scale_factor,
            )
            segmentations = inferer.inference(data_path=self.image)

            result_image = inferer.result_images(segmentations)
            st.markdown("<h3>Segmented Results</h3>", unsafe_allow_html=True)
            st.image(
                result_image[0], caption="Segmented Eggs", use_container_width=True
            )

            res = inferer.results_detail(segmentations)

            # Flatten the per-image detection details into table rows.
            # NOTE(review): assumed keys ("class", "areas in pixel",
            # "volume in cm3") come from EggSegmentorInference.results_detail —
            # confirm against that implementation.
            extracted_data = []
            if res:
                for key, val in res.items():
                    for detection in val:
                        extracted_data.append(
                            {
                                "Image": key,
                                "Type": detection["class"],
                                "Area in pixel": detection["areas in pixel"],
                                "Volume in cm3": detection["volume in cm3"],
                            }
                        )
            extracted_data = pd.DataFrame(extracted_data).round(2)

            st.markdown('<div class="center-container">', unsafe_allow_html=True)
            st.markdown(
                "<h3>Detailed Information of Segmentations</h3>", unsafe_allow_html=True
            )
            st.markdown(
                """
                <style>
                table {
                    width: 100%;
                }
                th, td {
                    text-align: center !important;
                }
                </style>
                """,
                unsafe_allow_html=True,
            )
            st.table(extracted_data)
            st.markdown("</div>", unsafe_allow_html=True)

    def design_page(self) -> None:
        """Design the streamlit page for the egg detector and counter."""
        st.title("Egg segmentor and sizer")
        self.upload_image()
        self.process_image()
102
+
103
+
104
# Streamlit re-executes this module top-to-bottom on every user interaction,
# so the page is deliberately built at import time (no __main__ guard).
demo = DemoEggSegmentationSizing()
demo.design_page()
docker/entrypoint-test.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash -l
# Entrypoint for the test container: with no arguments it runs the full
# check suite (pre-commit + pytest); with arguments it execs them instead.
set -e
if [ "$#" -eq 0 ]; then
    # Kill cache, pytest complains about it if running local and docker tests in mapped volume.
    # Fix: the original appended a stray '{}' after 'rm -rf' — xargs already
    # appends the paths, so '{}' was passed to rm as a literal file name
    # (only silently ignored thanks to -f). '--' guards against odd names.
    find tests -type d -name '__pycache__' -print0 | xargs -0 rm -rf --
    # Make sure the service itself is installed
    poetry install
    # Make sure pre-commit checks were not missed and run tests
    git config --global --add safe.directory /app
    poetry run pre-commit install
    pre-commit run --all-files
    pytest -v --junitxml=pytest.xml tests/
else
    exec "$@"
fi
docker/entrypoint.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
#!/bin/bash -l
# Production entrypoint: exec whatever command was supplied, or fall back to
# printing the CLI help when the container is started without arguments.
set -e
if [ "$#" -gt 0 ]; then
    exec "$@"
else
    exec egg_segmentation_size --help
fi
docker/pre_commit_init.sh ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash -l
# Bootstrap pre-commit inside the container: pre-commit needs a git work
# tree, so create a throwaway repo when the build context has none.
if ! [ -d .git ]; then
    git init
    git checkout -b precommit_init
    git add .
fi
# Only fail the build from this point on — the git bootstrap above is best-effort.
set -e
poetry run pre-commit install
# Skip the poetry-lock hook for this bootstrap run.
SKIP="poetry-lock" poetry run pre-commit run --all-files
pyproject.toml ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.poetry]
2
+ name = "egg_segmentation_size"
3
+ version = "0.2.0"
4
+ description = "This repo segments the eggs in images and gives an estimation for their sizes"
5
+ authors = ["Afshin Dini <Afshin Dini>"]
6
+ readme = "README.md"
7
+ packages = [{include = "egg_segmentation_size", from = "src"}]
8
+
9
+ [tool.poetry.scripts]
10
+ egg_segmentation_size = "egg_segmentation_size.main:egg_segmentation_size_cli"
11
+
12
+ [tool.pylint.format]
13
+ max-line-length=150 # This defines the maximum number of characters on a single line in pylint
14
+
15
+ [tool.pylint.design]
16
+ max-attributes=10
17
+ max-positional-arguments=6
18
+ max-args=6
19
+
20
+ [tool.pylint.messages_control]
21
+ disable=["fixme"]
22
+
23
+ [tool.pylint.similarities]
24
+ min-similarity-lines = 8 # Minimum lines number of a similarity.
25
+ ignore-imports = true # Ignore imports when computing similarities.
26
+
27
+ [tool.pytest.ini_options]
28
+ junit_family="xunit2"
29
+ addopts="--cov=egg_segmentation_size --cov-fail-under=65 --cov-branch"
30
+ asyncio_mode="strict"
31
+
32
+
33
+ [tool.coverage.run]
34
+ omit = ["tests/*"]
35
+ branch = true
36
+
37
+
38
+ [tool.poetry.dependencies]
39
+ python = "^3.10"
40
+ click = "^8.1.7"
41
+ ultralytics = "^8.3.94"
42
+ streamlit = "^1.43.2"
43
+ opencv-python = "^4.11.0.86"
44
+ pandas = "^2.2.3"
45
+
46
+
47
+ [tool.poetry.group.dev.dependencies]
48
+ pytest = "^8.3.3"
49
+ coverage = "^7.6"
50
+ pytest-cov = "^6.0"
51
+ pylint = "^3.3.1"
52
+ black = "^24.10.0"
53
+ mypy = "^1.13.0"
54
+ bump2version = "^1.0.1"
55
+ bandit = "^1.7.10"
56
+ pre-commit = "^4.0.1"
57
+ detect-secrets = "^1.5"
58
+
59
+ [build-system]
60
+ requires = ["poetry-core>=1.5.0"]
61
+ build-backend = "poetry.core.masonry.api"
requirements.txt ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ altair==5.5.0 ; python_version >= "3.10" and python_version < "4.0"
2
+ attrs==25.3.0 ; python_version >= "3.10" and python_version < "4.0"
3
+ blinker==1.9.0 ; python_version >= "3.10" and python_version < "4.0"
4
+ cachetools==5.5.2 ; python_version >= "3.10" and python_version < "4.0"
5
+ certifi==2025.1.31 ; python_version >= "3.10" and python_version < "4.0"
6
+ charset-normalizer==3.4.1 ; python_version >= "3.10" and python_version < "4.0"
7
+ click==8.1.8 ; python_version >= "3.10" and python_version < "4.0"
8
+ colorama==0.4.6 ; python_version >= "3.10" and python_version < "4.0" and platform_system == "Windows"
9
+ contourpy==1.3.1 ; python_version >= "3.10" and python_version < "4.0"
10
+ cycler==0.12.1 ; python_version >= "3.10" and python_version < "4.0"
11
+ filelock==3.18.0 ; python_version >= "3.10" and python_version < "4.0"
12
+ fonttools==4.56.0 ; python_version >= "3.10" and python_version < "4.0"
13
+ fsspec==2025.3.0 ; python_version >= "3.10" and python_version < "4.0"
14
+ gitdb==4.0.12 ; python_version >= "3.10" and python_version < "4.0"
15
+ gitpython==3.1.44 ; python_version >= "3.10" and python_version < "4.0"
16
+ idna==3.10 ; python_version >= "3.10" and python_version < "4.0"
17
+ jinja2==3.1.6 ; python_version >= "3.10" and python_version < "4.0"
18
+ jsonschema-specifications==2024.10.1 ; python_version >= "3.10" and python_version < "4.0"
19
+ jsonschema==4.23.0 ; python_version >= "3.10" and python_version < "4.0"
20
+ kiwisolver==1.4.8 ; python_version >= "3.10" and python_version < "4.0"
21
+ markupsafe==3.0.2 ; python_version >= "3.10" and python_version < "4.0"
22
+ matplotlib==3.10.1 ; python_version >= "3.10" and python_version < "4.0"
23
+ mpmath==1.3.0 ; python_version >= "3.10" and python_version < "4.0"
24
+ narwhals==1.32.0 ; python_version >= "3.10" and python_version < "4.0"
25
+ networkx==3.4.2 ; python_version >= "3.10" and python_version < "4.0"
26
+ numpy==2.1.1 ; python_version >= "3.10" and python_version < "4.0"
27
+ nvidia-cublas-cu12==12.4.5.8 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
28
+ nvidia-cuda-cupti-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
29
+ nvidia-cuda-nvrtc-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
30
+ nvidia-cuda-runtime-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
31
+ nvidia-cudnn-cu12==9.1.0.70 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
32
+ nvidia-cufft-cu12==11.2.1.3 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
33
+ nvidia-curand-cu12==10.3.5.147 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
34
+ nvidia-cusolver-cu12==11.6.1.9 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
35
+ nvidia-cusparse-cu12==12.3.1.170 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
36
+ nvidia-cusparselt-cu12==0.6.2 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
37
+ nvidia-nccl-cu12==2.21.5 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
38
+ nvidia-nvjitlink-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
39
+ nvidia-nvtx-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
40
+ opencv-python==4.11.0.86 ; python_version >= "3.10" and python_version < "4.0"
41
+ packaging==24.2 ; python_version >= "3.10" and python_version < "4.0"
42
+ pandas==2.2.3 ; python_version >= "3.10" and python_version < "4.0"
43
+ pillow==11.1.0 ; python_version >= "3.10" and python_version < "4.0"
44
+ protobuf==5.29.4 ; python_version >= "3.10" and python_version < "4.0"
45
+ psutil==7.0.0 ; python_version >= "3.10" and python_version < "4.0"
46
+ py-cpuinfo==9.0.0 ; python_version >= "3.10" and python_version < "4.0"
47
+ pyarrow==19.0.1 ; python_version >= "3.10" and python_version < "4.0"
48
+ pydeck==0.9.1 ; python_version >= "3.10" and python_version < "4.0"
49
+ pyparsing==3.2.1 ; python_version >= "3.10" and python_version < "4.0"
50
+ python-dateutil==2.9.0.post0 ; python_version >= "3.10" and python_version < "4.0"
51
+ pytz==2025.1 ; python_version >= "3.10" and python_version < "4.0"
52
+ pyyaml==6.0.2 ; python_version >= "3.10" and python_version < "4.0"
53
+ referencing==0.36.2 ; python_version >= "3.10" and python_version < "4.0"
54
+ requests==2.32.3 ; python_version >= "3.10" and python_version < "4.0"
55
+ rpds-py==0.23.1 ; python_version >= "3.10" and python_version < "4.0"
56
+ scipy==1.15.2 ; python_version >= "3.10" and python_version < "4.0"
57
+ seaborn==0.13.2 ; python_version >= "3.10" and python_version < "4.0"
58
+ setuptools==77.0.3 ; python_version >= "3.12" and python_version < "4.0"
59
+ six==1.17.0 ; python_version >= "3.10" and python_version < "4.0"
60
+ smmap==5.0.2 ; python_version >= "3.10" and python_version < "4.0"
61
+ streamlit==1.43.2 ; python_version >= "3.10" and python_version < "4.0"
62
+ sympy==1.13.1 ; python_version >= "3.10" and python_version < "4.0"
63
+ tenacity==9.0.0 ; python_version >= "3.10" and python_version < "4.0"
64
+ toml==0.10.2 ; python_version >= "3.10" and python_version < "4.0"
65
+ torch==2.6.0 ; python_version >= "3.10" and python_version < "4.0"
66
+ torchvision==0.21.0 ; python_version >= "3.10" and python_version < "4.0"
67
+ tornado==6.4.2 ; python_version >= "3.10" and python_version < "4.0"
68
+ tqdm==4.67.1 ; python_version >= "3.10" and python_version < "4.0"
69
+ triton==3.2.0 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
70
+ typing-extensions==4.12.2 ; python_version >= "3.10" and python_version < "4.0"
71
+ tzdata==2025.2 ; python_version >= "3.10" and python_version < "4.0"
72
+ ultralytics-thop==2.0.14 ; python_version >= "3.10" and python_version < "4.0"
73
+ ultralytics==8.3.94 ; python_version >= "3.10" and python_version < "4.0"
74
+ urllib3==2.3.0 ; python_version >= "3.10" and python_version < "4.0"
75
+ watchdog==6.0.0 ; python_version >= "3.10" and python_version < "4.0" and platform_system != "Darwin"
results/sample1.jpg ADDED

Git LFS Details

  • SHA256: ab775b556350fcd0e98d7ee191aeb78884b3b1a006502746200a9fc0f07e7497
  • Pointer size: 132 Bytes
  • Size of remote file: 1.41 MB
src/egg_segmentation_size/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
""" This repo segments the eggs in images and gives an estimation for their sizes """

# Kept in sync with pyproject.toml and the test suite by bump2version
# (see .bumpversion.cfg at the repo root).
__version__ = "0.2.0"
src/egg_segmentation_size/logging.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Logger initialization"""
2
+
3
+ import logging
4
+ import logging.config
5
+ from typing import Any
6
+
7
+
8
def config_logger(loglevel: int) -> None:
    """Configure the root logger with a single console (stderr) handler.

    Replaces any previously configured root handlers via
    ``logging.config.dictConfig`` while leaving existing non-root loggers
    enabled (``disable_existing_loggers`` is False).

    Args:
        loglevel: Numeric logging level applied to the root logger,
            e.g. ``logging.DEBUG`` (10) or ``logging.ERROR`` (40).
    """
    # NOTE: the original signature declared `-> Any` but nothing is ever
    # returned; the annotation is corrected to `None`.
    default_logging_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "standard": {
                "format": "%(asctime)s - [%(levelname)s] [%(name)s.%(funcName)s:%(lineno)d (%(process)d)] | %(message)s",
                "datefmt": "%Y-%m-%d %H:%M:%S",
            },
        },
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "formatter": "standard",
            },
        },
        "root": {
            "handlers": ["console"],
            "level": loglevel,
        },
    }
    logging.config.dictConfig(default_logging_config)
src/egg_segmentation_size/main.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Run the main code for egg-segmentation-size"""
2
+
3
+ from pathlib import Path
4
+ import logging
5
+
6
+ import click
7
+
8
+ from egg_segmentation_size import __version__
9
+ from egg_segmentation_size.logging import config_logger
10
+ from egg_segmentation_size.segmentor import EggSegmentorTrainer, EggSegmentorInference
11
+
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
@click.group()
@click.version_option(version=__version__)
@click.option(
    "-v",
    "--verbose",
    count=True,
    help="Shorthand for debug/info/warning/error loglevel (-v/-vv/-vvv; default error)",
)
def egg_segmentation_size_cli(verbose: int) -> None:
    """This repo segments the eggs in images and gives an estimation for their sizes"""
    # Map the repeated -v count to a stdlib level. One -v is the most
    # verbose (DEBUG); anything other than 1-3 falls back to ERROR.
    # The previous help text ("info/debug/warning/error") contradicted
    # this mapping; the help string is corrected, behavior is unchanged.
    log_level = {
        1: logging.DEBUG,
        2: logging.INFO,
        3: logging.WARNING,
    }.get(verbose, logging.ERROR)
    config_logger(log_level)

    click.echo("Run the main code.")
37
+
38
+
39
+ @egg_segmentation_size_cli.command()
40
+ @click.option("--img_resize", type=int, default=640, help="Resize images to this size.")
41
+ @click.option(
42
+ "--conf_path",
43
+ type=str,
44
+ default="src/egg_segmentation_size/data/data.yaml",
45
+ help="Path to the config file",
46
+ )
47
+ @click.option(
48
+ "--epochs", type=int, default=100, help="Number of epochs used in training."
49
+ )
50
+ @click.option("--batch_size", type=int, default=16, help="Batch size used in training.")
51
+ @click.option(
52
+ "--device", type=str, default="cuda", help="Use cuda or cpu for training."
53
+ )
54
+ def train(
55
+ img_resize: int, conf_path: str, epochs: int, batch_size: int, device: str
56
+ ) -> None:
57
+ """This the CLI for training purposes"""
58
+ segmentation = EggSegmentorTrainer(
59
+ conf=conf_path,
60
+ img_size=img_resize,
61
+ epochs=epochs,
62
+ device=device,
63
+ batch_size=batch_size,
64
+ )
65
+ segmentation.train()
66
+ _ = segmentation.validation()
67
+ segmentation.model_export()
68
+
69
+
70
@egg_segmentation_size_cli.command()
@click.option(
    "--model_path",
    type=click.Path(),
    default=Path("./src/egg_segmentation_size/model/egg_segmentor.pt"),
    help="Path to the pre-trained model.",
)
@click.option(
    "--data_path",
    type=click.Path(),
    default=Path("./tests/test_data"),
    help="Path to the test data.",
)
@click.option(
    "--result_path", type=str, default="./results", help="Path to the results."
)
@click.option(
    "--scale_factor",
    type=float,
    default=11.61,
    help="This is the scale factor of the camera to calculate the volume of eggs, scale_factor=DPI/2.54",
)
def infer(
    model_path: Path, data_path: str, result_path: str, scale_factor: float
) -> None:
    """CLI subcommand: run egg segmentation inference and log the results.

    Runs the model over ``data_path`` (file or directory), then logs the
    per-image egg counts and per-egg class/area/volume details.
    """
    # NOTE(review): click.Path() yields `str` for user-supplied values while
    # the defaults are `Path` objects, so model_path may be either type at
    # runtime; it is normalized with Path(...) below.
    logger.info("Testing the YOLO model for egg detection...")
    inferer = EggSegmentorInference(
        model_path=Path(model_path), result_path=result_path, scale_factor=scale_factor
    )
    segmentations = inferer.inference(data_path=data_path)
    counts = inferer.number_of_eggs(segmentations)
    if counts:
        for key, val in counts.items():
            # One line per image: total count plus the per-class breakdown.
            logger.info(
                "%s eggs are detected in %s as: %s",
                sum(item["count"] for item in val),
                key,
                val,
            )
    res = inferer.results_detail(segmentations)
    if res:
        for key, val in res.items():
            for detection in val:
                # One line per detected egg with its class, pixel area,
                # and estimated volume.
                logger.info(
                    "In image %s an egg with type: %s, and area(pixel): %d, and volume(cm3) %.2f was detected.",
                    key,
                    detection["class"],
                    detection["areas in pixel"],
                    detection["volume in cm3"],
                )
src/egg_segmentation_size/model/egg_segmentor.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cceea6d26db8ef8dce1151d7c714270f3f9a1cb3afdd591faf0fa351459a7858
3
+ size 6777261
src/egg_segmentation_size/segmentor.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This is the code for training the YOLO model for egg segmentation."""
2
+
3
+ import logging
4
+ from pathlib import Path
5
+ from dataclasses import dataclass, field
6
+ from typing import Any, Optional, Mapping, List
7
+
8
+ from collections import Counter
9
+ from ultralytics import YOLO
10
+ import numpy as np
11
+ import cv2
12
+
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
@dataclass
class EggSegmentorTrainer:
    """Trains, validates, and exports a YOLO segmentation model for eggs."""

    # Field order is part of the dataclass __init__ signature; do not reorder.
    conf: str = "src/egg_segmentation_size/data/data.yaml"
    epochs: int = 100
    img_size: int = 640
    batch_size: int = 16
    device: str = "cuda"
    model: Any = field(init=False)  # assigned by train(); unset before that

    def train(self) -> None:
        """Fine-tune a pretrained YOLOv8-nano segmentation checkpoint."""
        logger.info("Start training the YOLO model for egg segmentation.")
        self.model = YOLO("yolov8n-seg.pt")
        train_kwargs = {
            "data": self.conf,
            "epochs": self.epochs,
            "imgsz": self.img_size,
            "batch": self.batch_size,
            "device": self.device,
        }
        self.model.train(**train_kwargs)

    def validation(self) -> Any:
        """Run validation on the trained model and return its metrics object."""
        logger.info("Validating the YOLO model for egg segmentation.")
        return self.model.val()

    def model_export(self) -> None:
        """Export the trained model to ONNX format."""
        logger.info("Exporting the YOLO model for egg segmentation.")
        self.model.export(format="onnx")
49
+
50
+
51
@dataclass
class EggSegmentorInference:
    """Runs a trained YOLO egg-segmentation model and derives per-egg
    statistics (counts, pixel areas, estimated volumes) from the results."""

    model_path: Optional[Any] = field(default=None)
    result_path: Optional[str] = field(default=None)
    # Pixels per centimeter: scale_factor = DPI / 2.54 (see CLI help).
    scale_factor: float = field(default=11.61)

    def __post_init__(self) -> None:
        """Validate and normalize the model path.

        Raises:
            ValueError: If no model path was given or the file does not exist.
        """
        if self.model_path is None:
            raise ValueError("Model does not exist or the path is not correct.")
        # Accept both str and Path inputs (previously a str crashed with
        # AttributeError on .exists()); normalize to Path once here.
        self.model_path = Path(self.model_path)
        if not self.model_path.exists():
            raise ValueError("Model does not exist or the path is not correct.")

    def load_model(self) -> Any:
        """Load the YOLO model for egg detection."""
        logger.info("Loading the trained model for egg segmentation.")
        return YOLO(self.model_path)

    def inference(self, data_path: str) -> Any:
        """Run segmentation on an image file or a directory of images.

        Args:
            data_path: File or directory to run the model on.

        Returns:
            The ultralytics results sequence for the input.

        Raises:
            FileNotFoundError: If ``data_path`` does not exist. (Previously
                this was only logged and inference proceeded to fail
                downstream with a less helpful error.)
        """
        if not Path(data_path).exists():
            raise FileNotFoundError(
                "Data path does not exist or the path is not correct."
            )
        model = self.load_model()
        # Annotated result images are saved only when a result directory
        # was configured.
        return model(
            data_path,
            save=bool(self.result_path),
            project=self.result_path,
            name="detections",
        )

    @staticmethod
    def _shoelace_area(polygon: Any) -> float:
        """Calculate the area of a polygon using the shoelace formula.

        Args:
            polygon: (N, 2) array of x/y vertex coordinates.
        """
        x, y = polygon[:, 0], polygon[:, 1]
        return float(0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))))

    @staticmethod
    def number_of_eggs(detections: Any) -> Mapping[str, Any]:
        """Count detected eggs per image, grouped by class name.

        Returns:
            Mapping of image file name to a list of
            ``{"class": <name>, "count": <int>}`` dicts.
        """
        counts = {}
        for result in detections:
            class_count = Counter(int(box.cls.item()) for box in result.boxes)
            counts[Path(result.path).name] = [
                {"class": result.names[cls_id], "count": count}
                for cls_id, count in class_count.items()
            ]
        return counts

    def _egg_volume(self, polygon: Any, circularity_thr: int = 15) -> float:
        """Estimate an egg's volume (cm^3) from its segmentation polygon.

        Fits an ellipse to the polygon and converts its axes from pixels to
        cm via ``scale_factor``. Elongated eggs (axis difference above
        ``circularity_thr``) are modeled as prolate spheroids; near-circular
        ones as spheres of the mean radius.

        NOTE(review): the ``/ 3000`` divisor looks like the spheroid
        formula's ``/ 3`` with an extra factor of 1000 — confirm the
        intended unit conversion.
        """
        polygon = polygon.reshape((-1, 1, 2))
        ellipse = cv2.fitEllipse(polygon)  # pylint: disable=E1101
        minor_axis, major_axis = (
            ellipse[1][0] / self.scale_factor,
            ellipse[1][1] / self.scale_factor,
        )

        if (major_axis - minor_axis) > circularity_thr:
            # Prolate spheroid: (4/3) * pi * (a/2) * (b/2)^2
            return 4 * np.pi * (major_axis / 2) * ((minor_axis / 2) ** 2) / 3000
        # Sphere with radius equal to the mean of the two semi-axes.
        return 4 * np.pi * (((major_axis + minor_axis) / 4) ** 3) / 3000

    def results_detail(self, detections: Any) -> Mapping[str, Any]:
        """Get the detailed results of the segmented eggs such as bounding boxes, class names, and confidences.

        Returns:
            Mapping of image file name to a list of per-egg dicts with
            class name, confidence, pixel area, and estimated volume.
            Images without masks map to an empty list.
        """
        results = {}
        for result in detections:
            temp = []
            if result.masks is not None:
                boxes = result.boxes
                masks = result.masks.xy
                for i, mask in enumerate(masks):
                    polygon = np.array(mask, dtype=np.float32)
                    temp.append(
                        {
                            "class": result.names[int(boxes.cls[i].item())],
                            "confidence": boxes.conf[i].item(),
                            "areas in pixel": self._shoelace_area(polygon),
                            "volume in cm3": self._egg_volume(polygon),
                        }
                    )
            results[Path(result.path).name] = temp
        return results

    @staticmethod
    def result_images(detections: Any) -> List[Any]:
        """Make a list of the annotated result images, converted BGR -> RGB."""
        return [np.array(result.plot())[:, :, [2, 1, 0]] for result in detections]
tests/__init__.py ADDED
File without changes
tests/test_data/sample1.jpg ADDED

Git LFS Details

  • SHA256: c67e62c7e5fbe20d2bb14e9052d753c608152142268564c0e019b8d1f775fcf6
  • Pointer size: 132 Bytes
  • Size of remote file: 6.03 MB
tests/test_logging.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Unit test to test logging"""
2
+
3
+ import logging
4
+ from egg_segmentation_size.logging import config_logger
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
+
9
def test_config_logger() -> None:
    """config_logger must set the root level while leaving this module's logger bare."""
    config_logger(10)
    assert logger.root.level == 10
    assert logger.name == "tests.test_logging"
    assert logger.handlers == []
tests/test_main.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Package level tests"""
2
+
3
+ from click.testing import CliRunner
4
+
5
+ from egg_segmentation_size import __version__
6
+ from egg_segmentation_size.main import egg_segmentation_size_cli
7
+
8
+
9
def test_version() -> None:
    """The package version string must match the current release."""
    expected = "0.2.0"
    assert __version__ == expected
12
+
13
+
14
def test_egg_segmentation_size_cli() -> None:
    """The top-level CLI must render its help screen successfully."""
    runner = CliRunner()
    result = runner.invoke(egg_segmentation_size_cli, ["--help"])
    assert result.exit_code == 0
    # `assert result` was vacuous (click Result objects are always truthy);
    # assert the help text actually rendered instead.
    assert "Usage" in result.output
20
+
21
+
22
def test_train() -> None:
    """The `train` subcommand must render its help screen successfully."""
    runner = CliRunner()
    result = runner.invoke(egg_segmentation_size_cli, ["train", "--help"])
    assert result.exit_code == 0
    # `assert result` was vacuous (click Result objects are always truthy);
    # assert the help text actually rendered instead.
    assert "Usage" in result.output
28
+
29
+
30
def test_infer() -> None:
    """Smoke-test the `infer` subcommand end to end on the bundled sample image."""
    runner = CliRunner()
    result = runner.invoke(
        egg_segmentation_size_cli,
        ["infer", "--data_path", "./tests/test_data/sample1.jpg", "--result_path", ""],
    )
    assert result.exit_code == 0
    # `assert result` was vacuous (click Result objects are always truthy);
    # assert the command finished without raising instead.
    assert result.exception is None
tests/test_segmentor.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test the segmentor module."""
2
+
3
+ from pathlib import Path
4
+ import pytest
5
+ from ultralytics import YOLO
6
+ from egg_segmentation_size.segmentor import EggSegmentorInference
7
+
8
+
9
@pytest.fixture(name="infer_function")
def fixture_infer_function() -> EggSegmentorInference:
    """Provide a fresh EggSegmentorInference bound to the bundled model."""
    model = Path("./src/egg_segmentation_size/model/egg_segmentor.pt")
    return EggSegmentorInference(model_path=model, result_path="")
16
+
17
+
18
def test_load_model(infer_function: EggSegmentorInference) -> None:
    """Loading must return a YOLO model instance."""
    loaded = infer_function.load_model()
    assert isinstance(loaded, YOLO)
22
+
23
+
24
def test_inference(infer_function: EggSegmentorInference) -> None:
    """Inference on the sample image must yield a non-empty result."""
    assert infer_function.inference(data_path="./tests/test_data/sample1.jpg")
28
+
29
+
30
def test_number_of_eggs(infer_function: EggSegmentorInference) -> None:
    """Exactly three white eggs must be detected in the sample image."""
    result = infer_function.inference(data_path="./tests/test_data/sample1.jpg")
    counts = infer_function.number_of_eggs(result)
    # The original `if counts:` guard let the test pass vacuously when no
    # detections were returned; require a non-empty result explicitly.
    assert counts
    for key, val in counts.items():
        assert key == "sample1.jpg"
        assert sum(item["count"] for item in val) == 3
        assert val == [
            {"class": "White-Egg", "count": 3},
        ]
42
+
43
def test_result_images(infer_function: EggSegmentorInference) -> None:
    """Plotting the detections must produce at least one image array."""
    detections = infer_function.inference(data_path="./tests/test_data/sample1.jpg")
    assert infer_function.result_images(detections)