Readidno committed on
Commit
7fb0075
·
verified ·
1 Parent(s): 59e0b82

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -1,35 +1,11 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ # Hugging Face Hub / Git LFS — large binary files
2
+ *.pth filter=lfs diff=lfs merge=lfs -text
3
+ *.pt filter=lfs diff=lfs merge=lfs -text
4
+ *.bin filter=lfs diff=lfs merge=lfs -text
5
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
6
+ *.onnx filter=lfs diff=lfs merge=lfs -text
7
+ *.gif filter=lfs diff=lfs merge=lfs -text
8
+ assets/architecture_pipeline.png filter=lfs diff=lfs merge=lfs -text
9
+ assets/interpretability_pca_saliency.png filter=lfs diff=lfs merge=lfs -text
10
+ assets/learning_curves.png filter=lfs diff=lfs merge=lfs -text
11
+ assets/teaser_ordinal_scale.png filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Do not hide the weights when preparing for upload_folder — only environment clutter
2
+ __pycache__/
3
+ # Local list of images for rebuilding the README (not in assets/)
4
+ figures_config.yaml
5
+ *.py[cod]
6
+ .venv/
7
+ venv/
8
+ .env
9
+ .DS_Store
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
LICENSE-DINOv3-Meta.txt ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # DINOv3 License
2
+
3
+ Last Updated: August 19, 2025
4
+
5
+ “Agreement” means the terms and conditions for use, reproduction, distribution and modification of the DINO Materials set forth herein.
6
+
7
+ “DINO Materials” means, collectively, Documentation and the models, software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code, and other elements of the foregoing distributed by Meta and made available under this Agreement.
8
+
9
+ “Documentation” means the specifications, manuals and documentation accompanying DINO Materials distributed by Meta.
10
+
11
+ “Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
12
+
13
+ “Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) or Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).
14
+
15
+ “Sanctions” means any economic or trade sanctions or restrictions administered or enforced by the United States (including the Office of Foreign Assets Control of the U.S. Department of the Treasury (“OFAC”), the U.S. Department of State and the U.S. Department of Commerce), the United Nations, the European Union, or the United Kingdom.
16
+
17
+ “Trade Controls” means any of the following: Sanctions and applicable export and import controls.
18
+
19
+ By clicking “I Accept” below or by using or distributing any portion or element of the DINO Materials, you agree to be bound by this Agreement.
20
+
21
+ ## 1. License Rights and Redistribution.
22
+
23
+ a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the DINO Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the DINO Materials.
24
+
25
+ b. Redistribution and Use.
26
+
27
+ i. Distribution of DINO Materials, and any derivative works thereof, are subject to the terms of this Agreement. If you distribute or make the DINO Materials, or any derivative works thereof, available to a third party, you may only do so under the terms of this Agreement and you shall provide a copy of this Agreement with any such DINO Materials.
28
+
29
+ ii. If you submit for publication the results of research you perform on, using, or otherwise in connection with DINO Materials, you must acknowledge the use of DINO Materials in your publication.
30
+
31
+ iii. Your use of the DINO Materials must comply with applicable laws and regulations, including Trade Control Laws and applicable privacy and data protection laws.
32
+
33
+ iv. Your use of the DINO Materials will not involve or encourage others to reverse engineer, decompile or discover the underlying components of the DINO Materials.
34
+
35
+ v. You are not the target of Trade Controls and your use of DINO Materials must comply with Trade Controls. You agree not to use, or permit others to use, DINO Materials for any activities subject to the International Traffic in Arms Regulations (ITAR) or end uses prohibited by Trade Controls, including those related to military or warfare purposes, nuclear industries or applications, espionage, or the development or use of guns or illegal weapons.
36
+
37
+ ## 2. User Support.
38
+
39
+ Your use of the DINO Materials is done at your own discretion; Meta does not process any information nor provide any service in relation to such use. Meta is under no obligation to provide any support services for the DINO Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind.
40
+
41
+ ## 3. Disclaimer of Warranty.
42
+
43
+ UNLESS REQUIRED BY APPLICABLE LAW, THE DINO MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE DINO MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE DINO MATERIALS AND ANY OUTPUT AND RESULTS.
44
+
45
+ ## 4. Limitation of Liability.
46
+
47
+ IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT OR INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
48
+
49
+ ## 5. Intellectual Property.
50
+
51
+ a. Subject to Meta’s ownership of DINO Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the DINO Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.
52
+
53
+ b. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the DINO Materials, outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the DINO Materials.
54
+
55
+ ## 6. Term and Termination.
56
+
57
+ The term of this Agreement will commence upon your acceptance of this Agreement or access to the DINO Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the DINO Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.
58
+
59
+ ## 7. Governing Law and Jurisdiction.
60
+
61
+ This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
62
+
63
+ ## 8. Modifications and Amendments.
64
+
65
+ Meta may modify this Agreement from time to time; provided that they are similar in spirit to the current version of the Agreement, but may differ in detail to address new problems or concerns. All such changes will be effective immediately. Your continued use of the DINO Materials after any modification to this Agreement constitutes your agreement to such modification. Except as provided in this Agreement, no modification or addition to any provision of this Agreement will be binding unless it is in writing and signed by an authorized representative of both you and Meta.
NOTICE ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Anime.MILI Ordinal — Hugging Face model repository
2
+
3
+ This repository includes:
4
+
5
+ 1) Inference code (Python package `mili_score_inference/`), `example_predict.py`,
6
+ `requirements.txt`, and documentation in `README.md`, licensed under the
7
+ Apache License, Version 2.0 — see the file `LICENSE`.
8
+
9
+ 2) Fine-tuned checkpoint weights (`weights/anime_mili_ordinal_corn_vitl16.pth`)
10
+ are distributed by the repository authors under the Apache License, Version 2.0,
11
+ to the extent they constitute an original contribution separate from Meta’s
12
+ DINO Materials. Such weights are a derivative of a DINOv3-based backbone;
13
+ use and redistribution of the backbone weights themselves are governed by
14
+ Meta’s DINOv3 License — see `LICENSE-DINOv3-Meta.txt`.
15
+
16
+ 3) Optional bundled pretrained backbone files under `backbone/dinov3_vitl16/`
17
+ (and users who load DINOv3 from Hugging Face, e.g.
18
+ `facebook/dinov3-vitl16-pretrain-lvd1689m`) must comply with Meta’s DINOv3
19
+ License and the terms on the model card.
20
+
21
+ For third-party notices required by the Apache License, see this file and
22
+ `LICENSE-DINOv3-Meta.txt`.
README.md CHANGED
@@ -1,3 +1,192 @@
1
  ---
2
  license: apache-2.0
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: apache-2.0
3
+ language:
4
+ - en
5
+ library_name: pytorch
6
+ tags:
7
+ - pytorch
8
+ - vision
9
+ - ordinal-regression
10
+ - dinov3
11
+ - anime
12
+ - anime-mili-ordinal
13
+ pipeline_tag: image-classification
14
  ---
15
+
16
+ # Anime.MILI Ordinal
17
+
18
+ *Ordinal Regression of Subjective Visual Characteristics via Feature Adaptation of Self-Supervised Vision Transformers*
19
+
20
+ Release weights: **`anime_mili_ordinal_corn_vitl16.pth`** — full `state_dict` (DINOv3 ViT-L/16 + CORN head). Outputs a continuous subjective score in **[0, 1]**. A local DINOv3 backbone may be included under `backbone/dinov3_vitl16/` (Meta DINOv3 License); the same architecture can be loaded from Hugging Face by model id.
21
+
22
+ ## Abstract
23
+
24
+ Estimating subjective visual metrics, such as the mili characteristic, requires models capable of mapping input images to a strictly ordered scale. Traditional multiclass classification ignores the internal hierarchy of quality levels. Standard continuous regression approaches suffer from vanishing gradients at scale boundaries and instability when processing unevenly distributed ordinal data. This technical release presents an architecture based on the DINOv3 vision transformer, adapted specifically for ordinal regression. The implementation utilizes a two-phase training method: initial freezing of the backbone to warm up a specialized multi-layer perceptron, followed by partial fine-tuning of the final transformer blocks. Employing a Conditional Ordinal Regression Network (CORN) loss function allows the model to explicitly enforce rank monotonicity. Evaluation on a dataset of 4,715 images demonstrates a Quadratic Weighted Kappa (QWK) of 0.74. The results confirm the efficacy of combining self-supervised features with ordinal-aware architectures for visual quality assessment.
25
+
26
+ ### Teaser: ordinal scale with validation examples
27
+
28
+ Two validation images per ordinal bin (columns **0.00 … 1.00**).
29
+
30
+ ![Teaser: ordinal scale and example images per bin](assets/teaser_ordinal_scale.png)
31
+
32
+ ## Introduction
33
+
34
+ Human assessment of visual characteristics frequently relies on discrete quality levels that form an ordinal structure. The mili attribute represents a human-centric score measured on a scale from 0 to 1 with fixed 0.25 increments. Standard neural network architectures typically process such values either as independent classes, discarding the distance information between them, or as continuous regression targets. Utilizing Mean Squared Error (MSE) for regression tasks with fixed boundaries minimizes derivatives during extreme predictions, restricting the ability of the model to correct severe errors.
35
+
36
+ The development of self-supervised methods, particularly the DINOv3 architecture, provides access to universal visual features that encode both high-level semantics and low-level structural patterns. Applying frozen DINOv3 features to highly specialized downstream tasks necessitates specific adaptation mechanisms. The current architecture transforms the base DINOv3 representations into a probabilistic representation of an ordinal scale. Replacing standard classification layers with an architecture that predicts the conditional probabilities of exceeding each scale threshold ensures increased prediction accuracy and stability.
37
+
38
+ ## Related Work
39
+
40
+ Vision transformers consistently demonstrate high efficiency in feature extraction tasks compared to convolutional neural networks. Self-supervised methods utilize feature distillation and regularization to construct robust global and local descriptors. Adapting these models for regression tasks traditionally relies on L1 and L2 loss functions, which assume symmetric error penalties.
41
+
42
+ Ordinal regression accounts for label ranking, a critical requirement for subjective evaluations. The CORN framework addresses this by decomposing the ordinal problem into a series of binary classifications for each rank threshold. The implementation detailed here extends the application of CORN logic by integrating it on top of the high-capacity DINOv3 transformer architecture. It employs partial weight unfreezing strategies to preserve the generalization capabilities of the self-supervised features while adapting to the target domain.
43
+
44
+ ## Method
45
+
46
+ The architecture utilizes a pre-trained DINOv3 ViT-L/16 backbone as the primary feature extractor. Input images are processed at a resolution of 224×224 pixels. To maintain the integrity of the self-supervised representations, the base network remains frozen during the initial training phase. The output of the base network is the **[CLS]** token: a vector whose dimension matches the backbone hidden size (1024 in the released checkpoint), aggregating global image information.
47
+
48
+ ### Pipeline diagram (CORN on frozen / partially unfrozen DINOv3)
49
+
50
+ ![Architecture: image → DINOv3 → CLS → MLP head → CORN logits → expected ordinal score](assets/architecture_pipeline.png)
51
+
52
+ The specialized head is a multi-layer perceptron optimized for ordinal estimation. The sequence consists of a LayerNorm operation, a Dropout layer with a 0.2 probability, a linear projection into a 256-dimensional hidden space, a GELU activation function, an additional Dropout layer at 0.1, and a final linear projection. Unlike standard DINOv3 classification usage, the final layer returns K−1 logits for K quality levels, specifically yielding 4 logits for the 5 target classes.
53
+
54
+ Optimization is driven by the CORN loss function, calculated as the sum of binary cross-entropy terms for each ordinal threshold. For a target label belonging to the set of ranks 0 through 4, the model predicts the probability of the rank exceeding a threshold k. The expected class is computed as the sum of probabilities across all thresholds. This loss function forces the model to learn the cumulative distribution of quality levels, guaranteeing the mathematical consistency of the ordinal scale.
55
+
56
+ ## Experiments
57
+
58
+ The dataset comprises 4,715 images, partitioned into a 3,771-image training split and a 944-image validation split. Labels are distributed unevenly across five classes: 1,204 samples for class 0.00, 513 for class 0.25, 949 for class 0.50, 1,394 for class 0.75, and 655 for class 1.00.
59
+
60
+ ### Train / validation distribution (real counts)
61
+
62
+ Bar heights are **file counts per ordinal bin** in the training and validation splits used for this release.
63
+
64
+ ![Dataset distribution: train vs validation per ordinal bin](assets/dataset_distribution.png)
65
+
66
+ The training procedure spans two phases. In the first phase, spanning 5 epochs, only the head weights are updated with a learning rate of 3e-4. This calibrates the classifier to the fixed DINOv3 features. In the second phase, lasting 30 epochs, the final 6 transformer blocks are unfrozen. The learning rate is reduced to 2e-5 and regulated by a Cosine Annealing schedule. Optimization utilizes the AdamW algorithm with a weight decay coefficient of 1e-3.
67
+
68
+ To increase inference reliability, Test-Time Augmentation (TTA) is applied. Each validation image is processed in three variations: original, horizontally flipped, and center-cropped. The logits from the three passes are averaged before calculating the expected class. Model evaluation relies on the Quadratic Weighted Kappa metric, which penalizes predictions proportionally to the squared distance between the true and predicted ranks.
69
+
70
+ ## Results
71
+
72
+ The two-phase training strategy produces a consistent performance increase. During the first phase, utilizing a completely frozen backbone, the model reaches a QWK of 0.54. This establishes a strong baseline derived entirely from the raw DINOv3 features. Transitioning to the second phase with partial unfreezing of the transformer blocks facilitates further metric growth.
73
+
74
+ The validation loss steadily decreases, reaching a minimum of 0.38, while the QWK stabilizes at 0.74. The discrepancy between training and validation errors remains minimal throughout the training process. The combination of the CORN loss function and the TTA algorithm prevents the severe overfitting observed in early experimental iterations that utilized standard MSE and a fully unfrozen backbone.
75
+
76
+ ### Learning curves
77
+
78
+ Train/val CORN loss and validation QWK by epoch (dashed line: start of phase 2 — last six transformer blocks unfrozen). The GIF animates the QWK curve over epochs.
79
+
80
+ ![Learning curves: CORN loss and validation QWK](assets/learning_curves.png)
81
+
82
+ ![Animation: validation QWK over training](assets/learning_qwk_animation.gif)
83
+
84
+ ## Discussion
85
+
86
+ The efficiency of the described architecture stems from the mathematical alignment between the CORN loss function and the QWK metric. Decomposing the task into predicting conditional probabilities for each threshold eliminates the vanishing gradient problem. The model receives a stable error signal even when predictions deviate significantly from the true value.
87
+
88
+ The success of incorporating DINOv3 lies in the high resilience of its self-supervised features to visual noise. The two-phase training strategy is critical for preserving these features. The initial freezing prevents the destruction of the complex transformer attention system by random gradients from the uninitialized head. The subsequent unfreezing of the final layers allows the model to adjust its perception of specific textural and geometric patterns relevant exclusively to the mili characteristic. The stabilization of the metric by the 30th epoch indicates complete assimilation of patterns from the available training volume.
89
+
90
+ ### Ordinal confusion matrix (validation, single-view inference)
91
+
92
+ Cell counts: validation labels vs. predicted bins (rounded). Weights file: `anime_mili_ordinal_corn_vitl16.pth`.
93
+
94
+ ![Confusion matrix on validation set](assets/confusion_matrix.png)
95
+
96
+ ### Self-supervised features vs. gradient saliency
97
+
98
+ Each row: input · PCA of DINOv3 patch tokens (after register tokens) · saliency from the CORN head path.
99
+
100
+ ![Interpretability: PCA of DINOv3 patches and input saliency](assets/interpretability_pca_saliency.png)
101
+
102
+ ## Limitations
103
+
104
+ 1. The computational complexity of the ViT-L/16 architecture requires significant hardware resources for inference, complicating deployment on memory-constrained devices.
105
+ 2. The mathematical formulation for calculating the expected class implicitly assumes equidistant intervals between classes, potentially failing to fully reflect the non-linear nature of human quality perception.
106
+ 3. The dataset imbalance, specifically the prevalence of the 0.00 and 0.75 classes over the 0.25 class, introduces a risk of prediction bias toward majority classes despite the use of robust loss functions.
107
+ 4. The performance improvement achieved through Test-Time Augmentation triples the inference latency, severely limiting application in real-time systems.
108
+
109
+ ## Conclusion
110
+
111
+ The described architecture provides a framework for ordinal regression of subjective visual evaluations using DINOv3 features. The methodology connects self-supervised visual representations with domain-specific ordinal classification via a CORN head and phased fine-tuning. The reported metrics support ordinal consistency as a training objective for human-centric quality scores.
112
+
113
+ ---
114
+
115
+ ## Repository layout
116
+
117
+ | Path | Description |
118
+ |------|-------------|
119
+ | `mili_score_inference/` | Package: architecture, preprocessing, `load_model`, `predict_pil` |
120
+ | `weights/anime_mili_ordinal_corn_vitl16.pth` | Fine-tuned full checkpoint (backbone + CORN head) |
121
+ | `backbone/dinov3_vitl16/` | Local DINOv3 ViT-L/16 HF-format checkpoint (`config.json`, `model.safetensors`, …) for offline inference |
122
+ | `requirements.txt` | Inference dependencies |
123
+ | `example_predict.py` | CLI: image → score |
124
+ | `LICENSE` | Apache 2.0 — code and Anime.MILI Ordinal weights in this repository |
125
+ | `LICENSE-DINOv3-Meta.txt` | Meta DINOv3 License (bundled backbone and Hub DINOv3 use) |
126
+ | `NOTICE` | Licensing summary |
127
+ | `assets/` | Figures for this README |
128
+
129
+ ## Installation
130
+
131
+ ```bash
132
+ pip install -r requirements.txt
133
+ export PYTHONPATH="$PWD" # Linux/macOS
134
+ # set PYTHONPATH=%CD% # Windows cmd
135
+ ```
136
+
137
+ ## Quick start (local backbone)
138
+
139
+ ```python
140
+ from pathlib import Path
141
+ from PIL import Image
142
+ import torch
143
+ from mili_score_inference.predict import load_model, predict_pil
144
+
145
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
146
+ model = load_model(
147
+ Path("weights/anime_mili_ordinal_corn_vitl16.pth"),
148
+ Path("backbone/dinov3_vitl16"),
149
+ device=device,
150
+ local_backbone=True,
151
+ )
152
+ score = predict_pil(model, Image.open("photo.jpg"), device=device)
153
+ print(score)
154
+ ```
155
+
156
+ ## Backbone from Hugging Face Hub
157
+
158
+ `load_model` accepts either a path to `backbone/dinov3_vitl16` or the Hub model id **`facebook/dinov3-vitl16-pretrain-lvd1689m`** (gated; Meta terms on the model card apply).
159
+
160
+ ```python
161
+ model = load_model(
162
+ Path("weights/anime_mili_ordinal_corn_vitl16.pth"),
163
+ "facebook/dinov3-vitl16-pretrain-lvd1689m",
164
+ device=device,
165
+ local_backbone=False,
166
+ )
167
+ ```
168
+
169
+ Use the same model id as in training (`from_pretrained`).
170
+
171
+ ## CLI
172
+
173
+ ```bash
174
+ python example_predict.py \
175
+ --weights weights/anime_mili_ordinal_corn_vitl16.pth \
176
+ --backbone backbone/dinov3_vitl16 \
177
+ --image photo.jpg
178
+ ```
179
+
180
+ For Hub-only backbone, pass `--backbone facebook/dinov3-vitl16-pretrain-lvd1689m` instead of a local directory.
181
+
182
+ ## Licensing
183
+
184
+ - **Code, documentation, and fine-tuned checkpoint** in this repository: **Apache License 2.0** (`LICENSE`).
185
+ - **DINOv3 architecture and pretrained weights** are subject to Meta’s **DINOv3 License** (`LICENSE-DINOv3-Meta.txt`). When loading a backbone from the Hub, follow that model card’s terms.
186
+ - Details: **`NOTICE`**.
187
+
188
+ Domain-specific scores reflect the training distribution; use only in appropriate scenarios.
189
+
190
+ ## Citation
191
+
192
+ Cite this repository when using the release. For research publications, acknowledge use of DINO Materials per Meta’s requirements (see `LICENSE-DINOv3-Meta.txt`).
assets/architecture_pipeline.png ADDED

Git LFS Details

  • SHA256: 33c8aa4e8aad90e312b38fa7e90374075dd358e3c47db9f0f03b215f9b55895b
  • Pointer size: 131 Bytes
  • Size of remote file: 106 kB
assets/confusion_matrix.png ADDED
assets/dataset_distribution.png ADDED
assets/interpretability_pca_saliency.png ADDED

Git LFS Details

  • SHA256: c3740842388bf99834c6a525df7441622511bdd5ba29b23511904f2c42278e99
  • Pointer size: 132 Bytes
  • Size of remote file: 1.89 MB
assets/learning_curves.png ADDED

Git LFS Details

  • SHA256: 109cae907a29ee32a3a64534ee0a49303a89a5dac2a32443685359d03f46c255
  • Pointer size: 131 Bytes
  • Size of remote file: 103 kB
assets/learning_qwk_animation.gif ADDED

Git LFS Details

  • SHA256: 17e1d1e950e49e6aecb501f4c34b5509842f4d8a46f0a39c4a07184aa887043f
  • Pointer size: 131 Bytes
  • Size of remote file: 116 kB
assets/teaser_ordinal_scale.png ADDED

Git LFS Details

  • SHA256: 3f3dff20666fecc810907002ab6ab93f036becebb0b6145895e10794e3de2cf6
  • Pointer size: 131 Bytes
  • Size of remote file: 814 kB
backbone/README.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Directory `dinov3_vitl16/` holds a Hugging Face–format DINOv3 ViT-L/16 checkpoint
2
+ (`config.json`, `model.safetensors`, `preprocessor_config.json`) for offline inference.
3
+
4
+ Weights are DINO Materials under Meta’s DINOv3 License — see `../LICENSE-DINOv3-Meta.txt`.
5
+ Upstream Hub id: `facebook/dinov3-vitl16-pretrain-lvd1689m` (gated).
backbone/dinov3_vitl16/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "DINOv3ViTModel"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "drop_path_rate": 0.0,
7
+ "dtype": "float32",
8
+ "hidden_act": "gelu",
9
+ "hidden_size": 1024,
10
+ "image_size": 224,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 4096,
13
+ "key_bias": false,
14
+ "layer_norm_eps": 1e-05,
15
+ "layerscale_value": 1.0,
16
+ "mlp_bias": true,
17
+ "model_type": "dinov3_vit",
18
+ "num_attention_heads": 16,
19
+ "num_channels": 3,
20
+ "num_hidden_layers": 24,
21
+ "num_register_tokens": 4,
22
+ "patch_size": 16,
23
+ "pos_embed_jitter": null,
24
+ "pos_embed_rescale": 2.0,
25
+ "pos_embed_shift": null,
26
+ "proj_bias": true,
27
+ "query_bias": true,
28
+ "rope_theta": 100.0,
29
+ "transformers_version": "4.57.6",
30
+ "use_gated_mlp": false,
31
+ "value_bias": true
32
+ }
backbone/dinov3_vitl16/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dcb2e45127cccbf1601e5f42fef165eea275c8e5213197e8dcf3f48822718179
3
+ size 1212559808
backbone/dinov3_vitl16/preprocessor_config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": null,
3
+ "data_format": "channels_first",
4
+ "default_to_square": true,
5
+ "device": null,
6
+ "disable_grouping": null,
7
+ "do_center_crop": null,
8
+ "do_convert_rgb": null,
9
+ "do_normalize": true,
10
+ "do_pad": null,
11
+ "do_rescale": true,
12
+ "do_resize": true,
13
+ "image_mean": [
14
+ 0.485,
15
+ 0.456,
16
+ 0.406
17
+ ],
18
+ "image_processor_type": "DINOv3ViTImageProcessorFast",
19
+ "image_std": [
20
+ 0.229,
21
+ 0.224,
22
+ 0.225
23
+ ],
24
+ "input_data_format": null,
25
+ "pad_size": null,
26
+ "resample": 2,
27
+ "rescale_factor": 0.00392156862745098,
28
+ "return_tensors": null,
29
+ "size": {
30
+ "height": 224,
31
+ "width": 224
32
+ }
33
+ }
example_predict.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """CLI: image path to mili score in [0, 1]. --backbone: local directory or Hugging Face model id."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import argparse
7
+ from pathlib import Path
8
+
9
+ import torch
10
+ from PIL import Image
11
+
12
+ from mili_score_inference.predict import load_model, predict_pil
13
+
14
+
15
def main() -> int:
    """Parse CLI arguments, score one image, and print the result.

    Returns 0 on success; argparse exits on invalid arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights", type=Path, required=True)
    parser.add_argument(
        "--backbone",
        type=str,
        required=True,
        help="Local checkpoint directory or Hugging Face model id",
    )
    parser.add_argument("--image", type=Path, required=True)
    parser.add_argument("--device", default="auto")
    args = parser.parse_args()

    # "auto" prefers CUDA when available; any other value is passed verbatim
    # to torch.device (e.g. "cpu", "cuda:1").
    if args.device == "auto":
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device(args.device)

    # A --backbone value naming an existing directory is treated as a local
    # checkpoint; anything else is forwarded as a Hub model id.
    backbone_path = Path(args.backbone)
    is_local = backbone_path.is_dir()
    backbone_arg: Path | str = backbone_path if is_local else args.backbone

    model = load_model(
        args.weights,
        backbone_arg,
        device=device,
        local_backbone=is_local,
    )
    score = predict_pil(model, Image.open(args.image), device=device)
    print(f"score={score:.6f}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
mili_score_inference/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Inference: ordinal (CORN) score on top of DINOv3 ViT."""
2
+
3
+ from mili_score_inference.model import MiliModelCORN
4
+ from mili_score_inference.predict import (
5
+ logits_to_score_01,
6
+ predict_expected_class,
7
+ predict_pil,
8
+ )
9
+
10
+ __all__ = [
11
+ "MiliModelCORN",
12
+ "predict_expected_class",
13
+ "logits_to_score_01",
14
+ "predict_pil",
15
+ ]
mili_score_inference/model.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """CORN head + DINOv3 ViT (Hugging Face) — matches `anime_mili_ordinal_corn_vitl16` state_dict."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from pathlib import Path
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from transformers import AutoConfig, AutoModel
10
+
11
+
12
def backbone_transformer_blocks(backbone: nn.Module) -> list[nn.Module]:
    """Return the backbone's transformer blocks as a plain list.

    Supports both HF layouts: ``backbone.encoder.layer`` (wrapped encoder)
    and ``backbone.layer`` (flat model). Raises AttributeError otherwise.
    """
    encoder = getattr(backbone, "encoder", None)
    if encoder is not None and hasattr(encoder, "layer"):
        return list(encoder.layer)
    if hasattr(backbone, "layer"):
        return list(backbone.layer)
    raise AttributeError("expected backbone.encoder.layer or backbone.layer")
19
+
20
+
21
class MiliModelCORN(nn.Module):
    """DINOv3 ViT backbone (Hugging Face) with a CORN ordinal head.

    ``forward`` emits (B, num_classes - 1) threshold logits; predict.py turns
    them into a [0, 1] score via the expected-class formula. Layer layout
    matches the released ``anime_mili_ordinal_corn_vitl16`` state_dict.
    """

    def __init__(
        self,
        backbone_checkpoint_dir: Path | str,
        *,
        dropout_head: float = 0.2,
        drop_path_rate: float = 0.2,
        num_classes: int = 5,
        local_files_only: bool = True,
    ) -> None:
        super().__init__()
        # Accept either a local checkpoint directory or a Hub model id.
        candidate = Path(backbone_checkpoint_dir)
        if candidate.is_dir():
            checkpoint = str(candidate.resolve())
        else:
            checkpoint = str(backbone_checkpoint_dir)

        config = AutoConfig.from_pretrained(checkpoint, local_files_only=local_files_only)
        # Override stochastic-depth rate when the config exposes it.
        if hasattr(config, "drop_path_rate"):
            config.drop_path_rate = drop_path_rate
        self.backbone = AutoModel.from_pretrained(
            checkpoint, config=config, local_files_only=local_files_only
        )
        feature_dim = int(self.backbone.config.hidden_size)

        # NOTE: layer order (head.0 .. head.4) must not change — the released
        # state_dict keys depend on it.
        self.head = nn.Sequential(
            nn.Dropout(dropout_head),
            nn.Linear(feature_dim, 256),
            nn.GELU(),
            nn.Dropout(dropout_head / 2),
            nn.Linear(256, num_classes - 1),
        )
        # Inference default: the backbone is fully frozen.
        self.backbone.requires_grad_(False)
        self.blocks = backbone_transformer_blocks(self.backbone)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Pixel batch (B, 3, H, W) -> CORN logits (B, num_classes - 1)."""
        hidden = self.backbone(pixel_values=x).last_hidden_state
        # CLS-token pooling: position 0 of the token sequence.
        return self.head(hidden[:, 0, :])
mili_score_inference/predict.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Load weights and predict score in [0, 1]."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from pathlib import Path
6
+
7
+ import torch
8
+ from PIL import Image
9
+
10
+ from mili_score_inference.model import MiliModelCORN
11
+ from mili_score_inference.transforms import build_val_transform
12
+
13
+
14
def predict_expected_class(logits: torch.Tensor) -> torch.Tensor:
    """Expected ordinal rank per sample: sum of per-threshold sigmoid probabilities.

    Input (B, K-1) CORN logits -> output (B,) values in [0, K-1].
    """
    return torch.sigmoid(logits).sum(dim=1)
17
+
18
+
19
def logits_to_score_01(logits: torch.Tensor) -> torch.Tensor:
    """Normalize the expected class (range 0..4 for K=5) to a score in [0, 1]."""
    expected = predict_expected_class(logits)
    return expected / 4.0
21
+
22
+
23
def load_model(
    weights_path: Path | str,
    backbone_checkpoint_dir: Path | str,
    *,
    dropout_head: float = 0.2,
    drop_path_rate: float = 0.2,
    num_classes: int = 5,
    device: torch.device | None = None,
    local_backbone: bool = True,
) -> MiliModelCORN:
    """Build MiliModelCORN, load the fine-tuned state_dict, and set eval mode.

    When ``device`` is None, CUDA is selected if available, else CPU.
    ``local_backbone`` maps to HF ``local_files_only``.
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MiliModelCORN(
        backbone_checkpoint_dir,
        dropout_head=dropout_head,
        drop_path_rate=drop_path_rate,
        num_classes=num_classes,
        local_files_only=local_backbone,
    )
    # weights_only=True keeps torch.load from unpickling arbitrary objects.
    checkpoint = torch.load(weights_path, map_location="cpu", weights_only=True)
    model.load_state_dict(checkpoint)
    model.to(device)
    model.eval()
    return model
46
+
47
+
48
@torch.inference_mode()
def predict_pil(
    model: MiliModelCORN,
    image: Image.Image,
    *,
    device: torch.device,
    img_size: int = 224,
) -> float:
    """Score a single PIL image; returns a float in [0, 1]."""
    preprocess = build_val_transform(img_size)
    # Single-image batch: (1, 3, img_size, img_size) on the target device.
    batch = preprocess(image.convert("RGB")).unsqueeze(0).to(device)
    logits = model(batch)
    return float(logits_to_score_01(logits)[0].item())
mili_score_inference/transforms.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Validation-style preprocessing: resize + ImageNet normalization."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from torchvision import transforms
6
+
7
+
8
def build_val_transform(img_size: int = 224) -> transforms.Compose:
    """Deterministic eval preprocessing: square resize, tensor conversion, ImageNet normalization."""
    imagenet_mean = (0.485, 0.456, 0.406)
    imagenet_std = (0.229, 0.224, 0.225)
    steps = [
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize(imagenet_mean, imagenet_std),
    ]
    return transforms.Compose(steps)
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # MILI score inference (CORN head + DINOv3 backbone from a local or HF checkpoint)
2
+ torch>=2.2.0,<2.7.0
3
+ torchvision>=0.17.0,<0.22.0
4
+ transformers>=4.48.0,<5.0.0
5
+ numpy>=1.26.0,<3.0.0
6
+ pillow>=10.2.0,<12.0.0
weights/.gitkeep ADDED
File without changes
weights/README.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Fine-tuned full-model checkpoint (Apache 2.0, derivative of DINOv3):
2
+
3
+ anime_mili_ordinal_corn_vitl16.pth
4
+
5
+ Contains the backbone + CORN head state_dict for use with mili_score_inference.
weights/anime_mili_ordinal_corn_vitl16.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:372b9dc22759635f555e94b7a921b903232ad035b6475a8edabfdca44e0b32aa
3
+ size 1213732210