Add rtmw-m-256x192 RTMW/RTMDet HF port
Browse files
- README.md +3 -14
- config.json +1 -1
- modeling_rtmw.py +4 -0
README.md
CHANGED
|
@@ -38,23 +38,12 @@ Detector: human AP = 56.4 on COCO val2017.
|
|
| 38 |
## Usage
|
| 39 |
|
| 40 |
```python
|
| 41 |
-
from transformers import AutoImageProcessor
|
| 42 |
from PIL import Image
|
| 43 |
import torch
|
| 44 |
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
# Or directly:
|
| 49 |
-
import sys, json
|
| 50 |
-
from pathlib import Path
|
| 51 |
-
|
| 52 |
-
# Using the custom modules:
|
| 53 |
-
from rtmw_modules.configuration_rtmw import RTMWConfig
|
| 54 |
-
from rtmw_modules.modeling_rtmw import RTMWModel
|
| 55 |
-
|
| 56 |
-
config = RTMWConfig.from_pretrained("akore/rtmw-m-256x192", trust_remote_code=True)
|
| 57 |
-
model = RTMWModel.from_pretrained("akore/rtmw-m-256x192", trust_remote_code=True)
|
| 58 |
model.eval()
|
| 59 |
|
| 60 |
processor = AutoImageProcessor.from_pretrained("akore/rtmw-m-256x192")
|
|
|
|
| 38 |
## Usage
|
| 39 |
|
| 40 |
```python
|
| 41 |
+
from transformers import AutoConfig, AutoModel, AutoImageProcessor
|
| 42 |
from PIL import Image
|
| 43 |
import torch
|
| 44 |
|
| 45 |
+
config = AutoConfig.from_pretrained("akore/rtmw-m-256x192", trust_remote_code=True)
|
| 46 |
+
model = AutoModel.from_pretrained("akore/rtmw-m-256x192", trust_remote_code=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
model.eval()
|
| 48 |
|
| 49 |
processor = AutoImageProcessor.from_pretrained("akore/rtmw-m-256x192")
|
config.json
CHANGED
|
@@ -44,6 +44,6 @@
|
|
| 44 |
"transformers_version": "5.2.0",
|
| 45 |
"auto_map": {
|
| 46 |
"AutoConfig": "configuration_rtmw.RTMWConfig",
|
| 47 |
-
"
|
| 48 |
}
|
| 49 |
}
|
|
|
|
| 44 |
"transformers_version": "5.2.0",
|
| 45 |
"auto_map": {
|
| 46 |
"AutoConfig": "configuration_rtmw.RTMWConfig",
|
| 47 |
+
"AutoModel": "modeling_rtmw.RTMWModel"
|
| 48 |
}
|
| 49 |
}
|
modeling_rtmw.py
CHANGED
|
@@ -1258,6 +1258,10 @@ class RTMWModel(PreTrainedModel):
|
|
| 1258 |
This model consists of a backbone, neck, and pose head for keypoint detection.
|
| 1259 |
All implementations use PyTorch only with no NumPy or OpenCV dependencies.
|
| 1260 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1261 |
|
| 1262 |
def __init__(self, config: RTMWConfig):
|
| 1263 |
super().__init__(config)
|
|
|
|
| 1258 |
This model consists of a backbone, neck, and pose head for keypoint detection.
|
| 1259 |
All implementations use PyTorch only with no NumPy or OpenCV dependencies.
|
| 1260 |
"""
|
| 1261 |
+
|
| 1262 |
+
config_class = RTMWConfig
|
| 1263 |
+
base_model_prefix = "rtmw"
|
| 1264 |
+
main_input_name = "pixel_values"
|
| 1265 |
|
| 1266 |
def __init__(self, config: RTMWConfig):
|
| 1267 |
super().__init__(config)
|