Thastp committed on
Commit
6ae798d
·
verified ·
1 Parent(s): b661790

Upload model

Browse files
Files changed (3) hide show
  1. config.json +1 -2
  2. configuration_rf_detr.py +3 -3
  3. modeling_rf_detr.py +6 -6
config.json CHANGED
@@ -11,7 +11,6 @@
11
  "ca_nheads": 16,
12
  "dec_layers": 3,
13
  "dec_n_points": 2,
14
- "device": "cpu",
15
  "encoder": "dinov2_windowed_small",
16
  "gradient_checkpointing": false,
17
  "group_detr": 13,
@@ -36,6 +35,6 @@
36
  "resolution": 560,
37
  "sa_nheads": 8,
38
  "torch_dtype": "float32",
39
- "transformers_version": "4.50.3",
40
  "two_stage": true
41
  }
 
11
  "ca_nheads": 16,
12
  "dec_layers": 3,
13
  "dec_n_points": 2,
 
14
  "encoder": "dinov2_windowed_small",
15
  "gradient_checkpointing": false,
16
  "group_detr": 13,
 
35
  "resolution": 560,
36
  "sa_nheads": 8,
37
  "torch_dtype": "float32",
38
+ "transformers_version": "4.51.1",
39
  "two_stage": true
40
  }
configuration_rf_detr.py CHANGED
@@ -6,7 +6,7 @@ from optimum.exporters.onnx.model_configs import ViTOnnxConfig
6
 
7
  ### modified from https://github.com/roboflow/rf-detr/blob/main/rfdetr/config.py
8
 
9
- DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
10
 
11
  class RFDetrConfig(PretrainedConfig):
12
  model_type = 'rf-detr'
@@ -24,7 +24,7 @@ class RFDetrConfig(PretrainedConfig):
24
  amp: bool = True,
25
  num_classes: int = 90,
26
  num_queries: int = 300,
27
- device: Literal["cpu", "cuda", "mps"] = DEVICE,
28
  resolution: int = 560,
29
  group_detr: int = 13,
30
  gradient_checkpointing: bool = False,
@@ -40,7 +40,7 @@ class RFDetrConfig(PretrainedConfig):
40
  self.layer_norm = layer_norm
41
  self.amp = amp
42
  self.num_classes = num_classes
43
- self.device = device
44
  self.resolution = resolution
45
  self.group_detr = group_detr
46
  self.gradient_checkpointing = gradient_checkpointing
 
6
 
7
  ### modified from https://github.com/roboflow/rf-detr/blob/main/rfdetr/config.py
8
 
9
+ #DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
10
 
11
  class RFDetrConfig(PretrainedConfig):
12
  model_type = 'rf-detr'
 
24
  amp: bool = True,
25
  num_classes: int = 90,
26
  num_queries: int = 300,
27
+ # device: Literal["cpu", "cuda", "mps"] = DEVICE,
28
  resolution: int = 560,
29
  group_detr: int = 13,
30
  gradient_checkpointing: bool = False,
 
40
  self.layer_norm = layer_norm
41
  self.amp = amp
42
  self.num_classes = num_classes
43
+ # self.device = device
44
  self.resolution = resolution
45
  self.group_detr = group_detr
46
  self.gradient_checkpointing = gradient_checkpointing
modeling_rf_detr.py CHANGED
@@ -40,7 +40,7 @@ class RFDetrModelForObjectDetection(PreTrainedModel):
40
  layer_norm = config.layer_norm,
41
  amp = config.amp,
42
  num_classes = config.num_classes,
43
- device = config.device,
44
  resolution = config.resolution,
45
  group_detr = config.group_detr,
46
  gradient_checkpointing = config.gradient_checkpointing,
@@ -106,7 +106,7 @@ class RFDetrModelForObjectDetection(PreTrainedModel):
106
  wr = self.config.resolution / float(w)
107
 
108
  for label in labels:
109
- boxes = label["boxes"].to(device=self.config.device, dtype=torch.float32)
110
  # resize boxes to model's resolution
111
  boxes[:, [0, 2]] *= wr
112
  boxes[:, [1, 3]] *= hr
@@ -117,8 +117,8 @@ class RFDetrModelForObjectDetection(PreTrainedModel):
117
  # normalize to [0, 1] by model's resolution
118
  boxes[:] /= self.config.resolution
119
  label["boxes"] = boxes
120
- if "labels" in label:
121
- label["labels"] = label["labels"].to(self.config.device)
122
 
123
  def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor, labels=None, **kwargs) -> ModelOutput:
124
  resize = Resize((self.config.resolution, self.config.resolution))
@@ -135,8 +135,8 @@ class RFDetrModelForObjectDetection(PreTrainedModel):
135
  self.criterion.training = False
136
 
137
  # resize pixel values and mask to model's resolution
138
- pixel_values = pixel_values.to(self.config.device)
139
- pixel_mask = pixel_mask.to(self.config.device)
140
  pixel_values = resize(pixel_values)
141
  pixel_mask = resize(pixel_mask)
142
 
 
40
  layer_norm = config.layer_norm,
41
  amp = config.amp,
42
  num_classes = config.num_classes,
43
+ #device = config.device,
44
  resolution = config.resolution,
45
  group_detr = config.group_detr,
46
  gradient_checkpointing = config.gradient_checkpointing,
 
106
  wr = self.config.resolution / float(w)
107
 
108
  for label in labels:
109
+ boxes = label["boxes"]#.to(device=self.config.device, dtype=torch.float32)
110
  # resize boxes to model's resolution
111
  boxes[:, [0, 2]] *= wr
112
  boxes[:, [1, 3]] *= hr
 
117
  # normalize to [0, 1] by model's resolution
118
  boxes[:] /= self.config.resolution
119
  label["boxes"] = boxes
120
+ # if "labels" in label:
121
+ # label["labels"] = label["labels"].to(self.config.device)
122
 
123
  def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor, labels=None, **kwargs) -> ModelOutput:
124
  resize = Resize((self.config.resolution, self.config.resolution))
 
135
  self.criterion.training = False
136
 
137
  # resize pixel values and mask to model's resolution
138
+ # pixel_values = pixel_values.to(self.config.device)
139
+ # pixel_mask = pixel_mask.to(self.config.device)
140
  pixel_values = resize(pixel_values)
141
  pixel_mask = resize(pixel_mask)
142