Saarthak-GenBio-AI committed on
Commit
f1ab597
·
verified ·
1 Parent(s): 4c04e3e

Update modeling_genbio_pathfm.py

Browse files

To work with the latest transformers version

Files changed (1) hide show
  1. modeling_genbio_pathfm.py +14 -2
modeling_genbio_pathfm.py CHANGED
@@ -5,6 +5,8 @@ This file is intended to live in the HuggingFace repo at
5
 
6
  from transformers import AutoModel
7
  model = AutoModel.from_pretrained("genbio-ai/genbio-pathfm", trust_remote_code=True)
 
 
8
  """
9
 
10
  import math
@@ -484,14 +486,21 @@ class GenBioPathFMModel(PreTrainedModel):
484
  model = AutoModel.from_pretrained("genbio-ai/genbio-pathfm", trust_remote_code=True)
485
 
486
  # CLS-only: [B, embed_dim*3]
487
- cls_features = model(rgb_tensor)
488
 
489
  # CLS + patch tokens:
490
- cls_features, patch_features = model.forward_with_patches(rgb_tensor)
491
  """
492
 
493
  config_class = GenBioPathFMConfig
494
 
 
 
 
 
 
 
 
495
  def __init__(self, config: GenBioPathFMConfig):
496
  super().__init__(config)
497
  self.backbone = VisionTransformer(
@@ -519,6 +528,9 @@ class GenBioPathFMModel(PreTrainedModel):
519
  pos_embed_rope_rescale_coords=config.pos_embed_rope_rescale_coords,
520
  pos_embed_rope_dtype=config.pos_embed_rope_dtype,
521
  )
 
 
 
522
 
523
  def _encode(self, x: Tensor) -> Dict[str, Tensor]:
524
  """Encode single-channel [B, 1, H, W] images."""
 
5
 
6
  from transformers import AutoModel
7
  model = AutoModel.from_pretrained("genbio-ai/genbio-pathfm", trust_remote_code=True)
8
+
9
+ Compatible with transformers v4.x and v5.0+.
10
  """
11
 
12
  import math
 
486
  model = AutoModel.from_pretrained("genbio-ai/genbio-pathfm", trust_remote_code=True)
487
 
488
  # CLS-only: [B, embed_dim*3]
489
+ cls_features = model(pixel_values)
490
 
491
  # CLS + patch tokens:
492
+ cls_features, patch_features = model.forward_with_patches(pixel_values)
493
  """
494
 
495
  config_class = GenBioPathFMConfig
496
 
497
+ # -- HuggingFace class attributes for compatibility (v4.x + v5.0+) --
498
+ base_model_prefix = "backbone"
499
+ main_input_name = "pixel_values"
500
+ supports_gradient_checkpointing = False
501
+ _no_split_modules = ["SelfAttentionBlock"]
502
+ _tied_weights_keys = [] # no tied weights (required by transformers v5.0+)
503
+
504
  def __init__(self, config: GenBioPathFMConfig):
505
  super().__init__(config)
506
  self.backbone = VisionTransformer(
 
528
  pos_embed_rope_rescale_coords=config.pos_embed_rope_rescale_coords,
529
  pos_embed_rope_dtype=config.pos_embed_rope_dtype,
530
  )
531
+ # Required: sets up all_tied_weights_keys, parallelism plans, etc.
532
+ # Safe on v4.x, required on v5.0+.
533
+ self.post_init()
534
 
535
  def _encode(self, x: Tensor) -> Dict[str, Tensor]:
536
  """Encode single-channel [B, 1, H, W] images."""