Hayloo9838 committed on
Commit
990ca41
·
verified ·
1 Parent(s): a195ff4

Update model.py

Browse files
Files changed (1) hide show
  1. model.py +4 -2
model.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import cv2
2
  import torch
3
  import numpy as np
@@ -5,7 +7,7 @@ from transformers import CLIPProcessor, CLIPVisionModel
5
  from PIL import Image
6
  from torch import nn
7
 
8
- MODEL_PATH = "uno_clip_large.pth"
9
 
10
  class CLIPVisionClassifier(nn.Module):
11
  def __init__(self, num_labels):
@@ -13,7 +15,7 @@ class CLIPVisionClassifier(nn.Module):
13
  self.vision_model = CLIPVisionModel.from_pretrained('openai/clip-vit-large-patch14',
14
  attn_implementation="eager") # shows heat
15
  self.classifier = nn.Linear(self.vision_model.config.hidden_size, num_labels, bias=False)
16
- self.dropout = nn.Dropout(0.1)
17
 
18
  def forward(self, pixel_values, output_attentions=False):
19
  outputs = self.vision_model(pixel_values, output_attentions=output_attentions)
 
1
+ # model usage + heatmap of attention (what the model is focusing on)
2
+
3
  import cv2
4
  import torch
5
  import numpy as np
 
7
  from PIL import Image
8
  from torch import nn
9
 
10
+ MODEL_PATH = "pytorch_model.bin"
11
 
12
  class CLIPVisionClassifier(nn.Module):
13
  def __init__(self, num_labels):
 
15
  self.vision_model = CLIPVisionModel.from_pretrained('openai/clip-vit-large-patch14',
16
  attn_implementation="eager") # shows heat
17
  self.classifier = nn.Linear(self.vision_model.config.hidden_size, num_labels, bias=False)
18
+ self.dropout = nn.Dropout(0.1) # this is not used dont worry :
19
 
20
  def forward(self, pixel_values, output_attentions=False):
21
  outputs = self.vision_model(pixel_values, output_attentions=output_attentions)