Ubuntu committed on
Commit
8ddd85c
·
1 Parent(s): 020203d
Files changed (2) hide show
  1. .ipynb_checkpoints/app-checkpoint.py +10 -6
  2. app.py +10 -6
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -23,7 +23,7 @@ class ImageClassificationCollator:
23
 
24
  def __call__(self, batch):
25
  encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
26
- encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.float)
27
  return encodings
28
 
29
  class Classifier(pl.LightningModule):
@@ -38,11 +38,16 @@ class Classifier(pl.LightningModule):
38
  num_classes=model.config.num_labels
39
  )
40
 
 
 
 
 
41
  def training_step(self, batch, batch_idx):
42
  outputs = self(**batch)
43
- self.log(f"train_loss", outputs.loss)
44
- return outputs.loss
45
-
 
46
  def validation_step(self, batch, batch_idx):
47
  outputs = self(**batch)
48
  self.log(f"val_loss", outputs.loss)
@@ -138,8 +143,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
138
  frameNr = frameNr+10
139
 
140
  img=cv2.imread(class_d+'/frame_0.jpg')
141
- a=str(transformers.__version__)
142
- return img, a, class_d
143
  demo = gr.Interface(video_identity,
144
  inputs=[gr.Video(source='upload'),
145
  gr.Text(),
 
23
 
24
  def __call__(self, batch):
25
  encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
26
+ encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
27
  return encodings
28
 
29
  class Classifier(pl.LightningModule):
 
38
  num_classes=model.config.num_labels
39
  )
40
 
41
+ # def training_step(self, batch, batch_idx):
42
+ # outputs = self(**batch)
43
+ # self.log(f"train_loss", outputs.loss)
44
+ # return outputs.loss
45
  def training_step(self, batch, batch_idx):
46
  outputs = self(**batch)
47
+ loss = outputs.loss.float() # Convert to float
48
+ self.log(f"train_loss", loss)
49
+ return loss
50
+
51
  def validation_step(self, batch, batch_idx):
52
  outputs = self(**batch)
53
  self.log(f"val_loss", outputs.loss)
 
143
  frameNr = frameNr+10
144
 
145
  img=cv2.imread(class_d+'/frame_0.jpg')
146
+ return img, user_d, class_d
 
147
  demo = gr.Interface(video_identity,
148
  inputs=[gr.Video(source='upload'),
149
  gr.Text(),
app.py CHANGED
@@ -23,7 +23,7 @@ class ImageClassificationCollator:
23
 
24
  def __call__(self, batch):
25
  encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
26
- encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.float)
27
  return encodings
28
 
29
  class Classifier(pl.LightningModule):
@@ -38,11 +38,16 @@ class Classifier(pl.LightningModule):
38
  num_classes=model.config.num_labels
39
  )
40
 
 
 
 
 
41
  def training_step(self, batch, batch_idx):
42
  outputs = self(**batch)
43
- self.log(f"train_loss", outputs.loss)
44
- return outputs.loss
45
-
 
46
  def validation_step(self, batch, batch_idx):
47
  outputs = self(**batch)
48
  self.log(f"val_loss", outputs.loss)
@@ -138,8 +143,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
138
  frameNr = frameNr+10
139
 
140
  img=cv2.imread(class_d+'/frame_0.jpg')
141
- a=str(transformers.__version__)
142
- return img, a, class_d
143
  demo = gr.Interface(video_identity,
144
  inputs=[gr.Video(source='upload'),
145
  gr.Text(),
 
23
 
24
  def __call__(self, batch):
25
  encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
26
+ encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
27
  return encodings
28
 
29
  class Classifier(pl.LightningModule):
 
38
  num_classes=model.config.num_labels
39
  )
40
 
41
+ # def training_step(self, batch, batch_idx):
42
+ # outputs = self(**batch)
43
+ # self.log(f"train_loss", outputs.loss)
44
+ # return outputs.loss
45
  def training_step(self, batch, batch_idx):
46
  outputs = self(**batch)
47
+ loss = outputs.loss.float() # Convert to float
48
+ self.log(f"train_loss", loss)
49
+ return loss
50
+
51
  def validation_step(self, batch, batch_idx):
52
  outputs = self(**batch)
53
  self.log(f"val_loss", outputs.loss)
 
143
  frameNr = frameNr+10
144
 
145
  img=cv2.imread(class_d+'/frame_0.jpg')
146
+ return img, user_d, class_d
 
147
  demo = gr.Interface(video_identity,
148
  inputs=[gr.Video(source='upload'),
149
  gr.Text(),