Update app.py
app.py
CHANGED
@@ -1,48 +1,66 @@
 import os
 import gradio as gr
 import torch
+import cv2

-# Define your model architecture
 class YourModelArchitecture(torch.nn.Module):
+    # Replace this with your actual architecture
     def __init__(self):
         super(YourModelArchitecture, self).__init__()
-        #
-        # Example: self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size)
+        # Define your architecture here

     def forward(self, x):
-        #
-        return x
+        # Implement the forward pass
+        return x

-# Load model function
 def load_model(model_path):
-
-
-
-
-    model.load_state_dict(checkpoint['model'], strict=False)  # Use strict=False
-
-    model.eval()  # Set model to evaluation mode
+    model = YourModelArchitecture()
+    checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
+    model.load_state_dict(checkpoint['model'])
+    model.eval()  # Set the model to evaluation mode
     return model

-
-
-
-
-
-
-
-def
-
-
-
-
-
-
-
-
-
-
-
-
-
+def colorize_frame(frame, model):
+    # Run colorization on a single frame
+    with torch.no_grad():
+        # Pass the preprocessed frame through the model
+        colorized_frame = model(frame)  # `frame` must already be a transformed tensor
+    return colorized_frame
+
+def colorize_video(video_path, model):
+    cap = cv2.VideoCapture(video_path)
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    output_path = "output_video.mp4"
+    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 fps if the source reports none
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Convert the frame to the format the model expects
+        input_tensor = preprocess_frame(frame)
+        colorized_frame = colorize_frame(input_tensor, model)
+
+        # Convert the model output back to a uint8 BGR image before writing
+        frame_out = colorized_frame.squeeze(0).permute(1, 2, 0).numpy()
+        frame_out = (frame_out * 255).clip(0, 255).astype('uint8')
+        out.write(cv2.cvtColor(frame_out, cv2.COLOR_RGB2BGR))
+
+    cap.release()
+    out.release()
+    return output_path
+
+def preprocess_frame(frame):
+    # Convert the frame from BGR to RGB and normalize to [0, 1]
+    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    frame = frame / 255.0
+    # Convert to a float32 PyTorch tensor and add the batch dimension
+    tensor_frame = torch.from_numpy(frame).float().permute(2, 0, 1).unsqueeze(0)
+    return tensor_frame
+
+def main(video_path):
+    model = load_model("model.pth")
+    output_video_path = colorize_video(video_path, model)
+    return output_video_path
+
+iface = gr.Interface(fn=main, inputs="video", outputs="video")
+iface.launch()
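
Since `YourModelArchitecture` is still a stub that returns its input unchanged, a quick smoke test can confirm the checkpoint format and the frame round-trip before a real colorization network is wired in. The sketch below is a minimal check meant to run alongside the functions in app.py; the dummy `model.pth` and the 64x64 synthetic frame are placeholders, and the checkpoint dict uses the 'model' key exactly as `load_model` expects.

import numpy as np

# Hypothetical smoke test: save a dummy checkpoint in the expected format,
# reload it, and push one synthetic BGR frame through the pipeline.
torch.save({'model': YourModelArchitecture().state_dict()}, "model.pth")
model = load_model("model.pth")

frame = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # fake cv2 frame
tensor = preprocess_frame(frame)        # (1, 3, 64, 64) float32 in [0, 1]
output = colorize_frame(tensor, model)  # identity while the stub is in place

assert output.shape == tensor.shape
print("pipeline OK:", tuple(output.shape))

If this check passes, the remaining work is filling in the real architecture and forward pass in YourModelArchitecture.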