infinity1096 committed on
Commit
74be47f
·
1 Parent(s): 8683722

add CUDA inference

Browse files
Files changed (1) hide show
  1. app.py +9 -2
app.py CHANGED
@@ -37,6 +37,9 @@ from uniflowmatch.utils.viz import warp_image_with_flow
37
  model = None
38
  USE_REFINEMENT_MODEL = False
39
 
 
 
 
40
 
41
  def initialize_model(use_refinement: bool = False):
42
  """Initialize the model - call this once at startup"""
@@ -55,6 +58,10 @@ def initialize_model(use_refinement: bool = False):
55
  if hasattr(model, "eval"):
56
  model.eval()
57
 
 
 
 
 
58
  print("Model loaded successfully!")
59
  return True
60
  except Exception as e:
@@ -99,8 +106,8 @@ def process_images(source_image, target_image, model_type_choice):
99
  # === Predict Correspondences ===
100
  with torch.no_grad():
101
  result = model.predict_correspondences_batched(
102
- source_image=torch.from_numpy(source_rgb),
103
- target_image=torch.from_numpy(target_rgb),
104
  )
105
 
106
  # Extract results based on your model's output structure
 
37
  model = None
38
  USE_REFINEMENT_MODEL = False
39
 
40
+ use_gpu = torch.cuda.is_available()
41
+ if use_gpu:
42
+ print("Using GPU for processing.")
43
 
44
  def initialize_model(use_refinement: bool = False):
45
  """Initialize the model - call this once at startup"""
 
58
  if hasattr(model, "eval"):
59
  model.eval()
60
 
61
+ if use_gpu:
62
+ print("Moving model to GPU...")
63
+ model = model.to("cuda")
64
+
65
  print("Model loaded successfully!")
66
  return True
67
  except Exception as e:
 
106
  # === Predict Correspondences ===
107
  with torch.no_grad():
108
  result = model.predict_correspondences_batched(
109
+ source_image=torch.from_numpy(source_rgb).to("cuda" if use_gpu else "cpu"),
110
+ target_image=torch.from_numpy(target_rgb).to("cuda" if use_gpu else "cpu"),
111
  )
112
 
113
  # Extract results based on your model's output structure