Spaces: Running on Zero
Load MetaUAS models from Hugging Face model repositories
Browse files
- app.py: +6 -4
- metauas.py: +2 -1
app.py
CHANGED
|
@@ -52,12 +52,14 @@ metauas_model = MetaUAS(encoder_name,
|
|
| 52 |
def process_image(prompt_img, query_img, options):
|
| 53 |
# Load the model based on selected options
|
| 54 |
if 'model-512' in options:
|
| 55 |
-
ckt_path = "weights/metauas-512.ckpt"
|
| 56 |
-
model = safely_load_state_dict(metauas_model, ckt_path)
|
|
|
|
| 57 |
img_size = 512
|
| 58 |
else:
|
| 59 |
-
ckt_path = 'weights/metauas-256.ckpt'
|
| 60 |
-
model = safely_load_state_dict(metauas_model, ckt_path)
|
|
|
|
| 61 |
img_size = 256
|
| 62 |
|
| 63 |
model.to(device)
|
|
|
|
| 52 |
def process_image(prompt_img, query_img, options):
|
| 53 |
# Load the model based on selected options
|
| 54 |
if 'model-512' in options:
|
| 55 |
+
#ckt_path = "weights/metauas-512.ckpt"
|
| 56 |
+
#model = safely_load_state_dict(metauas_model, ckt_path)
|
| 57 |
+
model = MetaUAS.from_pretrained("csgaobb/MetaUAS-512")
|
| 58 |
img_size = 512
|
| 59 |
else:
|
| 60 |
+
#ckt_path = 'weights/metauas-256.ckpt'
|
| 61 |
+
#model = safely_load_state_dict(metauas_model, ckt_path)
|
| 62 |
+
model = MetaUAS.from_pretrained("csgaobb/MetaUAS-256")
|
| 63 |
img_size = 256
|
| 64 |
|
| 65 |
model.to(device)
|
metauas.py
CHANGED
|
@@ -31,6 +31,7 @@ from torchvision.transforms.functional import pil_to_tensor
|
|
| 31 |
from segmentation_models_pytorch.unet.model import UnetDecoder
|
| 32 |
from segmentation_models_pytorch.fpn.decoder import FPNDecoder
|
| 33 |
from segmentation_models_pytorch.encoders import get_encoder, get_preprocessing_params
|
|
|
|
| 34 |
|
| 35 |
def set_random_seed(seed=233, reproduce=False):
|
| 36 |
np.random.seed(seed)
|
|
@@ -131,7 +132,7 @@ class AlignmentLayer(nn.Module):
|
|
| 131 |
return aligned_features
|
| 132 |
|
| 133 |
|
| 134 |
-
class MetaUAS(pl.LightningModule):
|
| 135 |
def __init__(self, encoder_name, decoder_name, encoder_depth, decoder_depth, num_alignment_layers, alignment_type, fusion_policy):
|
| 136 |
super().__init__()
|
| 137 |
|
|
|
|
| 31 |
from segmentation_models_pytorch.unet.model import UnetDecoder
|
| 32 |
from segmentation_models_pytorch.fpn.decoder import FPNDecoder
|
| 33 |
from segmentation_models_pytorch.encoders import get_encoder, get_preprocessing_params
|
| 34 |
+
from huggingface_hub import PyTorchModelHubMixin
|
| 35 |
|
| 36 |
def set_random_seed(seed=233, reproduce=False):
|
| 37 |
np.random.seed(seed)
|
|
|
|
| 132 |
return aligned_features
|
| 133 |
|
| 134 |
|
| 135 |
+
class MetaUAS(pl.LightningModule, PyTorchModelHubMixin):
|
| 136 |
def __init__(self, encoder_name, decoder_name, encoder_depth, decoder_depth, num_alignment_layers, alignment_type, fusion_policy):
|
| 137 |
super().__init__()
|
| 138 |
|