def get_examples_for_learning():
'\n Get a few examples used to learn to compose the given LoRA modules\n '
return [{'input': 'Infer the date from context.\n\nQ: Jane is celebrating the last day of Jan 2012. What is the date tomorrow in MM/DD/YYYY?\nOptions:\n(A) 02/02/2012\n(B) 02/15/2012\n(C) 01/25/2012\n(D) 0... |
def get_examples_for_inference():
'\n Run inference on the examples to measure the performance of the composed LoRA modules\n '
return [{'input': 'Infer the date from context.\n\nQ: The current local time is 3:02 pm of 5/4/2004. What is the date one week ago from today in MM/DD/YYYY?\nOptions:\n(A) 04/27/2004\n... |
def get_lora_module_list():
'\n You can have a custom filtering strategy to select the modules to be used in the composition. Here we randomly select 20 modules.\n '
random.seed(42)
return random.sample(LORA_MODULE_NAMES, 20)
def main():
'\n Perform lorahub learning\n '
modules = get_lora_module_list()
print('modules:', modules)
(example_inputs, examples_outputs) = ([], [])
for example in get_examples_for_learning():
example_inputs.append(example['input'])
examples_outputs.append(example['output']... |
def load_base_model_and_lora_modules(lora_module_list: List[str], model_name_or_path: Optional[str]=None):
'load base model and lora modules from huggingface model hub\n\n Args:\n lora_module_list (List[str]): a list of lora module names available in huggingface model hub\n model_name_or_path (Op... |
def preprocess_function(examples, tokenizer):
'\n standard preprocess function for dataset\n '
inputs = examples['input']
targets = examples['output']
model_inputs = tokenizer(inputs, max_length=2048, padding=True, truncation=True, return_tensors='pt')
labels = tokenizer(targets, max_length=... |
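# The tail of preprocess_function is truncated above. A minimal sketch of the
# usual seq2seq label handling under the same tokenizer settings; the
# max_length of 256 and the -100 ignore-index are assumptions, not recovered
# from the original.
def finish_labels_sketch(model_inputs, targets, tokenizer):
    labels = tokenizer(targets, max_length=256, padding=True,
                       truncation=True, return_tensors='pt')['input_ids']
    # Mask pad positions so they are ignored by the cross-entropy loss.
    labels[labels == tokenizer.pad_token_id] = -100
    model_inputs['labels'] = labels
    return model_inputs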
def load_dataset(example_inputs, example_outputs, tokenizer):
if (example_outputs is None):
example_outputs = ([''] * len(example_inputs))
df = [{'input': example_inputs[i], 'output': example_outputs[i]} for i in range(len(example_inputs))]
dataset = Dataset.from_pandas(pd.DataFrame(df))
prepr... |
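# The mapping step is cut off above; a sketch of how preprocess_function is
# typically applied with datasets.Dataset.map (the remove_columns list is an
# assumption based on the fields built above).
from functools import partial

def map_dataset_sketch(dataset, tokenizer):
    return dataset.map(
        partial(preprocess_function, tokenizer=tokenizer),
        batched=True,
        remove_columns=['input', 'output'],
    )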
def default_get_loss(example_dataset, model, batch_size):
'\n Get the loss of the model on the example dataset. Usually the example dataset only contains a few examples.\n '
data_batch_size = (len(example_dataset) if (batch_size is None) else min(len(example_dataset), batch_size))
train_dataloader =... |
def default_l1_regularization(weights):
'\n Get the L1 regularization term for the weights\n '
mean_abs = (sum([abs(x) for x in weights]) / len(weights))
return (0.05 * mean_abs)
def get_score(weights, model, cache, example_dataset, batch_size, get_loss, get_regular):
final_state_dict = {}
lora_module_list = list(cache.keys())
keys = cache[lora_module_list[0]].keys()
for (i, peft_model_id) in enumerate(lora_module_list):
lora_state_dict = cache[peft_model_id]
i... |
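# get_score is truncated above; a sketch of the objective it computes,
# assuming the merged weights are loaded with peft's
# set_peft_model_state_dict and scored as few-shot loss plus the weight
# regularizer (see get_final_weights below for the merge itself).
from peft import set_peft_model_state_dict

def get_score_sketch(weights, model, cache, example_dataset,
                     batch_size, get_loss, get_regular):
    lora_module_list = list(cache.keys())
    final_state_dict = get_final_weights(weights, lora_module_list, cache)
    set_peft_model_state_dict(model, final_state_dict)
    loss = get_loss(example_dataset, model, batch_size)
    return loss + get_regular(weights)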
def get_final_weights(weights, lora_module_list, cache):
final_state_dict = {}
keys = cache[lora_module_list[0]].keys()
for (i, peft_model_id) in enumerate(lora_module_list):
lora_state_dict = cache[peft_model_id]
if (i == 0):
for key in keys:
final_state_dict[k... |
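# The merge loop above is truncated; a sketch of the element-wise combination
# it performs, assuming all cached LoRAs share identical parameter keys:
# final[key] = sum_i weights[i] * lora_i[key].
def get_final_weights_sketch(weights, lora_module_list, cache):
    keys = cache[lora_module_list[0]].keys()
    final_state_dict = {}
    for i, peft_model_id in enumerate(lora_module_list):
        lora_state_dict = cache[peft_model_id]
        for key in keys:
            if i == 0:
                final_state_dict[key] = weights[i] * lora_state_dict[key]
            else:
                final_state_dict[key] = (final_state_dict[key]
                                         + weights[i] * lora_state_dict[key])
    return final_state_dict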
def lorahub_inference(example_inputs: List[str], model_or_name_path: Union[(AutoModelForSeq2SeqLM, str)], tokenizer_or_tokenizer_path: Union[(AutoTokenizer, str)], batch_size: int, example_outputs: List[str]=None):
def accuracy_score(outputs, ground_truths):
correct = 0
total = 0
for (out... |
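# accuracy_score is cut off above; a sketch of a plain exact-match metric,
# where the whitespace/case normalization is an assumption.
def accuracy_score_sketch(outputs, ground_truths):
    correct = sum(1 for out, truth in zip(outputs, ground_truths)
                  if out.strip().lower() == truth.strip().lower())
    return correct / max(len(outputs), 1) * 100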
def lorahub_learning(lora_module_list: List[str], example_inputs: List[str], example_outputs: List[str], max_inference_step: int, model_name_or_path=None, batch_size=None, get_loss=default_get_loss, get_regular=default_l1_regularization, seed=42):
random.seed(seed)
numpy.random.seed(seed)
number_of_loras ... |
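# lorahub_learning is truncated after the seeding; the composition weights are
# found by gradient-free optimization (the original repo uses nevergrad). A
# minimal ask/tell sketch; the bounds and the objective wiring are assumptions.
import nevergrad as ng

def optimize_weights_sketch(number_of_loras, objective, max_inference_step):
    params = ng.p.Array(init=[0.0] * number_of_loras)
    params.set_bounds(lower=-1.5, upper=1.5)
    optimizer = ng.optimizers.NGOpt(parametrization=params,
                                    budget=max_inference_step)
    for _ in range(max_inference_step):
        candidate = optimizer.ask()
        loss = objective(candidate.value)  # e.g. partial(get_score, model=..., cache=...)
        optimizer.tell(candidate, loss)
    return optimizer.provide_recommendation().value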
class Predictor(BasePredictor):
def setup(self) -> None:
'Load the model into memory to make running multiple predictions efficient'
pass
def predict(self, example_inputs: str=Input(description='List of input examples, one Line one input.', default='Infer the date from context. Q: Today, 8/... |
def evaluate_flan_results_zero_shot(folder, flan_model_name):
sub_dirs = os.listdir(folder)
for sub_dir in sub_dirs:
test_file_path = os.path.join(folder, sub_dir, 'zero_shot.jsonl')
(task_inputs, task_outputs) = ([], [])
for line in open(test_file_path, 'r', encoding='utf-8'):
... |
def evaluate_flan_results_few_shot(folder, flan_model_name):
sub_dirs = os.listdir(folder)
for sub_dir in sub_dirs:
test_file_path = os.path.join(folder, sub_dir, 'few_shot.jsonl')
(task_inputs, task_outputs) = ([], [])
for line in open(test_file_path, 'r', encoding='utf-8'):
... |
def evaluate_lorahub_results_few_shot(folder, flan_model_name):
sub_dirs = os.listdir(folder)
for sub_dir in sub_dirs:
(example_inputs, examples_outputs) = ([], [])
example_file_path = os.path.join(folder, sub_dir, 'example.jsonl')
for line in open(example_file_path, 'r', encoding='utf... |
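# All three evaluation loops above read task examples from jsonl files; a
# sketch of the parsing, where the 'context'/'completion' field names are
# assumptions about the file schema.
import json

def read_jsonl_sketch(path):
    inputs, outputs = [], []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            example = json.loads(line)
            inputs.append(example['context'])
            outputs.append(example['completion'])
    return inputs, outputs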
@dataclass
class ModelArguments:
'\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n '
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=Non... |
@dataclass
class DataTrainingArguments:
'\n Arguments pertaining to what data we are going to input our model for training and eval.\n '
dataset_name: Optional[str] = field(default='sail/symbolic-instruction-tuning', metadata={'help': 'The name of the dataset to use (via the datasets library).'})
... |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, d... |
def _mp_fn(index):
main()
def resize_and_save(path, resize=RESIZE):
# Read every image under temp_root/path, resize it, and save it under local_root/path.
for img_name in tqdm(os.listdir(os.path.join(temp_root, path))):
img = cv2.imread(os.path.join(temp_root, path, img_name))
img = cv2.resize(img, resize)
cv2.imwrite(os.path.join(local_root, path, img_name), img)
class T1Dataset(torch.utils.data.Dataset):
'\n Dataset for use with an array of files and an array of labels as input\n '
def __init__(self, X, y, transform=None):
self.X = X
self.y = y
self.transform = transform
def __len__(self):
return len(self.X)
def __getitem__(se... |
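# __getitem__ is truncated above; a sketch assuming X holds image file paths
# read with cv2 (as in resize_and_save) and that transform is an optional
# callable applied to the loaded image.
import cv2
import torch

class T1DatasetSketch(torch.utils.data.Dataset):
    def __init__(self, X, y, transform=None):
        self.X, self.y, self.transform = X, y, transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        img = cv2.imread(self.X[idx])
        if self.transform is not None:
            img = self.transform(img)
        return img, self.y[idx]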
def ss_train(model, criterion, optimizer, num_epochs, start_alpha_from=5, reach_max_alpha_in=15, max_alpha=1, scheduler=None):
writer = SummaryWriter()
alphas = np.linspace(0, max_alpha, (reach_max_alpha_in - start_alpha_from))
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
... |
def sexy_tuner():
for lr in [0.01]:
model = torchvision.models.resnet18(pretrained=True)
final_layer_in = model.fc.in_features
model.fc = torch.nn.Linear(final_layer_in, 2)
model = model.to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(... |
def train(model, criterion, optimizer, num_epochs, scheduler=None):
writer = SummaryWriter()
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
for phase in ['train', 'valid']:
if (phase == 'train'):
... |
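# The per-phase body of train() is cut off; a sketch of the standard
# train/valid alternation it follows, in the style of the torchvision
# transfer-learning recipe (the metric bookkeeping here is an assumption).
import torch

def run_phase_sketch(model, criterion, optimizer, phase, dataloader, device):
    model.train() if phase == 'train' else model.eval()
    running_loss, running_corrects, seen = 0.0, 0, 0
    for inputs, labels in dataloader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        # Gradients only flow during the training phase.
        with torch.set_grad_enabled(phase == 'train'):
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            if phase == 'train':
                loss.backward()
                optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        running_corrects += (outputs.argmax(1) == labels).sum().item()
        seen += inputs.size(0)
    return running_loss / seen, running_corrects / seen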
def process_state(state, v_size=0, p_size=0):
'\n :param state: tuple of vars\n :return: state converted into device arrays with an additional batch dim\n '
state_new = ()
for i in range(len(state)):
var_tmp = jnp.array(state[i])
if (v_size > 0):
var_tmp = var_tmp[(None, ...)].re... |
def rollout(env_name, num_steps=128, use_expert=False, seed=1):
env_fn = envs.create_fn(env_name)
env = env_fn(batch_size=1, episode_length=(num_steps * 2), auto_reset=False)
env.step = jax.jit(env.step)
if (not use_expert):
parametric_action_distribution = distribution.NormalTanhDistribution(... |
def visualize(state_list, env_name, num_steps):
env = envs.create(env_name=env_name, episode_length=num_steps)
visual_states = []
for i in range(state_list.qp.ang.shape[0]):
qp_state = brax.QP(np.array(state_list.qp.pos[(i, 0)]), np.array(state_list.qp.rot[(i, 0)]), np.array(state_list.qp.vel[(i, ... |
def _cfg(url='', **kwargs):
return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', **kwargs}
class Downsampling(nn.Module):
'\n Downsampling implemented by a layer of convolution.\n '
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, pre_norm=None, post_norm=None, pre_permute=False):
super().__init__()
self.pre_norm = (pre_norm(in_channels) if pre_... |
class Scale(nn.Module):
'\n Scale vector by elementwise multiplication.\n '
def __init__(self, dim, init_value=1.0, trainable=True):
super().__init__()
self.scale = nn.Parameter((init_value * torch.ones(dim)), requires_grad=trainable)
def forward(self, x):
return (x * self.scale)
class SquaredReLU(nn.Module):
'\n Squared ReLU: https://arxiv.org/abs/2109.08668\n '
def __init__(self, inplace=False):
super().__init__()
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return torch.square(self.relu(x))
class StarReLU(nn.Module):
'\n StarReLU: s * relu(x) ** 2 + b\n '
def __init__(self, scale_value=1.0, bias_value=0.0, scale_learnable=True, bias_learnable=True, mode=None, inplace=False):
super().__init__()
self.inplace = inplace
self.relu = nn.ReLU(inplace=inplace)
self... |
class Attention(nn.Module):
'\n Vanilla self-attention from Transformer: https://arxiv.org/abs/1706.03762.\n Modified from timm.\n '
def __init__(self, dim, head_dim=32, num_heads=None, qkv_bias=False, attn_drop=0.0, proj_drop=0.0, proj_bias=False, **kwargs):
super().__init__()
self.... |
class RandomMixing(nn.Module):
def __init__(self, num_tokens=196, **kwargs):
super().__init__()
self.random_matrix = nn.parameter.Parameter(data=torch.softmax(torch.rand(num_tokens, num_tokens), dim=(- 1)), requires_grad=False)
def forward(self, x):
(B, H, W, C) = x.shape
x =... |
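# The forward pass is truncated after the reshape; a sketch of the mixing it
# applies, consistent with the frozen (num_tokens, num_tokens) softmax matrix
# declared in __init__.
import torch

def random_mixing_forward_sketch(self, x):
    B, H, W, C = x.shape
    x = x.reshape(B, H * W, C)
    # Mix tokens with the fixed row-stochastic matrix, then restore the grid.
    x = torch.einsum('mn,bnc->bmc', self.random_matrix, x)
    return x.reshape(B, H, W, C)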
class LayerNormGeneral(nn.Module):
' General LayerNorm for different situations.\n\n Args:\n affine_shape (int, list or tuple): The shape of affine weight and bias.\n Usually the affine_shape=C, but in some implementation, like torch.nn.LayerNorm,\n the affine_shape is the same as ... |
class LayerNormWithoutBias(nn.Module):
'\n Equal to partial(LayerNormGeneral, bias=False) but faster,\n because it directly utilizes the optimized F.layer_norm\n '
def __init__(self, normalized_shape, eps=1e-05, **kwargs):
super().__init__()
self.eps = eps
self.bias = None
... |
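# The remainder of the class is truncated; a sketch of a bias-free LayerNorm
# that delegates to F.layer_norm as the docstring describes (the weight
# initialization is an assumption).
import torch
import torch.nn as nn
import torch.nn.functional as F

class LayerNormWithoutBiasSketch(nn.Module):
    def __init__(self, normalized_shape, eps=1e-05, **kwargs):
        super().__init__()
        if isinstance(normalized_shape, int):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(self.normalized_shape))
        self.bias = None  # kept so the forward signature matches the name

    def forward(self, x):
        return F.layer_norm(x, self.normalized_shape, self.weight,
                            self.bias, self.eps)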
class SepConv(nn.Module):
'\n Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381.\n '
def __init__(self, dim, expansion_ratio=2, act1_layer=StarReLU, act2_layer=nn.Identity, bias=False, kernel_size=7, padding=3, **kwargs):
super().__init__()
med_channels =... |
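# The SepConv body is truncated; a sketch of the inverted separable pattern
# the docstring cites (pointwise expand -> depthwise conv -> pointwise
# project) for channels-last input; nn.ReLU stands in for StarReLU here.
import torch.nn as nn

class SepConvSketch(nn.Module):
    def __init__(self, dim, expansion_ratio=2, act1_layer=nn.ReLU,
                 act2_layer=nn.Identity, bias=False, kernel_size=7,
                 padding=3, **kwargs):
        super().__init__()
        med_channels = int(expansion_ratio * dim)
        self.pwconv1 = nn.Linear(dim, med_channels, bias=bias)
        self.act1 = act1_layer()
        self.dwconv = nn.Conv2d(med_channels, med_channels,
                                kernel_size=kernel_size, padding=padding,
                                groups=med_channels, bias=bias)  # depthwise
        self.act2 = act2_layer()
        self.pwconv2 = nn.Linear(med_channels, dim, bias=bias)

    def forward(self, x):  # x: [B, H, W, C]
        x = self.act1(self.pwconv1(x))
        x = x.permute(0, 3, 1, 2)   # channels-first for the depthwise conv
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)   # back to channels-last
        return self.pwconv2(self.act2(x))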
class Pooling(nn.Module):
'\n Implementation of pooling for PoolFormer: https://arxiv.org/abs/2111.11418\n Modified for [B, H, W, C] input\n '
def __init__(self, pool_size=3, **kwargs):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=(pool_size // 2), count_i... |
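# The forward is truncated; a sketch of the PoolFormer token mixer for
# [B, H, W, C] input: average-pool and subtract the input, so the residual
# connection elsewhere adds back only the pooled signal.
def pooling_forward_sketch(self, x):
    y = x.permute(0, 3, 1, 2)      # channels-first for AvgPool2d
    y = self.pool(y)
    y = y.permute(0, 2, 3, 1)      # back to channels-last
    return y - x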
class Mlp(nn.Module):
' MLP as used in MetaFormer models, e.g. Transformer, MLP-Mixer, PoolFormer, MetaFormer baselines and related networks.\n Mostly copied from timm.\n '
def __init__(self, dim, mlp_ratio=4, out_features=None, act_layer=StarReLU, drop=0.0, bias=False, **kwargs):
super().__init__... |
class MlpHead(nn.Module):
' MLP classification head\n '
def __init__(self, dim, num_classes=1000, mlp_ratio=4, act_layer=SquaredReLU, norm_layer=nn.LayerNorm, head_dropout=0.0, bias=True):
super().__init__()
hidden_features = int((mlp_ratio * dim))
self.fc1 = nn.Linear(dim, hidden_... |
class MetaFormerBlock(nn.Module):
'\n Implementation of one MetaFormer block.\n '
def __init__(self, dim, token_mixer=nn.Identity, mlp=Mlp, norm_layer=nn.LayerNorm, drop=0.0, drop_path=0.0, layer_scale_init_value=None, res_scale_init_value=None):
super().__init__()
self.norm1 = norm_lay... |
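# The block body is truncated after norm1; a sketch of the canonical
# MetaFormer block: two pre-norm residual sublayers (token mixer, then MLP),
# each with optional layer scale and residual scale. DropPath is omitted here
# for brevity, which is a simplification of the original.
import torch.nn as nn

class MetaFormerBlockSketch(nn.Module):
    def __init__(self, dim, token_mixer=nn.Identity, mlp=Mlp,
                 norm_layer=nn.LayerNorm, layer_scale_init_value=None,
                 res_scale_init_value=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.token_mixer = token_mixer(dim=dim)
        self.norm2 = norm_layer(dim)
        self.mlp = mlp(dim=dim)
        make_scale = lambda v: Scale(dim, init_value=v) if v else nn.Identity()
        self.layer_scale1 = make_scale(layer_scale_init_value)
        self.res_scale1 = make_scale(res_scale_init_value)
        self.layer_scale2 = make_scale(layer_scale_init_value)
        self.res_scale2 = make_scale(res_scale_init_value)

    def forward(self, x):
        x = self.res_scale1(x) + self.layer_scale1(self.token_mixer(self.norm1(x)))
        x = self.res_scale2(x) + self.layer_scale2(self.mlp(self.norm2(x)))
        return x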
class MetaFormer(nn.Module):
' MetaFormer\n A PyTorch impl of : `MetaFormer Baselines for Vision` -\n https://arxiv.org/abs/2210.13452\n\n Args:\n in_chans (int): Number of input image channels. Default: 3.\n num_classes (int): Number of classes for classification head. Default: ... |
@register_model
def identityformer_s12(pretrained=False, **kwargs):
model = MetaFormer(depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], token_mixers=nn.Identity, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['identityformer_s12... |
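# Every @register_model factory below is truncated at the same point; a sketch
# of the usual timm-style tail they share, pulling the checkpoint URL from the
# model's default_cfg (the map_location/check_hash choices are assumptions).
import torch

def load_pretrained_sketch(model):
    state_dict = torch.hub.load_state_dict_from_url(
        url=model.default_cfg['url'], map_location='cpu', check_hash=True)
    model.load_state_dict(state_dict)
    return model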
@register_model
def identityformer_s24(pretrained=False, **kwargs):
model = MetaFormer(depths=[4, 4, 12, 4], dims=[64, 128, 320, 512], token_mixers=nn.Identity, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['identityformer_s2... |
@register_model
def identityformer_s36(pretrained=False, **kwargs):
model = MetaFormer(depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], token_mixers=nn.Identity, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['identityformer_s3... |
@register_model
def identityformer_m36(pretrained=False, **kwargs):
model = MetaFormer(depths=[6, 6, 18, 6], dims=[96, 192, 384, 768], token_mixers=nn.Identity, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['identityformer_m3... |
@register_model
def identityformer_m48(pretrained=False, **kwargs):
model = MetaFormer(depths=[8, 8, 24, 8], dims=[96, 192, 384, 768], token_mixers=nn.Identity, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['identityformer_m4... |
@register_model
def randformer_s12(pretrained=False, **kwargs):
model = MetaFormer(depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], token_mixers=[nn.Identity, nn.Identity, RandomMixing, partial(RandomMixing, num_tokens=49)], norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **k... |
@register_model
def randformer_s24(pretrained=False, **kwargs):
model = MetaFormer(depths=[4, 4, 12, 4], dims=[64, 128, 320, 512], token_mixers=[nn.Identity, nn.Identity, RandomMixing, partial(RandomMixing, num_tokens=49)], norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **... |
@register_model
def randformer_s36(pretrained=False, **kwargs):
model = MetaFormer(depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], token_mixers=[nn.Identity, nn.Identity, RandomMixing, partial(RandomMixing, num_tokens=49)], norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **... |
@register_model
def randformer_m36(pretrained=False, **kwargs):
model = MetaFormer(depths=[6, 6, 18, 6], dims=[96, 192, 384, 768], token_mixers=[nn.Identity, nn.Identity, RandomMixing, partial(RandomMixing, num_tokens=49)], norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **... |
@register_model
def randformer_m48(pretrained=False, **kwargs):
model = MetaFormer(depths=[8, 8, 24, 8], dims=[96, 192, 384, 768], token_mixers=[nn.Identity, nn.Identity, RandomMixing, partial(RandomMixing, num_tokens=49)], norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **... |
@register_model
def poolformerv2_s12(pretrained=False, **kwargs):
model = MetaFormer(depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], token_mixers=Pooling, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['poolformerv2_s12']
i... |
@register_model
def poolformerv2_s24(pretrained=False, **kwargs):
model = MetaFormer(depths=[4, 4, 12, 4], dims=[64, 128, 320, 512], token_mixers=Pooling, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['poolformerv2_s24']
... |
@register_model
def poolformerv2_s36(pretrained=False, **kwargs):
model = MetaFormer(depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], token_mixers=Pooling, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['poolformerv2_s36']
... |
@register_model
def poolformerv2_m36(pretrained=False, **kwargs):
model = MetaFormer(depths=[6, 6, 18, 6], dims=[96, 192, 384, 768], token_mixers=Pooling, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['poolformerv2_m36']
... |
@register_model
def poolformerv2_m48(pretrained=False, **kwargs):
model = MetaFormer(depths=[8, 8, 24, 8], dims=[96, 192, 384, 768], token_mixers=Pooling, norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False), **kwargs)
model.default_cfg = default_cfgs['poolformerv2_m48']
... |
@register_model
def convformer_s18(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s18']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url=m... |
@register_model
def convformer_s18_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s18_384']
if pretrained:
state_dict = torch.hub.load_state_dict_from_u... |
@register_model
def convformer_s18_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s18_in21ft1k']
if pretrained:
state_dict = torch.hub.load_state_d... |
@register_model
def convformer_s18_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s18_384_in21ft1k']
if pretrained:
state_dict = torch.hub.load... |
@register_model
def convformer_s18_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s18_in21k']
if pretrained:
state_dict = torch.hub.load_state_dict_fr... |
@register_model
def convformer_s36(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s36']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url... |
@register_model
def convformer_s36_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s36_384']
if pretrained:
state_dict = torch.hub.load_state_dict_from... |
@register_model
def convformer_s36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s36_in21ft1k']
if pretrained:
state_dict = torch.hub.load_state... |
@register_model
def convformer_s36_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s36_384_in21ft1k']
if pretrained:
state_dict = torch.hub.lo... |
@register_model
def convformer_s36_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s36_in21k']
if pretrained:
state_dict = torch.hub.load_state_dict_... |
@register_model
def convformer_m36(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_m36']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url... |
@register_model
def convformer_m36_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_m36_384']
if pretrained:
state_dict = torch.hub.load_state_dict_from... |
@register_model
def convformer_m36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_m36_in21ft1k']
if pretrained:
state_dict = torch.hub.load_state... |
@register_model
def convformer_m36_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_m36_384_in21ft1k']
if pretrained:
state_dict = torch.hub.lo... |
@register_model
def convformer_m36_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_m36_in21k']
if pretrained:
state_dict = torch.hub.load_state_dict_... |
@register_model
def convformer_b36(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_b36']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(ur... |
@register_model
def convformer_b36_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_b36_384']
if pretrained:
state_dict = torch.hub.load_state_dict_fro... |
@register_model
def convformer_b36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_b36_in21ft1k']
if pretrained:
state_dict = torch.hub.load_stat... |
@register_model
def convformer_b36_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_b36_384_in21ft1k']
if pretrained:
state_dict = torch.hub.l... |
@register_model
def convformer_b36_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_b36_in21k']
if pretrained:
state_dict = torch.hub.load_state_dict... |
@register_model
def caformer_s18(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s18']
if pretrained:
state_dict = torch.hub.l... |
@register_model
def caformer_s18_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s18_384']
if pretrained:
state_dict = tor... |
@register_model
def caformer_s18_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s18_in21ft1k']
if pretrained:
state_... |
@register_model
def caformer_s18_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s18_384_in21ft1k']
if pretrained:
... |
@register_model
def caformer_s18_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s18_in21k']
if pretrained:
state_dict =... |
@register_model
def caformer_s36(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s36']
if pretrained:
state_dict = torch.hub... |
@register_model
def caformer_s36_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s36_384']
if pretrained:
state_dict = t... |
@register_model
def caformer_s36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s36_in21ft1k']
if pretrained:
stat... |
@register_model
def caformer_s36_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s36_384_in21ft1k']
if pretrained:
... |
@register_model
def caformer_s36_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_s36_in21k']
if pretrained:
state_dict... |
@register_model
def caformer_m36(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_m36']
if pretrained:
state_dict = torch.hub... |
@register_model
def caformer_m36_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_m36_384']
if pretrained:
state_dict = t... |
@register_model
def caformer_m36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_m36_in21ft1k']
if pretrained:
stat... |
@register_model
def caformer_m36_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_m36_384_in21ft1k']
if pretrained:
... |
@register_model
def caformer_m36_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_m36_in21k']
if pretrained:
state_di... |
@register_model
def caformer_b36(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_b36']
if pretrained:
state_dict = torch.hu... |
@register_model
def caformer_b36_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_b36_384']
if pretrained:
state_dict = ... |
@register_model
def caformer_b36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_b36_in21ft1k']
if pretrained:
sta... |
@register_model
def caformer_b36_384_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_b36_384_in21ft1k']
if pretrained:
... |
@register_model
def caformer_b36_in21k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_b36_in21k']
if pretrained:
state_dic... |
def _parse_args():
(args_config, remaining) = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
args = parser.parse_args(remaining)
args_text = yaml.safe_dump(args.__dict__... |