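# In-place activated batch norm (InPlaceABN): fuses batch normalization with the activation and overwrites the input buffer, trading a little recomputation for memory.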
class InPlaceABN(ABN):
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, activation='leaky_relu', slope=0.01):
super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope)
def forward(self, x):
return inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.activation, self.slope)
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, logging_dir=logging_dir, project_config=accelerator_project_config)
if (args.report_to == 'wandb'):
if (not is_wandb_available()):
raise ImportError('Make sure to install wandb if you want to use it for logging during training.')
import wandb
if (args.train_text_encoder and (args.gradient_accumulation_steps > 1) and (accelerator.num_processes > 1)):
raise ValueError('Gradient accumulation is not supported when training the text encoder in distributed training. Please set gradient_accumulation_steps to 1. This feature will be supported in the future.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
if (args.seed is not None):
set_seed(args.seed)
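# Prior preservation (DreamBooth): pre-generate class images with the frozen base pipeline so fine-tuning does not erase generic class knowledge.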
if args.with_prior_preservation:
class_images_dir = Path(args.class_data_dir)
if (not class_images_dir.exists()):
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if (cur_class_images < args.num_class_images):
torch_dtype = (torch.float16 if (accelerator.device.type == 'cuda') else torch.float32)
if (args.prior_generation_precision == 'fp32'):
torch_dtype = torch.float32
elif (args.prior_generation_precision == 'fp16'):
torch_dtype = torch.float16
elif (args.prior_generation_precision == 'bf16'):
torch_dtype = torch.bfloat16
pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision)
pipeline.set_progress_bar_config(disable=True)
num_new_images = (args.num_class_images - cur_class_images)
logger.info(f'Number of class images to sample: {num_new_images}.')
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(sample_dataloader, desc='Generating class images', disable=(not accelerator.is_local_main_process)):
images = pipeline(example['prompt']).images
for (i, image) in enumerate(images):
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
image_filename = (class_images_dir / f"{(example['index'][i] + cur_class_images)}-{hash_image}.jpg")
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
if accelerator.is_main_process:
if (args.output_dir is not None):
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(repo_id=(args.hub_model_id or Path(args.output_dir).name), exist_ok=True, token=args.hub_token).repo_id
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer', revision=args.revision, use_fast=False)
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
text_encoder = text_encoder_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision)
try:
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae', revision=args.revision)
except OSError:
vae = None
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision)
if (vae is not None):
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
weight_dtype = torch.float32
if (accelerator.mixed_precision == 'fp16'):
weight_dtype = torch.float16
elif (accelerator.mixed_precision == 'bf16'):
weight_dtype = torch.bfloat16
unet.to(accelerator.device, dtype=weight_dtype)
if (vae is not None):
vae.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if (xformers_version == version.parse('0.0.16')):
logger.warning('xFormers 0.0.16 cannot be used for training on some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See the upstream xFormers issue for more details.')
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError('xformers is not available. Make sure it is installed correctly')
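# Attach a LoRA processor to every attention layer. Self-attention layers ('attn1') get no cross-attention dim, and the hidden size depends on which UNet block (mid/up/down) the layer belongs to.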
unet_lora_attn_procs = {}
for (name, attn_processor) in unet.attn_processors.items():
cross_attention_dim = (None if name.endswith('attn1.processor') else unet.config.cross_attention_dim)
if name.startswith('mid_block'):
hidden_size = unet.config.block_out_channels[(- 1)]
elif name.startswith('up_blocks'):
block_id = int(name[len('up_blocks.')])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith('down_blocks'):
block_id = int(name[len('down_blocks.')])
hidden_size = unet.config.block_out_channels[block_id]
if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):
lora_attn_processor_class = LoRAAttnAddedKVProcessor
else:
lora_attn_processor_class = (LoRAAttnProcessor2_0 if hasattr(F, 'scaled_dot_product_attention') else LoRAAttnProcessor)
unet_lora_attn_procs[name] = lora_attn_processor_class(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=args.lora_rank)
unet.set_attn_processor(unet_lora_attn_procs)
unet_lora_layers = AttnProcsLayers(unet.attn_processors)
text_encoder_lora_layers = None
if args.train_text_encoder:
text_lora_attn_procs = {}
for (name, module) in text_encoder.named_modules():
if name.endswith(TEXT_ENCODER_ATTN_MODULE):
text_lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=module.out_proj.out_features, cross_attention_dim=None)
text_encoder_lora_layers = AttnProcsLayers(text_lora_attn_procs)
temp_pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, text_encoder=text_encoder)
temp_pipeline._modify_text_encoder(text_lora_attn_procs)
text_encoder = temp_pipeline.text_encoder
del temp_pipeline
def save_model_hook(models, weights, output_dir):
unet_lora_layers_to_save = None
text_encoder_lora_layers_to_save = None
if args.train_text_encoder:
text_encoder_keys = accelerator.unwrap_model(text_encoder_lora_layers).state_dict().keys()
unet_keys = accelerator.unwrap_model(unet_lora_layers).state_dict().keys()
for model in models:
state_dict = model.state_dict()
if ((text_encoder_lora_layers is not None) and (text_encoder_keys is not None) and (state_dict.keys() == text_encoder_keys)):
text_encoder_lora_layers_to_save = state_dict
elif (state_dict.keys() == unet_keys):
unet_lora_layers_to_save = state_dict
weights.pop()
LoraLoaderMixin.save_lora_weights(output_dir, unet_lora_layers=unet_lora_layers_to_save, text_encoder_lora_layers=text_encoder_lora_layers_to_save)
def load_model_hook(models, input_dir):
temp_pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, revision=args.revision, torch_dtype=weight_dtype)
temp_pipeline.load_lora_weights(input_dir)
models[0].load_state_dict(AttnProcsLayers(temp_pipeline.unet.attn_processors).state_dict())
if (len(models) > 1):
models[1].load_state_dict(AttnProcsLayers(temp_pipeline.text_encoder_lora_attn_procs).state_dict())
del temp_pipeline
for _ in range(len(models)):
models.pop()
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
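# Linear LR scaling: the effective batch size is train_batch_size * gradient_accumulation_steps * num_processes.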
if args.scale_lr:
args.learning_rate = (((args.learning_rate * args.gradient_accumulation_steps) * args.train_batch_size) * accelerator.num_processes)
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.')
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
params_to_optimize = (itertools.chain(unet_lora_layers.parameters(), text_encoder_lora_layers.parameters()) if args.train_text_encoder else unet_lora_layers.parameters())
optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon)
if args.pre_compute_text_embeddings:
def compute_text_embeddings(prompt):
with torch.no_grad():
text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length)
prompt_embeds = encode_prompt(text_encoder, text_inputs.input_ids, text_inputs.attention_mask, text_encoder_use_attention_mask=args.text_encoder_use_attention_mask)
return prompt_embeds
pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
validation_prompt_negative_prompt_embeds = compute_text_embeddings('')
if (args.validation_prompt is not None):
validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt)
else:
validation_prompt_encoder_hidden_states = None
if (args.instance_prompt is not None):
pre_computed_instance_prompt_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
else:
pre_computed_instance_prompt_encoder_hidden_states = None
text_encoder = None
tokenizer = None
gc.collect()
torch.cuda.empty_cache()
else:
pre_computed_encoder_hidden_states = None
validation_prompt_encoder_hidden_states = None
validation_prompt_negative_prompt_embeds = None
pre_computed_instance_prompt_encoder_hidden_states = None
train_dataset = DreamBoothDataset(instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=(args.class_data_dir if args.with_prior_preservation else None), class_prompt=args.class_prompt, class_num=args.num_class_images, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, encoder_hidden_states=pre_computed_encoder_hidden_states, instance_prompt_encoder_hidden_states=pre_computed_instance_prompt_encoder_hidden_states, tokenizer_max_length=args.tokenizer_max_length)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=(lambda examples: collate_fn(examples, args.with_prior_preservation)), num_workers=args.dataloader_num_workers)
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if (args.max_train_steps is None):
args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
overrode_max_train_steps = True
lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=(args.lr_warmup_steps * args.gradient_accumulation_steps), num_training_steps=(args.max_train_steps * args.gradient_accumulation_steps), num_cycles=args.lr_num_cycles, power=args.lr_power)
if args.train_text_encoder:
(unet_lora_layers, text_encoder_lora_layers, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet_lora_layers, text_encoder_lora_layers, optimizer, train_dataloader, lr_scheduler)
else:
(unet_lora_layers, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet_lora_layers, optimizer, train_dataloader, lr_scheduler)
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if overrode_max_train_steps:
args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
if accelerator.is_main_process:
accelerator.init_trackers('dreambooth-lora', config=vars(args))
total_batch_size = ((args.train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num batches each epoch = {len(train_dataloader)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
global_step = 0
first_epoch = 0
if args.resume_from_checkpoint:
if (args.resume_from_checkpoint != 'latest'):
path = os.path.basename(args.resume_from_checkpoint)
else:
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith('checkpoint')]
dirs = sorted(dirs, key=(lambda x: int(x.split('-')[1])))
path = (dirs[(- 1)] if (len(dirs) > 0) else None)
if (path is None):
accelerator.print(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
args.resume_from_checkpoint = None
else:
accelerator.print(f'Resuming from checkpoint {path}')
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split('-')[1])
resume_global_step = (global_step * args.gradient_accumulation_steps)
first_epoch = (global_step // num_update_steps_per_epoch)
resume_step = (resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps))
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=(not accelerator.is_local_main_process))
progress_bar.set_description('Steps')
for epoch in range(first_epoch, args.num_train_epochs):
unet.train()
if args.train_text_encoder:
text_encoder.train()
for (step, batch) in enumerate(train_dataloader):
if (args.resume_from_checkpoint and (epoch == first_epoch) and (step < resume_step)):
if ((step % args.gradient_accumulation_steps) == 0):
progress_bar.update(1)
continue
with accelerator.accumulate(unet):
pixel_values = batch['pixel_values'].to(dtype=weight_dtype)
if (vae is not None):
model_input = vae.encode(pixel_values).latent_dist
model_input = (model_input.sample() * vae.config.scaling_factor)
else:
model_input = pixel_values
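# Forward diffusion: sample Gaussian noise and a random timestep per example, then noise the (latent) inputs with the scheduler.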
noise = torch.randn_like(model_input)
(bsz, channels, height, width) = model_input.shape
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device)
timesteps = timesteps.long()
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
if args.pre_compute_text_embeddings:
encoder_hidden_states = batch['input_ids']
else:
encoder_hidden_states = encode_prompt(text_encoder, batch['input_ids'], batch['attention_mask'], text_encoder_use_attention_mask=args.text_encoder_use_attention_mask)
if (accelerator.unwrap_model(unet).config.in_channels == (channels * 2)):
noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
if (args.class_labels_conditioning == 'timesteps'):
class_labels = timesteps
else:
class_labels = None
model_pred = unet(noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels).sample
if (model_pred.shape[1] == 6):
(model_pred, _) = torch.chunk(model_pred, 2, dim=1)
if (noise_scheduler.config.prediction_type == 'epsilon'):
target = noise
elif (noise_scheduler.config.prediction_type == 'v_prediction'):
target = noise_scheduler.get_velocity(model_input, noise, timesteps)
else:
raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
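# With prior preservation the batch stacks instance and class examples along dim 0, so split predictions/targets and add the weighted prior loss.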
if args.with_prior_preservation:
(model_pred, model_pred_prior) = torch.chunk(model_pred, 2, dim=0)
(target, target_prior) = torch.chunk(target, 2, dim=0)
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction='mean')
loss = (loss + (args.prior_loss_weight * prior_loss))
else:
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = (itertools.chain(unet_lora_layers.parameters(), text_encoder_lora_layers.parameters()) if args.train_text_encoder else unet_lora_layers.parameters())
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process:
if ((global_step % args.checkpointing_steps) == 0):
save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
accelerator.save_state(save_path)
logger.info(f'Saved state to {save_path}')
logs = {'loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if (global_step >= args.max_train_steps):
break
if accelerator.is_main_process:
if ((args.validation_prompt is not None) and ((epoch % args.validation_epochs) == 0)):
logger.info(f'''Running validation...
Generating {args.num_validation_images} images with prompt: {args.validation_prompt}.''')
pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=(None if args.pre_compute_text_embeddings else accelerator.unwrap_model(text_encoder)), revision=args.revision, torch_dtype=weight_dtype)
scheduler_args = {}
if ('variance_type' in pipeline.scheduler.config):
variance_type = pipeline.scheduler.config.variance_type
if (variance_type in ['learned', 'learned_range']):
variance_type = 'fixed_small'
scheduler_args['variance_type'] = variance_type
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
generator = (torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None)
if args.pre_compute_text_embeddings:
pipeline_args = {'prompt_embeds': validation_prompt_encoder_hidden_states, 'negative_prompt_embeds': validation_prompt_negative_prompt_embeds}
else:
pipeline_args = {'prompt': args.validation_prompt}
if (args.validation_images is None):
images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)]
else:
images = []
for image in args.validation_images:
image = Image.open(image)
image = pipeline(**pipeline_args, image=image, generator=generator).images[0]
images.append(image)
for tracker in accelerator.trackers:
if (tracker.name == 'tensorboard'):
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images('validation', np_images, epoch, dataformats='NHWC')
if (tracker.name == 'wandb'):
tracker.log({'validation': [wandb.Image(image, caption=f'{i}: {args.validation_prompt}') for (i, image) in enumerate(images)]})
del pipeline
torch.cuda.empty_cache()
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = unet.to(torch.float32)
unet_lora_layers = accelerator.unwrap_model(unet_lora_layers)
if (text_encoder is not None):
text_encoder = text_encoder.to(torch.float32)
text_encoder_lora_layers = accelerator.unwrap_model(text_encoder_lora_layers)
LoraLoaderMixin.save_lora_weights(save_directory=args.output_dir, unet_lora_layers=unet_lora_layers, text_encoder_lora_layers=text_encoder_lora_layers)
accelerator.end_training()
class SimpleNet(Model):
OUT_PIXEL_DIST = 4
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(SimpleNet, self).__init__(in_channels, out_channels, config, D)
kernel_size = 3
self.conv1 = ME.MinkowskiConvolution(in_channels=in_channels, out_channels=64, pixel_dist=1, kernel_size=kernel_size, stride=2, dilation=1, has_bias=False, dimension=D)
self.bn1 = nn.BatchNorm1d(64)
self.conv2 = ME.MinkowskiConvolution(in_channels=64, out_channels=128, pixel_dist=2, kernel_size=kernel_size, stride=2, dilation=1, has_bias=False, dimension=D)
self.bn2 = nn.BatchNorm1d(128)
self.conv3 = ME.MinkowskiConvolution(in_channels=128, out_channels=128, pixel_dist=4, kernel_size=kernel_size, stride=1, dilation=1, has_bias=False, dimension=D)
self.bn3 = nn.BatchNorm1d(128)
self.conv4 = ME.MinkowskiConvolution(in_channels=128, out_channels=128, pixel_dist=4, kernel_size=kernel_size, stride=1, dilation=1, has_bias=False, dimension=D)
self.bn4 = nn.BatchNorm1d(128)
self.conv5 = ME.MinkowskiConvolution(in_channels=128, out_channels=out_channels, pixel_dist=4, kernel_size=kernel_size, stride=1, dilation=1, has_bias=False, dimension=D)
self.bn5 = nn.BatchNorm1d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv4(out)
out = self.bn4(out)
out = self.relu(out)
return self.conv5(out)
class NodeResourceLimit(object):
MAX_CPU_CORES = 32
MIN_CPU_CORES = 4
MIN_MEMORY = 6144
MAX_MEMORY = 65536
MAX_WORKER_NUM = 60
MAX_PS_NUM = 15
INCREMENTAL_MEMORY_FACTOR = 2
MAX_INCREMENTAL_MEMORY = 8192
HUGE_MEMORY_THRESHOLD = 102400
HUGE_CPU_THRESHOLD = 100
WAIT_CHIEF_TIMEOUT_SECS = 1800
WAIT_DATA_SHARD_SERVICE_CREATION_SECS = 600
PS_CPU_GROWTH_RATE = 1.2
PS_CPU_DECREASED_RATE = 0.5
MIN_VALID_MEMORY = 1024
MIN_VALID_CPU = 2
MAX_HANG_TIMEOUT_SECS = 7200
def test_impure_interaction_is_zero():
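# y is purely additive in the two features (3/5 from feature 0, 11/7 from feature 1), so their measured interaction strength should be exactly zero regardless of sample weights.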
X = [['A', 'A'], ['A', 'B'], ['B', 'A'], ['B', 'B']]
y = [(3.0 + 11.0), (3.0 + 7.0), (5.0 + 11.0), (5.0 + 7.0)]
sample_weight = [24.25, 21.5, 8.125, 11.625]
ranked_strengths = dict(measure_interactions(X, y, min_samples_leaf=1, sample_weight=sample_weight, objective='rmse'))
assert (ranked_strengths[(0, 1)] == 0.0)
class Tracker():
def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):
self.metric = metric
self.max_iou_distance = max_iou_distance
self.max_age = max_age
self.n_init = n_init
self.kf = kalman_filter.KalmanFilter()
self.tracks = []
self._next_id = 1
def predict(self):
for track in self.tracks:
track.predict(self.kf)
def update(self, detections):
(matches, unmatched_tracks, unmatched_detections) = self._match(detections)
for (track_idx, detection_idx) in matches:
self.tracks[track_idx].update(self.kf, detections[detection_idx])
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx])
self.tracks = [t for t in self.tracks if (not t.is_deleted())]
active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
(features, targets) = ([], [])
for track in self.tracks:
if (not track.is_confirmed()):
continue
features += track.features
targets += [track.track_id for _ in track.features]
track.features = []
self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets)
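# DeepSORT-style two-stage association: an appearance-based matching cascade over confirmed tracks, then IoU matching for unconfirmed tracks and recently updated leftovers.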
def _match(self, detections):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = np.array([dets[i].feature for i in detection_indices])
targets = np.array([tracks[i].track_id for i in track_indices])
cost_matrix = self.metric.distance(features, targets)
cost_matrix = linear_assignment.gate_cost_matrix(self.kf, cost_matrix, tracks, dets, track_indices, detection_indices)
return cost_matrix
confirmed_tracks = [i for (i, t) in enumerate(self.tracks) if t.is_confirmed()]
unconfirmed_tracks = [i for (i, t) in enumerate(self.tracks) if (not t.is_confirmed())]
(matches_a, unmatched_tracks_a, unmatched_detections) = linear_assignment.matching_cascade(gated_metric, self.metric.matching_threshold, self.max_age, self.tracks, detections, confirmed_tracks)
iou_track_candidates = (unconfirmed_tracks + [k for k in unmatched_tracks_a if (self.tracks[k].time_since_update == 1)])
unmatched_tracks_a = [k for k in unmatched_tracks_a if (self.tracks[k].time_since_update != 1)]
(matches_b, unmatched_tracks_b, unmatched_detections) = linear_assignment.min_cost_matching(iou_matching.iou_cost, self.max_iou_distance, self.tracks, detections, iou_track_candidates, unmatched_detections)
matches = (matches_a + matches_b)
unmatched_tracks = list(set((unmatched_tracks_a + unmatched_tracks_b)))
return (matches, unmatched_tracks, unmatched_detections)
def _initiate_track(self, detection):
(mean, covariance) = self.kf.initiate(detection.to_xyah())
self.tracks.append(Track(mean, covariance, self._next_id, self.n_init, self.max_age, detection.feature))
self._next_id += 1
@add_start_docstrings('CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer\n on top of the pooled output) e.g. for GLUE tasks. ', CAMEMBERT_START_DOCSTRING)
class TFCamembertForSequenceClassification(TFRobertaForSequenceClassification):
config_class = CamembertConfig
pretrained_model_archive_map = TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
class StemWithFixedBatchNorm(BaseStem):
def __init__(self, cfg):
super(StemWithFixedBatchNorm, self).__init__(cfg, norm_func=FrozenBatchNorm2d)
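# Dynamic loss scaling for mixed precision: grow the scale after increment_every overflow-free steps, halve it (floored at 1) when a step overflows.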
class DynamicLossScale(LossScale):
def __init__(self, init_loss_scale=(2 ** 15), increment_every=2000, factor=2.0):
super(DynamicLossScale, self).__init__()
self.scale = layers.create_global_var(name=unique_name.generate('loss_scale'), shape=[1], value=init_loss_scale, dtype='float32', persistable=True)
self.good_steps = layers.create_global_var(name=unique_name.generate('good_steps'), shape=[1], value=0, dtype='int32', persistable=True)
self.increment_every = layers.fill_constant(shape=[1], dtype='int32', value=increment_every)
self.factor = factor
def increment(self):
enough_steps = layers.less_than(self.increment_every, (self.good_steps + 1))
def increment_step():
layers.increment(self.good_steps)
def maybe_update():
new_scale = (self.scale * self.factor)
scale_valid = layers.isfinite(new_scale)
def update_scale_and_step():
layers.assign(new_scale, self.scale)
layers.assign(layers.zeros_like(self.good_steps), self.good_steps)
layers.cond(scale_valid, update_scale_and_step)
layers.cond(enough_steps, maybe_update, increment_step)
def decrement(self):
new_scale = (self.scale / self.factor)
one = layers.fill_constant(shape=[1], dtype='float32', value=1.0)
layers.assign(layers.elementwise_max(new_scale, one), self.scale)
layers.assign(layers.zeros_like(self.good_steps), self.good_steps)
@six.add_metaclass(abc.ABCMeta)
class TextMetricSpec(Configurable, MetricSpec):
def __init__(self, params, name):
Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.EVAL)
self._name = name
self._eos_token = self.params['eos_token']
self._sos_token = self.params['sos_token']
self._separator = self.params['separator']
self._postproc_fn = None
if self.params['postproc_fn']:
self._postproc_fn = locate(self.params['postproc_fn'])
if (self._postproc_fn is None):
raise ValueError('postproc_fn not found: {}'.format(self.params['postproc_fn']))
@property
def name(self):
return self._name
@staticmethod
def default_params():
return {'sos_token': 'SEQUENCE_START', 'eos_token': 'SEQUENCE_END', 'separator': ' ', 'postproc_fn': ''}
def create_metric_ops(self, _inputs, labels, predictions):
with tf.variable_scope(self._name):
predictions_flat = tf.reduce_join(predictions['predicted_tokens'], 1, separator=self._separator)
labels_flat = tf.reduce_join(labels['target_tokens'], 1, separator=self._separator)
(sources_value, sources_update) = accumulate_strings(values=predictions_flat, name='sources')
(targets_value, targets_update) = accumulate_strings(values=labels_flat, name='targets')
metric_value = tf.py_func(func=self._py_func, inp=[sources_value, targets_value], Tout=tf.float32, name='value')
with tf.control_dependencies([sources_update, targets_update]):
update_op = tf.identity(metric_value, name='update_op')
return (metric_value, update_op)
def _py_func(self, hypotheses, references):
if (hypotheses.dtype.kind == 'U'):
hypotheses = np.char.encode(hypotheses, 'utf-8')
if (references.dtype.kind == 'U'):
references = np.char.encode(references, 'utf-8')
hypotheses = [_.decode('utf-8') for _ in hypotheses]
references = [_.decode('utf-8') for _ in references]
sliced_hypotheses = [postproc.slice_text(_, self._eos_token, self._sos_token) for _ in hypotheses]
sliced_references = [postproc.slice_text(_, self._eos_token, self._sos_token) for _ in references]
if self._postproc_fn:
sliced_hypotheses = [self._postproc_fn(_) for _ in sliced_hypotheses]
sliced_references = [self._postproc_fn(_) for _ in sliced_references]
return self.metric_fn(sliced_hypotheses, sliced_references)
def metric_fn(self, hypotheses, references):
raise NotImplementedError()
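# Copies a TensorFlow BERT checkpoint into the PyTorch model: embedding tables first, then per-layer attention, feed-forward, and LayerNorm parameters.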
def load_model(model, checkpoint_file):
(e, p) = (model.embed, 'bert/embeddings/')
load_param(checkpoint_file, {e.tok_embed.weight: (p + 'word_embeddings'), e.pos_embed.weight: (p + 'position_embeddings'), e.seg_embed.weight: (p + 'token_type_embeddings'), e.norm.gamma: (p + 'LayerNorm/gamma'), e.norm.beta: (p + 'LayerNorm/beta')})
for i in range(len(model.blocks)):
(b, p) = (model.blocks[i], ('bert/encoder/layer_%d/' % i))
load_param(checkpoint_file, {b.attn.proj_q.weight: (p + 'attention/self/query/kernel'), b.attn.proj_q.bias: (p + 'attention/self/query/bias'), b.attn.proj_k.weight: (p + 'attention/self/key/kernel'), b.attn.proj_k.bias: (p + 'attention/self/key/bias'), b.attn.proj_v.weight: (p + 'attention/self/value/kernel'), b.attn.proj_v.bias: (p + 'attention/self/value/bias'), b.proj.weight: (p + 'attention/output/dense/kernel'), b.proj.bias: (p + 'attention/output/dense/bias'), b.pwff.fc1.weight: (p + 'intermediate/dense/kernel'), b.pwff.fc1.bias: (p + 'intermediate/dense/bias'), b.pwff.fc2.weight: (p + 'output/dense/kernel'), b.pwff.fc2.bias: (p + 'output/dense/bias'), b.norm1.gamma: (p + 'attention/output/LayerNorm/gamma'), b.norm1.beta: (p + 'attention/output/LayerNorm/beta'), b.norm2.gamma: (p + 'output/LayerNorm/gamma'), b.norm2.beta: (p + 'output/LayerNorm/beta')})
def ca_perspective(n=5):
c = pd.read_csv('data/c_parenttext.csv')
c.set_index(['id'], inplace=True)
data = [pd.read_csv(f'data/standard.622/random_ten/{i}/ic.val.csv') for i in range(n)]
scores = []
for sample in data:
sample['ca_score'] = sample['id'].apply((lambda x: c.loc[x].TOXICITY))
scores.append(roc_auc_score(sample.label, sample.ca_score))
return scores
class MultiProcessFileTensorStorage(MultiProcessTensorStorage):
def __init__(self, data_schema: Dict[(str, SizeData)], rank_to_fpath: Dict[(int, str)], mode: str):
rank_to_storage = {rank: SingleProcessFileTensorStorage(data_schema, fpath, mode) for (rank, fpath) in rank_to_fpath.items()}
super().__init__(rank_to_storage)
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
num_groups = (planes // 8)
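# GroupNorm with 8 channels per group (assumes planes is a multiple of 8).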
if (norm_fn == 'group'):
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif (norm_fn == 'batch'):
self.norm1 = nn.BatchNorm2d(planes)
self.norm2 = nn.BatchNorm2d(planes)
self.norm3 = nn.BatchNorm2d(planes)
elif (norm_fn == 'instance'):
self.norm1 = nn.InstanceNorm2d(planes)
self.norm2 = nn.InstanceNorm2d(planes)
self.norm3 = nn.InstanceNorm2d(planes)
elif (norm_fn == 'none'):
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
self.norm3 = nn.Sequential()
if (stride == 1):
self.downsample = None
else:
self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
if (self.downsample is not None):
x = self.downsample(x)
return self.relu((x + y))
def main():
args = get_args()
set_seed(args.seed)
dataset = load_dataset('codeparrot/codecomplex', split='train')
train_test = dataset.train_test_split(test_size=0.2)
test_validation = train_test['test'].train_test_split(test_size=0.5)
train_test_validation = DatasetDict({'train': train_test['train'], 'test': test_validation['train'], 'valid': test_validation['test']})
print('Loading tokenizer and model')
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
model.config.pad_token_id = model.config.eos_token_id
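# Optionally freeze the RoBERTa encoder so only the classification head is trained.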
if args.freeze:
for param in model.roberta.parameters():
param.requires_grad = False
labels = ClassLabel(num_classes=7, names=list(set(train_test_validation['train']['complexity'])))
def tokenize(example):
inputs = tokenizer(example['src'], truncation=True, max_length=1024)
label = labels.str2int(example['complexity'])
return {'input_ids': inputs['input_ids'], 'attention_mask': inputs['attention_mask'], 'label': label}
tokenized_datasets = train_test_validation.map(tokenize, batched=True, remove_columns=train_test_validation['train'].column_names)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
training_args = TrainingArguments(output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='epoch', save_strategy='epoch', logging_strategy='epoch', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model='accuracy', run_name='complexity-java', report_to='wandb')
trainer = Trainer(model=model, args=training_args, train_dataset=tokenized_datasets['train'], eval_dataset=tokenized_datasets['valid'], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
print('Training...')
trainer.add_callback(CustomCallback(trainer))
trainer.train()
class ODEBlockTimeLastK(nn.Module):
def __init__(self, odeFunction, num_split, solver, K):
super(ODEBlockTimeLastK, self).__init__()
self.odefunc = odeFunction
self.num_split = num_split
self.final_time = world.config['K']
self.one = torch.tensor([self.final_time], requires_grad=False).to('cuda')
self.solver = solver
def forward(self, x, t):
odetime = t
odetime_tensor = torch.cat(odetime, dim=0)
all_time = torch.cat([odetime_tensor, self.one], dim=0).to('cuda')
all_time1 = all_time.type_as(x)
total_integration_time = all_time1
if (self.solver == 'euler'):
out = odeint(func=self.odefunc, y0=x, t=total_integration_time, method=self.solver)
elif (self.solver == 'dopri5'):
out = odeint(func=self.odefunc, y0=x, t=total_integration_time, method=self.solver, rtol=world.rtol, atol=world.atol)
elif (self.solver == 'rk4'):
out = odeint(func=self.odefunc, y0=x, t=total_integration_time, method=self.solver)
else:
out = odeint(func=self.odefunc, y0=x, t=total_integration_time, method=self.solver)
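# odeint returns the state at every time in total_integration_time; keep the solution at the second time point.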
return out[1]
class NN_tb3():
def __init__(self, env, policy, action_bound, OBS_SIZE, index, num_env):
self.beam_num = OBS_SIZE
self.laser_cb_num = 0
self.scan = None
self.env = env
self.env.index = 0
self.policy = policy
self.action_bound = action_bound
self.global_goal = PoseStamped()
self.goal = PoseStamped()
self.sub_goal = Vector3()
self.sub_goal.x = self.sub_goal.y = None
self.pose = PoseStamped()
self.vel = Vector3()
self.psi = 0.0
self.pub_twist = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self.sub_pose = rospy.Subscriber('/odom', Odometry, self.cbPose)
self.sub_subgoal = rospy.Subscriber('/subgoal', PoseStamped, self.cbSubGoal)
self.laser_sub = rospy.Subscriber('/scan', LaserScan, self.laser_scan_callback)
self.nn_timer = rospy.Timer(rospy.Duration(0.1), self.cbComputeAction)
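# visualize_pose below publishes Markers via self.pub_pose_marker and uses self.num_poses, neither of which is set elsewhere in this snippet; initialize them here (the topic name is an assumption).
self.pub_pose_marker = rospy.Publisher('/pose_marker', Marker, queue_size=1)
self.num_poses = 0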
def cbGlobalGoal(self, msg):
self.stop_moving_flag = True
self.new_global_goal_received = True
self.global_goal = msg
self.goal.pose.position.x = msg.pose.position.x
self.goal.pose.position.y = msg.pose.position.y
self.goal.header = msg.header
print(('new goal: ' + str([self.goal.pose.position.x, self.goal.pose.position.y])))
def cbSubGoal(self, msg):
self.sub_goal.x = msg.pose.position.x
self.sub_goal.y = msg.pose.position.y
def cbPose(self, msg):
self.cbVel(msg)
q = msg.pose.pose.orientation
self.psi = np.arctan2((2.0 * ((q.w * q.z) + (q.x * q.y))), (1 - (2 * ((q.y * q.y) + (q.z * q.z)))))
self.pose = msg.pose
def cbVel(self, msg):
self.vel = msg.twist.twist.linear
self.vel_angular = msg.twist.twist.angular.z
def stop_moving(self):
twist = Twist()
self.pub_twist.publish(twist)
def laser_scan_callback(self, scan):
self.scan_param = [scan.angle_min, scan.angle_max, scan.angle_increment, scan.time_increment, scan.scan_time, scan.range_min, scan.range_max]
self.scan = np.array(scan.ranges)
self.laser_cb_num += 1
def get_laser_observation(self):
scan = copy.deepcopy(self.scan)
scan[np.isnan(scan)] = 6.0
scan[np.isinf(scan)] = 6.0
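# Downsample the scan to beam_num beams: take evenly spaced rays from the left half and the (reversed) right half, then rescale ranges from [0, 6] m to [-0.5, 0.5].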
raw_beam_num = len(scan)
sparse_beam_num = self.beam_num
step = (float(raw_beam_num) / sparse_beam_num)
sparse_scan_left = []
index = 0.0
for x in range(int((sparse_beam_num / 2))):
sparse_scan_left.append(scan[int(index)])
index += step
sparse_scan_right = []
index = (raw_beam_num - 1.0)
for x in range(int((sparse_beam_num / 2))):
sparse_scan_right.append(scan[int(index)])
index -= step
scan_sparse = np.concatenate((sparse_scan_left, sparse_scan_right[::(- 1)]), axis=0)
return ((scan_sparse / 6.0) - 0.5)
def control_vel(self, action):
move_cmd = Twist()
move_cmd.linear.x = action[0]
move_cmd.linear.y = 0.0
move_cmd.linear.z = 0.0
move_cmd.angular.x = 0.0
move_cmd.angular.y = 0.0
move_cmd.angular.z = action[1]
self.pub_twist.publish(move_cmd)
def get_local_goal(self):
[x, y, theta] = self.state
[goal_x, goal_y] = [self.sub_goal.x, self.sub_goal.y]
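# Rotate the world-frame goal offset into the robot frame (2D rotation by -theta).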
local_x = (((goal_x - x) * np.cos(theta)) + ((goal_y - y) * np.sin(theta)))
local_y = (((- (goal_x - x)) * np.sin(theta)) + ((goal_y - y) * np.cos(theta)))
return [local_x, local_y]
def cbComputeAction(self, event):
while ((self.scan is None) or (self.sub_goal.x is None)):
pass
obs = self.get_laser_observation()
obs_stack = deque([obs, obs, obs])
self.state = [self.pose.pose.position.x, self.pose.pose.position.y, self.psi]
self.goal = np.asarray(self.get_local_goal())
self.speed = np.asarray([self.vel.x, self.vel_angular], dtype='float64')
obs_state_list = [[obs_stack, self.goal, self.speed]]
(_, scaled_action) = generate_action_no_sampling(self.env, obs_state_list, self.policy, self.action_bound)
action = scaled_action[0]
action[0] = (0.3 * action[0])
self.control_vel(action)
def visualize_pose(self, pos, orientation):
marker = Marker()
marker.header.stamp = rospy.Time.now()
marker.header.frame_id = 'map'
marker.ns = 'agent'
marker.id = 0
marker.type = marker.CUBE
marker.action = marker.ADD
marker.pose.position = pos
marker.pose.orientation = orientation
marker.scale = Vector3(x=0.7, y=0.42, z=1)
marker.color = ColorRGBA(r=1.0, g=1.0, a=1.0)
marker.lifetime = rospy.Duration(1.0)
self.pub_pose_marker.publish(marker)
marker = Marker()
marker.header.stamp = rospy.Time.now()
marker.header.frame_id = 'map'
marker.ns = 'agent'
marker.id = self.num_poses
marker.type = marker.CUBE
marker.action = marker.ADD
marker.pose.position = pos
marker.pose.orientation = orientation
marker.scale = Vector3(x=0.2, y=0.2, z=0.2)
marker.color = ColorRGBA(r=1.0, a=1.0)
marker.lifetime = rospy.Duration(10.0)
self.pub_pose_marker.publish(marker)
def on_shutdown(self):
rospy.loginfo('Shutting down.')
self.stop_moving()
rospy.loginfo('Stopped velocity.')
class SummationNode(ExprNode):
def __init__(self, parse_info=None, raw_text=None):
super().__init__(IRNodeType.Summation, parse_info=parse_info, raw_text=raw_text)
self.sub = None
self.exp = None
self.id = None
self.cond = None
self.symbols = None
self.symbol = None
self.content = None
self.sym_dict = None
self.enum_list = None
self.range = None
def main():
args = parse_args()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
log_level = logging.INFO
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
random.seed(args.seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
model_name = (args.input_model if (args.input_model is not None) else 'bert-base-uncased')
output_directory = args.output_folder
num_labels = len(tag2id)
logger.info('*** Loading the Tokenizer (FastTokenizer) ***')
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True, do_lower_case=True)
logger.info('*** Loading the Sequence Classification model ***')
base_model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=num_labels)
base_model.config.label2id = tag2id
base_model.config.id2label = id2tag
if ((args.val_data == '') or (args.val_data is None)):
(train_data, train_spk_id, train_tags) = load_data_from_text_file(args.train_data)
(train_data, val_data, train_spk_id, val_spk_id, train_tags, val_tags) = train_test_split(train_data, train_spk_id, train_tags, test_size=0.1)
else:
(train_data, train_spk_id, train_tags) = load_data_from_text_file(args.train_data)
(val_data, val_spk_id, val_tags) = load_data_from_text_file(args.val_data)
X_train_tokenized = tokenizer(train_data, padding='max_length', truncation=True)
X_val_tokenized = tokenizer(val_data, padding='max_length', truncation=True)
train_dataset = ATCO2Dataset_seqcla(X_train_tokenized, train_spk_id)
val_dataset = ATCO2Dataset_seqcla(X_val_tokenized, val_spk_id)
if ((args.max_train_samples is not None) and (args.max_train_samples != (- 1))):
max_train_samples = min(len(train_dataset), args.max_train_samples)
logger.info(f'*** Reducing the training dataset size to: {max_train_samples} ***')
indices = torch.arange(max_train_samples)
train_dataset = torch.utils.data.Subset(train_dataset, indices)
if (args.test_data is not None):
(test_data, test_spk_id, test_tags) = load_data_from_text_file(args.test_data)
X_test_tokenized = tokenizer(test_data, padding='max_length', truncation=True)
test_dataset = ATCO2Dataset_seqcla(X_test_tokenized, test_spk_id)
else:
(test_dataset, test_spk_id, test_tags) = (val_dataset, val_spk_id, val_tags)
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
args = TrainingArguments(report_to=args.report_to, output_dir=output_directory, num_train_epochs=args.epochs, per_device_train_batch_size=args.train_batch_size, per_device_eval_batch_size=args.eval_batch_size, load_best_model_at_end=True, warmup_steps=args.warmup_steps, weight_decay=0.001, gradient_accumulation_steps=args.gradient_accumulation_steps, evaluation_strategy='steps', logging_dir=(output_directory + '/logs'), logging_steps=args.logging_steps, max_steps=args.max_steps, save_steps=args.save_steps, eval_steps=args.eval_steps, save_total_limit=0)
trainer = Trainer(model=base_model, args=args, train_dataset=train_dataset, eval_dataset=val_dataset, compute_metrics=compute_metrics, data_collator=data_collator)
logger.info('*** Training ***')
train_results = trainer.train()
metrics = train_results.metrics
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
trainer.save_model(output_dir=f'{output_directory}/')
tokenizer.save_pretrained(f'{output_directory}/')
logger.info('*** Evaluate ***')
(raw_pred, _, _) = trainer.predict(test_dataset)
f_metrics = compute_metrics([raw_pred, np.array(test_spk_id)])
print(f'''Validation set metrics:
{f_metrics}''')
kwargs = {'finetuned_from': model_name, 'tasks': 'sequence-classification'}
trainer.create_model_card(**kwargs)
def main():
args = parse(sys.argv[1:])
experiments = pandas.read_csv(args.experiments)
settings = common.load_settings_path(args.settings_path)
stop = (len(experiments) if (args.stop is None) else args.stop)
experiments = experiments.loc[range(args.start, stop)]
overrides = {}
folds = list(range(1, (args.folds + 1)))
assert (max(folds) <= 10)
if args.check:
batches = 2
overrides['batch'] = 10
overrides['epochs'] = 1
overrides['train_samples'] = (batches * overrides['batch'])
overrides['val_samples'] = (batches * overrides['batch'])
cmds = generate_train_jobs(experiments, args.settings_path, folds, overrides)
print('Preparing {} jobs'.format(len(cmds)))
print('\n'.join([c['name'] for c in cmds]))
out = run_jobs(cmds, args.models_dir, n_jobs=args.jobs)
print(out)
success = all([(o['exitcode'] == 0) for o in out])
assert success
class PredictionBuilder():
def __init__(self, max_depth: int=20):
self.max_depth = max_depth
self.env = None
def set_env(self, env: Environment):
self.env = env
def reset(self):
pass
def get(self, handle: int=0):
raise NotImplementedError()
class BatchFeature(UserDict):
def __init__(self, data: Optional[Dict[(str, Any)]]=None, tensor_type: Union[(None, str, TensorType)]=None):
super().__init__(data)
self.convert_to_tensors(tensor_type=tensor_type)
def __getitem__(self, item: str) -> Union[Any]:
if isinstance(item, str):
return self.data[item]
else:
raise KeyError('Indexing with integers is not available when using Python based feature extractors')
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def __getstate__(self):
return {'data': self.data}
def __setstate__(self, state):
if ('data' in state):
self.data = state['data']
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
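# Convert every stored value to the requested tensor backend, importing TensorFlow/PyTorch/JAX only if available; defaults to NumPy.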
def convert_to_tensors(self, tensor_type: Optional[Union[(str, TensorType)]]=None):
if (tensor_type is None):
return self
if (not isinstance(tensor_type, TensorType)):
tensor_type = TensorType(tensor_type)
if (tensor_type == TensorType.TENSORFLOW):
if (not is_tf_available()):
raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
import tensorflow as tf
as_tensor = tf.constant
is_tensor = tf.is_tensor
elif (tensor_type == TensorType.PYTORCH):
if (not is_torch_available()):
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
import torch
def as_tensor(value):
if (isinstance(value, (list, tuple)) and (len(value) > 0) and isinstance(value[0], np.ndarray)):
value = np.array(value)
return torch.tensor(value)
is_tensor = torch.is_tensor
elif (tensor_type == TensorType.JAX):
if (not is_flax_available()):
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
import jax.numpy as jnp
as_tensor = jnp.array
is_tensor = _is_jax
else:
as_tensor = np.asarray
is_tensor = _is_numpy
for (key, value) in self.items():
try:
if (not is_tensor(value)):
tensor = as_tensor(value)
self[key] = tensor
except Exception:
if (key == 'overflowing_values'):
raise ValueError('Unable to create tensor returning overflowing values of different lengths. ')
raise ValueError("Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.")
return self
@torch_required
def to(self, device: Union[(str, 'torch.device')]) -> 'BatchFeature':
if (isinstance(device, str) or _is_torch_device(device) or isinstance(device, int)):
self.data = {k: v.to(device=device) for (k, v) in self.data.items()}
else:
logger.warning(f'Attempting to cast a BatchFeature to type {str(device)}. This is not supported.')
return self
class LTRTrainer(BaseTrainer):
def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, ratio=(1 / 8)):
super().__init__(actor, loaders, optimizer, settings, lr_scheduler)
self._set_default_settings()
self.stats = OrderedDict({loader.name: None for loader in self.loaders})
self.ratio = ratio
tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)
self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])
self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)
def _set_default_settings(self):
default = {'print_interval': 10, 'print_stats': None, 'description': ''}
for (param, default_value) in default.items():
if (getattr(self.settings, param, None) is None):
setattr(self.settings, param, default_value)
def cycle_dataset(self, loader):
self.actor.train(loader.training)
torch.set_grad_enabled(loader.training)
self._init_timing()
for (i, data) in enumerate(loader, 1):
if self.move_data_to_gpu:
data = data.to(self.device)
data['epoch'] = self.epoch
data['settings'] = self.settings
(loss, stats) = self.actor(data, ratio=self.ratio)
if loader.training:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_size = data['search_images'].shape[loader.stack_dim]
self._update_stats(stats, batch_size, loader)
self._print_stats(i, loader, batch_size)
def train_epoch(self):
for loader in self.loaders:
if ((self.epoch % loader.epoch_interval) == 0):
self.cycle_dataset(loader)
self._stats_new_epoch()
self._write_tensorboard()
def _init_timing(self):
self.num_frames = 0
self.start_time = time.time()
self.prev_time = self.start_time
def _update_stats(self, new_stats: OrderedDict, batch_size, loader):
if ((loader.name not in self.stats.keys()) or (self.stats[loader.name] is None)):
self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})
for (name, val) in new_stats.items():
if (name not in self.stats[loader.name].keys()):
self.stats[loader.name][name] = AverageMeter()
self.stats[loader.name][name].update(val, batch_size)
def _print_stats(self, i, loader, batch_size):
self.num_frames += batch_size
current_time = time.time()
batch_fps = (batch_size / (current_time - self.prev_time))
average_fps = (self.num_frames / (current_time - self.start_time))
self.prev_time = current_time
if (((i % self.settings.print_interval) == 0) or (i == loader.__len__())):
print_str = ('[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__()))
print_str += ('FPS: %.1f (%.1f) , ' % (average_fps, batch_fps))
for (name, val) in self.stats[loader.name].items():
if (((self.settings.print_stats is None) or (name in self.settings.print_stats)) and hasattr(val, 'avg')):
print_str += ('%s: %.5f , ' % (name, val.avg))
print_str += ('%s: %.5f ,' % ('img ratio', max(0.2, (1 - ((((self.epoch - 1) * 1000.0) + i) / (100 * 1000.0))))))
print(print_str[:(- 5)])
def _stats_new_epoch(self):
for loader in self.loaders:
if loader.training:
lr_list = self.lr_scheduler.get_lr()
for (i, lr) in enumerate(lr_list):
var_name = 'LearningRate/group{}'.format(i)
if (var_name not in self.stats[loader.name].keys()):
self.stats[loader.name][var_name] = StatValue()
self.stats[loader.name][var_name].update(lr)
for loader_stats in self.stats.values():
if (loader_stats is None):
continue
for stat_value in loader_stats.values():
if hasattr(stat_value, 'new_epoch'):
stat_value.new_epoch()
def _write_tensorboard(self):
if (self.epoch == 1):
self.tensorboard_writer.write_info(self.settings.module_name, self.settings.script_name, self.settings.description)
self.tensorboard_writer.write_epoch(self.stats, self.epoch)
def test_show():
import mmcv
from os import path as osp
from mmdet3d.core.bbox import LiDARInstance3DBoxes
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
(data_root, ann_file, classes, pts_prefix, pipeline, modality, split) = _generate_kitti_dataset_config()
kitti_dataset = KittiDataset(data_root, ann_file, split=split, modality=modality, pipeline=pipeline)
boxes_3d = LiDARInstance3DBoxes(torch.tensor([[46.1218, (- 4.6496), (- 0.9275), 0.5316, 1.4442, 1.745, 1.1749], [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723], [46.1366, (- 4.6404), (- 0.951), 0.5162, 1.6501, 1.754, 1.3778], [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.543], [58.9079, 16.6272, (- 1.5829), 1.5656, 3.9313, 1.4899, 1.5505]]))
scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.278])
labels_3d = torch.tensor([0, 0, 1, 1, 2])
result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
results = [result]
kitti_dataset.show(results, temp_dir, show=False)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
tmp_dir.cleanup()
eval_pipeline = [dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='DefaultFormatBundle3D', class_names=classes, with_label=False), dict(type='Collect3D', keys=['points'])]
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
tmp_dir.cleanup()
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
(_, _, _, _, multi_modality_pipeline, modality, _) = _generate_kitti_multi_modality_dataset_config()
kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix, multi_modality_pipeline, classes, modality)
kitti_dataset.show(results, temp_dir, show=False)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
mmcv.check_file_exist(img_file_path)
mmcv.check_file_exist(img_pred_path)
mmcv.check_file_exist(img_gt_file)
tmp_dir.cleanup()
eval_pipeline = [dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4), dict(type='LoadImageFromFile'), dict(type='DefaultFormatBundle3D', class_names=classes, with_label=False), dict(type='Collect3D', keys=['points', 'img'])]
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
pts_file_path = osp.join(temp_dir, '000000', '000000_points.obj')
gt_file_path = osp.join(temp_dir, '000000', '000000_gt.obj')
pred_file_path = osp.join(temp_dir, '000000', '000000_pred.obj')
img_file_path = osp.join(temp_dir, '000000', '000000_img.png')
img_pred_path = osp.join(temp_dir, '000000', '000000_pred.png')
img_gt_file = osp.join(temp_dir, '000000', '000000_gt.png')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
mmcv.check_file_exist(img_file_path)
mmcv.check_file_exist(img_pred_path)
mmcv.check_file_exist(img_gt_file)
tmp_dir.cleanup()
class PResNet(Module):
def __init__(self, block, layers, num_classes=1000):
self.ni = 64
super().__init__()
self.conv1 = conv_act(3, 16, stride=2)
self.conv2 = conv_act(16, 32)
self.conv3 = conv_act(32, 64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
ni = (512 * block.expansion)
self.avgpool = nn.Sequential(act_fn(), nn.BatchNorm2d(ni), nn.AdaptiveAvgPool2d(1))
self.fc = nn.Linear(ni, num_classes)
init_cnn(self)
def _make_layer(self, block, nf, blocks, stride=1):
downsample = None
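# Pre-activation downsample path: activation, BN, and 2x2 average pooling (when stride is 2) before a channel-matching conv.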
if ((stride != 1) or (self.ni != (nf * block.expansion))):
layers = ([act_fn(), nn.BatchNorm2d(self.ni), nn.AvgPool2d(kernel_size=2)] if (stride == 2) else [])
layers.append(conv(self.ni, (nf * block.expansion)))
downsample = nn.Sequential(*layers)
layers = [block(self.ni, nf, stride, downsample)]
self.ni = (nf * block.expansion)
for i in range(1, blocks):
layers.append(block(self.ni, nf))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
return x |
class BConvCell(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, stride=1, padding=0):
super(BConvCell, self).__init__()
self.output_dim = output_dim
self.stride = stride
self.padding = padding
self.kernel_size = kernel_size
self.gates = nn.Conv2d(input_dim, (4 * output_dim), kernel_size, stride=stride, padding=padding, bias=False)
self.latentState = None
def resetlatentstate(self):
self.latentState = None
def forward(self, input_):
batch_size = input_.size(0)
spatial_size = input_.data.size()[2:]
# Conv output-size formula: floor((dim + 2*pad - (kernel - 1) - 1) / stride) + 1.
height = int(math.floor((((list(spatial_size)[0] + (2 * self.padding)) - (self.kernel_size - 1)) - 1) / float(self.stride)) + 1)
width = int(math.floor((((list(spatial_size)[1] + (2 * self.padding)) - (self.kernel_size - 1)) - 1) / float(self.stride)) + 1)
if (self.latentState is None):
cell_state_size = ([batch_size, self.output_dim] + [height, width])
# Create the initial latent state on the input's device instead of hardcoding .cuda().
prev_states = torch.zeros(cell_state_size, device=input_.device)
nn.init.normal_(prev_states, std=0.001)
else:
prev_states = self.latentState.detach()
if (prev_states.size(0) != batch_size):
prev_states = prev_states[:batch_size]
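# ConvLSTM-style update: a single convolution produces all four gates, split along the channel dimension below.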
gates = self.gates(input_)
(in_gate, forget_gate, out_gate, cell_gate) = gates.chunk(4, 1)
in_gate = torch.sigmoid(in_gate)
forget_gate = torch.sigmoid(forget_gate)
out_gate = torch.sigmoid(out_gate)
cell_gate = torch.tanh(cell_gate)
now_state = ((forget_gate * prev_states) + (in_gate * cell_gate))
output = (out_gate * torch.tanh(now_state))
self.latentState = now_state.detach()
return output |
def test_pair_aggregator(saliency_mt_model: HuggingfaceEncoderDecoderModel):
out = saliency_mt_model.attribute([EXAMPLES['source'], EXAMPLES['alternative_source']], show_progress=False)
orig_seqattr = out.sequence_attributions[0].aggregate(['vnorm'])
alt_seqattr = out.sequence_attributions[1].aggregate(['vnorm'])
diff_seqattr = orig_seqattr.aggregate(PairAggregator, paired_attr=alt_seqattr)
for (idx, token) in enumerate(diff_seqattr.source):
assert (token.token == EXAMPLES['diff_subwords'][idx])
assert torch.allclose((alt_seqattr.source_attributions - orig_seqattr.source_attributions), diff_seqattr.source_attributions)
orig_seqattr_other = out.sequence_attributions[0].aggregate()
alt_seqattr_other = out.sequence_attributions[1].aggregate()
diff_seqattr_other = orig_seqattr_other.aggregate('pair', paired_attr=alt_seqattr_other)
assert torch.allclose(diff_seqattr_other.source_attributions, diff_seqattr.source_attributions) |
def main():
input = ImageParam(UInt(8), 3, 'input')
erode = get_erode(input)
erode.compile_jit()
input_data = get_input_data()
input_image = Buffer(input_data)
input.set(input_image)
output_data = np.empty(input_data.shape, dtype=input_data.dtype, order='F')
output_image = Buffer(output_data)
print('input_image', input_image)
print('output_image', output_image)
erode.realize(output_image)
input_path = 'erode_input.png'
output_path = 'erode_result.png'
imsave(input_path, input_data)
imsave(output_path, output_data)
print('\nerode realized on output image.', 'Result saved at', output_path, '( input data copy at', input_path, ')')
print('\nEnd of game. Have a nice day!')
return |
class Instruction():
def __init__(self, name, num_qubits, num_clbits, params):
if ((not isinstance(num_qubits, int)) or (not isinstance(num_clbits, int))):
raise QiskitError('num_qubits and num_clbits must be integer.')
if ((num_qubits < 0) or (num_clbits < 0)):
raise QiskitError('bad instruction dimensions: %d qubits, %d clbits.' % (num_qubits, num_clbits))
self.name = name
self.num_qubits = num_qubits
self.num_clbits = num_clbits
self._params = []
self.control = None
self._definition = None
self.params = params
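# Instructions compare equal when type, name, qubit/clbit counts, and definition match, with numeric parameters compared up to _CUTOFF_PRECISION.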
def __eq__(self, other):
if ((type(self) is not type(other)) or (self.name != other.name) or (self.num_qubits != other.num_qubits) or (self.num_clbits != other.num_clbits) or (self.definition != other.definition)):
return False
for (self_param, other_param) in zip_longest(self.params, other.params):
if (self_param == other_param):
continue
try:
if numpy.isclose(float(self_param), float(other_param), atol=_CUTOFF_PRECISION):
continue
except TypeError:
pass
return False
return True
def _define(self):
pass
@property
def params(self):
return self._params
@params.setter
def params(self, parameters):
self._params = []
for single_param in parameters:
if isinstance(single_param, (Parameter, sympy.Basic)):
self._params.append(single_param)
elif isinstance(single_param, node.Node):
self._params.append(single_param.sym())
elif isinstance(single_param, (int, float)):
self._params.append(sympy.Number(single_param))
elif isinstance(single_param, complex):
self._params.append((single_param.real + (single_param.imag * sympy.I)))
elif isinstance(single_param, str):
self._params.append(sympy.Symbol(single_param))
elif isinstance(single_param, numpy.ndarray):
self._params.append(single_param)
elif isinstance(single_param, sympy.Matrix):
self._params.append(single_param)
elif isinstance(single_param, sympy.Expr):
self._params.append(single_param)
elif isinstance(single_param, numpy.number):
self._params.append(sympy.Number(single_param.item()))
else:
raise QiskitError('invalid param type {0} in instruction {1}'.format(type(single_param), self.name))
@property
def definition(self):
if (self._definition is None):
self._define()
return self._definition
@definition.setter
def definition(self, array):
self._definition = array
def assemble(self):
instruction = QasmQobjInstruction(name=self.name)
if self.params:
params = [(x.evalf() if hasattr(x, 'evalf') else x) for x in self.params]
params = [(sympy.matrix2numpy(x, dtype=complex) if isinstance(x, sympy.Matrix) else x) for x in params]
instruction.params = params
if self.num_qubits:
instruction.qubits = list(range(self.num_qubits))
if self.num_clbits:
instruction.memory = list(range(self.num_clbits))
if self.control:
instruction._control = self.control
return instruction
def mirror(self):
if (not self._definition):
return self.copy()
reverse_inst = self.copy(name=(self.name + '_mirror'))
reverse_inst.definition = []
for (inst, qargs, cargs) in reversed(self._definition):
reverse_inst._definition.append((inst.mirror(), qargs, cargs))
return reverse_inst
def inverse(self):
if (not self.definition):
raise QiskitError(('inverse() not implemented for %s.' % self.name))
inverse_gate = self.copy(name=(self.name + '_dg'))
inverse_gate._definition = []
for (inst, qargs, cargs) in reversed(self._definition):
inverse_gate._definition.append((inst.inverse(), qargs, cargs))
return inverse_gate
def c_if(self, classical, val):
if (not isinstance(classical, ClassicalRegister)):
raise QiskitError('c_if must be used with a classical register')
if (val < 0):
raise QiskitError('control value should be non-negative')
self.control = (classical, val)
return self
def copy(self, name=None):
cpy = copy.copy(self)
if name:
cpy.name = name
return cpy
def _qasmif(self, string):
if (self.control is None):
return string
return (('if(%s==%d) ' % (self.control[0].name, self.control[1])) + string)
def qasm(self):
name_param = self.name
if self.params:
name_param = ('%s(%s)' % (name_param, ','.join([str(i) for i in self.params])))
return self._qasmif(name_param)
def broadcast_arguments(self, qargs, cargs):
if (len(qargs) != self.num_qubits):
raise QiskitError('The amount of qubit arguments does not match the instruction expectation.')
if (len(cargs) != self.num_clbits):
raise QiskitError('The amount of clbit arguments does not match the instruction expectation.')
if (len(cargs) == len(qargs)):
for (qarg, carg) in zip(qargs, cargs):
(yield ([qarg], [carg]))
elif ((not cargs) and (len(qargs) == 1)):
for qarg in qargs[0]:
(yield ([qarg], []))
else:
flat_qargs = [qarg for sublist in qargs for qarg in sublist]
flat_cargs = [carg for sublist in cargs for carg in sublist]
(yield (flat_qargs, flat_cargs)) |
def IB_IRM_hyper(sample):
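# Random-search spaces for IB-IRM: penalty weights drawn log-uniformly from [1e-1, 1e5], anneal steps uniformly from [0, 2000]; fixed defaults when sample is False.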
if sample:
return {'ib_weight': (lambda r: (10 ** r.uniform((- 1), 5))), 'ib_anneal': (lambda r: r.uniform(0, 2000)), 'irm_weight': (lambda r: (10 ** r.uniform((- 1), 5))), 'irm_anneal': (lambda r: r.uniform(0, 2000))}
else:
return {'ib_weight': (lambda r: 100.0), 'ib_anneal': (lambda r: 500), 'irm_weight': (lambda r: 100.0), 'irm_anneal': (lambda r: 500)} |
def generate_memory(sent, speaker, time):
sent_new = []
sent_token = sent.split(' ')
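# Utterances ($u/$s) become one memory cell per word: [word, speaker, turn tag, position tag] padded to MEM_TOKEN_SIZE; KB facts are stored as a single padded cell, reversed unless the relation is R_rating.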
if ((speaker == '$u') or (speaker == '$s')):
for (idx, word) in enumerate(sent_token):
temp = ([word, speaker, ('turn' + str(time)), ('word' + str(idx))] + (['PAD'] * (MEM_TOKEN_SIZE - 4)))
sent_new.append(temp)
else:
if (sent_token[1] == 'R_rating'):
sent_token = (sent_token + (['PAD'] * (MEM_TOKEN_SIZE - len(sent_token))))
else:
sent_token = (sent_token[::(- 1)] + (['PAD'] * (MEM_TOKEN_SIZE - len(sent_token))))
sent_new.append(sent_token)
return sent_new |
def main(args):
update_config_from_file(args.cfg)
if (args.launcher == 'none'):
args.distributed = False
else:
args.distributed = True
init_dist(launcher=args.launcher)
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = (args.rank % torch.cuda.device_count())
print(args)
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
device = torch.device(args.device)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
(dataset_train, args.nb_classes) = build_dataset(is_train=True, args=args, is_individual_prompt=(args.is_visual_prompt_tuning or args.is_adapter or args.is_LoRA or args.is_prefix))
(dataset_val, _) = build_dataset(is_train=False, args=args, is_individual_prompt=(args.is_visual_prompt_tuning or args.is_adapter or args.is_LoRA or args.is_prefix))
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
sampler_train = RASampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
else:
sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
if args.dist_eval:
if ((len(dataset_val) % num_tasks) != 0):
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
sampler_train = torch.utils.data.RandomSampler(dataset_train)
data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
data_loader_val = torch.utils.data.DataLoader(dataset_val, batch_size=int((2 * args.batch_size)), sampler=sampler_val, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
mixup_fn = None
mixup_active = ((args.mixup > 0) or (args.cutmix > 0.0) or (args.cutmix_minmax is not None))
print('mixup_active', mixup_active)
if mixup_active:
mixup_fn = Mixup(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes)
print('Creating SuperVisionTransformer')
print(cfg)
model = models.__dict__[cfg.MODEL_NAME](img_size=args.input_size, drop_rate=args.drop, drop_path_rate=args.drop_path, super_prompt_tuning_dim=cfg.SUPERNET.VISUAL_PROMPT_DIM, super_LoRA_dim=cfg.SUPERNET.LORA_DIM, super_adapter_dim=cfg.SUPERNET.ADAPTER_DIM, super_prefix_dim=cfg.SUPERNET.PREFIX_DIM, drop_rate_LoRA=args.drop_rate_LoRA, drop_rate_prompt=args.drop_rate_prompt, drop_rate_adapter=args.drop_rate_adapter, IS_not_position_VPT=args.IS_not_position_VPT)
choices = {'depth': cfg.SUPERNET.DEPTH, 'super_prompt_tuning_dim': cfg.SUPERNET.VISUAL_PROMPT_DIM, 'super_LoRA_dim': cfg.SUPERNET.LORA_DIM, 'super_adapter_dim': cfg.SUPERNET.ADAPTER_DIM, 'super_prefix_dim': cfg.SUPERNET.PREFIX_DIM, 'visual_prompt_dim': cfg.SEARCH_SPACE.VISUAL_PROMPT_DIM, 'lora_dim': cfg.SEARCH_SPACE.LORA_DIM, 'adapter_dim': cfg.SEARCH_SPACE.ADAPTER_DIM, 'prefix_dim': cfg.SEARCH_SPACE.PREFIX_DIM, 'visual_prompt_depth': cfg.SEARCH_SPACE.VISUAL_PROMPT_DEPTH, 'lora_depth': cfg.SEARCH_SPACE.LORA_DEPTH, 'adapter_depth': cfg.SEARCH_SPACE.ADAPTER_DEPTH, 'prefix_depth': cfg.SEARCH_SPACE.PREFIX_DEPTH}
if args.resume:
if ('pth' in args.resume):
if (args.nb_classes != model.head.weight.shape[0]):
model.reset_classifier(args.nb_classes)
incompatible_keys = load_checkpoint(model, args.resume, strict=False)
print(incompatible_keys)
else:
load_checkpoint(model, args.resume)
if (args.nb_classes != model.head.weight.shape[0]):
model.reset_classifier(args.nb_classes)
model.to(device)
if args.teacher_model:
teacher_model = create_model(args.teacher_model, pretrained=True, num_classes=args.nb_classes)
teacher_model.to(device)
teacher_loss = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
teacher_model = None
teacher_loss = None
model_ema = None
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
print('number of params:', n_parameters)
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
(lr_scheduler, _) = create_scheduler(args, optimizer)
if (args.mixup > 0.0):
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
output_dir = Path(args.output_dir)
if (not output_dir.exists()):
output_dir.mkdir(parents=True)
with open((output_dir / 'config.yaml'), 'w') as f:
f.write(args_text)
retrain_config = None
if ((args.mode == 'retrain') and ('RETRAIN' in cfg)):
retrain_config = {'visual_prompt_dim': cfg.RETRAIN.VISUAL_PROMPT_DIM, 'lora_dim': cfg.RETRAIN.LORA_DIM, 'adapter_dim': cfg.RETRAIN.ADAPTER_DIM, 'prefix_dim': cfg.RETRAIN.PREFIX_DIM}
if args.eval:
test_stats = evaluate(data_loader_val, model, device, mode=args.mode, retrain_config=retrain_config, is_visual_prompt_tuning=args.is_visual_prompt_tuning, is_adapter=args.is_adapter, is_LoRA=args.is_LoRA, is_prefix=args.is_prefix)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
print('Start training')
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn, amp=args.amp, teacher_model=teacher_model, teach_loss=teacher_loss, choices=choices, mode=args.mode, retrain_config=retrain_config, is_visual_prompt_tuning=args.is_visual_prompt_tuning, is_adapter=args.is_adapter, is_LoRA=args.is_LoRA, is_prefix=args.is_prefix)
lr_scheduler.step(epoch)
if args.output_dir:
checkpoint_paths = [(output_dir / 'checkpoint.pth')]
for checkpoint_path in checkpoint_paths:
utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args}, checkpoint_path)
if (((epoch % args.val_interval) == 0) or (epoch == (args.epochs - 1))):
test_stats = evaluate(data_loader_val, model, device, amp=args.amp, choices=choices, mode=args.mode, retrain_config=retrain_config, is_visual_prompt_tuning=args.is_visual_prompt_tuning, is_adapter=args.is_adapter, is_LoRA=args.is_LoRA, is_prefix=args.is_prefix)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats['acc1'])
print(f'Max accuracy: {max_accuracy:.2f}%')
log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
if (args.output_dir and utils.is_main_process()):
with (output_dir / 'log.txt').open('a') as f:
f.write((json.dumps(log_stats) + '\n'))
total_time = (time.time() - start_time)
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str)) |
def find_index_path(index_file):
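# Scan a generated HTML page for the 'index.html' home link and return the relative prefix back to the docs root; '' if the quote is malformed, 'ignore' if the link is absent.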
with open(index_file, 'r') as f:
lines = f.readlines()
for line in lines:
pos = line.find('index.html" class="icon icon-home"')
if (pos < 0):
continue
pos1 = line.rfind('"', 0, pos)
if (pos1 < 0):
return ''
else:
return ('../' + line[(pos1 + 1):pos])
return 'ignore' |
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
if args.do_filter_by_confidence:
dataset = dataset.filter((lambda example: (example['probability'] > args.confidence_threshold)))
if args.do_filter_by_val_performance:
assert ((eval_result >= 0.0) and (eval_result <= 1.0))
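# Self-training heuristic: keep only the most-confident predictions, with the kept fraction set by the current validation score.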
num_selected_rows = int((eval_result * len(dataset)))
print(num_selected_rows)
dataset = dataset.sort('probability', reverse=True)
dataset = dataset.select(range(num_selected_rows))
dataset = dataset.remove_columns(['label', 'probability'])
dataset = dataset.rename_column('prediction', 'label')
dataset = dataset.map((lambda example: {'label': id2label[example['label']]}))
dataset = dataset.shuffle(seed=args.seed)
pseudo_labeled_data_file = os.path.join(next_data_dir, f'train_pseudo.{args.data_file_extension}')
if (args.data_file_extension == 'csv'):
dataset.to_csv(pseudo_labeled_data_file, index=False)
else:
dataset.to_json(pseudo_labeled_data_file) |
def test_transformer_layer():
decoder_layer = TFDecoderLayer()
in_dec = torch.rand(1, 30, 512)
out_enc = torch.rand(1, 128, 512)
out_dec = decoder_layer(in_dec, out_enc)
assert (out_dec.shape == torch.Size([1, 30, 512]))
decoder_layer = TFDecoderLayer(operation_order=('self_attn', 'norm', 'enc_dec_attn', 'norm', 'ffn', 'norm'))
out_dec = decoder_layer(in_dec, out_enc)
assert (out_dec.shape == torch.Size([1, 30, 512]))
pos_encoder = PositionalEncoding()
x = torch.rand(1, 30, 512)
out = pos_encoder(x)
assert (out.size() == x.size())
encoder_layer = TFEncoderLayer()
in_enc = torch.rand(1, 20, 512)
out_enc = encoder_layer(in_enc)
assert (out_enc.shape == torch.Size([1, 20, 512]))
encoder_layer = TFEncoderLayer(operation_order=('self_attn', 'norm', 'ffn', 'norm'))
out_enc = encoder_layer(in_enc)
assert (out_enc.shape == torch.Size([1, 20, 512]))
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f'Training/evaluation parameters {training_args}')
set_seed(training_args.seed)
if (data_args.dataset_name == 'funsd'):
import funsd
datasets = load_dataset(os.path.abspath(funsd.__file__), cache_dir=model_args.cache_dir)
else:
raise NotImplementedError()
column_names = datasets['test'].column_names
features = datasets['test'].features
text_column_name = ('words' if ('words' in column_names) else 'tokens')
boxes_column_name = 'bboxes'
label_column_name = (f'{data_args.task_name}_tags' if (f'{data_args.task_name}_tags' in column_names) else column_names[1])
remove_columns = column_names
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = (unique_labels | set(label))
label_list = sorted(unique_labels)
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(datasets['train'][label_column_name])
label_to_id = {l: i for (i, l) in enumerate(label_list)}
num_labels = len(label_list)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, input_size=data_args.input_size, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), tokenizer_file=None, cache_dir=model_args.cache_dir, use_fast=True, add_prefix_space=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
raise ValueError('This example script only works for models that have a fast tokenizer. Check out the big table of models to find the model types that meet this requirement.')
padding = ('max_length' if data_args.pad_to_max_length else False)
if data_args.visual_embed:
imagenet_default_mean_and_std = data_args.imagenet_default_mean_and_std
mean = (IMAGENET_INCEPTION_MEAN if (not imagenet_default_mean_and_std) else IMAGENET_DEFAULT_MEAN)
std = (IMAGENET_INCEPTION_STD if (not imagenet_default_mean_and_std) else IMAGENET_DEFAULT_STD)
common_transform = Compose([RandomResizedCropAndInterpolationWithTwoPic(size=data_args.input_size, interpolation=data_args.train_interpolation)])
patch_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
def tokenize_and_align_labels(examples, augmentation=False):
tokenized_inputs = tokenizer(examples[text_column_name], padding=False, truncation=True, return_overflowing_tokens=True, boxes=examples[boxes_column_name])
labels = []
bboxes = []
images = []
for batch_index in range(len(tokenized_inputs['input_ids'])):
word_ids = tokenized_inputs.word_ids(batch_index=batch_index)
org_batch_index = tokenized_inputs['overflow_to_sample_mapping'][batch_index]
label = examples[label_column_name][org_batch_index]
bbox = examples['bboxes'][org_batch_index]
previous_word_idx = None
label_ids = []
bbox_inputs = []
for word_idx in word_ids:
if (word_idx is None):
label_ids.append((- 100))
bbox_inputs.append([0, 0, 0, 0])
elif (word_idx != previous_word_idx):
label_ids.append(label_to_id[label[word_idx]])
bbox_inputs.append(bbox[word_idx])
else:
label_ids.append((label_to_id[label[word_idx]] if data_args.label_all_tokens else (- 100)))
bbox_inputs.append(bbox[word_idx])
previous_word_idx = word_idx
labels.append(label_ids)
bboxes.append(bbox_inputs)
if data_args.visual_embed:
ipath = examples['image_path'][org_batch_index]
img = pil_loader(ipath)
(for_patches, _) = common_transform(img, augmentation=augmentation)
patch = patch_transform(for_patches)
images.append(patch)
tokenized_inputs['labels'] = labels
tokenized_inputs['bbox'] = bboxes
if data_args.visual_embed:
tokenized_inputs['images'] = images
return tokenized_inputs
validation_name = 'test'
if (validation_name not in datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = datasets[validation_name]
if (data_args.max_val_samples is not None):
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
metric = load_metric('seqeval')
def compute_metrics(p):
(predictions, labels) = p
predictions = np.argmax(predictions, axis=2)
true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
results = metric.compute(predictions=true_predictions, references=true_labels)
if data_args.return_entity_level_metrics:
final_results = {}
for (key, value) in results.items():
if isinstance(value, dict):
for (n, v) in value.items():
final_results[f'{key}_{n}'] = v
else:
final_results[key] = value
return final_results
else:
return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}
from model import ORTModel
def eval_func(model):
logger.info('*** Evaluate ***')
ort_model = ORTModel(model, compute_metrics=compute_metrics)
outputs = ort_model.evaluation_loop(eval_dataset)
return outputs.metrics['f1']
if model_args.tune:
from neural_compressor import PostTrainingQuantConfig, quantization
from neural_compressor.utils.constant import FP32
import onnx
onnx_model = onnx.load(model_args.input_model)
calib_dataset = IncDataset(eval_dataset, onnx_model)
config = PostTrainingQuantConfig(approach='dynamic', quant_level=1, quant_format=model_args.quant_format)
q_model = quantization.fit(onnx_model, config, eval_func=eval_func, calib_dataloader=DataLoader(framework='onnxruntime', dataset=calib_dataset, batch_size=1))
q_model.save(model_args.save_path)
if model_args.benchmark:
import onnx
onnx_model = onnx.load(model_args.input_model)
if (model_args.mode == 'performance'):
from neural_compressor.benchmark import fit
from neural_compressor.config import BenchmarkConfig
b_dataset = IncDataset(eval_dataset, onnx_model)
conf = BenchmarkConfig(iteration=100, cores_per_instance=28, num_of_instance=1)
b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
fit(onnx_model, conf, b_dataloader=b_dataloader)
elif (model_args.mode == 'accuracy'):
eval_f1 = eval_func(onnx_model)
print(('Batch size = %d' % model_args.batch_size))
print(('Accuracy: %.5f' % eval_f1)) |
def json_encode_np(obj):
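# Convert NumPy arrays and scalars to JSON-serializable Python types (intended as a json.dumps default hook; unknown types pass through unchanged).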
if isinstance(obj, np.ndarray):
return list(obj)
elif isinstance(obj, np.float32):
return float(obj)
elif isinstance(obj, np.float64):
return float(obj)
elif isinstance(obj, np.int32):
return int(obj)
elif isinstance(obj, np.int64):
return int(obj)
else:
return obj |
def loadShader(shaderType, shaderFile):
strFilename = findFileOrThrow(shaderFile)
shaderData = None
with open(strFilename, 'r') as f:
shaderData = f.read()
shader = glCreateShader(shaderType)
glShaderSource(shader, shaderData)
glCompileShader(shader)
status = glGetShaderiv(shader, GL_COMPILE_STATUS)
if (status == GL_FALSE):
strInfoLog = glGetShaderInfoLog(shader)
strShaderType = ''
# Compare shader-type enums by value; identity ('is') checks on GL constants are unreliable.
if (shaderType == GL_VERTEX_SHADER):
strShaderType = 'vertex'
elif (shaderType == GL_GEOMETRY_SHADER):
strShaderType = 'geometry'
elif (shaderType == GL_FRAGMENT_SHADER):
strShaderType = 'fragment'
print(((('Compilation failure for ' + strShaderType) + ' shader:\n') + str(strInfoLog)))
return shader |
class PyTorchBenchmarkArguments():
def __init__(self, *args, **kwargs):
requires_pytorch(self) |
def validsample(model, val_inputs, train_opt):
try:
val_inputs = [val_inputs['ret_img'], val_inputs['ret_img'], val_inputs['ret_img'], val_inputs['ret_img_mo']]
except (TypeError, KeyError):  # val_inputs is a plain 4-tuple rather than a dict
(xfull, xbg, xid, xmo) = val_inputs
val_inputs = [xfull, xfull, xfull, xmo]
val_inputs = [item.to(train_opt['device']) for item in val_inputs]
output = model(val_inputs, is_training=False, writer=None)
cur_ssim = (output['ssim_metric'].clone().detach().cpu().numpy() if ('ssim_metric' in output.keys()) else 0)
cur_rec = (output['rec_loss'].clone().detach().cpu().numpy() if ('rec_loss' in output.keys()) else 0)
cur_lpips = (output['lpips_loss'].clone().detach().cpu().numpy() if ('lpips_loss' in output.keys()) else 0)
print(f'SSIM: {cur_ssim} MSE: {cur_rec} LPIPS: {cur_lpips}') |
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int, int]) -> Tuple[int, int]:
cfg = model.cfg
(w, h) = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
(_, data_sample) = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape |
class APIPool():
def __init__(self, base_model_path='stabilityai/stable-diffusion-xl-base-1.0', image_encoder_path='checkpoints/sdxl_models/image_encoder', ip_ckpt='checkpoints/sdxl_models/ip-adapter_sdxl.bin', device='cuda') -> None:
self.pipe = StableDiffusionXLPipeline.from_pretrained(base_model_path, torch_dtype=torch.float16, add_watermarker=False)
self.pipe.to(device)
self.ip_model = IPAdapterXL(self.pipe, image_encoder_path, ip_ckpt, device)
self.t2i = DiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float16, add_watermarker=False, local_files_only=True)
self.t2i.to(device)
self.a_prompt = 'best quality, extremely detailed'
self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
def text_to_image(self, prompt, num_samples=1):
prompt = ((prompt + ', ') + self.a_prompt)
images = self.t2i(prompt, negative_prompt=self.n_prompt, num_images_per_prompt=num_samples, num_inference_steps=30).images
return image_grid(images)
def variation(self, image, num_samples=1):
images = self.ip_model.generate(pil_image=image, num_samples=num_samples, num_inference_steps=30)
return image_grid(images)
def edit(self, image, prompt, num_samples=1):
prompt = ((prompt + ', ') + self.a_prompt)
images = self.ip_model.generate(pil_image=image, num_samples=num_samples, num_inference_steps=30, prompt=prompt, scale=0.3, negative_prompt=self.n_prompt)
return image_grid(images) |
def clip_grad_norm(model, max_norm, norm_type=2, optimizer=None, process_group_name_prefix=''):
if isinstance(optimizer, DeepSpeedZeroOptimizer):
assert (norm_type == 2), 'deep speed zero optimizer only supports L2 norm'
optimizer.clip_grad = max_norm
return None
if ((torch_version() >= (1, 12, 1)) and (torch_version() <= (1, 13, 1))):
from torch.distributed.fsdp.fully_sharded_data_parallel import TrainingState_ as TrainingState
elif (torch_version() >= (2, 0, 0)):
from torch.distributed.fsdp._common_utils import TrainingState
else:
TrainingState = None
counter = getattr(model, '_auto_acc_ctx_counter', 0)
if (counter == 0):
raise ValueError('model is not returned by auto_accelerate')
strategy_opt_names = AutoAccelerateContext.strategy_opt_names[counter]
if (('amp_native' in strategy_opt_names) and hasattr(AutoAccelerateContext, 'amp_native_grad_scaler') and (AutoAccelerateContext.amp_native_grad_scaler.get(counter) is not None)):
if (optimizer is None):
raise TypeError('Before cliping gradient norm, gradient values need to be unscaled. Please pass optimizer when calling `clip_grad_norm`.')
optimizer.unscale_()
def calculate_grad_norm(parameters, norm_type) -> torch.Tensor:
parameters = [p for p in parameters if (p.grad is not None)]
if (len(parameters) == 0):
return torch.tensor(0.0)
if (norm_type == math.inf):
local_norm = torch.tensor(max((par.grad.detach().abs().max() for par in parameters)))
else:
local_norm = torch.linalg.vector_norm(torch.stack([torch.linalg.vector_norm(par.grad.detach(), norm_type, dtype=torch.float32) for par in parameters]), norm_type)
local_norm = local_norm.to(dtype=parameters[0].dtype)  # Tensor.to returns a new tensor; the result must be reassigned.
return local_norm
parameters = model.parameters()
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
max_norm = float(max_norm)
norm_type = float(norm_type)
use_fsdp = ('fsdp' in strategy_opt_names)
use_zero2 = ('zero2' in strategy_opt_names)
use_tp = ('tensor_parallel' in strategy_opt_names)
data_process_group = parallel_group((process_group_name_prefix + 'data'))
tensor_parallel_group = parallel_group((process_group_name_prefix + 'tensor'))
if ((not use_fsdp) and (not use_zero2) and (not use_tp)):
total_norm = calculate_grad_norm(parameters, norm_type)
elif (use_fsdp or use_zero2):
if ((torch_version() >= (1, 12, 1)) and (torch_version() <= (1, 13, 1))):
model._lazy_init()
model._wait_for_previous_optim_step()
assert model._is_root, 'clip_grad_norm should only be called on the root (parent) instance'
model._assert_state(TrainingState.IDLE)
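# Each rank holds only a gradient shard, so combine local norms across the data-parallel group: MAX for the inf-norm, SUM of norm_type-th powers otherwise.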
local_norm = calculate_grad_norm(parameters, norm_type).cuda()
if (norm_type == math.inf):
total_norm = local_norm
dist.all_reduce(total_norm, op=dist.ReduceOp.MAX, group=data_process_group)
if use_tp:
dist.all_reduce(total_norm, op=dist.ReduceOp.MAX, group=tensor_parallel_group)
else:
total_norm = (local_norm ** norm_type)
dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=data_process_group)
if use_tp:
dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=tensor_parallel_group)
total_norm = (total_norm ** (1.0 / norm_type))
elif (torch_version() >= (2, 0, 0)):
import torch.distributed.fsdp._traversal_utils as traversal_utils
from torch.distributed.fsdp._runtime_utils import _lazy_init
_lazy_init(model, model)
if (not model._is_root):
raise RuntimeError('`clip_grad_norm_()` should only be called on the root FSDP instance')
model._assert_state(TrainingState.IDLE)
all_no_shard = all(((not handle.uses_sharded_strategy) for handle in traversal_utils._get_fsdp_handles(model)))
if all_no_shard:
return torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, norm_type)
max_norm = float(max_norm)
norm_type = float(norm_type)
sharded_params = set()
nonsharded_params = set()
grads = []
for handle in traversal_utils._get_fsdp_handles(model):
target_set = (sharded_params if handle.uses_sharded_strategy else nonsharded_params)
if handle._use_orig_params:
for param in handle.flat_param._params:
target_set.add(param)
if (param.grad is not None):
grads.append(param.grad)
else:
target_set.add(handle.flat_param)
if (handle.flat_param.grad is not None):
grads.append(handle.flat_param.grad)
for param in model.parameters():
not_fsdp_managed = ((param not in sharded_params) and (param not in nonsharded_params))
if not_fsdp_managed:
nonsharded_params.add(param)
if (param.grad is not None):
grads.append(param.grad)
local_sharded_norm = calculate_grad_norm(sharded_params, norm_type).to(model.compute_device)
local_nonsharded_norm = calculate_grad_norm(nonsharded_params, norm_type).to(model.compute_device)
if (norm_type == math.inf):
total_norm = torch.maximum(local_sharded_norm, local_nonsharded_norm)
dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=model.process_group)
else:
total_norm = (local_sharded_norm ** norm_type)
dist.all_reduce(total_norm, group=model.process_group)
total_norm += (local_nonsharded_norm ** norm_type)
total_norm = (total_norm ** (1.0 / norm_type))
if model.cpu_offload.offload_params:
total_norm = total_norm.cpu()
elif (use_tp and (not (use_fsdp or use_zero2))):
total_norm = calculate_grad_norm(parameters, norm_type).cuda()
if (norm_type == math.inf):
dist.all_reduce(total_norm, op=dist.ReduceOp.MAX, group=tensor_parallel_group)
else:
total_norm = (total_norm ** norm_type)
dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=tensor_parallel_group)
total_norm = (total_norm ** (1.0 / norm_type))
clip_coef = (max_norm / (total_norm + 1e-06))
clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
for p in parameters:
if (p.grad is not None):
p.grad.detach().mul_(clip_coef_clamped.to(p.grad.device))
return total_norm |
class B2_VGG(nn.Module):
def __init__(self):
super(B2_VGG, self).__init__()
conv1 = nn.Sequential()
conv1.add_module('conv1_1', nn.Conv2d(3, 64, 3, 1, 1))
conv1.add_module('relu1_1', nn.ReLU(inplace=True))
conv1.add_module('conv1_2', nn.Conv2d(64, 64, 3, 1, 1))
conv1.add_module('relu1_2', nn.ReLU(inplace=True))
self.conv1 = conv1
conv2 = nn.Sequential()
conv2.add_module('pool1', nn.AvgPool2d(2, stride=2))
conv2.add_module('conv2_1', nn.Conv2d(64, 128, 3, 1, 1))
conv2.add_module('relu2_1', nn.ReLU())
conv2.add_module('conv2_2', nn.Conv2d(128, 128, 3, 1, 1))
conv2.add_module('relu2_2', nn.ReLU())
self.conv2 = conv2
conv3 = nn.Sequential()
conv3.add_module('pool2', nn.AvgPool2d(2, stride=2))
conv3.add_module('conv3_1', nn.Conv2d(128, 256, 3, 1, 1))
conv3.add_module('relu3_1', nn.ReLU())
conv3.add_module('conv3_2', nn.Conv2d(256, 256, 3, 1, 1))
conv3.add_module('relu3_2', nn.ReLU())
conv3.add_module('conv3_3', nn.Conv2d(256, 256, 3, 1, 1))
conv3.add_module('relu3_3', nn.ReLU())
self.conv3 = conv3
conv4_1 = nn.Sequential()
conv4_1.add_module('pool3_1', nn.AvgPool2d(2, stride=2))
conv4_1.add_module('conv4_1_1', nn.Conv2d(256, 512, 3, 1, 1))
conv4_1.add_module('relu4_1_1', nn.ReLU())
conv4_1.add_module('conv4_2_1', nn.Conv2d(512, 512, 3, 1, 1))
conv4_1.add_module('relu4_2_1', nn.ReLU())
conv4_1.add_module('conv4_3_1', nn.Conv2d(512, 512, 3, 1, 1))
conv4_1.add_module('relu4_3_1', nn.ReLU())
self.conv4_1 = conv4_1
conv5_1 = nn.Sequential()
conv5_1.add_module('pool4_1', nn.AvgPool2d(2, stride=2))
conv5_1.add_module('conv5_1_1', nn.Conv2d(512, 512, 3, 1, 1))
conv5_1.add_module('relu5_1_1', nn.ReLU())
conv5_1.add_module('conv5_2_1', nn.Conv2d(512, 512, 3, 1, 1))
conv5_1.add_module('relu5_2_1', nn.ReLU())
conv5_1.add_module('conv5_3_1', nn.Conv2d(512, 512, 3, 1, 1))
conv5_1.add_module('relu5_3_1', nn.ReLU())
self.conv5_1 = conv5_1
conv4_2 = nn.Sequential()
conv4_2.add_module('pool3_2', nn.AvgPool2d(2, stride=2))
conv4_2.add_module('conv4_1_2', nn.Conv2d(256, 512, 3, 1, 1))
conv4_2.add_module('relu4_1_2', nn.ReLU())
conv4_2.add_module('conv4_2_2', nn.Conv2d(512, 512, 3, 1, 1))
conv4_2.add_module('relu4_2_2', nn.ReLU())
conv4_2.add_module('conv4_3_2', nn.Conv2d(512, 512, 3, 1, 1))
conv4_2.add_module('relu4_3_2', nn.ReLU())
self.conv4_2 = conv4_2
conv5_2 = nn.Sequential()
conv5_2.add_module('pool4_2', nn.AvgPool2d(2, stride=2))
conv5_2.add_module('conv5_1_2', nn.Conv2d(512, 512, 3, 1, 1))
conv5_2.add_module('relu5_1_2', nn.ReLU())
conv5_2.add_module('conv5_2_2', nn.Conv2d(512, 512, 3, 1, 1))
conv5_2.add_module('relu5_2_2', nn.ReLU())
conv5_2.add_module('conv5_3_2', nn.Conv2d(512, 512, 3, 1, 1))
conv5_2.add_module('relu5_3_2', nn.ReLU())
self.conv5_2 = conv5_2
# torch.load does not expand '~', so resolve the home directory explicitly.
pre_train = torch.load(os.path.expanduser('~/.torch/models/vgg16-397923af.pth'))
self._initialize_weights(pre_train)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x1 = self.conv4_1(x)
x1 = self.conv5_1(x1)
x2 = self.conv4_2(x)
x2 = self.conv5_2(x2)
return (x1, x2)
def _initialize_weights(self, pre_train):
# dict.keys() is not indexable in Python 3; materialize the ordered key list first.
keys = list(pre_train.keys())
self.conv1.conv1_1.weight.data.copy_(pre_train[keys[0]])
self.conv1.conv1_2.weight.data.copy_(pre_train[keys[2]])
self.conv2.conv2_1.weight.data.copy_(pre_train[keys[4]])
self.conv2.conv2_2.weight.data.copy_(pre_train[keys[6]])
self.conv3.conv3_1.weight.data.copy_(pre_train[keys[8]])
self.conv3.conv3_2.weight.data.copy_(pre_train[keys[10]])
self.conv3.conv3_3.weight.data.copy_(pre_train[keys[12]])
self.conv4_1.conv4_1_1.weight.data.copy_(pre_train[keys[14]])
self.conv4_1.conv4_2_1.weight.data.copy_(pre_train[keys[16]])
self.conv4_1.conv4_3_1.weight.data.copy_(pre_train[keys[18]])
self.conv5_1.conv5_1_1.weight.data.copy_(pre_train[keys[20]])
self.conv5_1.conv5_2_1.weight.data.copy_(pre_train[keys[22]])
self.conv5_1.conv5_3_1.weight.data.copy_(pre_train[keys[24]])
self.conv4_2.conv4_1_2.weight.data.copy_(pre_train[keys[14]])
self.conv4_2.conv4_2_2.weight.data.copy_(pre_train[keys[16]])
self.conv4_2.conv4_3_2.weight.data.copy_(pre_train[keys[18]])
self.conv5_2.conv5_1_2.weight.data.copy_(pre_train[keys[20]])
self.conv5_2.conv5_2_2.weight.data.copy_(pre_train[keys[22]])
self.conv5_2.conv5_3_2.weight.data.copy_(pre_train[keys[24]])
self.conv1.conv1_1.bias.data.copy_(pre_train[keys[1]])
self.conv1.conv1_2.bias.data.copy_(pre_train[keys[3]])
self.conv2.conv2_1.bias.data.copy_(pre_train[keys[5]])
self.conv2.conv2_2.bias.data.copy_(pre_train[keys[7]])
self.conv3.conv3_1.bias.data.copy_(pre_train[keys[9]])
self.conv3.conv3_2.bias.data.copy_(pre_train[keys[11]])
self.conv3.conv3_3.bias.data.copy_(pre_train[keys[13]])
self.conv4_1.conv4_1_1.bias.data.copy_(pre_train[keys[15]])
self.conv4_1.conv4_2_1.bias.data.copy_(pre_train[keys[17]])
self.conv4_1.conv4_3_1.bias.data.copy_(pre_train[keys[19]])
self.conv5_1.conv5_1_1.bias.data.copy_(pre_train[keys[21]])
self.conv5_1.conv5_2_1.bias.data.copy_(pre_train[keys[23]])
self.conv5_1.conv5_3_1.bias.data.copy_(pre_train[keys[25]])
self.conv4_2.conv4_1_2.bias.data.copy_(pre_train[keys[15]])
self.conv4_2.conv4_2_2.bias.data.copy_(pre_train[keys[17]])
self.conv4_2.conv4_3_2.bias.data.copy_(pre_train[keys[19]])
self.conv5_2.conv5_1_2.bias.data.copy_(pre_train[keys[21]])
self.conv5_2.conv5_2_2.bias.data.copy_(pre_train[keys[23]])
self.conv5_2.conv5_3_2.bias.data.copy_(pre_train[keys[25]]) |
class TestPruningTypes(unittest.TestCase):
model = torchvision.models.resnet18()
def test_pruning_types(self):
compression_manager = prepare_compression(model=self.model, confs=fake_snip_config)
compression_manager.callbacks.on_train_begin()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001)
datasets = Datasets('pytorch')
dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True)
dummy_dataloader = PyTorchDataLoader(dummy_dataset)
compression_manager.callbacks.on_train_begin()
for epoch in range(2):
self.model.train()
compression_manager.callbacks.on_epoch_begin(epoch)
local_step = 0
for (image, target) in dummy_dataloader:
compression_manager.callbacks.on_step_begin(local_step)
output = self.model(image)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
compression_manager.callbacks.on_before_optimizer_step()
optimizer.step()
compression_manager.callbacks.on_after_optimizer_step()
compression_manager.callbacks.on_step_end()
local_step += 1
compression_manager.callbacks.on_epoch_end()
compression_manager.callbacks.on_train_end()
compression_manager.callbacks.on_before_eval()
compression_manager.callbacks.on_after_eval()
from neural_compressor.compression.pruner.utils import parse_to_prune
for config in compression_manager.conf.pruning.pruning_configs:
zero_cnt = 0
all_cnt = 0
layers = list(parse_to_prune(config=config, model=self.model).keys())
for layer in layers:
layer_weight = self.model.state_dict()[(layer + '.weight')]
zero_cnt += (layer_weight == 0).sum().item()
all_cnt += layer_weight.numel()
sparsity = ((layer_weight == 0.0).sum().item() / layer_weight.numel())
print(layer, sparsity, (layer_weight == 0.0).sum().item(), layer_weight.numel()) |
class LossWrapperBase(nn.Module):
def __init__(self):
super().__init__()
self._LossModuleDict = nn.ModuleDict()
def __iter__(self):
for (k, v) in self._LossModuleDict.items():
(yield v)
def __getitem__(self, item):
if (item in self._LossModuleDict.keys()):
return self._LossModuleDict[item]
raise KeyError(item)  # mapping-style lookup failures should raise KeyError, not IndexError
def items(self):
return self._LossModuleDict.items() |
def _parse_args():
parser = argparse.ArgumentParser(description='\n This script will walk into each folder in path, read and count h5f\n files with nonzero image frames, read pre-existing train/val/test\n txt files, and split all the h5f files in this directory into\n success_only, task_failure_only, error_failure_only, and\n task_and_error_failure txt files.\n\n Path defaults to ~/.keras/datasets/costar_block_stacking_dataset_v0.4/\n We expect that folder will contain directories containing h5f files.\n This is done to split the dataset across various collection runs.\n Details can be found in the "folder structure" section of\n To split the success_only subset or to add new files to the\n success_only subset, use the --success_only flag.\n\n Use --help to see all possible uses for this function.\n ')
parser.add_argument('--path', type=str, default=os.path.join(os.path.expanduser('~'), '.keras/datasets/costar_block_stacking_dataset_v0.4/'), help='path to dataset folder containing many files')
parser.add_argument('--dataset_path', type=str, default='~/.keras/dataset/', help='The folder that is expected stores the dataset. Filenames in the output file will reference this path.')
parser.add_argument('--dataset_name', type=str, default='costar_block_stacking_dataset_v0.4', help='Dataset name to store under dataset path.Filenames in the output file will reference this name.')
parser.add_argument('--success_only', action='store_true', default=False, help='Only visit stacking data labeled as successful')
parser.add_argument('--output_name', type=str, default='costar_block_stacking_dataset', help='output file name')
parser.add_argument('--val_len', type=int, default=None, help='Expected val set length')
parser.add_argument('--test_len', type=int, default=None, help='Expected test set length')
parser.add_argument('--seed', type=int, default=0, help='Random seed for reproducing the output lists')
parser.add_argument('--write', action='store_true', default=False, help='Write to output files')
parser.add_argument('--existing_file_prefix', type=str, nargs='+', default=['costar_plush_block_stacking_v0.4', 'costar_block_stacking_v0.4'], help='Existing txt file prefixes to look for when opening train/val/test files.')
return vars(parser.parse_args()) |
def calibrate_thresholds(K, *args):
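# Rescale pixel-space inlier thresholds into normalized camera coordinates by dividing by the mean focal length taken from the intrinsics K.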
(fx, fy) = (K[(0, 0)], K[(1, 1)])
factor = (1.0 / (0.5 * (fx + fy)))
for cfg in args:
if isinstance(cfg, dict):
key = [k for k in cfg.keys() if ('threshold' in k.lower())]
assert (len(key) == 1)
cfg[key[0]] *= factor
elif isinstance(cfg, cv2.UsacParams):
cfg.threshold *= factor
else:
raise TypeError |
class TestGradient(unittest.TestCase):
def test_odeint(self):
for device in DEVICES:
for method in METHODS:
if (method == 'scipy_solver'):
continue
with self.subTest(device=device, method=method):
(f, y0, t_points, _) = construct_problem(device=device)
func = (lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method=method))
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adjoint(self):
for device in DEVICES:
for method in METHODS:
with self.subTest(device=device, method=method):
(f, y0, t_points, _) = construct_problem(device=device)
func = (lambda y0, t_points: torchdiffeq.odeint_adjoint(f, y0, t_points, method=method))
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adjoint_against_odeint(self):
for device in DEVICES:
for ode in PROBLEMS:
for t_grad in (True, False):
if (ode == 'constant'):
eps = 1e-12
elif (ode == 'linear'):
eps = 1e-05
elif (ode == 'sine'):
eps = 0.005
else:
raise RuntimeError
with self.subTest(device=device, ode=ode, t_grad=t_grad):
(f, y0, t_points, _) = construct_problem(device=device, ode=ode)
t_points.requires_grad_(t_grad)
ys = torchdiffeq.odeint(f, y0, t_points, rtol=1e-09, atol=1e-12)
torch.manual_seed(0)
gradys = torch.rand_like(ys)
ys.backward(gradys)
reg_y0_grad = y0.grad.clone()
reg_t_grad = (t_points.grad.clone() if t_grad else None)
reg_params_grads = []
for param in f.parameters():
reg_params_grads.append(param.grad.clone())
y0.grad.zero_()
if t_grad:
t_points.grad.zero_()
for param in f.parameters():
param.grad.zero_()
ys = torchdiffeq.odeint_adjoint(f, y0, t_points, rtol=1e-09, atol=1e-12)
ys.backward(gradys)
adj_y0_grad = y0.grad
adj_t_grad = (t_points.grad if t_grad else None)
adj_params_grads = []
for param in f.parameters():
adj_params_grads.append(param.grad)
self.assertLess(max_abs((reg_y0_grad - adj_y0_grad)), eps)
if t_grad:
self.assertLess(max_abs((reg_t_grad - adj_t_grad)), eps)
for (reg_grad, adj_grad) in zip(reg_params_grads, adj_params_grads):
self.assertLess(max_abs((reg_grad - adj_grad)), eps) |
def tolerance_clustsolonpath_set(tol):
from phcpy.phcpy2c3 import py2c_set_value_of_continuation_parameter as set
return set(31, tol) |
def weight_l1_loss(pred_loc, label_loc, loss_weight):
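# pred_loc has shape (b, 4*A, h, w); sum |pred - label| over the 4 box offsets per anchor, apply per-anchor weights, and normalize by batch size.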
(b, _, sh, sw) = pred_loc.size()
pred_loc = pred_loc.view(b, 4, (- 1), sh, sw)
diff = (pred_loc - label_loc).abs()
diff = diff.sum(dim=1).view(b, (- 1), sh, sw)
loss = (diff * loss_weight)
return loss.sum().div(b) |
class _RPN(nn.Module):
def __init__(self, din):
super(_RPN, self).__init__()
self.din = din
self.anchor_scales = cfg.ANCHOR_SCALES
self.anchor_ratios = cfg.ANCHOR_RATIOS
self.feat_stride = cfg.FEAT_STRIDE[0]
self.RPN_Conv = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)
self.nc_score_out = ((len(self.anchor_scales) * len(self.anchor_ratios)) * 2)
self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)
self.nc_bbox_out = ((len(self.anchor_scales) * len(self.anchor_ratios)) * 4)
self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)
self.RPN_proposal = _ProposalLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)
self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)
self.rpn_loss_cls = 0
self.rpn_loss_box = 0
@staticmethod
def reshape(x, d):
input_shape = x.size()
x = x.view(input_shape[0], int(d), int((float((input_shape[1] * input_shape[2])) / float(d))), input_shape[3])
return x
def forward(self, base_feat, im_info, gt_boxes, num_boxes):
batch_size = base_feat.size(0)
rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)
rpn_cls_score = self.RPN_cls_score(rpn_conv1)
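# Fold the anchor dimension into height so a softmax over dim=1 yields per-anchor background/foreground probabilities.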
rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)
rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, 1)
rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)
rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)
cfg_key = ('TRAIN' if self.training else 'TEST')
rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data, im_info, cfg_key))
self.rpn_loss_cls = 0
self.rpn_loss_box = 0
if self.training:
assert (gt_boxes is not None)
rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))
rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, (- 1), 2)
rpn_label = rpn_data[0].view(batch_size, (- 1))
rpn_keep = Variable(rpn_label.view((- 1)).ne((- 1)).nonzero().view((- 1)))
rpn_cls_score = torch.index_select(rpn_cls_score.view((- 1), 2), 0, rpn_keep)
rpn_label = torch.index_select(rpn_label.view((- 1)), 0, rpn_keep.data)
rpn_label = Variable(rpn_label.long())
self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)
fg_cnt = torch.sum(rpn_label.data.ne(0))
(rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights) = rpn_data[1:]
rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)
rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)
rpn_bbox_targets = Variable(rpn_bbox_targets)
self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights, sigma=3, dim=[1, 2, 3])
return (rois, self.rpn_loss_cls, self.rpn_loss_box) |
def get_global_rank() -> int:
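# Prefer torch.distributed when initialized, then launcher environment variables (torchrun's RANK, SLURM_PROCID), defaulting to rank 0.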
if (dist.is_available() and dist.is_initialized()):
return dist.get_rank()
if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
rank = int(os.environ['RANK'])
elif (int(os.environ.get('SLURM_NPROCS', 1)) > 1):
rank = int(os.environ['SLURM_PROCID'])
else:
rank = 0
return rank |
def main(infile, outfile, num_symbols, min_frequency=2, verbose=False, is_dict=False):
outfile.write('#version: 0.2\n')
vocab = get_vocabulary(infile, is_dict)
vocab = dict([((tuple(x[:(- 1)]) + ((x[(- 1)] + '</w>'),)), y) for (x, y) in vocab.items()])
sorted_vocab = sorted(vocab.items(), key=(lambda x: x[1]), reverse=True)
(stats, indices) = get_pair_statistics(sorted_vocab)
big_stats = copy.deepcopy(stats)
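# big_stats keeps the full pair counts; the working stats dict is pruned of rare pairs for speed and refreshed from big_stats whenever the best pair drops below the threshold.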
threshold = (max(stats.values()) / 10)
for i in range(num_symbols):
if stats:
most_frequent = max(stats, key=(lambda x: (stats[x], x)))
if ((not stats) or (i and (stats[most_frequent] < threshold))):
prune_stats(stats, big_stats, threshold)
stats = copy.deepcopy(big_stats)
most_frequent = max(stats, key=(lambda x: (stats[x], x)))
threshold = ((stats[most_frequent] * i) / (i + 10000.0))
prune_stats(stats, big_stats, threshold)
if (stats[most_frequent] < min_frequency):
sys.stderr.write('no pair has frequency >= {0}. Stopping\n'.format(min_frequency))
break
if verbose:
sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(i, most_frequent[0], most_frequent[1], stats[most_frequent]))
outfile.write('{0} {1}\n'.format(*most_frequent))
changes = replace_pair(most_frequent, sorted_vocab, indices)
update_pair_statistics(most_frequent, changes, stats, indices)
stats[most_frequent] = 0
if (not (i % 100)):
prune_stats(stats, big_stats, threshold) |
def search_topics(search_string, max_results=50):
with closing(getDb().cursor()) as cur:
sql = "SELECT topic FROM topics WHERE topic \n LIKE CONCAT(LOWER(%s), '%') LIMIT %s"
cur.execute(sql, (search_string, max_results))
return [x[0] for x in cur.fetchall()] |
def hard2chandep(chan_deps, params=None, device=None):
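# Decode a 3-channel depth head: channel 2 is a sigmoid gate that picks, per pixel, one of the two ReLU'd depth channels scaled by params['depth_maxrange'].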
if (device is None):
device = torch.device('cpu')
split_deps = torch.split(chan_deps, 1, 1)
split_deps = list(split_deps)
(b_sz, __, h_sz, w_sz) = split_deps[0].size()
alpha = torch.sigmoid(split_deps[2])
max_dep = (alpha > 0.5).long()
max_dep = max_dep.permute(0, 2, 3, 1).view((- 1), 1)
dep_cat = (torch.cat((F.relu(split_deps[1]), F.relu(split_deps[0])), 1) * params['depth_maxrange'])
dep_cat = dep_cat.permute(0, 2, 3, 1).view((- 1), 2)
final_dep = torch.gather(dep_cat, 1, max_dep)
final_dep = final_dep.view(b_sz, h_sz, w_sz, 1).permute(0, 3, 1, 2)
return final_dep |
class RASampler(torch.utils.data.Sampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, num_repeats: int=3):
if (num_replicas is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
num_replicas = dist.get_world_size()
if (rank is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
rank = dist.get_rank()
if (num_repeats < 1):
raise ValueError('num_repeats should be greater than 0')
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_repeats = num_repeats
self.epoch = 0
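# Repeated augmentation: every sample is repeated num_repeats times, each rank draws ceil(len(dataset) * num_repeats / num_replicas) indices, but only num_selected_samples are yielded per epoch.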
self.num_samples = int(math.ceil(((len(self.dataset) * self.num_repeats) / self.num_replicas)))
self.total_size = (self.num_samples * self.num_replicas)
self.num_selected_samples = int(math.floor((((len(self.dataset) // 256) * 256) / self.num_replicas)))
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g)
else:
indices = torch.arange(start=0, end=len(self.dataset))
indices = torch.repeat_interleave(indices, repeats=self.num_repeats, dim=0).tolist()
padding_size: int = (self.total_size - len(indices))
if (padding_size > 0):
indices += indices[:padding_size]
assert (len(indices) == self.total_size)
indices = indices[self.rank:self.total_size:self.num_replicas]
assert (len(indices) == self.num_samples)
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch |
def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
return (lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, *args, layer_norm=layer_norm, **kwargs)) |
def _tower_loss(scope, images, labels, network, dataset, num_classes, top_name, tf_training, kargs):
logits = network.network(images, num_classes=num_classes, scope=top_name, is_training=tf_training, kargs=kargs)
(total_loss, re_loss) = network.loss(scope, logits, labels)
metric_op = network.metric_op(logits, labels)
return (total_loss, re_loss, metric_op) |
def rename_layernorm_keys(sd):
keys = ['model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias']
for k in keys:
v = sd.pop(k)
new_k = k.replace('layernorm_embedding', 'layer_norm')
assert (new_k not in sd)
sd[new_k] = v |
def UnPickleTM(file):
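# Load a legacy TensorMol pickle: remap old global names under Python 2, use a latin1-decoding custom unpickler under Python 3, then drop stale callables and checkpoint paths from the dict.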
tmp = None
if (sys.version_info[0] < 3):
f = open(file, 'rb')
unpickler = pickle.Unpickler(f)
unpickler.dispatch[pickle.GLOBAL] = mapped_load_global
tmp = unpickler.load()
f.close()
else:
f = open(file, 'rb')
unpickler = MyUnpickler(f, encoding='latin1')
tmp = unpickler.load()
f.close()
tmp.pop('evaluate', None)
tmp.pop('MolInstance_fc_sqdiff_BP', None)
tmp.pop('Eval_BPForceSingle', None)
tmp.pop('TFMolManage', None)
tmp.pop('TFManage', None)
tmp.pop('Prepare', None)
tmp.pop('load', None)
tmp.pop('Load', None)
tmp.pop('TensorMol.TFMolManage.path', None)
tmp.pop('TensorMol.TFMolManage.Load', None)
tmp.pop('TensorMol.TFMolManage.Prepare', None)
tmp.pop('TensorMol.TFInstance', None)
tmp.pop('TensorMol.TFInstance.train_dir', None)
tmp.pop('TensorMol.TFMolInstance.train_dir', None)
tmp.pop('TensorMol.TFInstance.chk_file', None)
tmp.pop('TensorMol.TFMolInstance.chk_file', None)
tmp.pop('save', None)
tmp.pop('Save', None)
tmp.pop('Trainable', None)
tmp.pop('TFMolManage.Trainable', None)
tmp.pop('__init__', None)
return tmp |
def mmdet2torchserve(config_file: str, checkpoint_file: str, output_folder: str, model_name: str, model_version: str='1.0', force: bool=False):
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(**{'model_file': f'{tmpdir}/config.py', 'serialized_file': checkpoint_file, 'handler': f'{Path(__file__).parent}/mmdet_handler.py', 'model_name': (model_name or Path(checkpoint_file).stem), 'version': model_version, 'export_path': output_folder, 'force': force, 'requirements_file': None, 'extra_files': None, 'runtime': 'python', 'archive_format': 'default'})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest) |
class LeftPaddingMaskDataset(PaddingMaskDataset):
def __init__(self, dataset):
super().__init__(dataset, left_pad=True) |
class ExampleGreyboxExplainer(ExplainerMixin):
available_explanations = ['local']
explainer_type = 'specific'
def __init__(self, model, data, feature_names=None, feature_types=None):
pass
def explain_local(self, X, y=None, name=None):
return ExampleExplanation() |
class TFTrainingHelper(Layer):
def __init__(self, path, config_proto, saver, meta, sess):
self.saver = saver
self.meta = meta
self.export_dir = path
self.sess = sess
if (config_proto is not None):
import tensorflow as tf
invalidInputError(isinstance(config_proto, tf.ConfigProto), 'session_config should be a tf.ConfigProto')
config_proto.use_per_session_threads = True
byte_arr = bytearray(config_proto.SerializeToString())
else:
byte_arr = None
super(TFTrainingHelper, self).__init__(None, 'float', path, byte_arr)
def save_checkpoint(self):
callZooFunc(self.bigdl_type, 'saveCheckpoint', self.value)
def get_weights_to_python(self):
self.save_checkpoint()
self.saver.restore(self.sess, os.path.join(self.export_dir, 'model'))
def load_checkpoint(self, path):
callZooFunc(self.bigdl_type, 'loadZooCheckpoint', self.value, path)
self.get_weights_to_python() |
def eval(args, model=None):
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
print('the length penalty is {}'.format(args.length_penalty))
with torch.no_grad():
model.eval()
model = model.cuda()
data_loader = model.test_dataloader()
print('DATALOADER_LEN', len(data_loader))
out_lst = []
for (batch_idx, batch) in enumerate(data_loader):
batch = model.transfer_batch_to_device(batch, model.device)
out = model.test_step(batch, batch_idx)
out_lst.append(out)
if ((batch_idx % 50) == 0):
print(model.test_epoch_end(out_lst))
print(out['preds'])
result = model.test_epoch_end(out_lst)
for (k, v) in result.items():
if (k != 'preds'):
print('FINAL_RESULTS')
print(k, v)
out_path = os.path.join(args.output_dir, 'test_beam_{}'.format(args.length_penalty))
print('writing the test results to ', out_path)
with open(out_path, 'w') as f:
for preds in result['preds']:
print(preds, file=f) |
def _tether_sprites(sprites, updates_per_env_step, update_angle_vel=True, anchor=None):
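# Rigidly tether the sprites: give all of them the velocity of their common
# center of mass (or of the fixed anchor) and, optionally, a shared angular
# velocity that conserves total angular momentum about that point.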
if (len(sprites) == 0):
return
total_mass = sum([s.mass for s in sprites])
if np.isinf(total_mass):
return
center_of_mass = (sum([(s.mass * s.position) for s in sprites]) / total_mass)
total_momentum = sum([(s.mass * s.velocity) for s in sprites])
total_velocity = (total_momentum / total_mass)
if (anchor is not None):
center_of_mass = anchor
total_velocity = np.zeros(2)
if update_angle_vel:
(angular_momenta, moments_of_inertia, radii, perpendiculars) = zip(*[_change_rotation_coordinates(s, center_of_mass, total_velocity, updates_per_env_step) for s in sprites])
total_angular_momentum = sum(angular_momenta)
total_moment_of_inertia = sum(moments_of_inertia)
total_angular_velocity = (total_angular_momentum / total_moment_of_inertia)
for (s, radius, perp) in zip(sprites, radii, perpendiculars):
s.velocity = (total_velocity + ((radius * perp) * total_angular_velocity))
s.angle_vel = total_angular_velocity
else:
for s in sprites:
s.velocity = total_velocity
s.angle_vel = 0.0 |
@pytest.mark.parametrize('env_config', _ALL_ENV_CONFIGS)
def test_render(env_config):
env = gym.make(env_config[0], render_mode='rgb_array', **env_config[1])
env.reset()
frames = []
for _ in range(10):
frames.append(env.render())
env.step(env.action_space.sample())
for frame in frames:
assert isinstance(frame, np.ndarray), f'Expected render frames to be of type `np.ndarray`, got {type(frame)}.'
assert (frame.shape[(- 1)] == 4), f'Expected 4 channels in the rendered image, got {frame.shape[(- 1)]}.'
env.close() |
def temporal_nms(predictions, nms_thd, max_after_nms=100):
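# Greedy temporal NMS: repeatedly keep the highest-scoring (start, end, score)
# segment and discard later segments whose temporal IoU with it exceeds nms_thd.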
if (len(predictions) == 1):
return predictions
predictions = sorted(predictions, key=(lambda x: x[2]), reverse=True)
tstart = [e[0] for e in predictions]
tend = [e[1] for e in predictions]
tscore = [e[2] for e in predictions]
rstart = []
rend = []
rscore = []
while ((len(tstart) > 1) and (len(rscore) < max_after_nms)):
idx = 1
while (idx < len(tstart)):
if (compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]) > nms_thd):
tstart.pop(idx)
tend.pop(idx)
tscore.pop(idx)
else:
idx += 1
rstart.append(tstart.pop(0))
rend.append(tend.pop(0))
rscore.append(tscore.pop(0))
if ((len(rscore) < max_after_nms) and (len(tstart) >= 1)):
rstart.append(tstart.pop(0))
rend.append(tend.pop(0))
rscore.append(tscore.pop(0))
predictions_after_nms = [[st, ed, s] for (s, st, ed) in zip(rscore, rstart, rend)]
return predictions_after_nms |
def get_logger(name='root'):
formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
return logger |
def colorize(diff_lines):
def bold(s):
return (('\x1b[1m' + s) + '\x1b[0m')
def cyan(s):
return (('\x1b[36m' + s) + '\x1b[0m')
def green(s):
return (('\x1b[32m' + s) + '\x1b[0m')
def red(s):
return (('\x1b[31m' + s) + '\x1b[0m')
for line in diff_lines:
if (line[:4] in ['--- ', '+++ ']):
(yield bold(line))
elif line.startswith(' '):
(yield cyan(line))
elif line.startswith('+'):
(yield green(line))
elif line.startswith('-'):
(yield red(line))
else:
(yield line) |
def hsv_node(node_tree: bpy.types.NodeTree, input_node: bpy.types.Node) -> bpy.types.Node:
hsv_node = zpy.nodes.get_or_make('HSV', 'CompositorNodeHueSat', node_tree)
node_tree.links.new(input_node.outputs['Image'], hsv_node.inputs['Image'])
return hsv_node |
def read_cifar10(filename_queue):
class CIFAR10Record(object):
pass
result = CIFAR10Record()
label_bytes = 1
result.height = 32
result.width = 32
result.depth = 3
image_bytes = ((result.height * result.width) * result.depth)
record_bytes = (label_bytes + image_bytes)
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
(result.key, value) = reader.read(filename_queue)
record_bytes = tf.decode_raw(value, tf.uint8)
result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)
depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes], [(label_bytes + image_bytes)]), [result.depth, result.height, result.width])
result.uint8image = tf.transpose(depth_major, [1, 2, 0])
return result |
def convSuper(c, **kargs):
return n.LeNet([(32, 3, 3, 1), (32, 4, 4, 1), (64, 3, 3, 1), (64, 4, 4, 1)], [512, 512, c], last_lin=True, **kargs) |
class DglLinkPropPredDataset(object):
'Adapted from ...'
def __init__(self, name, root='dataset', meta_dict=None):
self.name = name
if (meta_dict is None):
self.dir_name = '_'.join(name.split('-'))
if osp.exists(osp.join(root, (self.dir_name + '_dgl'))):
self.dir_name = (self.dir_name + '_dgl')
self.original_root = root
self.root = osp.join(root, self.dir_name)
master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col=0)
if (not (self.name in master)):
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(master.keys())
raise ValueError(error_mssg)
self.meta_info = master[self.name]
else:
self.dir_name = meta_dict['dir_path']
self.original_root = ''
self.root = meta_dict['dir_path']
self.meta_info = meta_dict
if (osp.isdir(self.root) and (not osp.exists(osp.join(self.root, (('RELEASE_v' + str(self.meta_info['version'])) + '.txt'))))):
print((self.name + ' has been updated.'))
if (input('Will you update the dataset now? (y/N)\n').lower() == 'y'):
shutil.rmtree(self.root)
self.download_name = self.meta_info['download_name']
self.task_type = self.meta_info['task type']
self.eval_metric = self.meta_info['eval metric']
self.is_hetero = (self.meta_info['is hetero'] == 'True')
self.binary = (self.meta_info['binary'] == 'True')
super(DglLinkPropPredDataset, self).__init__()
self.pre_process()
def pre_process(self):
processed_dir = osp.join(self.root, 'processed')
pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')
if osp.exists(pre_processed_file_path):
(self.graph, _) = load_graphs(pre_processed_file_path)
else:
if self.binary:
has_necessary_file_simple = (osp.exists(osp.join(self.root, 'raw', 'data.npz')) and (not self.is_hetero))
has_necessary_file_hetero = (osp.exists(osp.join(self.root, 'raw', 'edge_index_dict.npz')) and self.is_hetero)
else:
has_necessary_file_simple = (osp.exists(osp.join(self.root, 'raw', 'edge.csv.gz')) and (not self.is_hetero))
has_necessary_file_hetero = (osp.exists(osp.join(self.root, 'raw', 'triplet-type-list.csv.gz')) and self.is_hetero)
has_necessary_file = (has_necessary_file_simple or has_necessary_file_hetero)
if (not has_necessary_file):
url = self.meta_info['url']
if decide_download(url):
path = download_url(url, self.original_root)
extract_zip(path, self.original_root)
os.unlink(path)
try:
shutil.rmtree(self.root)
except:
pass
shutil.move(osp.join(self.original_root, self.download_name), self.root)
else:
print('Stop download.')
exit((- 1))
raw_dir = osp.join(self.root, 'raw')
add_inverse_edge = (self.meta_info['add_inverse_edge'] == 'True')
if (self.meta_info['additional node files'] == 'None'):
additional_node_files = []
else:
additional_node_files = self.meta_info['additional node files'].split(',')
if (self.meta_info['additional edge files'] == 'None'):
additional_edge_files = []
else:
additional_edge_files = self.meta_info['additional edge files'].split(',')
if self.is_hetero:
graph = read_heterograph_dgl(raw_dir, add_inverse_edge=add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files, binary=self.binary)[0]
else:
graph = read_graph_dgl(raw_dir, add_inverse_edge=add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files, binary=self.binary)[0]
print('Saving...')
save_graphs(pre_processed_file_path, graph, {})
(self.graph, _) = load_graphs(pre_processed_file_path)
def get_edge_split(self, split_type=None):
if (split_type is None):
split_type = self.meta_info['split']
path = osp.join(self.root, 'split', split_type)
if os.path.isfile(os.path.join(path, 'split_dict.pt')):
return torch.load(os.path.join(path, 'split_dict.pt'))
train = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'train.pt')))
valid = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'valid.pt')))
test = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'test.pt')))
return {'train': train, 'valid': valid, 'test': test}
def __getitem__(self, idx):
assert (idx == 0), 'This dataset has only one graph'
return self.graph[0]
def __len__(self):
return 1
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, len(self)) |
def add_itm_params(parser: argparse.ArgumentParser):
parser.add_argument('--conf_th', default=0.2, type=float, help='')
parser.add_argument('--caption_score_weight', default=0.0, type=float, help='')
parser.add_argument('--negative_size', default=10, type=int, help='')
parser.add_argument('--num_hard_negatives', default=0, type=int, help='')
parser.add_argument('--sample_init_hard_negatives', action='store_true', help='')
parser.add_argument('--hard_negatives_sampling', default='none', type=str, choices=['none', 'random', 'top', 'top-random', '10-20', '20-30'], help='')
parser.add_argument('--max_bb', default=100, type=int, help='')
parser.add_argument('--min_bb', default=10, type=int, help='')
parser.add_argument('--num_bb', default=36, type=int, help='')
parser.add_argument('--train_txt_dbs', default=None, type=str, help='')
parser.add_argument('--train_img_dbs', default=None, type=str, help='')
parser.add_argument('--txt_db_mapping', default=None, type=str, help='')
parser.add_argument('--img_db_mapping', default=None, type=str, help='')
parser.add_argument('--pretrain_mapping', default=None, type=str, help='')
parser.add_argument('--val_txt_db', default=None, type=str, help='')
parser.add_argument('--val_img_db', default=None, type=str, help='')
parser.add_argument('--test_txt_db', default=None, type=str, help='')
parser.add_argument('--test_img_db', default=None, type=str, help='')
parser.add_argument('--steps_per_hard_neg', default=(- 1), type=int, help='')
parser.add_argument('--inf_minibatch_size', default=400, type=int, help='')
parser.add_argument('--project_dim', default=0, type=int, help='')
parser.add_argument('--cls_concat', default='', type=str, help='')
parser.add_argument('--fix_txt_encoder', action='store_true', help='')
parser.add_argument('--fix_img_encoder', action='store_true', help='')
parser.add_argument('--compressed_db', action='store_true', help='use compressed LMDB')
parser.add_argument('--retrieval_mode', default='both', choices=['img_only', 'txt_only', 'both'], type=str, help='') |
def rollout(env, policy, max_path_length=np.inf, animated=False, ignore_done=False, num_rollouts=1, adapt_batch_size=None):
wrapped_env = env
while hasattr(wrapped_env, '_wrapped_env'):
wrapped_env = wrapped_env._wrapped_env
paths = []
a_bs = adapt_batch_size
for i in range(num_rollouts):
observations = []
actions = []
rewards = []
agent_infos = []
env_infos = []
o = env.reset()
policy.reset()
path_length = 0
while (path_length < max_path_length):
if ((a_bs is not None) and (len(observations) > (a_bs + 1))):
adapt_obs = observations[((- a_bs) - 1):(- 1)]
adapt_act = actions[((- a_bs) - 1):(- 1)]
adapt_next_obs = observations[(- a_bs):]
policy.dynamics_model.switch_to_pre_adapt()
policy.dynamics_model.adapt([np.array(adapt_obs)], [np.array(adapt_act)], [np.array(adapt_next_obs)])
(a, agent_info) = policy.get_action(o)
(next_o, r, d, env_info) = env.step(a)
observations.append(o)
rewards.append(r)
actions.append(a[0])
agent_infos.append(agent_info)
env_infos.append(env_info)
path_length += 1
if (d and (not ignore_done)):
break
o = next_o
if animated:
env.render()
paths.append(dict(observations=observations, actions=actions, rewards=rewards, agent_infos=agent_infos, env_infos=env_infos))
return paths |
def resolve_overlaps(ctm_edits, segments):
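# Consecutive utterances can overlap in time; for each adjacent pair, cut the
# overlap at word midpoints and keep whichever side choose_best_ctm_lines
# scores higher.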
total_ctm_edits = []
assert (len(ctm_edits) > 0)
next_utt = ctm_edits[0][0][0]
for (utt_index, ctm_edits_for_cur_utt) in enumerate(ctm_edits):
if (utt_index == (len(ctm_edits) - 1)):
break
if (len(ctm_edits_for_cur_utt) == 0):
next_utt = ctm_edits[(utt_index + 1)][0][0]
continue
cur_utt = ctm_edits_for_cur_utt[0][0]
if (cur_utt != next_utt):
logger.error('Current utterance %s is not the same as the next utterance %s in previous iteration.\nCTM is not sorted by utterance-id?', cur_utt, next_utt)
raise ValueError
ctm_edits_for_next_utt = ctm_edits[(utt_index + 1)]
next_utt = ctm_edits_for_next_utt[0][0]
if (segments[next_utt][1] < segments[cur_utt][1]):
logger.error('Next utterance %s <= Current utterance %s. CTM edits is not sorted by utterance-id.', next_utt, cur_utt)
raise ValueError
try:
window_length = (segments[cur_utt][2] - segments[cur_utt][1])
try:
overlap = (segments[cur_utt][2] - segments[next_utt][1])
except KeyError:
logger.error('Could not find utterance %s in segments', next_utt)
raise
try:
cur_utt_end_index = next((i for (i, line) in enumerate(ctm_edits_for_cur_utt) if ((line[2] + (line[3] / 2.0)) > (window_length - overlap))))
except StopIteration:
cur_utt_end_index = len(ctm_edits_for_cur_utt)
cur_utt_end_lines = ctm_edits_for_cur_utt[cur_utt_end_index:]
try:
next_utt_start_index = next((i for (i, line) in enumerate(ctm_edits_for_next_utt) if ((line[2] + (line[3] / 2.0)) > overlap)))
except StopIteration:
next_utt_start_index = 0
next_utt_start_lines = ctm_edits_for_next_utt[:next_utt_start_index]
choose_index = choose_best_ctm_lines(cur_utt_end_lines, next_utt_start_lines, window_length, overlap)
if (choose_index == 1):
total_ctm_edits.extend(ctm_edits_for_cur_utt[:cur_utt_end_index])
else:
total_ctm_edits.extend(ctm_edits_for_cur_utt)
if ((choose_index == 0) and (next_utt_start_index > 0)):
ctm_edits[(utt_index + 1)] = ctm_edits_for_next_utt[next_utt_start_index:]
except:
logger.error('Could not resolve overlaps between CTM edits for %s and %s', cur_utt, next_utt)
logger.error('Current CTM:')
for line in ctm_edits_for_cur_utt:
logger.error(ctm_edit_line_to_string(line))
logger.error('Next CTM:')
for line in ctm_edits_for_next_utt:
logger.error(ctm_edit_line_to_string(line))
raise
total_ctm_edits.extend(ctm_edits[(- 1)])
return total_ctm_edits |
def test_tactile():
config = get_config()
if (not os.path.exists(config.SIMULATOR.SCENE)):
pytest.skip('Please download Habitat test data to data folder.')
config.defrost()
config.TASK.SENSORS = ['PROXIMITY_SENSOR']
config.freeze()
with habitat.Env(config=config, dataset=None) as env:
env.reset()
random.seed(1234)
for _ in range(20):
_random_episode(env, config)
env.reset()
for _ in range(10):
obs = env.step(action=MoveForwardAction.name)
proximity = obs['proximity']
assert (0.0 <= proximity)
assert (2.0 >= proximity) |
@pytest.mark.parametrize('seed', range(10))
@pytest.mark.parametrize('which', ['greedy', 'optimal'])
def test_basic_perverse(seed, which):
(inputs, output, shapes, size_dict) = ctg.utils.perverse_equation(10, seed=seed)
eq = ctg.utils.inputs_output_to_eq(inputs, output)
print(eq)
path = {'greedy': pb.optimize_greedy, 'optimal': pb.optimize_optimal}[which](inputs, output, size_dict)
tree = ctg.ContractionTree.from_path(inputs, output, size_dict, path=path)
arrays = [np.random.randn(*s) for s in shapes]
assert_allclose(tree.contract(arrays), np.einsum(eq, *arrays, optimize=True)) |
def get_config():
arg_seed = 1
parser = argparse.ArgumentParser()
parser.add_argument('--project-dir', type=str, default='output')
parser.add_argument('--dataset-dir', type=str, default='output')
parser.add_argument('--num-epochs', type=float, default=300)
parser.add_argument('--data-seed', type=int, default=1)
parser.add_argument('--train-seed', type=int, default=arg_seed)
parser.add_argument('--config-override', type=str, default='')
args = parser.parse_args()
with open('config.json', 'r') as read_file:
config = json.load(read_file)
args_dict = vars(args)
config.update(args_dict)
if (config['config_override'] == ''):
del config['config_override']
else:
print(config['config_override'])
config_override = json.loads(config['config_override'])
del config['config_override']
config.update(config_override)
return config |
class BatchNorm2d(_BatchNorm2d):
def forward(self, inputs):
if (not (has_parameters(self) or has_running_stats(self))):
return inputs
return super(BatchNorm2d, self).forward(inputs) |
def DPrint(name, var):
if (PRINT_VARS is False):
return var
return theano.printing.Print(name)(var) |
def FedAvg(w, dict_len):
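# Weighted federated averaging: w_avg[k] = sum_i(dict_len[i] * w[i][k]) / sum(dict_len),
# weighting each client's parameters by dict_len (presumably its local dataset size).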
w_avg = copy.deepcopy(w[0])
for k in w_avg.keys():
w_avg[k] = (w_avg[k] * dict_len[0])
for i in range(1, len(w)):
w_avg[k] += (w[i][k] * dict_len[i])
w_avg[k] = (w_avg[k] / sum(dict_len))
return w_avg |
def conv_init(m):
classname = m.__class__.__name__
if (classname.find('Conv') != (- 1)):
init.xavier_uniform_(m.weight, gain=1.414)
init.constant_(m.bias, 0)
elif (classname.find('BatchNorm') != (- 1)):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0) |
class DummyGenerator(Generator):
def __init__(self) -> None:
super().__init__(num_cities=5)
def __call__(self, key: chex.PRNGKey) -> State:
del key
coordinates = jnp.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.5, 0.5]], float)
position = jnp.array((- 1), jnp.int32)
visited_mask = jnp.array([False, False, False, False, False])
trajectory = jnp.array([(- 1), (- 1), (- 1), (- 1), (- 1)], jnp.int32)
num_visited = jnp.array(0, jnp.int32)
state = State(coordinates=coordinates, position=position, visited_mask=visited_mask, trajectory=trajectory, num_visited=num_visited, key=jax.random.PRNGKey(0))
return state |
def get_config(model: str, trust_remote_code: bool, revision: Optional[str]=None) -> PretrainedConfig:
if ('mistral' in model.lower()):
return MistralConfig.from_pretrained(model, revision=revision)
try:
config = AutoConfig.from_pretrained(model, trust_remote_code=trust_remote_code, revision=revision)
except ValueError as e:
if ((not trust_remote_code) and ('requires you to execute the configuration file' in str(e))):
err_msg = 'Failed to load the model config. If the model is a custom model not yet available in the HuggingFace transformers library, consider setting `trust_remote_code=True` in LLM or using the `--trust-remote-code` flag in the CLI.'
invalidInputError(False, err_msg)
else:
invalidInputError(False, str(e))
return config |
def encode_region(region):
if isinstance(region, Polygon):
return ','.join(['{},{}'.format(p.x, p.y) for p in region.points])
elif isinstance(region, Rectangle):
return '{},{},{},{}'.format(region.x, region.y, region.width, region.height)
else:
return '' |
def get_placeholder(name, dtype, shape):
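# Placeholders are cached by name; a cached one is reused only if it belongs to
# the current default graph and exactly matches the requested dtype and shape.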
if (name in _PLACEHOLDER_CACHE):
(out, dtype1, shape1) = _PLACEHOLDER_CACHE[name]
if (out.graph == tf.get_default_graph()):
assert ((dtype1 == dtype) and (shape1 == shape)), 'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out |
def _validate_data(data_json, n_class):
for split in data_json['splits']:
assert isinstance(split['float_feature_index'], int)
assert isinstance(split['border'], (int, float))
for value in data_json['leaf_values']:
assert isinstance(value, (int, float, list, tuple))
num_splits = len(data_json['splits'])
num_values = len(data_json['leaf_values'])
if (n_class > 2):
assert (num_values == ((2 ** num_splits) * n_class))
else:
assert (num_values == (2 ** num_splits)) |
class Histogram(np.ndarray):
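# Thin ndarray subclass: .view(cls) retags the array's buffer without copying.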
def __new__(cls, *args, **kwargs):
return np.asarray(*args, **kwargs).view(cls) |
def biop(opname, vtype):
g = GraphInterface()
vtype = (('t(' + vtype) + ')')
vp = g.add_vertex(vtype, is_input=True)
vp1 = g.add_vertex(vtype, is_output=True)
vp2 = g.add_vertex(vtype, is_output=True)
vpar = g.add_vertex((('t(' + opname) + ')'))
g.add_edge(vp, vpar)
g.add_edge(vpar, vp1)
g.add_edge(vpar, vp2)
return g |
@register_model
def tf_efficientnet_b8(pretrained=False, **kwargs):
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model |
def dump_yaml_and_check_difference(obj, filename, sort_keys=False):
str_dump = dump(obj, None, file_format='yaml', sort_keys=sort_keys)
if osp.isfile(filename):
file_exists = True
with open(filename, 'r', encoding='utf-8') as f:
str_orig = f.read()
else:
file_exists = False
str_orig = None
if (file_exists and (str_orig == str_dump)):
is_different = False
else:
is_different = True
with open(filename, 'w', encoding='utf-8') as f:
f.write(str_dump)
return is_different |
class VQVAEModel(nn.Module):
def __init__(self, model_opt, opt):
super(VQVAEModel, self).__init__()
num_hiddens = model_opt['num_hiddens']
num_residual_layers = model_opt['num_residual_layers']
num_residual_hiddens = model_opt['num_residual_hiddens']
suf_method = model_opt['suf_method']
ds_motion = model_opt['ds_motion']
ds_content = model_opt['ds_content']
num_frames = opt['dataset']['num_frames']
num_head = model_opt['num_head']
embedding_dim = model_opt['embedding_dim']
num_embeddings = model_opt['num_embeddings']
commitment_cost = model_opt['commitment_cost']
decay = model_opt['decay']
augcb = model_opt['if_augcb']
self._disc_start_step = opt['train']['disc_start_step']
self._discriminator = NLayerDiscriminator(**model_opt['disc_opt'])
self._discriminator_loss = hinge_d_loss
self.perceptual_factor = model_opt['lpips_factor']
self.perceptual_loss = None
self.generator_weight = model_opt['Gen_weight']
if ((model_opt['encoder_mo_type'] is None) or (model_opt['encoder_mo_type'] == 'default')):
print(f'Loading Motion Encoder: Encoder_Motion...')
self._encoder_mo = Encoder_Motion(ds_motion=ds_motion, num_hiddens=num_hiddens, num_residual_layers=num_residual_layers, num_residual_hiddens=num_residual_hiddens, n_head=num_head, d_model=num_hiddens, d_kv=64)
elif (model_opt['encoder_mo_type'].lower() == 'time-agnostic'):
print(f'Loading Motion Encoder: Encoder_Motion_TA...')
self._encoder_mo = Encoder_Motion_TA(ds_motion=ds_motion, num_hiddens=num_hiddens, num_residual_layers=num_residual_layers, num_residual_hiddens=num_residual_hiddens, n_head=num_head, d_model=num_hiddens, d_kv=64)
else:
raise ValueError(f"No implemention for encoder_mo_type: {model_opt['encoder_mo_type']}.")
if (model_opt.get('decoder_type', 'default') in ['default', 'decoder_woPA']):
self._decoder = Decoder(num_hiddens=num_hiddens, num_residual_layers=ds_content, num_residual_hiddens=num_residual_hiddens, ds_content=ds_content, ds_motion=ds_motion, ds_background=ds_content, ds_identity=ds_content)
else:
raise ValueError(f"No implemention for decoder_type: {model_opt.get('decoder_type', 'default')}.")
self._pre_vq_mo = nn.Conv2d(num_hiddens, embedding_dim, kernel_size=1, stride=1, padding=0)
self._suf_vq_mo = nn.ConvTranspose2d(embedding_dim, num_hiddens, kernel_size=1, stride=1, padding=0)
self._vq_ema = VectorQuantizerEMA(embedding_dim=embedding_dim, num_embeddings=num_embeddings, commitment_cost=commitment_cost, decay=decay, if_augcb=augcb)
self._data_variance = 0.0632704
def _decode(self, co_tokens, _, mo_tokens):
B = mo_tokens.shape[0]
vq_mo = self._vq_ema.quantize_code(mo_tokens)
quantize_mo = self._suf_vq_mo(vq_mo)
quantize_mo = rearrange(quantize_mo, '(b t) c h w -> b t c h w', b=B)
(x_rec, _, _) = self._decoder(quantize_mo)
return x_rec
def _generater(self, batch, is_training):
(x, xbg, xid, xmo) = batch
xco = x.clone()
(B, _, _, _, _) = xco.shape
feat_mo = self._encoder_mo(xmo)
feat_mo = rearrange(feat_mo, 'b t c h w -> (b t) c h w')
feat_mo = self._pre_vq_mo(feat_mo)
vq_output_mo = self._vq_ema(feat_mo, is_training=is_training)
quantize_mo = self._suf_vq_mo(vq_output_mo['quantize'])
quantize_mo = rearrange(quantize_mo, '(b t) c h w -> b t c h w', b=B)
(x_rec, _, _) = self._decoder(quantize_mo)
abs_loss = torch.abs((x_rec - x)).mean()
mse_loss = torch.tensor(0.0, dtype=torch.float32)
recon_loss = abs_loss
tx = rearrange(x, 'b t c h w -> (b t) c h w')
tx_rec = rearrange(x_rec, 'b t c h w -> (b t) c h w')
with torch.no_grad():
ssim_val = ssim(tx, tx_rec, data_range=1, size_average=True)
if (self.perceptual_loss is None):
self.perceptual_loss = lpips.LPIPS(net='vgg', pnet_tune=False).to(x.device)
p_loss = self.perceptual_loss(((tx * 2) - 1), ((tx_rec * 2) - 1)).mean()
nll_loss = (recon_loss + (p_loss * self.perceptual_factor))
loss = (nll_loss + vq_output_mo['loss'])
return {'loss': loss, 'nll_loss': nll_loss, 'x_rec': x_rec, 'quantize_bg': quantize_mo, 'quantize_id': quantize_mo, 'quantize_mo': quantize_mo, 'vq_output_bg': vq_output_mo, 'vq_output_id': vq_output_mo, 'vq_output_mo': vq_output_mo, 'record_logs': {'ssim_val': ssim_val, 'abs_loss': abs_loss, 'mse_loss': mse_loss, 'rec_loss': recon_loss, 'lpips_loss': p_loss, 'quant_loss_bg': vq_output_mo['loss'], 'quant_loss_mo': vq_output_mo['loss']}}
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer):
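# Adaptive generator/discriminator weight (cf. VQGAN's taming-transformers):
# the ratio of the gradient norms of the two losses at the decoder's last
# layer, clamped to [0.1, 1e4].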
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
d_weight = torch.clamp(d_weight, 0.1, 10000.0).detach()
return d_weight.detach()
def forward_step(self, inputs, is_training, optimizer_idx):
if (optimizer_idx <= 0):
record_logs = {}
logs = self._generater(inputs, is_training=is_training)
(loss, nll_loss, x_rec) = (logs['loss'], logs['nll_loss'], logs['x_rec'])
record_logs.update(logs['record_logs'])
if (optimizer_idx == 0):
logits_fake = self._discriminator(x_rec.contiguous())
gan_loss = (- torch.mean(logits_fake))
if (self.generator_weight is None):
generator_weight = self.calculate_adaptive_weight(nll_loss, gan_loss, last_layer=self._decoder._last_layer.weight)
else:
generator_weight = self.generator_weight
record_logs.update({'G0_generator_loss': gan_loss, 'G0_0_generator_weight': generator_weight})
elif (optimizer_idx == (- 1)):
gan_loss = torch.tensor(0.0)
generator_weight = torch.tensor(0.0)
else:
raise ValueError(f'No implementation for optimizer_idx: {optimizer_idx}.')
ret_loss = (loss + (generator_weight * gan_loss))
return {'loss': ret_loss, 'x_rec': x_rec, 'quantize_bg': logs['quantize_bg'], 'quantize_id': logs['quantize_bg'], 'quantize_mo': logs['quantize_mo'], 'ssim_metric': logs['record_logs']['ssim_val'], 'rec_loss': logs['record_logs']['rec_loss'], 'lpips_loss': logs['record_logs']['lpips_loss'], 'record_logs': record_logs}
elif (optimizer_idx == 1):
with torch.no_grad():
output = self._generater(inputs, is_training=False)
(x, _, _, _) = inputs
x_rec = output['x_rec']
logits_real = self._discriminator(x.contiguous())
logits_fake = self._discriminator(x_rec.contiguous())
gan_loss = self._discriminator_loss(logits_real, logits_fake)
return {'loss': gan_loss, 'record_logs': {'G1_discriminator_loss': gan_loss.clone().detach().mean(), 'G1_0_logits_real': logits_real.detach().mean(), 'G1_1_logits_fake': logits_fake.detach().mean()}}
else:
raise ValueError(f'No implementation for optimizer_idx: {optimizer_idx}.')
def forward(self, inputs, is_training, optimizer=None, iteration=None, wandb_open=False, writer=None):
logs = {}
if (iteration is None):
assert (is_training is False)
optimizer_idx = (- 1)
elif (iteration < self._disc_start_step):
optimizer_idx = (- 1)
else:
optimizer_idx = (iteration % 2)
output = self.forward_step(inputs, is_training, optimizer_idx=optimizer_idx)
if (((dist.is_initialized() is False) or (dist.get_rank() == 0)) and (is_training and ((iteration % 9) == 0))):
logs.update({'learning_rate': optimizer.state_dict()['param_groups'][0]['lr']})
logs.update(output['record_logs'])
if ((is_training == True) and (wandb_open == True)):
wandb.log(logs, step=iteration)
if ((is_training == True) and (writer is not None)):
if (optimizer_idx <= 0):
writer.add_scalar('train/rec_loss: ', logs['rec_loss'], iteration)
writer.add_scalar('train/quant_loss_bg_loss: ', logs['quant_loss_bg'], iteration)
writer.add_scalar('train/quant_loss_mo_loss: ', logs['quant_loss_mo'], iteration)
writer.add_scalar('train/learning_rate: ', logs['learning_rate'], iteration)
if ('G0_generator_loss' in logs.keys()):
writer.add_scalar('train/G0_generator_loss: ', logs['G0_generator_loss'], iteration)
writer.add_scalar('train/G0_0_generator_weight: ', logs['G0_0_generator_weight'], iteration)
else:
writer.add_scalar('train/G1_discriminator_loss: ', logs['G1_discriminator_loss'], iteration)
output['optimizer_idx'] = optimizer_idx
return output |
class quantize_attn_pos_model2(base):
@auto_init_pytorch
def __init__(self, vocab_size, embed_dim, embed_init, max_nsent, max_npara, experiment, *args, **kwargs):
super(quantize_attn_pos_model2, self).__init__(vocab_size, embed_dim, embed_init, experiment)
if (self.expe.config.encoder_type.lower() in ['lstm', 'gru', 'gru_attn']):
ensize = (2 * self.expe.config.ensize)
else:
ensize = embed_dim
self.sent_id_pred = model_utils.get_mlp(input_size=ensize, hidden_size=self.expe.config.mhsize, output_size=ensize, n_layer=self.expe.config.mlplayer, dropout=self.expe.config.dp)
self.para_id_pred = model_utils.get_mlp(input_size=ensize, hidden_size=self.expe.config.mhsize, output_size=ensize, n_layer=self.expe.config.mlplayer, dropout=self.expe.config.dp)
self.sent_embed = nn.Embedding(self.expe.config.nb, ensize)
self.para_embed = nn.Embedding(self.expe.config.nb, ensize)
self.bins = (np.arange(self.expe.config.nb) / self.expe.config.nb)
def quantize_pos(self, ids, ids_mask):
quant_ids = (np.digitize((ids / ids_mask.sum(1)), self.bins) - 1)
return quant_ids
def attn(self, inp, mask, vecs, embed):
weight = torch.matmul(vecs, embed.weight.t()).mean((- 1))
weight.data.masked_fill_((1 - mask).data.byte(), (- float('inf')))
return (vecs * F.softmax(weight, 1).unsqueeze((- 1))).sum(1)
def forward(self, sent, mask, tgt, tgt_mask, tgt2, tgt_mask2, doc_id, para_id, pmask, sent_id, smask, *args):
self.train()
(sent, mask, tgt, tgt_mask, tgt2, tgt_mask2, para_id, sent_id) = self.to_vars(sent, mask, tgt, tgt_mask, tgt2, tgt_mask2, self.quantize_pos(para_id, pmask), self.quantize_pos(sent_id, smask))
(bs, sl) = sent.size()
(sent_vec, all_vecs) = self.encode.get_vecs(sent, mask)
logloss2 = self.next_decode(sent_vec, tgt2, tgt_mask2)
if self.expe.config.uni_pred:
logloss1 = torch.zeros_like(logloss2)
else:
logloss1 = self.prev_decode(sent_vec, tgt, tgt_mask)
logloss = (logloss1 + logloss2)
if self.expe.config.sratio:
sent_id_vecs = self.attn(sent, mask, all_vecs, self.sent_embed)
sent_id_vecs = self.sent_id_pred(sent_id_vecs)
sent_id_logit = torch.matmul(sent_id_vecs, self.sent_embed.weight.t())
sent_id_loss = F.cross_entropy(sent_id_logit, sent_id.long())
else:
sent_id_loss = torch.zeros_like(logloss)
if self.expe.config.pratio:
para_id_vecs = self.attn(sent, mask, all_vecs, self.para_embed)
para_id_vecs = self.para_id_pred(para_id_vecs)
para_id_logit = torch.matmul(para_id_vecs, self.para_embed.weight.t())
para_id_loss = F.cross_entropy(para_id_logit, para_id.long())
else:
para_id_loss = torch.zeros_like(logloss)
loss = (((self.expe.config.lratio * logloss) + (self.expe.config.sratio * sent_id_loss)) + (self.expe.config.pratio * para_id_loss))
return (loss, logloss1, logloss2, sent_id_loss, para_id_loss)
def score_sts(self, sent1, mask1, sent2, mask2):
self.eval()
(sent1, mask1, sent2, mask2) = self.to_vars(sent1, mask1, sent2, mask2)
sent1_vec = self.encode(sent1, mask1)
sent2_vec = self.encode(sent2, mask2)
sent_cos_pos = F.cosine_similarity(sent1_vec, sent2_vec)
return sent_cos_pos.data.cpu().numpy() |
def filter_keys(key_set):
def _f(dictionary):
return {k: v for (k, v) in dictionary.items() if (k in key_set)}
return _f |
def popen(cmd, mode='rb'):
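# Run a shell command and return a file-like handle on its stdin or stdout;
# a background thread waits on the child and raises SubprocessFailed if it
# exits with a positive return code.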
if (not isinstance(cmd, str)):
raise TypeError(('invalid cmd type (%s, expected string)' % type(cmd)))
import subprocess, io, threading
def cleanup(proc, cmd):
ret = proc.wait()
if (ret > 0):
raise SubprocessFailed(('cmd %s returned %d !' % (cmd, ret)))
return
if (mode == 'r'):
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
threading.Thread(target=cleanup, args=(proc, cmd)).start()
return io.TextIOWrapper(proc.stdout)
elif (mode == 'w'):
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
threading.Thread(target=cleanup, args=(proc, cmd)).start()
return io.TextIOWrapper(proc.stdin)
elif (mode == 'rb'):
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
threading.Thread(target=cleanup, args=(proc, cmd)).start()
return proc.stdout
elif (mode == 'wb'):
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
threading.Thread(target=cleanup, args=(proc, cmd)).start()
return proc.stdin
else:
raise ValueError(('invalid mode %s' % mode)) |
class ResNet(nn.Module):
def __init__(self, block, layers, zero_init_residual=False, groups=1, widen=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, normalize=False, output_dim=0, hidden_mlp=0, nmb_prototypes=0, eval_mode=False):
super(ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.eval_mode = eval_mode
self.padding = nn.ConstantPad2d(1, 0.0)
self.inplanes = (width_per_group * widen)
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
num_out_filters = (width_per_group * widen)
self.conv1 = nn.Conv2d(3, num_out_filters, kernel_size=7, stride=2, padding=2, bias=False)
self.bn1 = norm_layer(num_out_filters)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, num_out_filters, layers[0])
num_out_filters *= 2
self.layer2 = self._make_layer(block, num_out_filters, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
num_out_filters *= 2
self.layer3 = self._make_layer(block, num_out_filters, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
num_out_filters *= 2
self.layer4 = self._make_layer(block, num_out_filters, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.l2norm = normalize
if (output_dim == 0):
self.projection_head = None
elif (hidden_mlp == 0):
self.projection_head = nn.Linear((num_out_filters * block.expansion), output_dim)
else:
self.projection_head = nn.Sequential(nn.Linear((num_out_filters * block.expansion), hidden_mlp), nn.BatchNorm1d(hidden_mlp), nn.ReLU(inplace=True), nn.Linear(hidden_mlp, output_dim))
self.prototypes = None
if isinstance(nmb_prototypes, list):
self.prototypes = MultiPrototypes(output_dim, nmb_prototypes)
elif (nmb_prototypes > 0):
self.prototypes = nn.Linear(output_dim, nmb_prototypes, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward_backbone(self, x):
x = self.padding(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.eval_mode:
return x
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
def forward_head(self, x):
if (self.projection_head is not None):
x = self.projection_head(x)
if self.l2norm:
x = nn.functional.normalize(x, dim=1, p=2)
if (self.prototypes is not None):
return self.prototypes(x)
return x
def forward(self, inputs):
if (not isinstance(inputs, list)):
inputs = [inputs]
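# Multi-crop forward: group consecutive crops that share a spatial resolution
# so each resolution passes through the backbone in one batched call.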
idx_crops = torch.cumsum(torch.unique_consecutive(torch.tensor([inp.shape[(- 1)] for inp in inputs]), return_counts=True)[1], 0)
start_idx = 0
for end_idx in idx_crops:
_out = self.forward_backbone(torch.cat(inputs[start_idx:end_idx]).cuda(non_blocking=True))
if (start_idx == 0):
output = _out
else:
output = torch.cat((output, _out))
start_idx = end_idx
return self.forward_head(output) |
def default_collate(batch):
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if (torch.utils.data.get_worker_info() is not None):
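# Inside a DataLoader worker, stack directly into a shared-memory tensor to
# avoid an extra copy when the batch is handed to the main process.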
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
if ((elem_type.__name__ == 'ndarray') or (elem_type.__name__ == 'memmap')):
if (np_str_obj_array_pattern.search(elem.dtype.str) is not None):
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return default_collate([torch.as_tensor(b) for b in batch])
elif (elem.shape == ()):
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in elem}
elif (isinstance(elem, tuple) and hasattr(elem, '_fields')):
return elem_type(*(default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
it = iter(batch)
elem_size = len(next(it))
if (not all(((len(elem) == elem_size) for elem in it))):
raise RuntimeError('each element in list of batch should be of equal size')
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError(default_collate_err_msg_format.format(elem_type)) |