code stringlengths 101 5.91M |
|---|
def resnet101s16(pretrained=False, finetune_layers=(), s16_feats=('layer4',), s8_feats=('layer2',), s4_feats=('layer1',), **kwargs):
    """Build a stride-16 ResNet-101 backbone (Bottleneck blocks, [3, 4, 23, 3]).

    Args:
        pretrained: when True, load ImageNet weights for resnet101 from the
            model zoo into the constructed network.
        finetune_layers: layer names to leave trainable (forwarded to ResNetS16).
        s16_feats, s8_feats, s4_feats: layer names whose outputs are exposed at
            stride 16 / 8 / 4 respectively.
        **kwargs: extra arguments forwarded to ResNetS16.

    Returns:
        The constructed ResNetS16 model.
    """
    net = ResNetS16(finetune_layers, s16_feats, s8_feats, s4_feats, Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return net
    weights = model_zoo.load_url(model_urls['resnet101'], model_dir=config['nn_weights_path'])
    net.load_state_dict(weights)
    return net
class CustomDatasetDataLoader():
    """Wraps a torch DataLoader around a dataset selected by name via the options."""

    def __init__(self, opt):
        """Instantiate the dataset named by ``opt.dataset_mode`` and a DataLoader over it."""
        self.opt = opt
        dataset_class = find_dataset_using_name(opt.dataset_mode)
        self.dataset = dataset_class(opt)
        print('dataset [%s] was created' % type(self.dataset).__name__)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.num_threads))

    def load_data(self):
        """Return self so callers can iterate this wrapper directly."""
        return self

    def __len__(self):
        """Number of examples, capped at ``opt.max_dataset_size``."""
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        """Yield batches, stopping once ``max_dataset_size`` examples were served."""
        for batch_index, batch in enumerate(self.dataloader):
            if batch_index * self.opt.batch_size >= self.opt.max_dataset_size:
                return
            yield batch
def score_labels_majority_vote(instances, gold_label_key='tags', treat_tie_as='O', span_level=True):
    """Score majority-vote labels against gold labels over a set of instances.

    Args:
        instances: iterable of annotation dicts; each must contain the gold
            labels under ``gold_label_key``.
        gold_label_key: dict key holding the gold label sequence.
        treat_tie_as: label assigned when the vote is tied.
        span_level: score whole spans when True, individual tokens otherwise.

    Returns:
        A single-row pandas DataFrame with TP/FP/FN counts and P/R/F1.
    """
    scorer = _score_sequence_span_level if span_level else _score_sequence_token_level
    tp = fp = fn = 0
    for instance in instances:
        majority = _get_label_majority_vote(instance, treat_tie_as)
        counts = scorer(majority, instance[gold_label_key])
        tp += counts[0]
        fp += counts[1]
        fn += counts[2]
    p, r, f1 = _get_p_r_f1(tp, fp, fn)
    row_name = 'Majority Vote' if span_level else 'Majority Vote (Token Level)'
    frame = pd.DataFrame.from_records(
        [[tp, fp, fn, p, r, f1]],
        columns=['TP', 'FP', 'FN', 'P', 'R', 'F1'],
        index=[row_name])
    return frame.sort_index()
class InitialBlock(nn.Module):
    """ENet-style initial block: a strided conv branch concatenated with a
    max-pool branch, followed by batch norm and an activation.

    The conv branch produces ``out_channels - 3`` channels and the pooling
    branch contributes the remaining channels (assumes a 3-channel input so
    the concatenation matches ``out_channels`` — confirm with callers).
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=0, bias=False, relu=True):
        super().__init__()
        # Both branches downsample by 2 with the same kernel/padding so their
        # spatial sizes line up for concatenation.
        self.main_branch = nn.Conv2d(in_channels, out_channels - 3, kernel_size=kernel_size, stride=2, padding=padding, bias=bias)
        self.ext_branch = nn.MaxPool2d(kernel_size, stride=2, padding=padding)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.out_prelu = nn.ReLU() if relu else nn.PReLU()

    def forward(self, x):
        """Run both branches, concatenate on the channel axis, normalize, activate."""
        merged = torch.cat((self.main_branch(x), self.ext_branch(x)), 1)
        return self.out_prelu(self.batch_norm(merged))
class UNet3DConditionModel(ModelMixin, ConfigMixin):
    """Conditional 3D UNet built from inflated-conv down/mid/up blocks.

    Consumes a noisy sample, a timestep, and encoder hidden states (cross
    attention conditioning) and returns the predicted sample. Structure and
    configuration mirror diffusers' 2D conditional UNet, with 3D block types.
    """
    # Enables gradient checkpointing toggling via _set_gradient_checkpointing.
    _supports_gradient_checkpointing = True
    # NOTE(review): bare `_to_config` looks like the residue of a stripped
    # decorator (likely `@register_to_config` on __init__); as written this
    # raises NameError when the class body executes — confirm against the
    # original source.
    _to_config
    def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, center_input_sample: bool=False, flip_sin_to_cos: bool=True, freq_shift: int=0, down_block_types: Tuple[str]=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), mid_block_type: str='UNetMidBlock3DCrossAttn', up_block_types: Tuple[str]=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), only_cross_attention: Union[(bool, Tuple[bool])]=False, block_out_channels: Tuple[int]=(320, 640, 1280, 1280), layers_per_block: int=2, downsample_padding: int=1, mid_block_scale_factor: float=1, act_fn: str='silu', norm_num_groups: int=32, norm_eps: float=1e-05, cross_attention_dim: int=1280, attention_head_dim: Union[(int, Tuple[int])]=8, dual_cross_attention: bool=False, use_linear_projection: bool=False, class_embed_type: Optional[str]=None, num_class_embeds: Optional[int]=None, upcast_attention: bool=False, resnet_time_scale_shift: str='default'):
        """Assemble input conv, time/class embeddings, down/mid/up blocks and output head."""
        super().__init__()
        self.sample_size = sample_size
        # Time embedding is 4x the first block width, per the usual UNet recipe.
        time_embed_dim = (block_out_channels[0] * 4)
        self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
        # Sinusoidal timestep features projected into the embedding space.
        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]
        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        # Optional class conditioning: lookup table, timestep-style embedding,
        # identity pass-through, or disabled.
        if ((class_embed_type is None) and (num_class_embeds is not None)):
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif (class_embed_type == 'timestep'):
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif (class_embed_type == 'identity'):
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        # Broadcast scalar settings to one value per down block.
        if isinstance(only_cross_attention, bool):
            only_cross_attention = ([only_cross_attention] * len(down_block_types))
        if isinstance(attention_head_dim, int):
            attention_head_dim = ((attention_head_dim,) * len(down_block_types))
        # --- encoder (down) path ---
        output_channel = block_out_channels[0]
        for (i, down_block_type) in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            # The deepest block does not downsample further.
            is_final_block = (i == (len(block_out_channels) - 1))
            down_block = get_down_block(down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=(not is_final_block), resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attention_head_dim[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift)
            self.down_blocks.append(down_block)
        # --- bottleneck ---
        if (mid_block_type == 'UNetMidBlock3DCrossAttn'):
            self.mid_block = UNetMidBlock3DCrossAttn(in_channels=block_out_channels[(- 1)], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attention_head_dim[(- 1)], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention)
        else:
            raise ValueError(f'unknown mid_block_type : {mid_block_type}')
        # --- decoder (up) path: mirrors the encoder with reversed widths ---
        self.num_upsamplers = 0
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for (i, up_block_type) in enumerate(up_block_types):
            is_final_block = (i == (len(block_out_channels) - 1))
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min((i + 1), (len(block_out_channels) - 1))]
            if (not is_final_block):
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False
            up_block = get_up_block(up_block_type, num_layers=(layers_per_block + 1), in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=reversed_attention_head_dim[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # --- output head ---
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
    def set_attention_slice(self, slice_size):
        """Configure attention slicing on every sliceable attention module.

        Accepts 'auto' (half of each head dim), 'max' (slice size 1), an int
        applied to every layer, or a list with one entry per sliceable layer.
        Raises ValueError when the list length or a size is out of range.
        """
        sliceable_head_dims = []
        def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
            # Any module exposing set_attention_slice is considered sliceable.
            if hasattr(module, 'set_attention_slice'):
                sliceable_head_dims.append(module.sliceable_head_dim)
            for child in module.children():
                fn_recursive_retrieve_slicable_dims(child)
        for module in self.children():
            fn_recursive_retrieve_slicable_dims(module)
        num_slicable_layers = len(sliceable_head_dims)
        if (slice_size == 'auto'):
            slice_size = [(dim // 2) for dim in sliceable_head_dims]
        elif (slice_size == 'max'):
            slice_size = (num_slicable_layers * [1])
        slice_size = ((num_slicable_layers * [slice_size]) if (not isinstance(slice_size, list)) else slice_size)
        if (len(slice_size) != len(sliceable_head_dims)):
            raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.')
        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if ((size is not None) and (size > dim)):
                raise ValueError(f'size {size} has to be smaller or equal to {dim}.')
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            # pop() consumes from the end, hence the reversed list below so
            # sizes are applied in the same order the dims were collected.
            if hasattr(module, 'set_attention_slice'):
                module.set_attention_slice(slice_size.pop())
            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)
        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)
    def _set_gradient_checkpointing(self, module, value=False):
        """Toggle gradient checkpointing on blocks that support it."""
        if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
            module.gradient_checkpointing = value
    def forward(self, sample: torch.FloatTensor, timestep: Union[(torch.Tensor, float, int)], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, return_dict: bool=True, cross_attention_kwargs=None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]]=None, mid_block_additional_residual: Optional[torch.Tensor]=None, inter_frame=False) -> Union[(UNet3DConditionOutput, Tuple)]:
        """Denoise `sample` at `timestep` conditioned on `encoder_hidden_states`.

        Returns UNet3DConditionOutput(sample=...) or, when return_dict is
        False, a 1-tuple (sample,). The additional-residual arguments allow
        ControlNet-style injection into the skip connections and mid block.
        """
        # The deepest feature map must divide evenly by 2**num_upsamplers;
        # otherwise upsample sizes are forwarded explicitly below.
        default_overall_up_factor = (2 ** self.num_upsamplers)
        forward_upsample_size = False
        upsample_size = None
        if any((((s % default_overall_up_factor) != 0) for s in sample.shape[(- 2):])):
            logger.info('Forward upsample size to force interpolation output size.')
            forward_upsample_size = True
        # Convert a {0,1} mask into additive attention bias (large negative
        # where masked out).
        if (attention_mask is not None):
            attention_mask = ((1 - attention_mask.to(sample.dtype)) * (- 10000.0))
            attention_mask = attention_mask.unsqueeze(1)
        if self.config.center_input_sample:
            sample = ((2 * sample) - 1.0)
        # Normalize the timestep to a 1-D tensor with one entry per batch item.
        timesteps = timestep
        if (not torch.is_tensor(timesteps)):
            # MPS does not support float64/int64 here.
            is_mps = (sample.device.type == 'mps')
            if isinstance(timestep, float):
                dtype = (torch.float32 if is_mps else torch.float64)
            else:
                dtype = (torch.int32 if is_mps else torch.int64)
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif (len(timesteps.shape) == 0):
            timesteps = timesteps[None].to(sample.device)
        timesteps = timesteps.expand(sample.shape[0])
        t_emb = self.time_proj(timesteps)
        # Cast to model dtype (time_proj may emit float32 under fp16 training).
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)
        if (self.class_embedding is not None):
            if (class_labels is None):
                raise ValueError('class_labels should be provided when num_class_embeds > 0')
            if (self.config.class_embed_type == 'timestep'):
                class_labels = self.time_proj(class_labels)
            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = (emb + class_emb)
        # --- encoder ---
        sample = self.conv_in(sample)
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if (hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention):
                (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, inter_frame=inter_frame)
            else:
                (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb)
            down_block_res_samples += res_samples
        # Optional ControlNet-style residual injection into the skips.
        if (down_block_additional_residuals is not None):
            new_down_block_res_samples = ()
            for (down_block_res_sample, down_block_additional_residual) in zip(down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # --- bottleneck ---
        sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, inter_frame=inter_frame)
        if (mid_block_additional_residual is not None):
            sample += mid_block_additional_residual
        # --- decoder: consume skip connections from the end of the tuple ---
        for (i, upsample_block) in enumerate(self.up_blocks):
            is_final_block = (i == (len(self.up_blocks) - 1))
            res_samples = down_block_res_samples[(- len(upsample_block.resnets)):]
            down_block_res_samples = down_block_res_samples[:(- len(upsample_block.resnets))]
            if ((not is_final_block) and forward_upsample_size):
                upsample_size = down_block_res_samples[(- 1)].shape[2:]
            if (hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention):
                sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, upsample_size=upsample_size, attention_mask=attention_mask, inter_frame=inter_frame)
            else:
                sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size)
        # --- output head ---
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        if (not return_dict):
            return (sample,)
        return UNet3DConditionOutput(sample=sample)
    # NOTE(review): takes `cls` but carries no @classmethod decorator in this
    # view — likely stripped by extraction; confirm against the original source.
    def from_pretrained_2d(cls, pretrained_model_path, subfolder=None):
        """Build a 3D UNet from a pretrained 2D UNet directory.

        Rewrites the saved config to use the 3D block types, then loads the 2D
        weights with strict=False so 3D-only parameters stay initialized.
        """
        if (subfolder is not None):
            pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
        config_file = os.path.join(pretrained_model_path, 'config.json')
        if (not os.path.isfile(config_file)):
            raise RuntimeError(f'{config_file} does not exist')
        with open(config_file, 'r') as f:
            config = json.load(f)
        config['_class_name'] = cls.__name__
        # Swap the 2D block identifiers for their 3D counterparts.
        config['down_block_types'] = ['CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D']
        config['up_block_types'] = ['UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D']
        from diffusers.utils import WEIGHTS_NAME
        model = cls.from_config(config)
        model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
        if (not os.path.isfile(model_file)):
            raise RuntimeError(f'{model_file} does not exist')
        state_dict = torch.load(model_file, map_location='cpu')
        # strict=False: 2D checkpoints lack the temporal/3D parameters.
        model.load_state_dict(state_dict, strict=False)
        return model
class YoloTrain(object):
    """Two-stage YOLOv3 trainer (TensorFlow 1.x graph mode).

    Stage 1 trains only the three detection heads with the backbone frozen;
    stage 2 fine-tunes all variables. Builds the full graph (inputs, loss,
    cosine-annealed learning rate with warmup, EMA weight decay, summaries)
    in __init__ and runs it in train().
    """
    def __init__(self):
        # --- hyperparameters from the global config ---
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        # NOTE(review): 'FISRT' is misspelled — presumably it matches the same
        # misspelled key in the config module; confirm before renaming.
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = './data/log/train'
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # --- graph inputs (shapes left dynamic; fed per batch) ---
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')
        # --- model and composite loss (GIoU + confidence + class prob) ---
        with tf.name_scope('define_loss'):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            (self.giou_loss, self.conf_loss, self.prob_loss) = self.model.compute_loss(self.label_sbbox, self.label_mbbox, self.label_lbbox, self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = ((self.giou_loss + self.conf_loss) + self.prob_loss)
        # --- linear warmup then cosine decay from init LR down to end LR ---
        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant((self.warmup_periods * self.steps_per_period), dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant(((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period), dtype=tf.float64, name='train_steps')
            self.learn_rate = tf.cond(pred=(self.global_step < warmup_steps), true_fn=(lambda : ((self.global_step / warmup_steps) * self.learn_rate_init)), false_fn=(lambda : (self.learn_rate_end + ((0.5 * (self.learn_rate_init - self.learn_rate_end)) * (1 + tf.cos((((self.global_step - warmup_steps) / (train_steps - warmup_steps)) * np.pi)))))))
            global_step_update = tf.assign_add(self.global_step, 1.0)
        # Exponential moving average over all trainable variables.
        with tf.name_scope('define_weight_decay'):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())
        # Stage 1: optimize only the three detection-head variable scopes.
        with tf.name_scope('define_first_stage_train'):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if (var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']):
                    self.first_stage_trainable_var_list.append(var)
            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=self.first_stage_trainable_var_list)
            # Chain: batch-norm updates -> optimizer step + step counter -> EMA.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()
        # Stage 2: optimize every trainable variable, same dependency chain.
        with tf.name_scope('define_second_stage_train'):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=second_stage_trainable_var_list)
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()
        with tf.name_scope('loader_and_saver'):
            # Loader restores only the network variables that existed when the
            # model was built; saver checkpoints everything.
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)
        with tf.name_scope('summary'):
            tf.summary.scalar('learn_rate', self.learn_rate)
            tf.summary.scalar('giou_loss', self.giou_loss)
            tf.summary.scalar('conf_loss', self.conf_loss)
            tf.summary.scalar('prob_loss', self.prob_loss)
            tf.summary.scalar('total_loss', self.loss)
            logdir = './data/log/'
            # Wipe any previous run's logs before writing new summaries.
            if os.path.exists(logdir):
                shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
    def train(self):
        """Run both training stages, evaluating and checkpointing each epoch."""
        self.sess.run(tf.global_variables_initializer())
        try:
            print(('=> Restoring weights from: %s ... ' % self.initial_weight))
            self.loader.restore(self.sess, self.initial_weight)
        # NOTE(review): bare except deliberately treats any restore failure as
        # "train from scratch"; it also skips stage 1 since there is no
        # pretrained backbone to freeze.
        except:
            print(('=> %s does not exist !!!' % self.initial_weight))
            print('=> Now it starts to train YOLOV3 from scratch ...')
            self.first_stage_epochs = 0
        for epoch in range(1, ((1 + self.first_stage_epochs) + self.second_stage_epochs)):
            # Pick the stage-appropriate train op for this epoch.
            if (epoch <= self.first_stage_epochs):
                train_op = self.train_op_with_frozen_variables
            else:
                train_op = self.train_op_with_all_variables
            pbar = tqdm(self.trainset)
            (train_epoch_loss, test_epoch_loss) = ([], [])
            for train_data in pbar:
                (_, summary, train_step_loss, global_step_val) = self.sess.run([train_op, self.write_op, self.loss, self.global_step], feed_dict={self.input_data: train_data[0], self.label_sbbox: train_data[1], self.label_mbbox: train_data[2], self.label_lbbox: train_data[3], self.true_sbboxes: train_data[4], self.true_mbboxes: train_data[5], self.true_lbboxes: train_data[6], self.trainable: True})
                train_epoch_loss.append(train_step_loss)
                self.summary_writer.add_summary(summary, global_step_val)
                pbar.set_description(('train loss: %.2f' % train_step_loss))
            # Evaluate on the test set with trainable=False (inference mode).
            for test_data in self.testset:
                test_step_loss = self.sess.run(self.loss, feed_dict={self.input_data: test_data[0], self.label_sbbox: test_data[1], self.label_mbbox: test_data[2], self.label_lbbox: test_data[3], self.true_sbboxes: test_data[4], self.true_mbboxes: test_data[5], self.true_lbboxes: test_data[6], self.trainable: False})
                test_epoch_loss.append(test_step_loss)
            (train_epoch_loss, test_epoch_loss) = (np.mean(train_epoch_loss), np.mean(test_epoch_loss))
            # Checkpoint name embeds the epoch's test loss for easy comparison.
            ckpt_file = ('./checkpoint/yolov3_test_loss=%.4f.ckpt' % test_epoch_loss)
            log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print(('=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ...' % (epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file)))
            self.saver.save(self.sess, ckpt_file, global_step=epoch)
# NOTE(review): the original carried a bare `(version='2.0')` expression here —
# almost certainly the residue of a stripped `@deprecated(version='2.0')` class
# decorator; omitted because `deprecated` is not in scope in this view.
class Pruning(Component):
    """Pruning orchestrator: builds pruners from a config, wires lifecycle
    hooks into the training loop, and runs train/evaluate around pruning.

    The original source had its property decorators mangled (bare
    `_func.setter` / `_distributed.setter` expressions, which raise NameError
    when the class body executes); they are restored below as `@property`
    getter/setter pairs, which the rest of the class relies on (e.g.
    `self.train_distributed` reads in `pre_process`).
    """

    def __init__(self, conf_fname_or_obj=None):
        """Build pruner configs from a Config object and register hooks.

        Raises NotImplementedError for any other config type.
        """
        super(Pruning, self).__init__()
        if isinstance(conf_fname_or_obj, Config):
            self.cfg = PruningConf()
            self.cfg.map_pyconfig_to_cfg(conf_fname_or_obj)
            self.cfg = self.cfg.usr_cfg
            self.conf = conf_fname_or_obj.pruning
        else:
            raise NotImplementedError('Only WeightPruningConfig config is supported currently.')
        self.pruners_info = process_config(self.conf)
        self.callbacks = dict(tf_pruning=TfPruningCallback)
        self.pruners = []
        self.generate_hooks()

    def update_config(self, *args, **kwargs):
        """Override matching keys in every pruner-info entry, then revalidate."""
        for item in self.pruners_info:
            for key in kwargs:
                if (key in item.keys()):
                    item[key] = kwargs[key]
            update_params(item)
            check_config(item)

    def get_sparsity_ratio(self):
        """Compute element-wise and block-wise sparsity statistics.

        Returns:
            (elementwise_over_matmul_gemm_conv, elementwise_over_all,
             blockwise_over_matmul_gemm_conv) — ratios in [0, 1]; zero when
            the corresponding denominator is zero.
        """
        pattern_sparsity_cnt = 0
        element_sparsity_cnt = 0
        for pruner in self.pruners:
            modules = pruner.modules
            sparsity_ratio = pruner.pattern.get_sparsity_ratio(pruner.masks)
            cnt = 0
            for key in modules.keys():
                cnt += modules[key].weight.numel()
            pattern_sparsity_cnt += int((cnt * sparsity_ratio))
            for key in pruner.masks.keys():
                element_sparsity_cnt += torch.sum((pruner.masks[key] == 0)).data.item()
        # Denominators: weights of Linear/Conv*d layers, and all parameters.
        linear_conv_cnt = 0
        param_cnt = 0
        for (name, module) in self._model.model.named_modules():
            if ((type(module).__name__ in ['Linear']) or (re.search('Conv.d', type(module).__name__) is not None)):
                linear_conv_cnt += module.weight.numel()
        for (n, param) in self._model.model.named_parameters():
            param_cnt += param.numel()
        blockwise_over_matmul_gemm_conv = ((float(pattern_sparsity_cnt) / linear_conv_cnt) if (linear_conv_cnt != 0) else 0)
        elementwise_over_matmul_gemm_conv = ((float(element_sparsity_cnt) / linear_conv_cnt) if (linear_conv_cnt != 0) else 0)
        elementwise_over_all = ((float(element_sparsity_cnt) / param_cnt) if (param_cnt != 0) else 0)
        logger.info(f'elementwise_over_matmul_gemm_conv:{elementwise_over_matmul_gemm_conv}, elementwise_over_all:{elementwise_over_all},blockwise_over_matmul_gemm_conv:{blockwise_over_matmul_gemm_conv}')
        return (elementwise_over_matmul_gemm_conv, elementwise_over_all, blockwise_over_matmul_gemm_conv)

    # --- training-loop lifecycle hooks, fanned out to every pruner ---

    def _on_train_begin(self, dataloader=None):
        """Instantiate the pruners lazily at the start of training."""
        self._generate_pruners()

    def _on_epoch_begin(self, epoch):
        for pruner in self.pruners:
            pruner.on_epoch_begin(epoch)

    def _on_step_begin(self, batch_id):
        res = []
        for pruner in self.pruners:
            res.append(pruner.on_step_begin(batch_id))
        return res

    def _on_before_optimizer_step(self):
        for pruner in self.pruners:
            pruner.on_before_optimizer_step()

    def _on_after_optimizer_step(self):
        for pruner in self.pruners:
            pruner.on_after_optimizer_step()

    def _on_step_end(self):
        res = []
        for pruner in self.pruners:
            res.append(pruner.on_step_end())
        return res

    def _on_epoch_end(self):
        res = []
        for pruner in self.pruners:
            res.append(pruner.on_epoch_end())
        return res

    def _on_train_end(self):
        """Finish pruning and, for PyTorch models, log final sparsity."""
        for pruner in self.pruners:
            pruner.on_train_end()
        if isinstance(self._model.model, torch.nn.Module):
            self.get_sparsity_ratio()

    def _on_before_eval(self):
        for pruner in self.pruners:
            pruner.on_before_eval()

    def _on_after_eval(self):
        for pruner in self.pruners:
            pruner.on_after_eval()

    def prepare(self):
        """Hook for subclasses; the base implementation does nothing."""
        pass

    def pre_process(self):
        """Set up the adaptor, dataloaders and train/eval functions from config."""
        assert isinstance(self._model, BaseModel), 'need set neural_compressor Model for pruning....'
        GLOBAL_STATE.STATE = MODE.PRUNING
        framework_specific_info = {'device': self.cfg.device, 'random_seed': self.cfg.tuning.random_seed, 'workspace_path': self.cfg.tuning.workspace.path, 'q_dataloader': None, 'format': 'default', 'backend': 'default'}
        if (self.framework == 'tensorflow'):
            framework_specific_info.update({'inputs': self.cfg.model.inputs, 'outputs': self.cfg.model.outputs})
        self.adaptor = FRAMEWORKS[self.framework](framework_specific_info)
        self.prepare()
        if ((self._train_dataloader is None) and (self._train_func is None)):
            train_dataloader_cfg = self.cfg.pruning.train.dataloader
            assert (train_dataloader_cfg is not None), 'dataloader field of train field of pruning section in yaml file should be configured as train_dataloader property is NOT set!'
            train_dataloader_cfg.distributed = self.train_distributed
            self._train_dataloader = create_dataloader(self.framework, train_dataloader_cfg)
        if ((self._eval_dataloader is None) and (self._eval_func is None)):
            eval_dataloader_cfg = self.cfg.evaluation.accuracy.dataloader
            assert (eval_dataloader_cfg is not None), 'dataloader field of evaluation in yaml file should be configured as eval_dataloader property is NOT set!'
            eval_dataloader_cfg.distributed = self.evaluation_distributed
            self._eval_dataloader = create_dataloader(self.framework, eval_dataloader_cfg)
        if (self._train_func is None):
            train_cfg = self.cfg.pruning.train
            assert train_cfg, 'train field of pruning section in yaml file must be configured for pruning if pruning_func is NOT set.'
            self._train_func = create_train_func(self.framework, self.train_dataloader, self.adaptor, train_cfg, hooks=self.hooks, callbacks=self.callbacks)
        if (self._eval_func is None):
            eval_cfg = self.cfg.evaluation
            assert eval_cfg, 'eval field of pruning section in yaml file must be configured for pruning if eval_func is NOT set.'
            self._eval_func = create_eval_func(self.framework, self.eval_dataloader, self.adaptor, eval_cfg.accuracy.metric, eval_cfg.accuracy.postprocess, fp32_baseline=False)
        if getattr(self.train_dataloader, 'distributed', False):
            self.register_hook('on_train_begin', self.adaptor._pre_hook_for_hvd)

    def execute(self):
        """Evaluate the baseline, run the pruning training loop, re-evaluate.

        Returns the (pruned) model wrapper.
        """
        logger.info("Start to get the baseline model's score before pruning.")
        self.baseline_score = self._eval_func((self._model if getattr(self._eval_func, 'builtin', None) else self._model.model))
        logger.info("Baseline model's score is {}.".format(str(self.baseline_score)))
        logger.info('Model pruning begins.')
        self._train_func((self._model if getattr(self._train_func, 'builtin', None) else self._model.model))
        logger.info('Model pruning is done. Start to evaluate the pruned model.')
        self.last_score = self._eval_func((self._model if getattr(self._eval_func, 'builtin', None) else self._model.model))
        logger.info('Pruned model score is {}.'.format(str(self.last_score)))
        return self._model

    def generate_hooks(self):
        """Register every lifecycle hook with the Component hook registry."""
        self.register_hook('on_train_begin', self._on_train_begin)
        self.register_hook('on_train_end', self._on_train_end)
        self.register_hook('on_epoch_begin', self._on_epoch_begin)
        self.register_hook('on_epoch_end', self._on_epoch_end)
        self.register_hook('on_step_begin', self._on_step_begin)
        self.register_hook('on_step_end', self._on_step_end)
        self.register_hook('on_before_optimizer_step', self._on_before_optimizer_step)
        self.register_hook('on_after_optimizer_step', self._on_after_optimizer_step)
        self.register_hook('on_before_eval', self._on_before_eval)
        self.register_hook('on_after_eval', self._on_after_eval)

    def _generate_pruners(self):
        """Build pruner objects: native pruners for PyTorch, registry lookups otherwise."""
        if isinstance(self._model.model, torch.nn.Module):
            for info in self.pruners_info:
                modules = parse_to_prune(info, self._model.model)
                if (modules == {}):
                    logger.warning('one pruner hooks no layers, please have a check')
                self.pruners.append(get_pruner(info, modules))
                info['modules'] = [key for key in modules.keys()]
                info['len_of_modules'] = len(info['modules'])
                logger.info(info)
        else:
            for info in self.pruners_info:
                pruner = generate_pruner_config(info)
                if (info.prune_type == 'magnitude'):
                    self.pruners.append(PRUNERS['BasicMagnitude'](self._model, pruner, None))
                elif (info.prune_type == 'pattern_lock'):
                    self.pruners.append(PRUNERS['PatternLock'](self._model, pruner, None))
                elif (info.prune_type == 'gradient_sensitivity'):
                    self.pruners.append(PRUNERS['GradientSensitivity'](self._model, pruner, None))
                elif (info.prune_type == 'group_lasso'):
                    self.pruners.append(PRUNERS['GroupLasso'](self._model, pruner, None))
                else:
                    assert False, 'now only support {}'.format(PRUNERS.keys())
                logger.info(info)

    def __call__(self):
        return super(Pruning, self).__call__()

    # This makes pruning.fit() equal to pruning().
    fit = __call__

    @property
    def pruning_func(self):
        """Write-only deprecated attribute; reading it is an error."""
        assert False, 'Should not try to get the value of `pruning_func` attribute.'
        return None

    @pruning_func.setter
    def pruning_func(self, user_pruning_func):
        # Deprecated since 2.0 — please use `train_func` instead (the original
        # carried a stripped `@deprecated(version='2.0', ...)` decorator here).
        self._train_func = user_pruning_func

    @property
    def evaluation_distributed(self):
        """True when either the caller or the yaml dataloader config requests distributed eval."""
        eval_dataloader_cfg = self.cfg.evaluation.accuracy.dataloader
        yaml_distributed = eval_dataloader_cfg.get('distributed', False)
        return (self._evaluation_distributed or yaml_distributed)

    @evaluation_distributed.setter
    def evaluation_distributed(self, distributed):
        self._evaluation_distributed = distributed

    @property
    def train_distributed(self):
        """True when either the caller or the yaml dataloader config requests distributed training."""
        train_dataloader_cfg = self.cfg.pruning.train.dataloader
        yaml_distributed = train_dataloader_cfg.get('distributed', False)
        return (self._train_distributed or yaml_distributed)

    @train_distributed.setter
    def train_distributed(self, distributed):
        self._train_distributed = distributed

    def __repr__(self):
        return 'Pruning'
def make_tarball(tarball_path, sources, base_dir, prefix_dir=''):
    """Create a gzip tarball from files and directory trees.

    Args:
        tarball_path: output .tar.gz path.
        sources: iterable of file or directory paths; directories are walked
            recursively and only regular files are added.
        base_dir: paths are stored in the archive relative to this directory.
        prefix_dir: optional prefix prepended to every in-archive name.
    """
    base_dir = os.path.normpath(os.path.abspath(base_dir))

    def archive_name(path):
        # Map an absolute path to its relative name inside the archive.
        path = os.path.normpath(os.path.abspath(path))
        common_path = os.path.commonprefix((base_dir, path))
        name = path[len(common_path):]
        if os.path.isabs(name):
            name = name[1:]
        return os.path.join(prefix_dir, name)

    compression = TARGZ_DEFAULT_COMPRESSION_LEVEL
    # tarfile.open(..., 'w:gz') is the public API; TarFile.gzopen is internal.
    tar = tarfile.open(tarball_path, 'w:gz', compresslevel=compression)
    try:
        for source in sources:
            if os.path.isdir(source):
                # os.path.walk was removed in Python 3; use os.walk instead.
                for dirname, _dirnames, filenames in os.walk(source):
                    for fname in filenames:
                        path = os.path.join(dirname, fname)
                        if os.path.isfile(path):
                            tar.add(path, archive_name(path))
            else:
                tar.add(source, archive_name(source))
    finally:
        tar.close()
def state_dict_to_master_params(model, state_dict, use_fp16):
    """Rebuild the master-parameter list from a saved state dict.

    Args:
        model: module whose ``named_parameters()`` fixes name order.
        state_dict: mapping from parameter name to tensor.
        use_fp16: when True, group and flatten via the fp16 helpers; when
            False, master params are simply the stored tensors.

    Returns:
        List of master-parameter tensors in ``named_parameters()`` order.
    """
    names = [name for name, _ in model.named_parameters()]
    if not use_fp16:
        return [state_dict[name] for name in names]
    named_model_params = [(name, state_dict[name]) for name in names]
    groups_and_shapes = get_param_groups_and_shapes(named_model_params)
    return make_master_params(groups_and_shapes)
def _kronecker_product(mat1, mat2):
    """Return the Kronecker product of two 2-D tensors.

    For mat1 of shape [m1, n1] and mat2 of shape [m2, n2], the result has
    shape [m1*m2, n1*n2]; computed by broadcasting reshaped copies against
    each other and flattening the paired axes.
    """
    rows1, cols1 = mat1.get_shape().as_list()
    rows2, cols2 = mat2.get_shape().as_list()
    expanded1 = array_ops.reshape(mat1, [rows1, 1, cols1, 1])
    expanded2 = array_ops.reshape(mat2, [1, rows2, 1, cols2])
    outer = expanded1 * expanded2
    return array_ops.reshape(outer, [rows1 * rows2, cols1 * cols2])
def seed_everything(seed=42):
    """Seed the Python, NumPy and PyTorch RNGs for reproducibility.

    When CUDA is available, also seeds the CUDA RNG and pins cuDNN to
    deterministic (non-benchmarking) mode.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def dropout_eval(model):
    """Switch every Dropout submodule of `model` to eval mode (disables dropout)
    while leaving all other submodules in their current mode.

    Uses isinstance rather than an exact type comparison, the idiomatic (and
    subclass-friendly) way to test module types.
    """
    for m in model.modules():
        if isinstance(m, nn.Dropout):
            m.eval()
def detect_compute_compatibility(CUDA_HOME, so_file):
    """Report the SM architectures a shared library was compiled for.

    Runs `cuobjdump --list-elf` on `so_file` and extracts the `.sm_XX.`
    markers from its output.

    Args:
        CUDA_HOME: CUDA installation root (cuobjdump is expected in its bin/).
        so_file: path to the shared library to inspect.

    Returns:
        Comma-separated sorted SM list (e.g. 'sm_70, sm_80'); the so_file path
        plus a note when cuobjdump is missing; or just the so_file path on any
        other failure (best-effort diagnostic, never raises).
    """
    try:
        cuobjdump = os.path.join(CUDA_HOME, 'bin', 'cuobjdump')
        if os.path.isfile(cuobjdump):
            # List-args invocation (shell=False) avoids shell-quoting issues
            # with paths containing spaces or metacharacters.
            output = subprocess.check_output([cuobjdump, '--list-elf', so_file])
            output = output.decode('utf-8').strip().split('\n')
            sm = []
            for line in output:
                # Each ELF line embeds its target arch as '.sm_XX.'.
                line = re.findall('\\.sm_[0-9]*\\.', line)[0]
                sm.append(line.strip('.'))
            sm = sorted(set(sm))
            return ', '.join(sm)
        else:
            return (so_file + '; cannot find cuobjdump')
    except Exception:
        # Best-effort: any parsing/subprocess failure degrades to the raw path.
        return so_file
# NOTE(review): the original line was a bare call `_model_architecture(...)` —
# the residue of a stripped decorator. Restored as fairseq's
# @register_model_architecture; confirm against the original source.
@register_model_architecture('cmlm_transformer', 'cmlm_transformer_wmt_en_de')
def cmlm_wmt_en_de(args):
    """Register the WMT En-De CMLM transformer architecture; simply applies
    the base CMLM architecture defaults to `args`."""
    cmlm_base_architecture(args)
def check_rdata_support(caller_name):
    """Ensure the optional ``rdata`` dependency is importable.

    Parameters
    ----------
    caller_name : str
        Name of the feature that requires rdata; used in the error message.

    Raises
    ------
    ImportError
        If the ``rdata`` package is not installed.
    """
    try:
        import rdata  # noqa: F401  (the import itself is the check)
    except ImportError as err:
        # Fixed message: it previously told users to install "pyreadr"
        # while the required package (and the pip command) is "rdata".
        raise ImportError(
            f'{caller_name} requires rdata. Please install rdata using '
            f'`pip install rdata`'
        ) from err
def _funcWrap(F: Type['U'], f, resultWrap: Optional[Type['Vec[T]']]=None, module: Any=libpymod) -> 'U':
    """Wrap a Python callable or constant value in the functor type *F*.

    Callables become a ``FuncWrapper`` subclass of *F* whose ``__call__``
    forwards to *f* (optionally wrapping an iterable result in *resultWrap*);
    non-callables become a ``Constant`` subclass that ignores its arguments
    and always returns the value.  The instance is converted with
    ``module._sharedToStd`` before being returned.
    """
    # Duck-typed check: anything exposing __call__ is treated as a function.
    if hasattr(f, '__call__'):
        class FuncWrapper(F):
            def __init__(self, f) -> None:
                self.f = f
                F.__init__(self)
            def clone(self) -> 'FuncWrapper':
                # A fresh wrapper around the same Python callable.
                return module._sharedToStd(FuncWrapper(self.f))
            def __str__(self) -> str:
                # Include the wrapped function's source to ease debugging of
                # callbacks invoked from the extension side.
                (lines, lnum) = inspect.getsourcelines(self.f)
                source = ''.join(lines)
                filename = inspect.getfile(self.f)
                return ('FuncWrapper(%s)\nCode from %s:%d >>>>>\n%s<<<<< Code from %s:%d' % (str(self.f), filename, lnum, source, filename, lnum))
            def __call__(self, *args: List[Any]) -> Union[('T', 'Vec[T]')]:
                try:
                    if (resultWrap is not None):
                        # Convert an iterable result into the requested Vec type.
                        return _wrap(resultWrap, cast(Iterable['T'], self.f(*args)))
                    else:
                        return cast('T', self.f(*args))
                except:
                    # Print context before re-raising so failures surfaced
                    # through the extension still identify which wrapper broke.
                    print('Error in wrapped function when called:', str(self))
                    print((("Base type is '" + str(F)) + "'"))
                    raise
        res = FuncWrapper(f)
    else:
        class Constant(F):
            def __init__(self, c: 'T') -> None:
                self.c = c
                F.__init__(self)
            def clone(self) -> 'Constant':
                return module._sharedToStd(Constant(self.c))
            def __str__(self) -> str:
                return (('Constant(' + str(self.c)) + ')')
            def __call__(self, *args: List[Any]) -> 'T':
                # Arguments are accepted but deliberately ignored.
                return self.c
        res = Constant(cast('T', f))
    return module._sharedToStd(res)
def preprocess(image):
    """Convert a PIL image into a model-ready tensor in [-1, 1].

    Each side is snapped down to the nearest multiple of 32 — presumably a
    downstream stride requirement; confirm against the model used — then
    the image is Lanczos-resized and laid out as NCHW float32.
    """
    width, height = image.size
    # Round both sides down to a multiple of 32.
    width, height = (dim - dim % 32 for dim in (width, height))
    image = image.resize((width, height), resample=PIL_INTERPOLATION['lanczos'])
    array = np.array(image).astype(np.float32) / 255.0
    array = array[None].transpose(0, 3, 1, 2)
    tensor = torch.from_numpy(array)
    # Map [0, 1] -> [-1, 1].
    return 2.0 * tensor - 1.0
def load_mesh_data(mesh_fpath: str, field: str, device: Optional[torch.device]=None) -> Tuple[(Optional[torch.Tensor], Optional[torch.Tensor])]:
    """Load one *field* of a pickled mesh file as a float tensor on *device*.

    NOTE(review): the return annotation advertises a tuple, but a single
    tensor is returned — confirm the intended contract with callers.
    A former trailing ``return None`` was unreachable (the ``with`` body
    always returns) and has been removed.
    """
    with PathManager.open(mesh_fpath, 'rb') as hFile:
        return torch.as_tensor(pickle.load(hFile)[field], dtype=torch.float).to(device)
def getEpochsBetweenFullInf(pathToLog):
    """Parse the number of epochs between full inferences out of a log file.

    Returns None when the marker pattern never occurs in the log.
    """
    lineWithPattern = getFirstLineInLogWithCertainPattern(pathToLog, NUM_EPS_BETWEEN_FULLINF_PATTERN)
    # Identity comparison with None is the idiomatic check (was `== None`).
    if lineWithPattern is None:
        return None
    # Everything after the marker holds the integer we want.
    tail = lineWithPattern[lineWithPattern.find(NUM_EPS_BETWEEN_FULLINF_PATTERN) + len(NUM_EPS_BETWEEN_FULLINF_PATTERN):]
    return getIntFromStr(tail)
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of *stopping_criteria* guaranteed to enforce *max_length*.

    Warns when the list already carries a different max length, and appends
    a MaxLengthCriteria when none is present.  The input list is not mutated.
    """
    existing_max = stopping_criteria.max_length
    validated = deepcopy(stopping_criteria)
    if existing_max is None:
        validated.append(MaxLengthCriteria(max_length=max_length))
    elif existing_max != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning)
    return validated
class Block5(M.Model):
    """Dilated residual block: 3x3 -> dilated 3x3 main branch plus a 1x1 shortcut."""

    def initialize(self):
        self.bn0 = L.batch_norm()
        self.activ = L.activation(M.PARAM_RELU)
        self.c1 = L.conv2D(3, 512, pad='VALID', usebias=False)
        self.bn1 = L.batch_norm()
        self.c2 = L.conv2D(3, 1024, pad='VALID', usebias=False, dilation_rate=2)
        self.c3 = L.conv2D(1, 1024, usebias=False)

    def forward(self, x):
        # Pre-activation: BN + ReLU applied once, feeding both branches.
        preact = self.activ(self.bn0(x))
        # 1x1 projection shortcut.
        shortcut = self.c3(preact)
        # Main branch: padded 3x3 conv, BN + ReLU, then dilated 3x3 conv.
        main = self.c1(M.pad(preact, 1))
        main = self.c2(M.pad(self.activ(self.bn1(main)), 2))
        return main + shortcut
class ConfigParser(configargparse.ArgParser):
    """Argument parser for the offline reconstruction system.

    Loads defaults from ``default_config.yml`` next to this file and groups
    options into input / odometry / registration / integration sections.
    ``conflict_handler='resolve'`` lets later duplicate options (e.g.
    ``--fragment_size`` registered twice) override earlier ones.
    """
    def __init__(self):
        super().__init__(default_config_files=[os.path.join(os.path.dirname(__file__), 'default_config.yml')], conflict_handler='resolve')
        # Top-level system options.
        self.add('--name', type=str, help='Name of the config for the offline reconstruction system.')
        self.add('--fragment_size', type=int, help='Number of RGBD frames to construct a fragment.')
        self.add('--device', type=str, help='Device to run the system.')
        self.add('--engine', type=str, choices=['tensor', 'legacy'], help='Open3D engine to reconstruct.')
        self.add('--multiprocessing', action='store_true', help='Use multiprocessing in operations. Only available for the legacy engine.')
        # Input data locations and depth clipping/scaling.
        input_parser = self.add_argument_group('input')
        input_parser.add('--path_dataset', type=str, help='Path to the dataset folder. It should contain a folder with depth and a folder with color images.')
        input_parser.add('--depth_folder', type=str, help='Path that stores depth images.')
        input_parser.add('--color_folder', type=str, help='Path that stores color images.')
        input_parser.add('--path_intrinsic', type=str, help='Path to the intrinsic.json config file.If the intrinsic matrix for color image is different,specify it by --path_color_intrinsic.By default PrimeSense intrinsics is used.')
        input_parser.add('--path_color_intrinsic', type=str, help='Path to the intrinsic.json config file.If the intrinsic matrix for color image is different,specify it by --path_color_intrinsic.By default PrimeSense intrinsics is used.')
        input_parser.add('--depth_min', type=float, help='Min clipping distance (in meter) for input depth data.')
        input_parser.add('--depth_max', type=float, help='Max clipping distance (in meter) for input depth data.')
        input_parser.add('--depth_scale', type=float, help='Scale factor to convert raw input depth data to meters.')
        input_parser.add('--fragment_size', type=int, help='Number of RGBD frames per fragment')
        # Frame-to-frame odometry and pose-graph loop closure.
        odometry_parser = self.add_argument_group('odometry')
        odometry_parser.add('--odometry_method', type=str, choices=['point2plane', 'intensity', 'hybrid', 'frame2model'], help='Method used in pose estimation between RGBD images.Frame2model only available for the tensor engine.')
        odometry_parser.add('--odometry_loop_interval', type=int, help='Intervals to check loop closures between RGBD images.')
        odometry_parser.add('--odometry_loop_weight', type=float, help='Weight of loop closure edges when optimizing pose graphs for odometry.')
        odometry_parser.add('--odometry_distance_thr', type=float, help='Default distance threshold to filter outliers in odometry correspondences.')
        # Fragment-to-fragment registration.
        registration_parser = self.add_argument_group('registration')
        registration_parser.add('--icp_method', type=str, choices=['colored', 'point2point', 'point2plane', 'generalized'], help='Method used in registration between fragment point clouds with a good initial pose estimate.Generalized ICP only available for the tensor engine.')
        registration_parser.add('--icp_voxelsize', type=float, help='Voxel size used to down sample point cloud for fast/multiscale ICP.')
        registration_parser.add('--icp_distance_thr', type=float, help='Default distance threshold to filter outliers in ICP correspondences.')
        registration_parser.add('--global_registration_method', type=str, choices=['fgr', 'ransac'], help='Method used in global registration of two fragment point clouds without an initial pose estimate.')
        registration_parser.add('--registration_loop_weight', type=float, help='Weight of loop closure edges when optimizing pose graphs for registration.')
        # Volumetric TSDF integration and surface extraction.
        integration_parser = self.add_argument_group('integration')
        integration_parser.add('--integrate_color', action='store_true', default=False, help='Volumetric integration mode.')
        integration_parser.add('--voxel_size', type=float, help='Voxel size in meter for volumetric integration.')
        integration_parser.add('--trunc_voxel_multiplier', type=float, help='Truncation distance multiplier in voxel size for signed distance. For instance, --trunc_voxel_multiplier=8 with --voxel_size=0.006(m) creates a truncation distance of 0.048(m).')
        integration_parser.add('--est_point_count', type=int, help='Estimated point cloud size for surface extraction.')
        integration_parser.add('--block_count', type=int, help='Pre-allocated voxel block count for volumetric integration.')
        integration_parser.add('--surface_weight_thr', type=float, help='Weight threshold to filter outliers during volumetric surface reconstruction.')
    def get_config(self):
        """Parse args and reconcile engine-specific capability conflicts.

        Silently (with a printed notice) falls back to supported devices and
        methods when the selected engine cannot honor a requested option.
        """
        config = self.parse_args()
        # The legacy engine is CPU-only and lacks frame2model tracking.
        if (config.engine == 'legacy'):
            if config.device.lower().startswith('cuda'):
                print('Legacy engine only supports CPU.', 'Fallback to CPU.')
                config.device = 'CPU:0'
            if (config.odometry_method == 'frame2model'):
                print('Legacy engine does not supports frame2model tracking.', 'Fallback to hybrid odometry.')
                config.odometry_method = 'hybrid'
        # The tensor engine lacks generalized ICP and multiprocessing, and
        # needs an actual CUDA build/device to use a cuda device string.
        elif (config.engine == 'tensor'):
            if (config.icp_method == 'generalized'):
                print('Tensor engine does not support generalized ICP.', 'Fallback to colored ICP.')
                config.icp_method = 'colored'
            if config.multiprocessing:
                print('Tensor engine does not support multiprocessing.', 'Disabled.')
                config.multiprocessing = False
            if (config.device.lower().startswith('cuda') and (not o3d.core.cuda.is_available())):
                print('Open3d not built with cuda support or no cuda device available. ', 'Fallback to CPU.')
                config.device = 'CPU:0'
        return config
def build_depth_head(cfg):
    """Instantiate the depth head registered under ``cfg.MODEL.DEPTH_HEAD.NAME``."""
    head_cls = DEPTH_HEAD_REGISTRY.get(cfg.MODEL.DEPTH_HEAD.NAME)
    return head_cls(cfg)
def init_lstm(input_lstm):
    """Initialize an LSTM's weights and biases in place.

    Weight matrices get a uniform init bounded by a Glorot-style term using
    the per-gate fan (``size(0) / 4``); bias vectors are zeroed except the
    forget-gate slice, which is set to 1 — a standard trick that helps
    gradient flow early in training.
    """
    for layer in range(input_lstm.num_layers):
        for prefix in ('weight_ih_l', 'weight_hh_l'):
            # getattr replaces the previous eval()-based lookup: same
            # attribute access, without executing arbitrary strings.
            weight = getattr(input_lstm, prefix + str(layer))
            bound = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
            nn.init.uniform_(weight, -bound, bound)
    if input_lstm.bias:
        for layer in range(input_lstm.num_layers):
            for prefix in ('bias_ih_l', 'bias_hh_l'):
                bias = getattr(input_lstm, prefix + str(layer))
                bias.data.zero_()
                # The forget gate occupies the second quarter of the vector.
                bias.data[input_lstm.hidden_size:2 * input_lstm.hidden_size] = 1
def _to_ops(iterable):
    """Map ``_to_op`` over *iterable*; non-iterable inputs pass through unchanged."""
    if _is_iterable(iterable):
        return [_to_op(item) for item in iterable]
    return iterable
class TestLogger(unittest.TestCase):
    """Unit tests for the module-level logger helpers."""
    def test_changing_log_level(self) -> None:
        # After change_log_level, the shared module logger `log` should
        # report the new level.
        change_log_level(logging.INFO)
        self.assertEqual(logging.INFO, log.level)
class FeaturePyramidNetwork(nn.Module):
    """Feature Pyramid Network head over a backbone's feature maps.

    Takes an OrderedDict of feature maps (highest resolution first) and
    produces an OrderedDict of FPN features with ``out_channels`` channels
    each, combining 1x1 lateral projections with nearest-neighbor top-down
    upsampling and 3x3 output convs.
    """
    def __init__(self, in_channels_list: List[int], out_channels: int, extra_blocks: Optional[ExtraFPNBlock]=None):
        super(FeaturePyramidNetwork, self).__init__()
        # inner_blocks: 1x1 lateral convs; layer_blocks: 3x3 output convs.
        self.inner_blocks = nn.ModuleList()
        self.layer_blocks = nn.ModuleList()
        for in_channels in in_channels_list:
            if (in_channels == 0):
                raise ValueError('in_channels=0 is currently not supported')
            inner_block_module = nn.Conv2d(in_channels, out_channels, 1)
            layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1)
            self.inner_blocks.append(inner_block_module)
            self.layer_blocks.append(layer_block_module)
        # Kaiming init for every conv created above.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)
        if (extra_blocks is not None):
            assert isinstance(extra_blocks, ExtraFPNBlock)
        # Fixed: always assign the attribute.  Previously it was only set
        # when extra_blocks was not None, so forward() raised AttributeError
        # for the default (None) configuration.
        self.extra_blocks = extra_blocks
    def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor:
        """Apply the idx-th lateral (1x1) conv to x.

        Iterates the ModuleList instead of indexing it — presumably for
        TorchScript compatibility; confirm before simplifying.
        """
        num_blocks = len(self.inner_blocks)
        if (idx < 0):
            idx += num_blocks
        i = 0
        out = x
        for module in self.inner_blocks:
            if (i == idx):
                out = module(x)
            i += 1
        return out
    def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor:
        """Apply the idx-th output (3x3) conv to x (same iteration pattern)."""
        num_blocks = len(self.layer_blocks)
        if (idx < 0):
            idx += num_blocks
        i = 0
        out = x
        for module in self.layer_blocks:
            if (i == idx):
                out = module(x)
            i += 1
        return out
    def forward(self, x: Dict[(str, Tensor)]) -> Dict[(str, Tensor)]:
        """Compute FPN features for an ordered dict of backbone features."""
        names = list(x.keys())
        x = list(x.values())
        # Start from the coarsest level and work top-down.
        last_inner = self.get_result_from_inner_blocks(x[(- 1)], (- 1))
        results = []
        results.append(self.get_result_from_layer_blocks(last_inner, (- 1)))
        for idx in range((len(x) - 2), (- 1), (- 1)):
            inner_lateral = self.get_result_from_inner_blocks(x[idx], idx)
            feat_shape = inner_lateral.shape[(- 2):]
            # Upsample the coarser level to the lateral's spatial size.
            inner_top_down = F.interpolate(last_inner, size=feat_shape, mode='nearest')
            last_inner = (inner_lateral + inner_top_down)
            results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))
        if (self.extra_blocks is not None):
            (results, names) = self.extra_blocks(results, x, names)
        # Preserve the input's level ordering in the output dict.
        out = OrderedDict([(k, v) for (k, v) in zip(names, results)])
        return out
class Crop(object):
    """Sequence augmentation: take a random contiguous sub-sequence.

    The crop length is ``int(tao * len(sequence))``; a zero-length crop
    degenerates to a single-element list.
    """

    def __init__(self, tao=0.2):
        self.tao = tao

    def __call__(self, sequence):
        # Work on a deep copy so the caller's sequence is never mutated.
        seq = copy.deepcopy(sequence)
        crop_len = int(self.tao * len(seq))
        # NOTE(review): the upper bound excludes the last valid start
        # position (len - crop_len) — kept as-is to preserve behavior.
        start = random.randint(0, len(seq) - crop_len - 1)
        if crop_len < 1:
            return [seq[start]]
        return seq[start:start + crop_len]
def symbolic_equations():
    """Build the three symbolic balance equations used by the solver.

    Returns a list [eq1, eq2, eq3] over coefficient symbols (a*, b*, c0)
    and unknowns t1..t6, each written in "expression == 0" form.
    """
    (a0, a1, a2, a3, a4, a5, a6) = var('a0, a1, a2, a3, a4, a5, a6')
    (b0, b2, b3, b4, b5, c0) = var('b0, b2, b3, b4, b5, c0')
    (t1, t2, t3, t4, t5, t6) = var('t1, t2, t3, t4, t5, t6')
    return [
        a1 * t1 + a2 * t2 - a3 * t3 - a0,
        b2 * t2 + a3 * t3 - a4 * t4 + a5 * t5 - b0,
        a4 * t4 + b5 * t5 - a6 * t6 - c0,
    ]
def pytest_collection_modifyitems(config, items):
    """Skip tests marked ``slow`` (without --runslow) or ``not_implemented``."""
    if config.getoption('--runslow'):
        # --runslow lifts the restriction: leave every item untouched.
        return
    markers = {
        'slow': pytest.mark.skip(reason='need --runslow option to run'),
        'not_implemented': pytest.mark.skip(reason='test not yet implemented'),
    }
    for item in items:
        if 'slow' in item.keywords:
            item.add_marker(markers['slow'])
        elif 'not_implemented' in item.keywords:
            item.add_marker(markers['not_implemented'])
def adjust_learning_rate(optimizer, base_lr, epoch, stepsize=20, gamma=0.1, linear_decay=False, final_lr=0, max_epoch=100):
    """Set the learning rate of every param group for the given epoch.

    Two schedules are supported:
      * ``linear_decay=True``: linear interpolation base_lr -> final_lr
        over ``max_epoch`` epochs.
      * ``linear_decay=False``: step decay, multiplying by ``gamma`` every
        ``stepsize`` epochs.
    """
    if linear_decay:
        progress = epoch / max_epoch
        lr = progress * final_lr + (1.0 - progress) * base_lr
    else:
        lr = base_lr * gamma ** (epoch // stepsize)
    for group in optimizer.param_groups:
        group['lr'] = lr
# NOTE(review): this call is likely a decorator that lost its leading '@'
# during extraction (e.g. ``@register_model_architecture(...)``) — confirm
# against the original source.
_model_architecture('fconv_self_att', 'fconv_self_att_wp')
def fconv_self_att_wp(args):
    """Architecture preset for the self-attentive fconv model.

    Each getattr() only fills in a default when the user has not already
    set the attribute on ``args``.  Layer specs and boolean flags are kept
    as strings, matching how they are consumed downstream.
    """
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
    args.encoder_layers = getattr(args, 'encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1')
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
    args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1')
    args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
    args.self_attention = getattr(args, 'self_attention', 'True')
    args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 4)
    args.project_input = getattr(args, 'project_input', 'True')
    args.gated_attention = getattr(args, 'gated_attention', 'True')
    args.downsample = getattr(args, 'downsample', 'True')
    base_architecture(args)
class EpsilonGreedyNewsRecommendation(GreedyNewsRecommendation):
    """Greedy news recommender with epsilon-greedy exploration."""

    def pick_action(self, context):
        # Estimate per-article rewards under the current MAP estimate.
        estimated_rewards = self._map_rewards(context)
        # With probability epsilon explore a uniformly random article;
        # otherwise exploit the current best estimate.
        if np.random.uniform() < self.epsilon:
            return np.random.randint(0, self.num_articles)
        return np.argmax(estimated_rewards)
class L2Loss(nn.Module):
    """Mean-squared-error loss against a detached target.

    The target is detached so gradients only flow through ``mu``.
    """

    def __init__(self):
        super(L2Loss, self).__init__()
        self.L2 = nn.MSELoss(reduction='mean')

    def forward(self, target, mu):
        """Return mean((target.detach() - mu) ** 2)."""
        # Detach the target: it acts as a fixed regression label.
        # (Removed a dead ``loss = 0`` that was immediately overwritten.)
        target = target.detach()
        return self.L2(target, mu)
# NOTE(review): this line appears to be a decorator that lost its prefix
# during extraction (likely ``@pytest.mark.skipif('env.PYPY')``) — confirm
# against the original source; as written it is not valid standalone syntax.
.skipif('env.PYPY')
def test_indirect_cycle(gc_tester):
    """An object -> list -> same object reference cycle should be collectable."""
    obj = m.OwnsPythonObjects()
    obj_list = [obj]
    # Close the cycle: the object holds the list that holds the object.
    obj.value = obj_list
    gc_tester(obj)
class IceContrast(SegmentationDataset):
    """Segmentation dataset for iceberg imagery (DENTIST or Iceberg layouts).

    Items are (root, image_id) pairs read from ``<root>/<split>.txt``.
    Depending on ``mode``, __getitem__ returns either (image, image_id)
    for 'test' or (image, mask) for 'train'/'val'/'testval', with masks
    scaled to [0, 1] float.
    """
    # Binary segmentation: a single foreground class.
    NUM_CLASS = 1
    def __init__(self, base_dir='DENTIST', root=os.path.join('~', 'Nutstore Files', 'Dataset'), split='train', mode=None, transform=None, **kwargs):
        super(IceContrast, self).__init__(root, split, mode, transform, **kwargs)
        self.base_dir = base_dir
        self._root = os.path.expanduser(os.path.join(root, base_dir))
        self._transform = transform
        self._split = split
        self.mode = mode
        self._items = self._load_items(split)
        # The two supported layouts differ only in where the masks live.
        if (base_dir == 'DENTIST'):
            self._anno_path = os.path.join('{}', 'masks/', '{}_pixels0.png')
            self._image_path = os.path.join('{}', 'images', '{}.png')
        elif (base_dir == 'Iceberg'):
            self._anno_path = os.path.join('{}', 'labels/mask/', '{}_pixels0.png')
            self._image_path = os.path.join('{}', 'images', '{}.png')
        else:
            raise ValueError('Unknown base dir')
    def _load_items(self, split):
        """Read ``<root>/<split>.txt`` into shuffled (root, id) pairs."""
        ids = []
        root = self._root
        lf = os.path.join(root, (split + '.txt'))
        with open(lf, 'r') as f:
            ids += [(root, line.strip()) for line in f.readlines()]
        # Shuffled once at load time (uses the global random state).
        random.shuffle(ids)
        return ids
    def __getitem__(self, idx):
        img_id = self._items[idx]
        img_path = self._image_path.format(*img_id)
        label_path = self._anno_path.format(*img_id)
        img = Image.open(img_path).convert('RGB')
        if (self.mode == 'test'):
            # Test mode: no mask available; return the image and its id.
            img = img.resize((self.base_size, self.base_size), Image.BILINEAR)
            img = self._img_transform(img)
            if (self.transform is not None):
                img = self.transform(img)
            return (img, img_id[(- 1)])
        mask = Image.open(label_path)
        if (self.mode == 'train'):
            # Random scale / crop / flip / blur augmentation.
            (img, mask) = self._sync_transform(img, mask)
        elif (self.mode == 'val'):
            (img, mask) = self._val_sync_transform(img, mask)
        else:
            assert (self.mode == 'testval')
            if (self.base_dir == 'DENTIST'):
                (img, mask) = self._testval_sync_transform(img, mask)
            else:
                (img, mask) = (self._img_transform(img), self._mask_transform(mask))
        if (self.transform is not None):
            img = self.transform(img)
        # Scale the 8-bit mask into [0, 1] with a leading channel axis.
        mask = (nd.expand_dims(mask, axis=0).astype('float32') / 255.0)
        return (img, mask)
    def __len__(self):
        return len(self._items)
    # NOTE(review): in similar dataset classes `classes` is a @property;
    # the decorator may have been lost during extraction — confirm.
    def classes(self):
        return 'iceberg'
    def _testval_sync_transform(self, img, mask):
        """Deterministic resize of both image and mask to base_size."""
        base_size = self.base_size
        img = img.resize((base_size, base_size), Image.BILINEAR)
        mask = mask.resize((base_size, base_size), Image.NEAREST)
        (img, mask) = (self._img_transform(img), self._mask_transform(mask))
        return (img, mask)
    def _sync_transform(self, img, mask):
        """Random training augmentation applied jointly to image and mask."""
        # Random horizontal flip.
        if (random.random() < 0.5):
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # Random scale: long side in [0.5, 2.0] * base_size, aspect kept.
        long_size = random.randint(int((self.base_size * 0.5)), int((self.base_size * 2.0)))
        (w, h) = img.size
        if (h > w):
            oh = long_size
            ow = int(((((1.0 * w) * long_size) / h) + 0.5))
            short_size = ow
        else:
            ow = long_size
            oh = int(((((1.0 * h) * long_size) / w) + 0.5))
            short_size = oh
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # Pad up to crop_size when the scaled image is too small to crop.
        if (short_size < crop_size):
            padh = ((crop_size - oh) if (oh < crop_size) else 0)
            padw = ((crop_size - ow) if (ow < crop_size) else 0)
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # Random crop of crop_size x crop_size.
        (w, h) = img.size
        x1 = random.randint(0, (w - crop_size))
        y1 = random.randint(0, (h - crop_size))
        img = img.crop((x1, y1, (x1 + crop_size), (y1 + crop_size)))
        mask = mask.crop((x1, y1, (x1 + crop_size), (y1 + crop_size)))
        # Random Gaussian blur on the image only.
        if (random.random() < 0.5):
            img = img.filter(ImageFilter.GaussianBlur(radius=random.random()))
        (img, mask) = (self._img_transform(img), self._mask_transform(mask))
        return (img, mask)
class DyReLU(BaseModule):
    """Dynamic ReLU: a piecewise-linear activation whose coefficients are
    predicted per-sample from a globally pooled descriptor.

    Computes ``max(a1*x + b1, a2*x + b2)`` where (a1, b1, a2, b2) come from
    a two-conv bottleneck over the global average pool of the input.
    """

    def __init__(self, channels: int, ratio: int=4, conv_cfg: OptConfigType=None, act_cfg: MultiConfig=(dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, divisor=6.0)), init_cfg: OptMultiConfig=None) -> None:
        super().__init__(init_cfg=init_cfg)
        # A single act_cfg dict is shared by both bottleneck convs.
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert (len(act_cfg) == 2)
        assert is_tuple_of(act_cfg, dict)
        self.channels = channels
        # Four coefficient maps (a1, b1, a2, b2) per channel.
        self.expansion = 4
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        squeezed = int(channels / ratio)
        self.conv1 = ConvModule(in_channels=channels, out_channels=squeezed, kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[0])
        self.conv2 = ConvModule(in_channels=squeezed, out_channels=(channels * self.expansion), kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[1])

    def forward(self, x: Tensor) -> Tensor:
        """Apply the dynamically parameterized activation to *x*."""
        # Squeeze spatial dims, run the bottleneck, and center around zero.
        theta = self.conv2(self.conv1(self.global_avgpool(x))) - 0.5
        a1, b1, a2, b2 = torch.split(theta, self.channels, dim=1)
        # Rescale so theta == 0 reduces to a1 = 1 and a2 = b1 = b2 = 0.
        a1 = 2.0 * a1 + 1.0
        a2 = 2.0 * a2
        return torch.max(x * a1 + b1, x * a2 + b2)
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
    """Create PyTorch DataLoaders for training or testing from the config.

    Returns a single loader for training (exactly one train dataset is
    expected) or a list of loaders for testing.
    """
    num_gpus = get_world_size()
    if is_train:
        videos_per_batch = cfg.SOLVER.VIDEOS_PER_BATCH
        # Fixed: the assert message and its .format() call had been split
        # into two statements (a syntax error); rejoined into one expression.
        assert videos_per_batch % num_gpus == 0, (
            'SOLVER.VIDEOS_PER_BATCH ({}) must be divisible by the number '
            'of GPUs ({}).'.format(videos_per_batch, num_gpus))
        videos_per_gpu = videos_per_batch // num_gpus
        shuffle = True
        drop_last = True
        num_iters = cfg.SOLVER.MAX_ITER
    else:
        videos_per_batch = cfg.TEST.VIDEOS_PER_BATCH
        assert videos_per_batch % num_gpus == 0, (
            'TEST.VIDEOS_PER_BATCH ({}) must be divisible by the number '
            'of GPUs ({}).'.format(videos_per_batch, num_gpus))
        videos_per_gpu = videos_per_batch // num_gpus
        # Distributed evaluation shuffles so per-rank shards stay balanced.
        shuffle = False if not is_distributed else True
        drop_last = False
        num_iters = None
        start_iter = 0
    aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
    transforms = build_transforms(cfg, is_train)
    if has_object(cfg.MODEL.HIT_STRUCTURE):
        object_transforms = build_object_transforms(cfg, is_train=is_train)
    else:
        object_transforms = None
    datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train, object_transforms)
    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle, is_distributed)
        batch_sampler = make_batch_data_sampler(dataset, sampler, aspect_grouping, videos_per_gpu, num_iters, start_iter, drop_last)
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=collator)
        data_loaders.append(data_loader)
    if is_train:
        # Training expects the datasets to have been concatenated into one.
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
def simclr_resnet50(num_classes, **kwargs):
    """Factory for a SimCLR model with a ResNet-50 backbone.

    NOTE(review): extra **kwargs are accepted but silently ignored —
    confirm whether they should be forwarded to SimCLRResNet.
    """
    return SimCLRResNet(base_model='resnet50', num_classes=num_classes)
class SequentialModel(MetaEstimatorMixin, _BaseModel, metaclass=ABCMeta):
    """Fit a 1-D latent-variable estimator sequentially with deflation.

    Each iteration fits ``estimator`` (which must expose exactly one latent
    dimension), optionally runs a permutation test, and stops when either
    the p-value reaches ``p_threshold`` or the estimator score falls below
    ``corr_threshold``; otherwise the views are deflated by the fitted
    weights and the loop continues up to ``latent_dimensions``.
    """
    def __init__(self, estimator, estimator_hyperparams=None, permutation_test_params=None, latent_dimensions=None, copy_data=True, accept_sparse=False, random_state=None, permutation_test=False, p_threshold=0.001, corr_threshold=0.0):
        super().__init__(latent_dimensions=latent_dimensions, copy_data=copy_data, accept_sparse=accept_sparse, random_state=random_state)
        # The wrapped estimator (possibly a search wrapper exposing
        # ``.estimator``) must have exactly one latent dimension, since one
        # dimension is extracted per deflation round.
        if hasattr(estimator, 'estimator'):
            if (estimator.estimator.latent_dimensions != 1):
                raise ValueError('The estimator must have 1 latent dimension, but has {}'.format(estimator.estimator.latent_dimensions))
        elif (estimator.latent_dimensions != 1):
            raise ValueError('The estimator must have 1 latent dimension, but has {}'.format(estimator.latent_dimensions))
        self.estimator = estimator
        if (estimator_hyperparams is None):
            estimator_hyperparams = {}
        self.estimator_hyperparams = estimator_hyperparams
        self.permutation_test = permutation_test
        if (permutation_test_params is None):
            permutation_test_params = {}
        self.permutation_test_params = permutation_test_params
        self.p_threshold = p_threshold
        self.corr_threshold = corr_threshold
    def fit(self, views: Iterable[np.ndarray], y=None, **kwargs):
        """Fit up to ``latent_dimensions`` components, deflating after each.

        Raises ValueError when no latent dimension survives the stopping
        criteria.  Sets ``weights_`` (per-view weight matrices),
        ``p_values`` and the effective ``latent_dimensions``.
        """
        self._validate_data(views)
        self._check_params()
        if (self.latent_dimensions is None):
            # Cap at the smallest view dimensionality.
            self.latent_dimensions = min([view.shape[1] for view in views])
        self.weights_ = [[] for view in views]
        self.p_values = []
        k = 0
        while (k < self.latent_dimensions):
            self.estimator.set_params(**self.estimator_hyperparams)
            self.estimator.fit(views)
            p_value = None
            best_estimator = self.estimator
            if self.permutation_test:
                # Search wrappers expose the refit model as best_estimator_.
                best_estimator = getattr(self.estimator, 'best_estimator_', self.estimator)
                p_value = permutation_test_score(best_estimator, views, y=None, **self.permutation_test_params)[2]
                self.p_values.append(p_value)
            # Stop when the component is insignificant or too weakly correlated.
            if (((p_value is not None) and (p_value >= self.p_threshold)) or (best_estimator.score(views) < self.corr_threshold)):
                if (p_value is not None):
                    # Drop the p-value of the rejected component.
                    self.p_values.pop()
                break
            else:
                # Remove the found component from the views and keep weights.
                views = deflate_views(views, best_estimator.weights_)
                for (i, weight) in enumerate(best_estimator.weights_):
                    self.weights_[i].append(weight)
            k += 1
        if all(((len(w) == 0) for w in self.weights_)):
            raise ValueError('No significant latent dimensions found.')
        self.latent_dimensions = k
        self.weights_ = [np.concatenate(weights, axis=1) for weights in self.weights_]
        return self
class BaseOptions():
    """Common command-line options shared by the train/test entry points."""

    def __init__(self):
        # The parser is built lazily by gather_options().
        self.initialized = False

    def initialize(self, parser):
        """Register all common options on *parser* and return it."""
        parser.add_argument('--dataroot', required=True, help='Path to images')
        parser.add_argument('--batchsize', type=int, default=2, help='Batch size')
        parser.add_argument('--cfg_file', default='lib/configs/resnext_32x4d_nyudv2_c1', help='Set model and dataset config files')
        parser.add_argument('--dataset', default='nyudv2', help='Path to images')
        parser.add_argument('--load_ckpt', help='Checkpoint path to load')
        parser.add_argument('--resume', action='store_true', help='Resume to train')
        parser.add_argument('--epoch', default=30, type=int, help='Set training epochs')
        parser.add_argument('--start_epoch', default=0, type=int, help='Set training epochs')
        parser.add_argument('--start_step', default=0, type=int, help='Set training steps')
        parser.add_argument('--thread', default=4, type=int, help='Thread for loading data')
        parser.add_argument('--use_tfboard', action='store_true', help='Tensorboard to log training info')
        parser.add_argument('--results_dir', type=str, default='./evaluation', help='Output dir')
        self.initialized = True
        return parser

    def gather_options(self):
        """Build the parser once, then parse the command line.

        Fixed: previously, when ``self.initialized`` was already True, the
        local ``parser`` was unbound and ``parser.parse_args()`` raised
        NameError; the cached ``self.parser`` is now reused instead.
        """
        if not self.initialized:
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            self.parser = self.initialize(parser)
        return self.parser.parse_args()

    def print_options(self, opt):
        """Pretty-print the parsed options, flagging non-default values."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)

    def parse(self):
        """Parse, echo, and cache the options; returns the namespace."""
        opt = self.gather_options()
        self.print_options(opt)
        self.opt = opt
        return self.opt
# NOTE(review): '_model' is likely a decorator that lost its '@' (and any
# arguments) during extraction (e.g. ``@register_model``) — confirm against
# the original source.
_model
def regnetx_006(pretrained=False, **kwargs):
    """RegNetX-600MF model; kwargs are forwarded to the generic builder."""
    return _create_regnet('regnetx_006', pretrained, **kwargs)
def get_imdb(name):
    """Look up and construct the dataset factory registered under *name*.

    Raises:
        KeyError: If *name* is not a registered dataset.
    """
    # dict.has_key() was removed in Python 3; membership test replaces it.
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()
class LocalRunner():
    def __init__(self, snapshot_config, max_cpus=1):
        """Set up snapshotting, the parallel sampler pool, and empty state.

        Args:
            snapshot_config: Object carrying snapshot_dir / snapshot_mode /
                snapshot_gap settings for the Snapshotter.
            max_cpus (int): Worker count for the legacy parallel_sampler.
        """
        self._snapshotter = Snapshotter(snapshot_config.snapshot_dir, snapshot_config.snapshot_mode, snapshot_config.snapshot_gap)
        parallel_sampler.initialize(max_cpus)
        # Propagate the global seed to the legacy sampler workers, if set.
        seed = get_seed()
        if (seed is not None):
            parallel_sampler.set_seed(seed)
        # Everything below is filled in by setup()/train(); placeholders
        # until then.
        self._has_setup = False
        self._plot = False
        self._setup_args = None
        self._train_args = None
        self._stats = ExperimentStats(total_itr=0, total_env_steps=0, total_epoch=0, last_path=None)
        self._algo = None
        self._env = None
        self._sampler = None
        self._plotter = None
        self._start_time = None
        self._itr_start_time = None
        self.step_itr = None
        self.step_path = None
        self.enable_logging = True
        self._n_workers = None
        self._worker_class = None
        self._worker_args = None
    def make_sampler(self, sampler_cls, *, seed=None, n_workers=psutil.cpu_count(logical=False), max_path_length=None, worker_class=DefaultWorker, sampler_args=None, worker_args=None):
        """Construct a sampler of *sampler_cls* bound to the current algo/env.

        Legacy BaseSampler subclasses are built directly from (algo, env);
        new-style samplers are built from a WorkerFactory.  Falls back to
        ``algo.max_path_length`` and the global seed when not given.

        Raises:
            ValueError: If the algorithm lacks a required field
                (``policy``, or ``max_path_length`` when unspecified).
        """
        if (not hasattr(self._algo, 'policy')):
            raise ValueError('If the runner is used to construct a sampler, the algorithm must have a `policy` field.')
        if (max_path_length is None):
            if hasattr(self._algo, 'max_path_length'):
                max_path_length = self._algo.max_path_length
            else:
                raise ValueError('If `sampler_cls` is specified in runner.setup, the algorithm must have a `max_path_length` field.')
        if (seed is None):
            seed = get_seed()
        if (sampler_args is None):
            sampler_args = {}
        if (worker_args is None):
            worker_args = {}
        # Legacy samplers take (algo, env); new samplers take a factory.
        if issubclass(sampler_cls, BaseSampler):
            return sampler_cls(self._algo, self._env, **sampler_args)
        else:
            return sampler_cls.from_worker_factory(WorkerFactory(seed=seed, max_path_length=max_path_length, n_workers=n_workers, worker_class=worker_class, worker_args=worker_args), agents=self._algo.policy, envs=self._env)
    def setup(self, algo, env, sampler_cls=None, sampler_args=None, n_workers=psutil.cpu_count(logical=False), worker_class=None, worker_args=None):
        """Bind an algorithm and environment to the runner.

        Must be called before train().  When *sampler_cls* is omitted it is
        taken from ``algo.sampler_cls`` (if present); likewise the worker
        class falls back to ``algo.worker_cls`` and then DefaultWorker.
        """
        self._algo = algo
        self._env = env
        self._n_workers = n_workers
        self._worker_class = worker_class
        if (sampler_args is None):
            sampler_args = {}
        if (sampler_cls is None):
            sampler_cls = getattr(algo, 'sampler_cls', None)
        if (worker_class is None):
            worker_class = getattr(algo, 'worker_cls', DefaultWorker)
        if (worker_args is None):
            worker_args = {}
        self._worker_args = worker_args
        if (sampler_cls is None):
            # No sampler requested anywhere; the algorithm samples itself.
            self._sampler = None
        else:
            self._sampler = self.make_sampler(sampler_cls, sampler_args=sampler_args, n_workers=n_workers, worker_class=worker_class, worker_args=worker_args)
        self._has_setup = True
        # Remember the arguments so snapshots can rebuild the same setup.
        self._setup_args = SetupArgs(sampler_cls=sampler_cls, sampler_args=sampler_args, seed=get_seed())
    def _start_worker(self):
        """Start legacy sampler workers and, when plotting, the live plotter."""
        # Only legacy samplers need an explicit worker startup.
        if isinstance(self._sampler, BaseSampler):
            self._sampler.start_worker()
        if self._plot:
            # Imported lazily so plotting dependencies are only required
            # when plotting is actually enabled.
            from garage.plotter import Plotter
            self._plotter = Plotter()
            self._plotter.init_plot(self.get_env_copy(), self._algo.policy)
def _shutdown_worker(self):
if (self._sampler is not None):
self._sampler.shutdown_worker()
if self._plot:
self._plotter.close()
    def obtain_samples(self, itr, batch_size=None, agent_update=None, env_update=None):
        """Collect one batch of paths from the sampler.

        Args:
            itr (int): Iteration number.
            batch_size (int or None): Falls back to the batch size given to
                train() when None.
            agent_update: Policy update for new-style samplers; defaults to
                the current policy parameter values.
            env_update: Environment update for new-style samplers.

        Returns:
            list: Collected trajectories (as a list of path dicts).

        Raises:
            ValueError: If no sampler was configured or no batch size is
                available from either argument or train().
        """
        if (self._sampler is None):
            raise ValueError('Runner was not initialized with `sampler_cls`. Either provide `sampler_cls` to runner.setup, or set `algo.sampler_cls`.')
        if ((batch_size is None) and (self._train_args.batch_size is None)):
            raise ValueError('Runner was not initialized with `batch_size`. Either provide `batch_size` to runner.train, or pass `batch_size` to runner.obtain_samples.')
        paths = None
        if isinstance(self._sampler, BaseSampler):
            # Legacy sampler API.
            paths = self._sampler.obtain_samples(itr, (batch_size or self._train_args.batch_size))
        else:
            # New sampler API: push the latest policy parameters first,
            # then convert the batch into a list of trajectories.
            if (agent_update is None):
                agent_update = self._algo.policy.get_param_values()
            paths = self._sampler.obtain_samples(itr, (batch_size or self._train_args.batch_size), agent_update=agent_update, env_update=env_update)
            paths = paths.to_trajectory_list()
        # Track cumulative environment interaction for logging/snapshots.
        self._stats.total_env_steps += sum([len(p['rewards']) for p in paths])
        return paths
def save(self, epoch):
if (not self._has_setup):
raise NotSetupError('Use setup() to setup runner before saving.')
logger.log('Saving snapshot...')
params = dict()
params['setup_args'] = self._setup_args
params['train_args'] = self._train_args
params['stats'] = self._stats
params['env'] = self._env
params['algo'] = self._algo
params['n_workers'] = self._n_workers
params['worker_class'] = self._worker_class
params['worker_args'] = self._worker_args
self._snapshotter.save_itr_params(epoch, params)
logger.log('Saved')
    def restore(self, from_dir, from_epoch='last'):
        """Restore runner state from a snapshot directory.

        Re-seeds from the saved setup args, rebuilds the sampler via
        setup(), logs the restored configuration, and returns a copy of the
        train args with ``start_epoch`` advanced past the saved epoch.

        Args:
            from_dir (str): Directory the snapshot was saved in.
            from_epoch (str or int): Which saved epoch to load ('last',
                'first', or a number, per the snapshotter).
        """
        saved = self._snapshotter.load(from_dir, from_epoch)
        self._setup_args = saved['setup_args']
        self._train_args = saved['train_args']
        self._stats = saved['stats']
        set_seed(self._setup_args.seed)
        self.setup(env=saved['env'], algo=saved['algo'], sampler_cls=self._setup_args.sampler_cls, sampler_args=self._setup_args.sampler_args, n_workers=saved['n_workers'], worker_class=saved['worker_class'], worker_args=saved['worker_args'])
        n_epochs = self._train_args.n_epochs
        last_epoch = self._stats.total_epoch
        last_itr = self._stats.total_itr
        total_env_steps = self._stats.total_env_steps
        batch_size = self._train_args.batch_size
        store_paths = self._train_args.store_paths
        pause_for_plot = self._train_args.pause_for_plot
        # Two-column summary of what was restored.
        fmt = '{:<20} {:<15}'
        logger.log(('Restore from snapshot saved in %s' % self._snapshotter.snapshot_dir))
        logger.log(fmt.format('-- Train Args --', '-- Value --'))
        logger.log(fmt.format('n_epochs', n_epochs))
        logger.log(fmt.format('last_epoch', last_epoch))
        logger.log(fmt.format('batch_size', batch_size))
        logger.log(fmt.format('store_paths', store_paths))
        logger.log(fmt.format('pause_for_plot', pause_for_plot))
        logger.log(fmt.format('-- Stats --', '-- Value --'))
        logger.log(fmt.format('last_itr', last_itr))
        logger.log(fmt.format('total_env_steps', total_env_steps))
        # Resume training right after the last completed epoch.
        self._train_args.start_epoch = (last_epoch + 1)
        return copy.copy(self._train_args)
def log_diagnostics(self, pause_for_plot=False):
    """Log wall-clock timing and cumulative stats for the current epoch.

    Args:
        pause_for_plot (bool): When plotting is enabled, block on stdin so
            the user can inspect the plot before training continues.
    """
    logger.log('Time %.2f s' % (time.time() - self._start_time))
    logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time))
    tabular.record('TotalEnvSteps', self._stats.total_env_steps)
    logger.log(tabular)
    if self._plot:
        self._plotter.update_plot(self._algo.policy, self._algo.max_path_length)
        if pause_for_plot:
            # FIX: the prompt previously contained a literal `" "` left over
            # from a broken implicit string concatenation across lines.
            input('Plotting evaluation run: Press Enter to continue...')
def train(self, n_epochs, batch_size=None, plot=False, store_paths=False, pause_for_plot=False):
    """Record the training arguments and hand control to the algorithm.

    Returns:
        The average return reported by the algorithm's train().

    Raises:
        NotSetupError: If setup() has not been called yet.
    """
    if not self._has_setup:
        raise NotSetupError('Use setup() to setup runner before training.')
    self._train_args = TrainArgs(
        n_epochs=n_epochs,
        batch_size=batch_size,
        plot=plot,
        store_paths=store_paths,
        pause_for_plot=pause_for_plot,
        start_epoch=0,
    )
    self._plot = plot
    result = self._algo.train(self)
    self._shutdown_worker()
    return result
def step_epochs(self):
    """Generator that drives the epoch loop, yielding each epoch index.

    Algorithms iterate this from their train(); after each ``yield`` the
    runner records stats, saves a snapshot, and optionally logs
    diagnostics for the epoch the algorithm just finished.
    """
    self._start_worker()
    self._start_time = time.time()
    self.step_itr = self._stats.total_itr
    self.step_path = None
    # Test hook: an environment variable may cap the number of epochs.
    n_epochs = int(os.environ.get('GARAGE_EXAMPLE_TEST_N_EPOCHS', self._train_args.n_epochs))
    logger.log('Obtaining samples...')
    for epoch in range(self._train_args.start_epoch, n_epochs):
        self._itr_start_time = time.time()
        with logger.prefix(('epoch #%d | ' % epoch)):
            (yield epoch)
            # Control returns here after the algorithm completes the epoch.
            save_path = (self.step_path if self._train_args.store_paths else None)
            self._stats.last_path = save_path
            self._stats.total_epoch = epoch
            self._stats.total_itr = self.step_itr
            self.save(epoch)
            if self.enable_logging:
                self.log_diagnostics(self._train_args.pause_for_plot)
                logger.dump_all(self.step_itr)
                tabular.clear()
def resume(self, n_epochs=None, batch_size=None, plot=None, store_paths=None, pause_for_plot=None):
    """Resume a run previously loaded with restore().

    Any argument left as None keeps the value recorded in the snapshot.

    Returns:
        The average return reported by the algorithm's train().

    Raises:
        NotSetupError: If restore() has not been called first.
    """
    if self._train_args is None:
        raise NotSetupError('You must call restore() before resume().')
    # FIX: consistently treat only None as "keep snapshot value". The old
    # `x or default` idiom silently discarded explicit falsy overrides
    # (e.g. n_epochs=0), unlike the `is not None` checks below.
    if n_epochs is not None:
        self._train_args.n_epochs = n_epochs
    if batch_size is not None:
        self._train_args.batch_size = batch_size
    if plot is not None:
        self._train_args.plot = plot
    if store_paths is not None:
        self._train_args.store_paths = store_paths
    if pause_for_plot is not None:
        self._train_args.pause_for_plot = pause_for_plot
    average_return = self._algo.train(self)
    self._shutdown_worker()
    return average_return
def get_env_copy(self):
    """Return an independent copy of the environment.

    Uses a cloudpickle round-trip so objects ordinary deepcopy cannot
    handle (closures, etc.) are still copied.
    """
    serialized = cloudpickle.dumps(self._env)
    return cloudpickle.loads(serialized)
def total_env_steps(self):
    """Total number of environment steps collected so far.

    NOTE(review): this reads like a read-only ``@property`` accessor whose
    decorator may have been lost — confirm how callers invoke it.
    """
    return self._stats.total_env_steps
class SpotterMixin():
    """Mixin adding OCR/KIE vocabularies and result visualization.

    Builds an OCR character dictionary (with <GO>/<END>, plus <PAD> in
    auto-regressive mode) and a BIO entity-tag dictionary, and decodes
    recognition/entity predictions for display via imshow_e2e_result.
    """

    def __init__(self, show_score, show_bbox, show_text, show_entity, dict_file=None, class_file=None, auto_reg=False):
        """Configure display flags and load vocabularies.

        Args:
            show_score/show_bbox/show_text/show_entity (bool): What to draw.
            dict_file: Path to the OCR character list, or falsy to skip.
            class_file: Path to the entity class list, or falsy to skip.
            auto_reg (bool): Auto-regressive decoding mode; changes the
                special-token layout of both dictionaries.
        """
        self.show_score = show_score
        self.show_bbox = show_bbox
        self.show_text = show_text
        self.show_entity = show_entity
        self.auto_reg = auto_reg
        if dict_file:
            # Character -> index; indices 0/1 (and 2 for <PAD> in auto_reg
            # mode) are reserved for special tokens.
            if (not auto_reg):
                self.ocr_dict = {'<GO>': 0, '<END>': 1, **{val: ind for (ind, val) in enumerate(load_dict(dict_file), 2)}}
            else:
                self.ocr_dict = {'<GO>': 0, '<END>': 1, '<PAD>': 2, **{val: ind for (ind, val) in enumerate(load_dict(dict_file), 3)}}
            # Inverse mapping: index -> character, used when decoding.
            self.rev_ocr_dict = dict()
            for (key, val) in self.ocr_dict.items():
                self.rev_ocr_dict[val] = key
        else:
            self.ocr_dict = None
            self.rev_ocr_dict = None
        if class_file:
            if (not self.auto_reg):
                # BIO scheme: class k maps to B- = 2k-1, I- = 2k; 'O' = 0.
                self.entity_dict = {'O': 0}
                for (ind, val) in enumerate(load_dict(class_file), 1):
                    self.entity_dict[('B-' + val)] = ((2 * ind) - 1)
                    self.entity_dict[('I-' + val)] = (2 * ind)
            else:
                # Auto-reg layout shifts by one to make room for <PAD> at 1.
                self.entity_dict = {'O': 0, '<PAD>': 1}
                self.entity_cls_dict = {'O': 0}
                for (ind, val) in enumerate(load_dict(class_file), 1):
                    self.entity_dict[('B-' + val)] = (2 * ind)
                    self.entity_dict[('I-' + val)] = ((2 * ind) + 1)
                    self.entity_cls_dict[val] = ind
            self.rev_entity_dict = dict()
            for (key, val) in self.entity_dict.items():
                self.rev_entity_dict[val] = key
        else:
            self.entity_dict = dict()
            self.rev_entity_dict = dict()

    def show_result(self, img, result, score_thr=0.5, bbox_color='red', poly_color='red', text_color='red', thickness=1, font_scale=0.05, win_name='', show=False, wait_time=0, out_file=None):
        """Decode and draw detection/recognition/KIE results onto *img*.

        Args:
            img: Image path or array (anything mmcv.imread accepts).
            result (dict): May contain 'boundary_result', 'box_result',
                'REC' (recognition) and 'KIE' (entity) entries.

        Returns:
            The annotated image (only meaningful when neither ``show`` nor
            ``out_file`` is set; a warning is emitted in that case).
        """
        img = mmcv.imread(img)
        img = img.copy()
        boundaries = None
        bboxes = None
        labels = None
        if ('boundary_result' in result.keys()):
            boundaries = result['boundary_result']
            labels = ([0] * len(boundaries))
        if ('box_result' in result.keys()):
            bboxes = result['box_result']
            if (not labels):
                labels = ([0] * len(bboxes))
        if (out_file is not None):
            # Writing to file and interactive display are mutually exclusive.
            show = False
        if (boundaries is not None):
            rec_results = result.get('REC', None)
            kie_results = result.get('KIE', None)
            (texts, entities) = (None, None)
            if rec_results:
                if (len(rec_results['indexes']) != 0):
                    # One decoded-text list per detected box.
                    texts = [[] for _ in range(len(bboxes))]
                    for (indexes, scores) in zip(rec_results['indexes'], rec_results['scores']):
                        index = indexes[0]
                        score = scores[0]
                        for i in range(index.shape[0]):
                            seq = ''
                            seq_score = 0
                            score_cnt = 0
                            if (not self.auto_reg):
                                # Greedy decode: stop at <END>, skip <GO>,
                                # otherwise append the character and
                                # accumulate its score.
                                for j in range(index.shape[1]):
                                    if (index[(i, j)] == self.ocr_dict['<END>']):
                                        seq_score += score[(i, j)]
                                        score_cnt += 1
                                        break
                                    elif (index[(i, j)] == self.ocr_dict['<GO>']):
                                        seq_score += score[(i, j)]
                                        score_cnt += 1
                                        continue
                                    else:
                                        seq_score += score[(i, j)]
                                        score_cnt += 1
                                        seq += self.rev_ocr_dict[index[(i, j)]]
                                texts[i].append([seq, (seq_score / score_cnt)])
                            else:
                                # Auto-reg decode: a <PAD> token marks the
                                # whole sequence as invalid.
                                invalid_flag = False
                                for j in range(index.shape[1]):
                                    if (index[(i, j)] == self.ocr_dict['<END>']):
                                        seq_score += score[(i, j)]
                                        score_cnt += 1
                                        break
                                    elif (index[(i, j)] == self.ocr_dict['<GO>']):
                                        seq_score += score[(i, j)]
                                        score_cnt += 1
                                        continue
                                    elif (index[(i, j)] == self.ocr_dict['<PAD>']):
                                        invalid_flag = True
                                        break
                                    else:
                                        seq_score += score[(i, j)]
                                        score_cnt += 1
                                        seq += self.rev_ocr_dict[index[(i, j)]]
                                if (not invalid_flag):
                                    texts[i].append([seq, (seq_score / score_cnt)])
                                else:
                                    texts[i].append([' ', 0.0])
            if kie_results:
                if (len(kie_results['indexes']) != 0):
                    entities = [[] for _ in range(len(bboxes))]
                    for (idx, (indexes, scores)) in enumerate(zip(kie_results['indexes'], kie_results['scores'])):
                        index = indexes[0]
                        score = scores[0]
                        for i in range(index.shape[0]):
                            if (not self.auto_reg):
                                tags = []
                                tag_score = 0
                                txt_len = len(texts[i][idx][0])
                                score_cnt = 0
                                # If the text fills the whole tag row there
                                # is no room for the shifted tags; fall back
                                # to all-'O'.
                                if (txt_len == len(index[i])):
                                    tags = ['O' for _ in range(len(index[i]))]
                                else:
                                    # Tags are offset by one (position 0 is
                                    # presumably a start token — TODO confirm).
                                    for j in range(1, (txt_len + 1)):
                                        tags.append(self.rev_entity_dict[index[(i, j)]])
                                        tag_score += score[(i, j)]
                                        score_cnt += 1
                                if (score_cnt == 0):
                                    score_cnt = 1
                                entities[i].append([','.join(tags), (tag_score / score_cnt)])
                            else:
                                tags = []
                                tag_score = 0
                                txt_len = len(texts[i][idx][0])
                                score_cnt = 0
                                if (txt_len == len(index[i])):
                                    tags = ['O' for _ in range(len(index[i]))]
                                else:
                                    for j in range(txt_len):
                                        tags.append(self.rev_entity_dict[index[(i, j)]])
                                        tag_score += score[(i, j)]
                                        score_cnt += 1
                                if (score_cnt == 0):
                                    score_cnt = 1
                                # NOTE(review): this branch appends the raw
                                # tag list while the other joins with ',' —
                                # confirm the consumer handles both shapes.
                                entities[i].append([tags, (tag_score / score_cnt)])
            imshow_e2e_result(img, boundaries, labels, score_thr=score_thr, boundary_color=poly_color, text_color=text_color, thickness=thickness, font_scale=font_scale, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file, show_score=self.show_score, bboxes=bboxes, show_bbox=self.show_bbox, bbox_color=bbox_color, show_text=self.show_text, texts=texts, show_entity=self.show_entity, entities=entities)
        if (not (show or out_file)):
            warnings.warn('show==False and out_file is not specified, result image will be returned')
            return img
class SimulationActorState(AbstractState):
    """Kinematic state container for one simulated actor.

    Holds the actor's handle plus position and velocity sample lists,
    which start out empty.
    """

    def __init__(self, handle):
        # No samples recorded yet.
        self.position = []
        self.velocity = []
        self.handle = handle
def global_step(scope=None):
    """Read the current global decay step from *scope*.

    Args:
        scope: A variable scope exposing ``find_var``; defaults to the
            fluid global scope.

    Returns:
        The first element of the '_DECAY_' tensor, or 0 when the variable
        does not exist yet.
    """
    if scope is None:
        scope = fluid.global_scope()
    decay_var = scope.find_var('_DECAY_')
    if decay_var:
        return np.array(decay_var.get_tensor())[0]
    return 0
def combine_dataset_datapoints(dataset_dicts: Dict[(str, List[Datapoint])], vg_imid2data: Dict[(int, Dict)], coco_imid2data: Dict[(str, Dict)], coco_path: str) -> Tuple[(Dict[(str, List[Datapoint])], Dict[(str, List[Datapoint])])]:
    """Merge per-dataset datapoints into COCO-keyed and VG-keyed pools.

    Filters out images flagged unsafe (PhraseCut/GQA/RefExp pickles) and
    every COCO val2014 image, and rescales VG boxes to COCO image sizes
    when a VG image has a COCO counterpart.

    Returns:
        (coco_all, vg_all): image-id -> list of datapoints for each pool.
    """
    coco_all_unsafe = set()
    vg_all_unsafe = set()
    # COCO val2014 ids are excluded from training data below.
    with open(f'{coco_path}/annotations/instances_val2014.json', 'r') as f:
        coco_val = json.load(f)
    coco_val_ids = []
    for item in coco_val['images']:
        coco_val_ids.append(item['id'])
    # NOTE(review): pickle.load on local files; safe only if these files
    # are trusted artifacts of this project.
    with open('phrase_cut_unsafe_ids.pkl', 'rb') as f:
        pc_unsafe = pickle.load(f)
    with open('refexp_all_unsafe_ids.pkl', 'rb') as f:
        refexp_unsafe = pickle.load(f)
    with open('gqa_unsafe_ids.pkl', 'rb') as f:
        gqa_unsafe = pickle.load(f)
    vg_image_unsafe = set.union(set(pc_unsafe), set(gqa_unsafe))
    # Split unsafe VG ids into the COCO pool (when a coco_id exists) or the
    # pure-VG pool. Ids starting with 'n' are skipped (non-numeric ids).
    for id in vg_image_unsafe:
        if (str(id)[0] == 'n'):
            continue
        if (vg_imid2data[int(id)]['coco_id'] is not None):
            coco_all_unsafe.add(str(vg_imid2data[int(id)]['coco_id']))
        else:
            vg_all_unsafe.add(str(id))
    for id in refexp_unsafe:
        coco_all_unsafe.add(str(id))
    for id in coco_val_ids:
        coco_all_unsafe.add(str(id))
    coco_all = defaultdict(list)
    vg_all = defaultdict(list)
    for (dname, datapoint_list) in dataset_dicts.items():
        if ((dname == 'gqa') or (dname == 'vg')):
            for datapoint in datapoint_list:
                image_id = int(datapoint.image_id)
                if (vg_imid2data[image_id]['coco_id'] is not None):
                    # VG image also exists in COCO: key by the COCO id and
                    # rescale boxes from the VG to the COCO image size.
                    image_id = str(vg_imid2data[image_id]['coco_id'])
                    if (image_id not in coco_all_unsafe):
                        coco_all[image_id].append(rescale_boxes(datapoint, (vg_imid2data[int(datapoint.image_id)]['height'], vg_imid2data[int(datapoint.image_id)]['width']), (coco_imid2data[image_id]['height'], coco_imid2data[image_id]['width'])))
                else:
                    image_id = str(image_id)
                    if (image_id not in vg_all_unsafe):
                        vg_all[image_id].append(datapoint)
        else:
            # Non-VG datasets are assumed to already be COCO-keyed.
            for datapoint in datapoint_list:
                image_id = str(datapoint.image_id)
                if (image_id not in coco_all_unsafe):
                    coco_all[image_id].append(datapoint)
    return (coco_all, vg_all)
class TrainerKnapsack(TrainerBase):
    """Trainer specialization for the 0/1 knapsack problem.

    NOTE(review): several methods omit ``self`` (get_reward_name,
    is_reward_positive, get_observation_type, generate_problem,
    use_augmentations, get_augmentations) — they read as ``@staticmethod``s
    whose decorators were lost; confirm against TrainerBase before calling
    them on instances.
    """
    def get_reward_name() -> str:
        # Reward is the total value of the packed items.
        return 'value_items'
    def is_reward_positive() -> bool:
        return True
    def get_observation_type() -> Type[Observation]:
        return Observation
    def init_encoder(self, num_layers, name) -> EncoderBase:
        return KnapsackEncoder(num_layers, name)
    def init_decoder(self, name) -> DecoderBase:
        return KnapsackDecoder(name)
    def generate_problem(key: PRNGKey, problem_size: jnp.int32) -> Array:
        # Delegates to the module-level generate_problem helper.
        return generate_problem(key, problem_size)
    def use_augmentations() -> bool:
        return False
    def get_augmentations(problem: Array) -> Array:
        # No augmentations: just add a leading singleton axis.
        return problem[None]
    def has_symmetric_starting_points(self) -> bool:
        return False
def identifier_everything_sampler(ann: Annotation) -> List[Tuple[(torch.IntTensor, Tuple[(torch.IntTensor, torch.IntTensor, torch.IntTensor)], int)]]:
    """Build one (tokens, (i, c, o), label) sample per sentence of *ann*.

    The label is 1 when the sentence carries an 'evidence' mark, else 0.
    """
    samples = []
    for tokens, sentence in zip(ann.tokenized_sentences, ann.doc.sentences):
        ico = (torch.IntTensor(ann.i), torch.IntTensor(ann.c), torch.IntTensor(ann.o))
        is_evidence = 1 if (sentence.labels is not None and sentence.labels['evidence'] == 1) else 0
        samples.append((tokens, ico, is_evidence))
    return samples
def test_kernel_eval():
    """Smoke test: a ScoredKernel repr string round-trips through the
    parser and can be pretty-printed without raising."""
    serialized = 'ScoredKernel(k_opt=ProductKernel([ MaskKernel(ndim=4, active_dimension=1, base_kernel=PP0Kernel(lengthscale=-3.776833, output_variance=-3.365662)), MaskKernel(ndim=4, active_dimension=2, base_kernel=CubicKernel(offset=-1.149225, output_variance=-0.604651)) ]), nll=4546.591426, laplace_nle=4531.678317, bic_nle=9108.234692, noise=[-2.])'
    kernel = fk.repr_string_to_kernel(serialized)
    kernel.pretty_print()
def load_tsp_test_data(num_cities: int):
    """Load the pickled TSP evaluation instances for *num_cities* cities.

    Args:
        num_cities: Problem size; one of 100, 125, 150, 200.

    Returns:
        np.ndarray: All instances stacked along a new leading axis.

    Raises:
        RuntimeError: If no dataset exists for *num_cities*.
    """
    dataset_files = {
        100: 'experiments/evaluation_data/tsp100_test_seed1234.pkl',
        125: 'experiments/evaluation_data/tsp125_test_small_seed1235.pkl',
        150: 'experiments/evaluation_data/tsp150_test_small_seed1235.pkl',
        200: 'experiments/evaluation_data/tsp200_test_small_seed1235.pkl',
    }
    try:
        dataset_filename = dataset_files[num_cities]
    except KeyError:
        # FIX: the original message said "Knapsack dataset" — copy-paste
        # error; this function loads TSP data.
        raise RuntimeError(f'Error: There is no {num_cities}-city TSP dataset.') from None
    with open(dataset_filename, 'rb') as f:
        dataset = pkl.load(f)
    return np.stack(dataset)
def diapreresnet110_svhn(num_classes=10, **kwargs):
    """Build a DIA-PreResNet-110 model configured for SVHN.

    Args:
        num_classes (int): Number of output classes (SVHN has 10).
        **kwargs: Forwarded to get_diapreresnet_cifar.
    """
    return get_diapreresnet_cifar(
        num_classes=num_classes,
        blocks=110,
        bottleneck=False,
        model_name='diapreresnet110_svhn',
        **kwargs)
class ImageNet(Dataset):
    """Dataset wrapper around ``datasets.ImageNet`` with optional image and
    target transforms plus a top-k evaluator."""

    def __init__(self, root, train=True, transform=None, target_transform=None, top_k=(1, 5), keep_rgb=False):
        self.data_set = datasets.ImageNet(root, split=('train' if train else 'val'))
        # Each entry of .classes is a tuple of synonyms; flatten each tuple
        # to a single comma-joined class string.
        self.classes = [','.join(class_tuple) for class_tuple in self.data_set.classes]
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.keep_rgb = keep_rgb
        self._update_evaluator(top_k)

    def __getitem__(self, index: int):
        """Return the (possibly transformed) image/target pair at *index*."""
        image, target = self.data_set.__getitem__(index)
        image = default_converter(image, rgb=self.keep_rgb)
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (image, target)

    def __len__(self) -> int:
        return len(self.data_set)

    def _update_evaluator(self, top_k):
        # Rebuild the evaluator against the current class list.
        self.evaluator = GeneralEvaluator(self.classes, top_k=top_k)

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.root})'
def resnet56_cifar100(num_classes=100, **kwargs):
    """Build a ResNet-56 model configured for CIFAR-100.

    Args:
        num_classes (int): Number of output classes (CIFAR-100 has 100).
        **kwargs: Forwarded to get_resnet_cifar.
    """
    return get_resnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name='resnet56_cifar100',
        **kwargs)
def generate_info_list(data_path, save_dir, psf_type='ZTE_new'):
    """Write '<image> <psf-code-path>' list files for real and synthetic data.

    Produces one list for real data (always PSF position 5) and one list
    per PSF position (1-9) and split (train/test) for synthetic data.

    NOTE(review): the printed 'Gererating' messages contain a typo, left
    unchanged here because they are runtime output. The bare ``assert``
    validation below is stripped under ``python -O``.
    """
    syn_path = os.path.join(data_path, 'synthetic_data/input')
    real_path = os.path.join(data_path, 'real_data/input')
    code_path = os.path.join(data_path, 'PSF/kernel_code')
    os.makedirs(save_dir, exist_ok=True)
    real_save_path = os.path.join(save_dir, 'real_ZTE_list.txt')
    # Real captures are always paired with PSF code position 5.
    curr_psf = os.path.abspath(os.path.join(code_path, '{}_code_{}.npy'.format(psf_type, '5')))
    assert os.path.isfile(curr_psf), 'PSF Code file not exists.'
    img_list = sorted(os.listdir(real_path))
    assert (len(img_list) != 0), "No image files found in '{}'.".format(real_path)
    print('Gererating info list for real data ...')
    with open(real_save_path, 'w') as f:
        for path in img_list:
            f.write((((path + ' ') + curr_psf) + ' \n'))
    syn_save_dir = os.path.join(save_dir, psf_type)
    os.makedirs(syn_save_dir, exist_ok=True)
    # Synthetic data: one list per (PSF position, split) combination.
    for subset in ['train', 'test']:
        for pos in range(1, 10):
            syn_save_path = os.path.join(syn_save_dir, '{}_code_{}_{}.txt'.format(psf_type, pos, subset))
            print('Gererating info list {} ...'.format(syn_save_path))
            curr_psf = os.path.abspath(os.path.join(code_path, '{}_code_{}.npy'.format(psf_type, pos)))
            assert os.path.isfile(curr_psf), 'PSF Code file not exists.'
            in_path = os.path.join(syn_path, '{}_{}'.format(psf_type, pos), subset)
            img_list = [f for f in sorted(os.listdir(in_path)) if f.endswith('.npy')]
            assert (len(img_list) != 0), "No image files found in '{}'.".format(in_path)
            with open(syn_save_path, 'w') as f:
                for path in tqdm(img_list):
                    f.write((((path + ' ') + curr_psf) + ' \n'))
class SemanticBranch(BaseModule):
    """Semantic (context) branch of a BiSeNet-style segmentation backbone.

    Builds a stem stage, intermediate GELayer stages, and a final context
    embedding block; forward() returns the output of every stage.
    """
    def __init__(self, semantic_channels=(16, 32, 64, 128), in_channels=3, exp_ratio=6, init_cfg=None):
        """
        Args:
            semantic_channels (tuple): Output channels of each stage.
            in_channels (int): Channels of the input image.
            exp_ratio (int): Expansion ratio passed to each GELayer.
            init_cfg: Initialization config forwarded to BaseModule.
        """
        super(SemanticBranch, self).__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.semantic_channels = semantic_channels
        # Stage names, in forward order; modules are registered by name.
        self.semantic_stages = []
        for i in range(len(semantic_channels)):
            stage_name = f'stage{(i + 1)}'
            self.semantic_stages.append(stage_name)
            if (i == 0):
                # First stage: stem downsampling from the raw image.
                self.add_module(stage_name, StemBlock(self.in_channels, semantic_channels[i]))
            elif (i == (len(semantic_channels) - 1)):
                # Last stage: one strided GELayer followed by three more.
                self.add_module(stage_name, nn.Sequential(GELayer(semantic_channels[(i - 1)], semantic_channels[i], exp_ratio, 2), GELayer(semantic_channels[i], semantic_channels[i], exp_ratio, 1), GELayer(semantic_channels[i], semantic_channels[i], exp_ratio, 1), GELayer(semantic_channels[i], semantic_channels[i], exp_ratio, 1)))
            else:
                # Middle stages: strided GELayer plus one refinement layer.
                self.add_module(stage_name, nn.Sequential(GELayer(semantic_channels[(i - 1)], semantic_channels[i], exp_ratio, 2), GELayer(semantic_channels[i], semantic_channels[i], exp_ratio, 1)))
        # Context embedding block appended after the last stage.
        self.add_module(f'stage{len(semantic_channels)}_CEBlock', CEBlock(semantic_channels[(- 1)], semantic_channels[(- 1)]))
        self.semantic_stages.append(f'stage{len(semantic_channels)}_CEBlock')
    def forward(self, x):
        """Run all stages in order; return each stage's output."""
        semantic_outs = []
        for stage_name in self.semantic_stages:
            semantic_stage = getattr(self, stage_name)
            x = semantic_stage(x)
            semantic_outs.append(x)
        return semantic_outs
_module()
class FPN(BaseModule):
    """Feature Pyramid Network neck.

    Builds lateral 1x1 convs plus 3x3 output convs over a range of
    backbone levels, top-down upsampling-and-add fusion, and optional
    extra levels produced by max-pooling or strided convs.
    """
    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, extra_convs_on_inputs=False, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest'), init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')):
        """
        Args:
            in_channels (list[int]): Channels of each backbone level.
            out_channels (int): Channels of every FPN output.
            num_outs (int): Number of output feature maps.
            start_level/end_level (int): Backbone level range to build on;
                end_level == -1 means "through the last level".
            add_extra_convs (bool | str): Whether/where to add extra strided
                convs ('on_input', 'on_lateral', 'on_output').
        """
        super(FPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        self.upsample_cfg = upsample_cfg.copy()
        if (end_level == (- 1)):
            # Use every backbone level from start_level to the end.
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # Explicit end level: no extra levels are allowed then.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            assert (add_extra_convs in ('on_input', 'on_lateral', 'on_output'))
        elif add_extra_convs:
            # Legacy bool flag: translate to the string form.
            if extra_convs_on_inputs:
                self.add_extra_convs = 'on_input'
            else:
                self.add_extra_convs = 'on_output'
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral conv to unify channels; 3x3 conv to smooth the
            # fused feature map.
            l_conv = ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None), act_cfg=act_cfg, inplace=False)
            fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # Extra strided convs when more outputs than backbone levels.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (self.add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and (self.add_extra_convs == 'on_input')):
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
                self.fpn_convs.append(extra_fpn_conv)
    # NOTE(review): this looks like the remnant of an @auto_fp16-style
    # decorator mangled during extraction — confirm against upstream.
    _fp16()
    def forward(self, inputs):
        """Fuse backbone features top-down and return num_outs maps."""
        assert (len(inputs) == len(self.in_channels))
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        # Top-down path: upsample the coarser level and add it in.
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            if ('scale_factor' in self.upsample_cfg):
                laterals[(i - 1)] = (laterals[(i - 1)] + resize(laterals[i], **self.upsample_cfg))
            else:
                prev_shape = laterals[(i - 1)].shape[2:]
                laterals[(i - 1)] = (laterals[(i - 1)] + resize(laterals[i], size=prev_shape, **self.upsample_cfg))
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # No extra convs configured: downsample by max-pooling.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # Pick the source the first extra conv consumes.
                if (self.add_extra_convs == 'on_input'):
                    extra_source = inputs[(self.backbone_end_level - 1)]
                elif (self.add_extra_convs == 'on_lateral'):
                    extra_source = laterals[(- 1)]
                elif (self.add_extra_convs == 'on_output'):
                    extra_source = outs[(- 1)]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
_function('log')
class AutogradLog(AutogradFunction):
    """Autograd support for the elementwise natural logarithm."""

    def forward(ctx, input):
        """Compute log(input), stashing the input for the backward pass."""
        ctx.save_for_backward(input)
        return input.log()

    def backward(ctx, grad_output):
        """d/dx log(x) = 1/x, so divide the incoming gradient by the input."""
        (saved_input,) = ctx.saved_tensors
        return grad_output.div(saved_input)
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a per-channel spatial conv followed
    by a 1x1 pointwise conv that mixes channels."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''):
        super(SeparableConv2d, self).__init__()
        # groups=in_channels makes the first conv operate channel-wise.
        self.depthwise_conv2d = create_conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels)
        self.pointwise_conv2d = create_conv2d(in_channels, out_channels, kernel_size=1, padding=0)

    def forward(self, x):
        return self.pointwise_conv2d(self.depthwise_conv2d(x))
class SKMotionEncoder6_Deep_nopool_res(nn.Module):
    """Motion encoder fusing forward/backward correlation volumes and flows
    into a single feature map (the raw flow is re-appended at the end)."""
    def __init__(self, args):
        super().__init__()
        # Channels of one correlation volume: (2r+1)^2 per cost head per level.
        self.cor_planes = cor_planes = (((((args.corr_radius * 2) + 1) ** 2) * args.cost_heads_num) * args.corr_levels)
        self.convc1 = PCBlock4_Deep_nopool_res(cor_planes, 128, k_conv=args.k_conv)
        # 256 = 128 (forward corr) + 128 (backward corr) after concatenation.
        self.convc2 = PCBlock4_Deep_nopool_res(256, 192, k_conv=args.k_conv)
        # 4 input channels = forward flow (2) + backward flow (2).
        self.convf1_ = nn.Conv2d(4, 128, 1, 1, 0)
        self.convf2 = PCBlock4_Deep_nopool_res(128, 64, k_conv=args.k_conv)
        # Output has 128-4 channels so that appending the 4 flow channels
        # in forward() yields 128 total.
        self.conv = PCBlock4_Deep_nopool_res((64 + 192), (128 - 4), k_conv=args.k_conv)
    def forward(self, forward_flow, backward_flow, forward_corr, backward_corr):
        # Encode both correlation volumes with the shared convc1.
        cor = F.gelu(torch.cat([self.convc1(forward_corr), self.convc1(backward_corr)], dim=1))
        cor = self.convc2(cor)
        flow = torch.cat([forward_flow, backward_flow], dim=1)
        flo = self.convf1_(flow)
        flo = self.convf2(flo)
        cor_flo = torch.cat([cor, flo], dim=1)
        out = self.conv(cor_flo)
        # Re-append the raw flow so downstream layers see it directly.
        return torch.cat([out, flow], dim=1)
class MultiprocessingPdb(pdb.Pdb):
    """A pdb subclass usable from forked worker processes.

    Re-opens the parent's stdin (via the module-level ``_stdin_fd``) so a
    breakpoint hit in a worker can still read debugger commands; the
    module-level ``_stdin_lock`` serializes debugger sessions.
    """
    def __init__(self):
        # nosigint: don't install a SIGINT handler (unsafe off the main
        # thread / in subprocesses).
        pdb.Pdb.__init__(self, nosigint=True)
    def _cmdloop(self):
        stdin_bak = sys.stdin
        with _stdin_lock:
            try:
                if (_stdin_fd is not None):
                    if (not _stdin[0]):
                        # Lazily open the saved fd once and cache the file
                        # object in the module-level _stdin list.
                        _stdin[0] = os.fdopen(_stdin_fd)
                    sys.stdin = _stdin[0]
                self.cmdloop()
            finally:
                # Always restore the process's original stdin.
                sys.stdin = stdin_bak
def strip_ddp_state_dict(state_dict):
    """Remove the 'module.' prefix DistributedDataParallel adds to keys.

    Args:
        state_dict: A mapping of parameter names to tensors (any dict-like
            type; the same type is returned).

    Returns:
        A new mapping of the same type with unprefixed keys.
    """
    prefix = 'module.'
    stripped = type(state_dict)()
    for key, value in state_dict.items():
        new_key = key[len(prefix):] if key.startswith(prefix) else key
        stripped[new_key] = value
    return stripped
def dboxes300_coco():
    """Construct the SSD300 default (prior) boxes used for COCO."""
    image_size = 300
    feature_map_sizes = [38, 19, 10, 5, 3, 1]
    strides = [8, 16, 32, 64, 100, 300]
    anchor_scales = [21, 45, 99, 153, 207, 261, 315]
    ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    return DefaultBoxes(image_size, feature_map_sizes, strides, anchor_scales, ratios)
class ClosableQueue():
    """A bounded blocking queue that can be closed.

    After close(), put() raises immediately; get() drains remaining items
    and raises once the queue is empty. Blocked producers/consumers are
    woken by close() so they can observe the closed state.
    """
    def __init__(self, maxsize: int=1000):
        self._maxsize = maxsize
        self._queue = deque()
        # Both conditions share one mutex so state checks are atomic.
        self._mutex = Lock()
        self._not_empty = Condition(self._mutex)
        self._not_full = Condition(self._mutex)
        self._closed = False
    def put(self, item):
        """Block until there is room, then append *item*.

        Raises:
            ClosedException: If the queue is (or becomes) closed.
        """
        with self._not_full:
            if self._closed:
                raise ClosedException('This queue has been closed, no more items can be added.')
            while (len(self._queue) >= self._maxsize):
                self._not_full.wait()
                # Re-check after waking: close() may have happened.
                if self._closed:
                    raise ClosedException('This queue has been closed, no more items can be added.')
            self._queue.append(item)
            self._not_empty.notify()
    def get(self):
        """Block until an item is available and return it.

        Raises:
            ClosedException: If the queue is closed and fully drained.
        """
        with self._not_empty:
            if (self._closed and (len(self._queue) == 0)):
                raise ClosedException('This queue has been closed and is empty, no more items can be retrieved.')
            while (len(self._queue) == 0):
                self._not_empty.wait()
                # Re-check after waking: close() may have happened.
                if (self._closed and (len(self._queue) == 0)):
                    raise ClosedException('This queue has been closed and is empty, no more items can be retrieved.')
            item = self._queue.popleft()
            self._not_full.notify()
            return item
    def close(self):
        """Mark the queue closed and wake all blocked producers/consumers."""
        with self._mutex:
            self._closed = True
            self._not_empty.notify_all()
            self._not_full.notify_all()
def brew_install(modules):
    """Install each Homebrew formula named in *modules*.

    Args:
        modules: Iterable of formula names to install.

    Note:
        Each name is interpolated into a shell command, so callers must
        pass trusted names only (consider subprocess.run with a list to
        avoid shell interpretation entirely).
    """
    # Iterate directly instead of indexing via range(len(...)).
    for module in modules:
        os.system('brew install %s' % module)
def build_profiler(name):
    """Construct a profiler by name.

    Args:
        name: 'inference', 'pytorch', or None (pass-through profiler).

    Raises:
        ValueError: For any other name.
    """
    if name is None:
        return PassThroughProfiler()
    if name == 'inference':
        return InferenceProfiler()
    if name == 'pytorch':
        # Imported lazily so pytorch_lightning is only required when used.
        from pytorch_lightning.profiler import PyTorchProfiler
        return PyTorchProfiler(use_cuda=True, profile_memory=True, row_limit=100)
    raise ValueError(f'Invalid profiler: {name}')
class NERReporter(IndependentLabelReporter):
    """Reporter adding entity-level F1 on top of label accuracy/entropy."""
    yaml_tag = '!NERReporter'
    def __init__(self, args, reporting_root, reporting_methods, ner_task):
        self.args = args
        self.reporting_methods = reporting_methods
        # Name -> bound method dispatch for the configured reports.
        self.reporting_method_dict = {'label_accuracy': self.report_label_values, 'v_entropy': self.report_v_entropy, 'ner_f1': self.report_ner_f1}
        self.reporting_root = reporting_root
        # Reports permitted on the test split.
        self.test_reporting_constraint = {'label_accuracy', 'v_entropy', 'ner_f1'}
        self.ner_task = ner_task
    def report_ner_f1(self, prediction_batches, dataset, split_name):
        """Compute entity-level P/R/F1 and write each to its own file.

        Args:
            prediction_batches: Per-batch label-distribution tensors.
            dataset: Iterable of (_, label_batch, sentences) batches
                aligned with prediction_batches.
            split_name (str): Prefix for the output files (.f1/.precision/.recall).
        """
        string_predictions = []
        string_labels = []
        for (prediction_batch, (_, label_batch, sentences)) in zip(prediction_batches, dataset):
            prediction_batch = prediction_batch.to(self.args['device'])
            # Argmax over the label dimension -> predicted label ints.
            prediction_batch = torch.argmax(prediction_batch, 2)
            for (prediction_sentence, label_sentence) in zip(prediction_batch, label_batch):
                # '-' marks positions to exclude (presumably padding —
                # TODO confirm against category_string_of_label_int).
                string_predictions.append(list(filter((lambda x: (x != '-')), [self.ner_task.category_string_of_label_int(x) for x in prediction_sentence])))
                string_labels.append(list(filter((lambda x: (x != '-')), [self.ner_task.category_string_of_label_int(x) for x in label_sentence])))
        (precision, recall, f1) = score_by_entity(string_predictions, string_labels)
        with open(os.path.join(self.reporting_root, (split_name + '.f1')), 'w') as fout:
            fout.write((str(f1) + '\n'))
        with open(os.path.join(self.reporting_root, (split_name + '.precision')), 'w') as fout:
            fout.write((str(precision) + '\n'))
        with open(os.path.join(self.reporting_root, (split_name + '.recall')), 'w') as fout:
            fout.write((str(recall) + '\n'))
def cluster_bibliography(input_tuple):
    """Cluster a document's bibliography content blocks into 'bib_block's.

    Reads ``<doc>-<in_tag>.json`` from the source tree, groups content
    blocks under 'bibliography' roots, and writes ``<doc>-<out_tag>.json``
    into the destination tree (which must already contain the doc folder).

    Args:
        input_tuple: (doc, in_tag, out_tag, src_dir, dest_dir) — packed as
            one tuple so this can be mapped over a multiprocessing pool.

    Returns:
        The document name, so callers can track completion.
    """
    doc, in_tag, out_tag, src_dir, dest_dir = input_tuple
    src_annotations_file = os.path.join(src_dir, doc, '{}-{}.json'.format(doc, in_tag))
    with open(src_annotations_file) as f:
        annotations = json.load(f)
    annotations = cluster_content_blocks_under_categories(annotations, root_types_to_consider=['bibliography'], new_block_category='bib_block')
    dest_annotations_fullpath = os.path.join(dest_dir, doc, '{}-{}.json'.format(doc, out_tag))
    # sort_keys + indent for stable, diff-friendly output files.
    with open(dest_annotations_fullpath, 'w') as out_file:
        json.dump(annotations, out_file, indent=1, sort_keys=True)
    return doc
def cifar_resnet18(output_dim):
    """Build a CIFAR-style ResNet-18 whose final FC layer outputs *output_dim*."""
    backbone = _base_resnet18_cifar()
    return _replace_fc(backbone, output_dim)
((not FX_MODE), 'Unsupported Fx Mode with PyTorch Version Below 1.8')
class TestPytorchFXAdaptor(unittest.TestCase):
def tearDownClass(self):
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_fx_quant(self):
for approach in ['qat', 'static']:
model_origin = resnet18()
dataset = Datasets('pytorch')['dummy']((10, 3, 224, 224), label=True)
dataloader = DATALOADERS['pytorch'](dataset)
if (approach == 'qat'):
model = copy.deepcopy(model_origin)
conf = QuantizationAwareTrainingConfig(op_name_dict=qat_op_name_list)
compression_manager = prepare_compression(model, conf)
compression_manager.callbacks.on_train_begin()
model = compression_manager.model
q_model = train_func(model)
compression_manager.callbacks.on_train_end()
compression_manager.save('./saved')
else:
conf = PostTrainingQuantConfig(op_name_dict=ptq_fx_op_name_list)
conf.example_inputs = torch.randn([1, 3, 224, 224])
set_workspace('./saved')
q_model = quantization.fit(model_origin, conf, calib_dataloader=dataloader, eval_func=eval_func)
q_model.save('./saved')
model_fx = load('./saved', model_origin)
self.assertTrue(('quantize' in str(type(q_model.model.fc))))
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
shutil.rmtree('./saved', ignore_errors=True)
for approach in ['qat', 'static']:
model_origin = M()
dataset = Datasets('pytorch')['dummy']((100, 3, 224, 224), label=True)
dataloader = DATALOADERS['pytorch'](dataset)
if (approach == 'qat'):
model = copy.deepcopy(model_origin)
conf = QuantizationAwareTrainingConfig(op_name_dict=qat_op_name_list)
compression_manager = prepare_compression(model, conf)
q_model = fit(compression_manager=compression_manager, train_func=train_func, eval_func=eval_func)
compression_manager.save('./saved')
else:
conf = PostTrainingQuantConfig(op_name_dict=ptq_fx_op_name_list)
q_model = quantization.fit(model_origin, conf, calib_dataloader=dataloader)
q_model.save('./saved')
model_fx = load('./saved', model_origin)
self.assertTrue(('quantize' in str(type(model_fx.conv))))
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
shutil.rmtree('./saved', ignore_errors=True)
def test_quantize_with_metric(self):
model_origin = resnet18()
dataset = Datasets('pytorch')['dummy']((1, 3, 224, 224))
dataloader = DATALOADERS['pytorch'](dataset)
conf = PostTrainingQuantConfig()
q_model = quantization.fit(model_origin, conf, calib_dataloader=dataloader, eval_dataloader=dataloader, eval_metric=Metric(name='topk', k=1))
self.assertTrue(('quantize' in str(type(q_model.model.fc))))
def test_quantize_with_calib_func(self):
model_origin = resnet18()
conf = PostTrainingQuantConfig()
q_model = quantization.fit(model_origin, conf, calib_func=eval_func, eval_func=eval_func)
self.assertTrue(('quantize' in str(type(q_model.model.fc))))
((PT_VERSION < Version('1.9.0').release), 'Please use PyTroch 1.9 or higher version for dynamic quantization with pytorch_fx backend')
def test_fx_dynamic_quant(self):
origin_model = LSTMModel(ntoken=10, ninp=512, nhid=256, nlayers=5)
origin_model.eval()
conf = PostTrainingQuantConfig(approach='dynamic', op_name_dict=ptq_fx_op_name_list)
set_workspace('./saved')
q_model = quantization.fit(copy.deepcopy(origin_model), conf)
q_model.save('./saved')
model_fx = load('./saved', copy.deepcopy(origin_model))
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
state_dict = torch.load('./saved/best_model.pt')
tune_cfg = state_dict.pop('best_configure')
import yaml
with open('./saved/best_configure.yaml', 'w') as f:
yaml.dump(tune_cfg, f, default_flow_style=False)
torch.save(state_dict, './saved/best_model_weights.pt')
os.remove('./saved/best_model.pt')
model_fx = load('./saved', copy.deepcopy(origin_model))
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
history_file = './saved/history.snapshot'
model_fx_recover = recover(origin_model, history_file, 0)
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
def test_default_dynamic_quant(self):
def eval_func(model):
return 1
for approach in ['qat', 'auto']:
model_origin = LSTMModel(ntoken=10, ninp=512, nhid=256, nlayers=2)
dataset = Datasets('pytorch')['dummy']((3, 10))
dataloader = DATALOADERS['pytorch'](dataset)
if (approach == 'qat'):
model = copy.deepcopy(model_origin)
conf = QuantizationAwareTrainingConfig(op_name_dict=qat_op_name_list)
compression_manager = prepare_compression(model, conf)
compression_manager.callbacks.on_train_begin()
model = compression_manager.model.model
train_func(model)
compression_manager.callbacks.on_train_end()
self.assertTrue(('quantize' in str(type(model.encoder))))
self.assertTrue(('quantize' in str(type(model.rnn))))
else:
conf = PostTrainingQuantConfig(approach='auto')
q_model = quantization.fit(model_origin, conf, calib_dataloader=dataloader)
self.assertTrue(('quantize' in str(type(q_model.model.encoder))))
self.assertTrue(('quantize' in str(type(q_model.model.rnn))))
def test_fx_sub_module_quant(self):
for approach in ['qat', 'static']:
model_origin = DynamicControlModel()
dataset = Datasets('pytorch')['dummy']((1, 3, 224, 224))
dataloader = DATALOADERS['pytorch'](dataset)
if (approach == 'qat'):
model = copy.deepcopy(model_origin)
conf = QuantizationAwareTrainingConfig(op_name_dict=qat_op_name_list)
compression_manager = prepare_compression(model, conf)
compression_manager.callbacks.on_train_begin()
model = compression_manager.model
q_model = train_func(model)
compression_manager.callbacks.on_train_end()
compression_manager.save('./saved')
else:
set_workspace('./saved')
conf = PostTrainingQuantConfig()
q_model = quantization.fit(model_origin, conf, calib_dataloader=dataloader)
q_model.save('./saved')
model_fx = load('./saved/best_model.pt', model_origin, **{'dataloader': torch.utils.data.DataLoader(dataset)})
self.assertTrue(isinstance(model_fx.sub, torch.fx.graph_module.GraphModule))
if (approach != 'qat'):
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0, **{'dataloader': torch.utils.data.DataLoader(dataset)})
self.assertEqual(model_fx.sub.code, model_fx_recover.sub.code)
shutil.rmtree('./saved', ignore_errors=True)
    # NOTE(review): the bare tuple below looks like the arguments of a mangled
    # @unittest.skipIf(...) decorator; as written it is a no-op expression and
    # the version guard is never applied -- confirm against upstream.
    ((PT_VERSION < Version('1.11.0').release), 'Please use PyTroch 1.11 or higher version for mixed precision with pytorch_fx or pytorch backend')
    def test_mix_precision(self):
        """Quantize a model, then force one conv to bf16 via the tune config.

        After editing ``q_config`` and re-running ``Convert``, the conv's
        weight and bias must actually be stored as ``torch.bfloat16``.
        """
        model_origin = DynamicControlModel()
        dataset = Datasets('pytorch')['dummy']((100, 3, 224, 224))
        dataloader = DataLoader('pytorch', dataset)
        set_workspace('./saved')
        conf = PostTrainingQuantConfig(op_name_dict=ptq_fx_op_name_list)
        q_model = quantization.fit(model_origin, conf, calib_dataloader=dataloader, calib_func=eval_func)
        tune_cfg = q_model.q_config
        # Overwrite the conv entry so both weight and activation are bf16,
        # and register it in the bf16 op list consumed by Convert().
        tune_cfg['op'][('conv.module', 'Conv2d')].clear()
        tune_cfg['op'][('conv.module', 'Conv2d')] = {'weight': {'dtype': 'bf16'}, 'activation': {'dtype': 'bf16'}}
        tune_cfg['bf16_ops_list'].append(('conv.module', 'Conv2d'))
        from neural_compressor.adaptor.torch_utils.bf16_convert import Convert
        q_model._model = Convert(q_model._model, tune_cfg)
        self.assertEqual(q_model._model.conv.module.module.weight.dtype, torch.bfloat16)
        self.assertEqual(q_model._model.conv.module.module.bias.dtype, torch.bfloat16)
    def test_hawq_metric(self):
        """Smoke-test hawq_top(): Hessian-aware sensitivity traces per op."""
        import torchvision
        from neural_compressor.adaptor.torch_utils.hawq_metric import hawq_top
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.data import DATALOADERS, Datasets
        from neural_compressor.model.torch_model import PyTorchFXModel
        from neural_compressor.quantization import fit
        ori_model = torchvision.models.resnet18()
        pt_model = PyTorchFXModel(ori_model)
        dataset = Datasets('pytorch')['dummy']((16, 3, 224, 224))
        dataloader = DATALOADERS['pytorch'](dataset)
        q_model = fit(ori_model, conf=PostTrainingQuantConfig(), calib_dataloader=dataloader)
        # hawq_top compares the fp32 model against its quantized counterpart;
        # a non-None trace mapping is enough for this smoke test.
        op_to_traces = hawq_top(fp32_model=pt_model, q_model=q_model, dataloader=dataloader, criterion=None, enable_act=True)
        self.assertIsNotNone(op_to_traces)
class CiderScorer(object):
    """CIDEr scorer.

    Accumulates "cooked" (n-gram-counted) candidate/reference pairs and scores
    candidates by TF-IDF-weighted cosine similarity against their references,
    averaged over n-gram orders 1..n and scaled by 10.
    """

    def copy(self):
        # Shallow copy of the cooked lists; document frequencies (if any) are
        # not carried over -- the copy is created in default 'corpus' mode.
        new = CiderScorer(n=self.n)
        new.ctest = copy.copy(self.ctest)
        new.crefs = copy.copy(self.crefs)
        return new

    def __init__(self, df_mode='corpus', test=None, refs=None, n=4, sigma=6.0):
        # n: maximum n-gram order. sigma: kept for CIDEr-D compatibility;
        # unused in this class.
        self.n = n
        self.sigma = sigma
        self.crefs = []
        self.ctest = []
        self.df_mode = df_mode
        self.ref_len = None
        if (self.df_mode != 'corpus'):
            # Pre-computed document frequencies loaded from data/<df_mode>.p;
            # latin1 decoding handles Python-2-era pickles under Python 3.
            pkl_file = cPickle.load(open(os.path.join('data', (df_mode + '.p')), 'rb'), **(dict(encoding='latin1') if six.PY3 else {}))
            self.ref_len = np.log(float(pkl_file['ref_len']))
            self.document_frequency = pkl_file['document_frequency']
        self.cook_append(test, refs)

    def clear(self):
        # Drop all accumulated candidate/reference pairs.
        self.crefs = []
        self.ctest = []

    def cook_append(self, test, refs):
        """Cook and store one (candidate, references) pair, if provided."""
        if (refs is not None):
            self.crefs.append(cook_refs(refs))
            if (test is not None):
                self.ctest.append(cook_test(test))
            else:
                # Keep ctest aligned with crefs even when there is no candidate.
                self.ctest.append(None)

    def size(self):
        assert (len(self.crefs) == len(self.ctest)), ('refs/test mismatch! %d<>%d' % (len(self.crefs), len(self.ctest)))
        return len(self.crefs)

    def __iadd__(self, other):
        """Merge another scorer (or append a raw (test, refs) tuple)."""
        if (type(other) is tuple):
            self.cook_append(other[0], other[1])
        else:
            self.ctest.extend(other.ctest)
            self.crefs.extend(other.crefs)
        return self

    def compute_doc_freq(self):
        """Count, for every n-gram, how many reference *sets* contain it."""
        for refs in self.crefs:
            # set() ensures each n-gram is counted at most once per image.
            for ngram in set([ngram for ref in refs for (ngram, count) in ref.items()]):
                self.document_frequency[ngram] += 1

    def compute_cider(self):
        """Return a per-candidate list of CIDEr scores."""
        def counts2vec(cnts):
            # Convert n-gram counts to per-order TF-IDF vectors plus their
            # L2 norms and a length statistic.
            vec = [defaultdict(float) for _ in range(self.n)]
            length = 0
            norm = [0.0 for _ in range(self.n)]
            for (ngram, term_freq) in cnts.items():
                # IDF; max(1.0, df) guards log(0) for unseen n-grams.
                df = np.log(max(1.0, self.document_frequency[ngram]))
                n = (len(ngram) - 1)
                vec[n][ngram] = (float(term_freq) * (self.ref_len - df))
                norm[n] += pow(vec[n][ngram], 2)
                if (n == 1):
                    # NOTE(review): only order-2 n-grams contribute to
                    # `length`, mirroring the coco-caption reference code --
                    # confirm this is intentional.
                    length += term_freq
            norm = [np.sqrt(n) for n in norm]
            return (vec, norm, length)

        def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
            # Per-order cosine similarity between hypothesis and reference.
            delta = float((length_hyp - length_ref))
            val = np.array([0.0 for _ in range(self.n)])
            for n in range(self.n):
                for (ngram, count) in vec_hyp[n].items():
                    val[n] += (vec_hyp[n][ngram] * vec_ref[n][ngram])
                if ((norm_hyp[n] != 0) and (norm_ref[n] != 0)):
                    val[n] /= (norm_hyp[n] * norm_ref[n])
                assert (not math.isnan(val[n]))
            return val

        if (self.df_mode == 'corpus'):
            # log(#images) acts as the maximum-IDF normalizer.
            self.ref_len = np.log(float(len(self.crefs)))
        scores = []
        for (test, refs) in zip(self.ctest, self.crefs):
            (vec, norm, length) = counts2vec(test)
            score = np.array([0.0 for _ in range(self.n)])
            for ref in refs:
                (vec_ref, norm_ref, length_ref) = counts2vec(ref)
                score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
            # Average over n-gram orders and references, then scale by 10.
            score_avg = np.mean(score)
            score_avg /= len(refs)
            score_avg *= 10.0
            scores.append(score_avg)
        return scores

    def compute_score(self, option=None, verbose=0):
        """Return (mean CIDEr, per-candidate CIDEr array)."""
        if (self.df_mode == 'corpus'):
            self.document_frequency = defaultdict(float)
            self.compute_doc_freq()
            # Sanity: no n-gram can appear in more reference sets than there
            # are candidates.
            assert (len(self.ctest) >= max(self.document_frequency.values()))
        score = self.compute_cider()
        return (np.mean(np.array(score)), np.array(score))
def convert_from_color_segmentation(arr_3d, image_height, image_width):
    """Convert an RGB segmentation image into a 2-D class-index map.

    Each pixel's (R, G, B) triple is looked up in the Pascal palette; pixels
    whose colour is not in the palette map to class 0.
    """
    palette = pascal_palette()
    flat_pixels = np.reshape(arr_3d, [(image_height * image_width), 3])
    # Unknown colours fall back to label 0 (background).
    labels = np.fromiter((palette.get((px[0], px[1], px[2]), 0) for px in flat_pixels), flat_pixels.dtype)
    return np.reshape(labels, arr_3d.shape[0:2])
class NormilizeActionSpecWrapper(wrappers.EnvironmentWrapper):
    """Environment wrapper that rescales actions from [-1, 1] to the env's range.

    Exposes a BoundedArray spec with bounds [-1, 1]; incoming actions are
    affinely mapped back to the wrapped environment's native action range.
    (Class name spelling kept as-is for caller compatibility.)
    """

    def __init__(self, environment):
        super().__init__(environment)
        action_spec = environment.action_spec()
        # Affine map parameters: native = 0.5*(a+1)*scale + offset.
        self._scale = (action_spec.maximum - action_spec.minimum)
        self._offset = action_spec.minimum
        # minimum*0 yields a zero array of the spec's shape/dtype, so the
        # advertised bounds are constant -1 / +1 arrays of matching shape.
        minimum = ((action_spec.minimum * 0) - 1.0)
        maximum = ((action_spec.minimum * 0) + 1.0)
        self._action_spec = specs.BoundedArray(action_spec.shape, action_spec.dtype, minimum, maximum, name=action_spec.name)

    def _from_normal_actions(self, actions):
        # [-1, 1] -> [0, 1] -> [minimum, maximum].
        actions = (0.5 * (actions + 1.0))
        return ((actions * self._scale) + self._offset)

    def step(self, action):
        action = self._from_normal_actions(action)
        return self._environment.step(action)

    def action_spec(self):
        return self._action_spec
def main():
    """Drive the witness-set intersection demo.

    Builds witness sets for the circle/line system and the singular locus,
    intersects them, and prints the two solutions with their coordinates and
    slopes.
    """
    (witpols, witsols) = circle_line_set()
    input('hit enter to continue')
    witset1 = extend(witpols, witsols)
    witset2 = singular_locus_set()
    # NOTE(review): the numeric arguments (5, 3, 2) presumably give the
    # ambient dimension and the dimensions of the two witness sets -- confirm
    # against the intersect() API.
    intwitset = intersect(5, 3, 2, witset1, witset2)
    (eqs, sols) = intwitset
    print('the solutions :')
    for sol in sols:
        print(sol)
    # Exactly two solutions are expected here; sols[0]/sols[1] would raise
    # IndexError otherwise.
    sol1 = coordinates_and_slopes(sols[0])
    sol2 = coordinates_and_slopes(sols[1])
    show_solutions(sol1, sol2)
class TransformerEncoder(nn.Module):
    """Stack of Transformer blocks; positional embeddings are re-added to the
    tokens before every block."""

    def __init__(self, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=False, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, act_args={'act': 'gelu'}, norm_args={'norm': 'ln'}):
        # NOTE(review): mutable dict defaults (act_args/norm_args) are shared
        # across instances -- safe only if Block does not mutate them.
        super().__init__()
        # drop_path_rate may be a per-block list (stochastic-depth schedule)
        # or a single float applied uniformly.
        self.blocks = nn.ModuleList([Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=(drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate), norm_args=norm_args, act_args=act_args) for i in range(depth)])
        self.depth = depth

    def forward(self, x, pos):
        # Positional embedding is re-injected at every block, not only once.
        for (_, block) in enumerate(self.blocks):
            x = block((x + pos))
        return x

    def forward_features(self, x, pos, num_outs=None):
        """Return `num_outs` intermediate block outputs, evenly spaced and
        ending at the last block.

        NOTE(review): passing num_outs=None (the default) raises TypeError on
        the integer division below; callers appear to always pass an int --
        confirm.
        """
        dilation = (self.depth // num_outs)
        # Indices of the blocks whose outputs are collected; the last block is
        # always included.
        out_depth = list(range(self.depth))[((self.depth - ((num_outs - 1) * dilation)) - 1)::dilation]
        out = []
        for (i, block) in enumerate(self.blocks):
            x = block((x + pos))
            if (i in out_depth):
                out.append(x)
        return out
class ImagePreprocessor(object, metaclass=ABCMeta):
    """Abstract base class for image preprocessors.

    Fix: the original declared ``__metaclass__ = ABCMeta``, which is the
    Python 2 syntax and is silently ignored by Python 3, so the class was
    never actually created with ABCMeta. Declaring the metaclass in the class
    header makes it effective; with no abstract methods defined the class
    remains instantiable, so existing callers are unaffected.
    """

    def __init__(self):
        pass

    def preprocess(self, image):
        """Transform ``image``; subclasses override. The base implementation
        is a no-op and returns None."""
        pass
def test_ohem_sampler_empty_gt():
    """OHEM sampler must stay consistent when there are no ground-truth boxes."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
    # Empty GT: zero boxes/labels exercise the degenerate assignment path.
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.LongTensor([])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(num=10, pos_fraction=0.5, context=context, neg_pos_ub=(- 1), add_gt_as_proposals=True)
    # FPN-style multi-level features (strides 2^6..2^2) required by OHEM's
    # loss-based hard-example mining.
    feats = [torch.rand(1, 256, int((2 ** i)), int((2 ** i))) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    # Sampled boxes and their indices must stay aligned for both classes.
    assert (len(sample_result.pos_bboxes) == len(sample_result.pos_inds))
    assert (len(sample_result.neg_bboxes) == len(sample_result.neg_inds))
class Sliding(_ExpandingSliding):
    """A fixed-length sliding window.

    Expressed as an expanding-sliding window whose expansion starts and ends
    at the same step, i.e. the length never actually grows.
    """

    def __init__(self, length, step):
        # Start and end steps coincide, collapsing the expansion phase.
        super().__init__(initial_length=length, start_step=step, end_step=step)
def get_latest_epoch(loadpath):
    """Return the highest epoch number among 'state_*' checkpoint files.

    Args:
        loadpath: sequence of path components; joined with os.path.join to
            form the checkpoint directory.

    Returns:
        The largest integer N found in file names of the form ``state_N.pt``
        in that directory, or -1 if no such file exists.

    Fix: the original used ``glob.glob1``, an undocumented private helper of
    the glob module; this uses the public ``glob.glob`` and ``max(...,
    default=-1)`` instead of a manual accumulator loop.
    """
    directory = os.path.join(*loadpath)
    epochs = (
        int(os.path.basename(state).replace('state_', '').replace('.pt', ''))
        for state in glob.glob(os.path.join(directory, 'state_*'))
    )
    return max(epochs, default=(- 1))
def create_policy(*, name, env_spec, policy_type, hidden_sizes, hidden_nonlinearity=None, use_lstm=False, lstm_hidden_dim=None, omit_obs_idxs=None, dim_option=None):
    """Build a (optionally LSTM-backed) policy of the requested distribution type.

    policy_type selects the output head: 'gaussian' (plain Gaussian),
    'tanhgaussian' (tanh-squashed Gaussian with clipped std), or
    'beta_twoheaded' (Beta distribution affinely mapped to the action range).
    Any other value aborts. The module kwargs are assembled first, then wrapped
    in either PolicyEx or LstmPolicy depending on use_lstm.
    """
    option_info = {'dim_option': dim_option}
    policy_kwargs = dict(env_spec=env_spec, name=name, omit_obs_idxs=omit_obs_idxs, option_info=option_info)
    module_kwargs = dict(hidden_sizes=hidden_sizes)
    if (hidden_nonlinearity is not None):
        module_kwargs.update(hidden_nonlinearity=hidden_nonlinearity)
    if (policy_type == 'gaussian'):
        module_cls = GaussianMLPTwoHeadedModuleEx
    elif (policy_type == 'tanhgaussian'):
        # Same module as 'gaussian' but squashed through tanh, with a bounded
        # std (max exp(2)) and Xavier-normal output init.
        module_cls = GaussianMLPTwoHeadedModuleEx
        module_kwargs.update(dict(max_std=np.exp(2.0), normal_distribution_cls=TanhNormal, output_w_init=functools.partial(xavier_normal_ex, gain=1.0), init_std=1.0))
    elif (policy_type == 'beta_twoheaded'):
        # min_alpha/min_beta > 1 keep the Beta density unimodal; the affine
        # transform maps the (0, 1) support onto the action bounds.
        module_cls = BetaMLPTwoHeadedModuleEx
        module_kwargs.update(dict(min_alpha=1.05, min_beta=1.05, output_w_init=functools.partial(torch.nn.init.xavier_uniform_, gain=1.0), distribution_transformations=[get_affine_transform_for_beta_dist(env_spec.action_space.low, env_spec.action_space.high)]))
        policy_kwargs.update(dict(clip_action=True))
    else:
        assert False, f'Unknown --policy_type {policy_type}'
    if (not use_lstm):
        policy_cls = PolicyEx
        policy_kwargs.update(dict(module_cls=module_cls, module_kwargs=module_kwargs))
    else:
        # The distribution module sits on top of a single-layer LSTM.
        policy_cls = LstmPolicy
        lstm_module_kwargs = dict(hidden_dim=lstm_hidden_dim, num_layers=1)
        policy_kwargs.update(dict(post_lstm_module_cls=module_cls, post_lstm_module_kwargs=module_kwargs, lstm_module_cls=LSTMModule, lstm_module_kwargs=lstm_module_kwargs, state_include_action=0))
    # env_spec/name are excluded from logging because they are not plain
    # hyperparameter values.
    policy = construct_with_aim_logging(policy_cls, policy_kwargs, name, excluded_keys=['env_spec', 'name'])
    return policy
(version='2.0')
def extract_data_type(data_type: str) -> 'tuple[str, str]':
    """Split an integer dtype name into its signedness and base type.

    Examples: 'uint8' -> ('unsigned', 'int8'); 'int16' -> ('signed', 'int16').

    Fixes: the original annotated the return type as ``str`` although a
    2-tuple is always returned, and indexed ``data_type[0]``, which raises
    IndexError on an empty string; ``startswith`` handles that safely.
    """
    if data_type.startswith('u'):
        return ('unsigned', data_type[1:])
    return ('signed', data_type)
def prepare_add_background_given_object(image, datum, verbose=False, prefix_plan=None, background_instruction='Add gray background'):
    """Build the inpainting sample for the 'fill in the background' task.

    Given an image and its annotated object boxes, produces a mask that marks
    everything *outside* the boxes for inpainting (255) and keeps the boxes
    (0), plus a context image that is black except for the object crops.

    Returns a dict with the instruction text, target/context/mask images and
    a fixed 'gray background' caption.
    """
    task = 'add_background_given_object'
    if verbose:
        print('Task: ', task)
        print('Fill out background, given all objects')
        print('context: all boxes')
        print('inpaint: background')
    assert ('unnormalized_boxes' in datum), 'unnormalized_boxes not in datum'
    assert ('box_captions' in datum), 'box_captions not in datum'
    d = datum
    mask_img = image.copy().convert('L')
    mask_draw = ImageDraw.Draw(mask_img)
    context_img = image.copy().convert('RGB')
    context_draw = ImageDraw.Draw(context_img)
    text_tokens = []
    # Start from "inpaint everything" (mask=255) and a fully black context...
    mask_draw.rectangle([(0, 0), mask_img.size], fill=255)
    context_draw.rectangle([(0, 0), context_img.size], fill=(0, 0, 0))
    # ...then punch each object box out of the mask and paste its pixels back
    # into the context image.
    for keep_obj_index in range(len(datum['unnormalized_boxes'])):
        box = d['unnormalized_boxes'][keep_obj_index]
        mask_draw.rectangle(box.long().tolist(), fill=0)
        context_img.paste(image.crop(box.long().tolist()), box.long().tolist())
    target_image = image
    if prefix_plan:
        # The background step comes after one step per object box.
        text_tokens += [f"Step {(len(datum['unnormalized_boxes']) + 1)}:"]
    text_tokens += [background_instruction]
    return {'text': ' '.join(text_tokens), 'target_image': target_image, 'context_image': context_img, 'mask_image': mask_img, 'box_caption': 'gray background'}
def build_dataset(cfg, default_args=None):
    """Recursively build a dataset (or wrapper dataset) from a config.

    Dispatch order: a list/tuple of configs becomes a ConcatDataset; the
    wrapper types ('ConcatDataset', 'RepeatDataset', 'MixDataset',
    'ClassBalancedDataset') recurse into their child configs; a config whose
    'ann_file' is a list is concatenated via _concat_dataset; anything else
    is built directly from the DATASETS registry.
    """
    # Imported lazily to avoid a circular import with dataset_wrappers.
    from .dataset_wrappers import ConcatDataset, RepeatDataset, MixDataset, ClassBalancedDataset
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif (cfg['type'] == 'ConcatDataset'):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg['datasets']], cfg.get('separate_eval', True))
    elif (cfg['type'] == 'RepeatDataset'):
        dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args), cfg['times'])
    elif (cfg['type'] == 'MixDataset'):
        # Every key containing 'dataset' is treated as a child config; its
        # optional 'ratio' entry is popped off (mutating cfg) for the mixer.
        datasets_cfg = [cfg[k] for k in cfg if ('dataset' in k)]
        ratios = [c.pop('ratio', 1) for c in datasets_cfg]
        dataset = MixDataset([build_dataset(c, default_args) for c in datasets_cfg], ratios)
    elif (cfg['type'] == 'ClassBalancedDataset'):
        dataset = ClassBalancedDataset(build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
    elif isinstance(cfg.get('ann_file'), (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)
    return dataset
def resnet164bn_svhn(num_classes=10, **kwargs):
    """ResNet-164(BN) for SVHN: 164-layer bottleneck CIFAR-style ResNet.

    Extra keyword arguments are forwarded to get_resnet_cifar.
    """
    return get_resnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name='resnet164bn_svhn', **kwargs)
def is_device_locked(serialno):
    """Return True if another process currently holds the lock for serialno.

    Probes the lock with a near-zero timeout: a Timeout means some other
    holder owns it; a successful (immediately released) acquisition means it
    is free.
    """
    import filelock
    try:
        # Tiny timeout: fail immediately instead of waiting for the holder.
        with device_lock(serialno, timeout=1e-06):
            pass
    except filelock.Timeout:
        return True
    return False
def cnf_to_dimacs(file_name, clauses, num_atoms):
    """Write a CNF formula to `file_name` in DIMACS format.

    Emits the standard 'p cnf <atoms> <clauses>' header, then one line per
    clause: space-separated literals terminated by the sentinel 0.
    """
    lines = [f'p cnf {num_atoms} {len(clauses)}\n']
    for clause in clauses:
        # Each literal is followed by a space; the clause ends with '0\n'.
        lines.append(''.join(f'{literal} ' for literal in clause) + '0\n')
    with open(file_name, 'w') as handle:
        handle.writelines(lines)
def render(pcl):
    """Open an interactive VTK window showing the given point cloud.

    `pcl` must expose height_min/height_max attributes and a get_actor()
    method returning a vtkActor. Blocks until the window is closed.
    """
    renderer = vtk.vtkRenderer()
    render_window = vtk.vtkRenderWindow()
    render_window.AddRenderer(renderer)
    render_window_interactor = vtk.vtkRenderWindowInteractor()
    render_window_interactor.SetRenderWindow(render_window)
    # Debug print of the cloud's height range.
    print(pcl.height_min, pcl.height_max)
    renderer.AddActor(pcl.get_actor())
    render_window.Render()
    # Start() blocks and runs the interaction loop.
    render_window_interactor.Start()
class Tester(unittest.TestCase):
    """Tests for the PK batch sampler (p classes x k samples per class)."""

    def test_pksampler(self):
        (p, k) = (16, 4)
        # Too few classes/samples for p=16 distinct labels: the sampler's
        # feasibility assertion must fire.
        dataset = FakeData(size=100, num_classes=10, image_size=(3, 1, 1))
        targets = [target.item() for (_, target) in dataset]
        self.assertRaises(AssertionError, PKSampler, targets, p, k)
        # Large enough dataset: every batch must contain exactly p distinct
        # labels with exactly k instances each.
        dataset = FakeData(size=1000, num_classes=100, image_size=(3, 1, 1), transform=transforms.ToTensor())
        targets = [target.item() for (_, target) in dataset]
        sampler = PKSampler(targets, p, k)
        loader = DataLoader(dataset, batch_size=(p * k), sampler=sampler)
        for (_, labels) in loader:
            bins = defaultdict(int)
            for label in labels.tolist():
                bins[label] += 1
            self.assertEqual(len(bins), p)
            for b in bins:
                self.assertEqual(bins[b], k)
def generate_pickles(ds_name, data_labels_path, output_path, instances_per_label, generate_cls_valid, seed):
    """Sample a fixed-size labelled subset per class and pickle it.

    Reads train/test label tables (feather), prefixes ids with their split
    directory, optionally dumps the full test set as validation data plus the
    class list, then samples up to `instances_per_label` training ids per
    class (seeded) and pickles the resulting {id: class} mapping.
    """
    path = Path(data_labels_path)
    train_labels = pd.read_feather((path / 'labels_train.feather'))
    test_labels = pd.read_feather((path / 'labels_test.feather'))
    # Ids become paths relative to the dataset root ('train/...' / 'test/...').
    test_labels.id = ('test/' + test_labels.id)
    train_labels.id = ('train/' + train_labels.id)
    classes = sorted(list(set(train_labels['class'].values)))
    if generate_cls_valid:
        valid_dict = {k: v for (k, v) in zip(test_labels.id.values, test_labels['class'].values)}
        pickle.dump(valid_dict, Path((output_path + f'{ds_name}_valid_data.pkl')).open('wb'))
        pickle.dump(classes, Path((output_path + f'{ds_name}_classes.pkl')).open('wb'))
        print('Generated classes and test/valid pickles')
    # Seed once so the per-class sampling below is reproducible.
    np.random.seed(seed)
    labelled_data = {}
    for cls in classes:
        filenames = train_labels[(train_labels['class'] == cls)].id.values
        if (len(filenames) <= instances_per_label):
            # Under-represented class: take everything it has.
            print(f'{cls} class only has {len(filenames)} instances')
            choices = filenames
        else:
            choices = np.random.choice(filenames, size=instances_per_label, replace=False)
        lbs = {elem: cls for elem in choices}
        labelled_data.update(lbs)
    filepath = os.path.join(output_path, f'{ds_name}_labelled_data_{instances_per_label}_seed{seed}.pkl')
    pickle.dump(labelled_data, Path(filepath).open('wb'))
    print(f'Generated labelled data pickle: {filepath}')
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) with optional projection
    shortcut, grouped/dilated 3x3 conv, and Kaiming-uniform init."""

    def __init__(self, in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation, norm_func):
        super(Bottleneck, self).__init__()
        self.downsample = None
        if (in_channels != out_channels):
            # Projection shortcut; when dilated, striding is disabled so the
            # spatial resolution is preserved.
            down_stride = (stride if (dilation == 1) else 1)
            self.downsample = nn.Sequential(Conv2d(in_channels, out_channels, kernel_size=1, stride=down_stride, bias=False), norm_func(out_channels))
            for modules in [self.downsample]:
                for l in modules.modules():
                    if isinstance(l, Conv2d):
                        nn.init.kaiming_uniform_(l.weight, a=1)
        if (dilation > 1):
            # Dilation replaces striding in the main path as well.
            stride = 1
        # Caffe-style nets stride in the 1x1 conv; torchvision-style in the 3x3.
        (stride_1x1, stride_3x3) = ((stride, 1) if stride_in_1x1 else (1, stride))
        self.conv1 = Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False)
        self.bn1 = norm_func(bottleneck_channels)
        # padding=dilation keeps the output size constant for the dilated 3x3.
        self.conv2 = Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=dilation, bias=False, groups=num_groups, dilation=dilation)
        self.bn2 = norm_func(bottleneck_channels)
        self.conv3 = Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False)
        self.bn3 = norm_func(out_channels)
        for l in [self.conv1, self.conv2, self.conv3]:
            nn.init.kaiming_uniform_(l.weight, a=1)

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        # relu_ is the in-place ReLU.
        out = F.relu_(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = F.relu_(out)
        out0 = self.conv3(out)
        out = self.bn3(out0)
        if (self.downsample is not None):
            identity = self.downsample(x)
        out += identity
        out = F.relu_(out)
        return out
class ActivationConf(WeightConf):
    """Quantization settings for activations.

    Carries the same fields as WeightConf (datatype, scheme, granularity,
    algorithm); this subclass only gives them a distinct type for activations.
    """

    def __init__(self, datatype=None, scheme=None, granularity=None, algorithm=None):
        super().__init__(datatype=datatype, scheme=scheme, granularity=granularity, algorithm=algorithm)
class OverallConstraintViolationComparatorTestCases(unittest.TestCase):
    """Tests for OverallConstraintViolationComparator.

    Convention under test: the comparator sums (negative) constraint values
    per solution; the solution with the higher (less negative) sum wins.
    NOTE(review): the last two test names appear swapped relative to the
    asserted return values -- confirm against the comparator's contract.
    """

    def setUp(self):
        self.comparator: Comparator = OverallConstraintViolationComparator()

    def test_should_comparator_return_0_if_the_solutions_have_no_constraints(self):
        solution1 = Solution(1, 1, 0)
        solution2 = Solution(1, 1, 0)
        self.assertEqual(0, self.comparator.compare(solution1, solution2))

    def test_should_comparator_return_0_if_the_solutions_have_the_same_constraint_violation_degree(self):
        # Both solutions sum to -5, so they tie.
        solution1 = Solution(1, 1, 2)
        solution2 = Solution(1, 1, 2)
        solution1.constraints[0] = (- 2)
        solution1.constraints[1] = (- 3)
        solution2.constraints[0] = (- 1)
        solution2.constraints[1] = (- 4)
        self.assertEqual(0, self.comparator.compare(solution1, solution2))

    def test_should_comparator_return_minus_1_if_solution_2_has_lower_constraint_violation_degree(self):
        # solution2 (-1) violates less than solution1 (-2), so it is preferred.
        solution1 = Solution(1, 1, 1)
        solution2 = Solution(1, 1, 1)
        solution1.constraints[0] = (- 2)
        solution2.constraints[0] = (- 1)
        self.assertEqual(1, self.comparator.compare(solution1, solution2))

    def test_should_comparator_return_1_if_solution_2_has_higher_constraint_violation_degree(self):
        # solution1 (-2) violates less than solution2 (-5), so it is preferred.
        solution1 = Solution(1, 1, 1)
        solution2 = Solution(1, 1, 1)
        solution1.constraints[0] = (- 2)
        solution2.constraints[0] = (- 5)
        self.assertEqual((- 1), self.comparator.compare(solution1, solution2))
def find_cut(rhos_array):
    """Return the first column index of `rhos_array` that has more than one
    non-zero entry.

    Raises ValueError (empty reduction) if no column qualifies.
    """
    has_multiple = (np.count_nonzero(rhos_array, axis=0) > 1)
    # Smallest index among the qualifying columns.
    return np.argwhere(has_multiple).min()
class MAMLTrajectoryBatch(collections.namedtuple('MAMLTrajectoryBatch', ['paths', 'observations', 'actions', 'rewards', 'valids', 'baselines'])):
    """Immutable bundle of per-task trajectory data used by MAML updates:
    raw paths plus stacked observations, actions, rewards, valid masks and
    baseline estimates."""
class AudioPreprocessing(nn.Module):
    """Feature-extraction front end: turns (signal, length) into
    (features, feature_length) via a configured featurizer."""

    def __init__(self, **kwargs):
        nn.Module.__init__(self)
        self.optim_level = kwargs.get('optimization_level', Optimization.nothing)
        self.featurizer = FeatureFactory.from_config(kwargs)

    def forward(self, x: Tuple[(torch.Tensor, torch.Tensor)]) -> Tuple[(torch.Tensor, torch.Tensor)]:
        (input_signal, length) = x
        # Lengths are metadata; keep them out of the autograd graph.
        length.requires_grad_(False)
        # NOTE(review): the whole (signal, length) tuple is passed to the
        # featurizer, not just input_signal -- presumably the featurizer
        # unpacks it itself; confirm against FeatureFactory.
        processed_signal = self.featurizer(x)
        processed_length = self.featurizer.get_seq_len(length)
        return (processed_signal, processed_length)
class RandomRotate(object):
    """Rotate an (image, depth) sample pair by a random angle in [-angle, angle].

    NOTE(review): `diff_angle` is accepted but never stored or used, and the
    stored `order`/`reshape` attributes are not used by __call__ -- they look
    like leftovers from an earlier scipy-based implementation; confirm.
    """

    def __init__(self, angle, diff_angle=0, order=2, reshape=False):
        self.angle = angle
        self.reshape = reshape
        self.order = order

    def __call__(self, sample):
        (image, depth) = (sample['image'], sample['depth'])
        # Fill exposed depth corners with the mean depth; *257 presumably
        # rescales the 8-bit channel mean to the 16-bit depth range -- confirm.
        mean_depth = round((ImageStat.Stat(depth).mean[0] * 257))
        applied_angle = random.uniform((- self.angle), self.angle)
        # Same angle for both so image and depth stay aligned; image corners
        # are filled white.
        image = image.rotate(applied_angle, resample=Image.BILINEAR, fillcolor=(255, 255, 255))
        depth = depth.rotate(applied_angle, resample=Image.BILINEAR, fillcolor=mean_depth)
        return {'image': image, 'depth': depth}
def get_selected_template_idx_dataset(model_id):
    """Return a dataset of argmax template indices for the model's predictions.

    Each prediction record's 'probs' vector is reduced to the index of its
    highest-probability entry.
    """
    import numpy as np

    def _argmax_prob(pred):
        # Index of the most likely template for this prediction.
        return np.argmax(np.array(pred['probs']))

    return _get_predictions_dataset(model_id).map(_argmax_prob)
def main():
    """End-to-end TF text-classification driver.

    Parses arguments (from a JSON file or the CLI), resumes from a checkpoint
    if one exists in the output directory, loads local csv/json datasets,
    builds the config/tokenizer/model, tokenizes, then trains, evaluates
    and/or predicts depending on which splits are present.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # Single JSON argument: read all dataclass fields from that file.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    output_dir = Path(training_args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    # --- Checkpoint detection -------------------------------------------------
    checkpoint = None
    if ((len(os.listdir(training_args.output_dir)) > 0) and (not training_args.overwrite_output_dir)):
        if ((output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file()):
            checkpoint = output_dir
            logger.info(f'Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
        else:
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to continue regardless.')
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel(logging.INFO)
    logger.info(f'Training/evaluation parameters {training_args}')
    # --- Dataset loading ------------------------------------------------------
    data_files = {'train': data_args.train_file, 'validation': data_args.validation_file, 'test': data_args.test_file}
    # Drop splits the user did not supply.
    data_files = {key: file for (key, file) in data_files.items() if (file is not None)}
    for key in data_files.keys():
        logger.info(f'Loading a local file for {key}: {data_files[key]}')
    if (data_args.input_file_extension == 'csv'):
        datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
    else:
        datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
    # --- Label inspection: regression vs. classification ----------------------
    if ('train' in datasets):
        # Float labels mean regression (single output); otherwise classify
        # over the sorted set of unique labels.
        is_regression = (datasets['train'].features['label'].dtype in ['float32', 'float64'])
        if is_regression:
            num_labels = 1
        else:
            label_list = datasets['train'].unique('label')
            label_list.sort()
            num_labels = len(label_list)
    else:
        # Inference-only run: label info must come from the model config.
        num_labels = None
        label_list = None
        is_regression = None
    # --- Config / tokenizer ---------------------------------------------------
    if (checkpoint is not None):
        config_path = training_args.output_dir
    elif model_args.config_name:
        config_path = model_args.config_name
    else:
        config_path = model_args.model_name_or_path
    if (num_labels is not None):
        config = AutoConfig.from_pretrained(config_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        config = AutoConfig.from_pretrained(config_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    # --- Choose which columns hold the text ----------------------------------
    column_names = {col for cols in datasets.column_names.values() for col in cols}
    non_label_column_names = [name for name in column_names if (name != 'label')]
    if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
        (sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
    elif ('sentence1' in non_label_column_names):
        (sentence1_key, sentence2_key) = ('sentence1', None)
    elif (len(non_label_column_names) >= 2):
        (sentence1_key, sentence2_key) = non_label_column_names[:2]
    else:
        (sentence1_key, sentence2_key) = (non_label_column_names[0], None)
    if (data_args.max_seq_length > tokenizer.model_max_length):
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # --- Label <-> id mapping -------------------------------------------------
    if ('train' in datasets):
        if ((not is_regression) and (config.label2id != PretrainedConfig(num_labels=num_labels).label2id)):
            # The model ships its own label mapping; reuse it only if it
            # matches the dataset's labels exactly.
            label_name_to_id = config.label2id
            if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
                label_to_id = label_name_to_id
            else:
                logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
                label_to_id = {v: i for (i, v) in enumerate(label_list)}
        elif (not is_regression):
            label_to_id = {v: i for (i, v) in enumerate(label_list)}
        else:
            label_to_id = None
        config.label2id = label_to_id
        if (config.label2id is not None):
            config.id2label = {id: label for (label, id) in label_to_id.items()}
        else:
            config.id2label = None
    else:
        label_to_id = config.label2id
    if (('validation' in datasets) and (config.label2id is not None)):
        # Every validation label must be known to the training mapping.
        validation_label_list = datasets['validation'].unique('label')
        for val_label in validation_label_list:
            assert (val_label in label_to_id), f'Label {val_label} is in the validation set but not the training set!'

    def preprocess_function(examples):
        # Tokenize one or two text columns; map string labels to ids (-1 is
        # the conventional "no label" sentinel and is passed through).
        args = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
        result = tokenizer(*args, max_length=max_seq_length, truncation=True)
        if ((config.label2id is not None) and ('label' in examples)):
            result['label'] = [(config.label2id[l] if (l != (- 1)) else (- 1)) for l in examples['label']]
        return result
    datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache))
    # --- Model build, training and evaluation under the TF strategy -----------
    with training_args.strategy.scope():
        set_seed(training_args.seed)
        if (checkpoint is None):
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForSequenceClassification.from_pretrained(model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
        optimizer = tf.keras.optimizers.Adam(learning_rate=training_args.learning_rate, beta_1=training_args.adam_beta1, beta_2=training_args.adam_beta2, epsilon=training_args.adam_epsilon, clipnorm=training_args.max_grad_norm)
        if is_regression:
            loss_fn = tf.keras.losses.MeanSquaredError()
            metrics = []
        else:
            loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
            metrics = ['accuracy']
        model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)
        # Convert each HF split into a tf.data pipeline.
        tf_data = dict()
        max_samples = {'train': data_args.max_train_samples, 'validation': data_args.max_val_samples, 'test': data_args.max_test_samples}
        for key in ('train', 'validation', 'test'):
            if (key not in datasets):
                tf_data[key] = None
                continue
            if (key in ('train', 'validation')):
                assert ('label' in datasets[key].features), f'Missing labels from {key} data!'
            if (key == 'train'):
                shuffle = True
                batch_size = training_args.per_device_train_batch_size
                drop_remainder = True
            else:
                shuffle = False
                batch_size = training_args.per_device_eval_batch_size
                drop_remainder = False
            samples_limit = max_samples[key]
            dataset = datasets[key]
            if (samples_limit is not None):
                dataset = dataset.select(range(samples_limit))
            # TPUs need static shapes; otherwise variable-length batches are
            # cheaper.
            if (isinstance(training_args.strategy, tf.distribute.TPUStrategy) or data_args.pad_to_max_length):
                logger.info("Padding all batches to max length because argument was set or we're on TPU.")
                dataset_mode = 'constant_batch'
            else:
                dataset_mode = 'variable_batch'
            data = convert_dataset_for_tensorflow(dataset, non_label_column_names, batch_size=batch_size, dataset_mode=dataset_mode, drop_remainder=drop_remainder, shuffle=shuffle)
            tf_data[key] = data
        if (tf_data['train'] is not None):
            callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)]
            model.fit(tf_data['train'], validation_data=tf_data['validation'], epochs=int(training_args.num_train_epochs), callbacks=callbacks)
        elif (tf_data['validation'] is not None):
            # Evaluation-only run.
            logger.info('Computing metrics on validation data...')
            if is_regression:
                loss = model.evaluate(tf_data['validation'])
                logger.info(f'Loss: {loss:.5f}')
            else:
                (loss, accuracy) = model.evaluate(tf_data['validation'])
                logger.info(f'Loss: {loss:.5f}, Accuracy: {(accuracy * 100):.4f}%')
        if (tf_data['test'] is not None):
            logger.info('Doing predictions on test dataset...')
            predictions = model.predict(tf_data['test'])['logits']
            predicted_class = (np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1))
            output_test_file = os.path.join(training_args.output_dir, 'test_results.txt')
            with open(output_test_file, 'w') as writer:
                writer.write('index\tprediction\n')
                for (index, item) in enumerate(predicted_class):
                    if is_regression:
                        writer.write(f'''{index} {item:3.3f}
''')
                    else:
                        item = config.id2label[item]
                        writer.write(f'''{index} {item}
''')
            logger.info(f'Wrote predictions to {output_test_file}!')
            if (('test' in datasets) and ('label' in datasets['test'].features)):
                # Test labels available: report the plain prediction loss too.
                print('Computing prediction loss on test labels...')
                labels = datasets['test']['label']
                loss = float(loss_fn(labels, predictions).numpy())
                print(f'Test loss: {loss:.4f}')
def lid_filter(split, src, tgt, from_folder, to_folder, debug=False):
    """Filter a parallel corpus by language identification.

    Downloads the LID model if missing, then runs the multi-file LID filter
    script over the source/target files of `split`, writing the kept pairs
    into `to_folder`.
    """
    if (not os.path.exists(LID_MODEL)):
        # NOTE(review): this wget invocation has an output path (-O) but no
        # source URL, so as written it cannot download anything -- the model
        # URL appears to have been lost; confirm against the original script.
        call(f'wget -nc -O {LID_MODEL}')
    from_prefix = f'{from_folder}/{split}.{src}-{tgt}'
    to_prefix = f'{to_folder}/{split}.{src}-{tgt}'
    if (os.path.exists(f'{from_prefix}.{src}') and os.path.exists(f'{from_prefix}.{tgt}')):
        # LID expects bare language codes; strip any '_variant' suffix.
        (s_src, s_tgt) = (src.split('_')[0], tgt.split('_')[0])
        cmd = f'python {LID_MULTI} --model {LID_MODEL} --inputs {from_prefix}.{src} {from_prefix}.{tgt} --langs {s_src} {s_tgt} --outputs {to_prefix}.{src} {to_prefix}.{tgt}'
        print(f'filtering {from_prefix}')
        call(cmd, debug=debug)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.