code stringlengths 101 5.91M |
|---|
class CustomRBC(HourRBC):
    """Hour-of-day rule-based controller that also ticks an optional progress bar.

    Behaves exactly like :class:`HourRBC`, but if an ipywidgets
    ``IntProgress`` loader is supplied, its ``value`` is incremented once
    per simulated time step so notebook users can watch progress.
    """

    def __init__(self, env: CityLearnEnv, action_map: Mapping[int, float] = None, loader: IntProgress = None):
        """Initialize the controller.

        Parameters
        ----------
        env : CityLearnEnv
            Environment the controller acts on (forwarded to ``HourRBC``).
        action_map : Mapping[int, float], optional
            Hour -> action mapping (forwarded to ``HourRBC``).
        loader : IntProgress, optional
            Progress widget to advance each step; ``None`` disables updates.
        """
        super().__init__(env=env, action_map=action_map)
        self.loader = loader

    def next_time_step(self):
        """Advance one time step, then update the progress widget if present."""
        super().next_time_step()
        if self.loader is not None:
            self.loader.value += 1
def set_random_seed(seed, deterministic=False):
    """Seed every RNG used in this project (Python, NumPy, torch CPU + all CUDA devices).

    Parameters
    ----------
    seed : int
        Seed value applied to all generators.
    deterministic : bool, optional
        When True, additionally force cuDNN into deterministic mode and
        disable its autotuning benchmark, trading speed for reproducibility.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        # cuDNN autotuning picks algorithms non-deterministically; pin them down.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
class HighResolutionNet(nn.Module):
    """HRNet backbone for detection-style models.

    Builds the standard HRNet topology from the global ``cfg`` dict
    (stem -> stage1 residual layer -> three multi-branch stages with
    transition layers) plus a classification-style head, and supports
    freezing the first ``cfg.HRNET.FREEZE_AT`` stages.
    Output stride is 32 (``spatial_scale = 1/32``) with 2048 output channels.
    """

    def __init__(self):
        super(HighResolutionNet, self).__init__()
        # Stem: two stride-2 3x3 convs -> 1/4 resolution, 64 channels.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: single-branch residual layer built from the configured block.
        self.stage1_cfg = cfg['MODEL']['EXTRA']['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = (block.expansion * num_channels)
        # Stage 2: multi-branch; channel counts are scaled by the block expansion.
        self.stage2_cfg = cfg['MODEL']['EXTRA']['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        # Stage 3.
        self.stage3_cfg = cfg['MODEL']['EXTRA']['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        # Stage 4 keeps all resolutions (multi_scale_output=True) for the head.
        self.stage4_cfg = cfg['MODEL']['EXTRA']['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
        # Head: incremental bottlenecks + downsampling chain + 1x1 conv to 2048ch.
        (self.incre_modules, self.downsamp_modules, self.final_layer) = self._make_head(pre_stage_channels)
        # NOTE(review): classifier appears unused in forward() — presumably kept
        # for loading ImageNet-pretrained weights; confirm against checkpoints.
        self.classifier = nn.Linear(2048, 1000)
        self.FREEZE_AT = cfg.HRNET.FREEZE_AT
        self.spatial_scale = (1 / 32)
        self.dim_out = 2048
        self._init_modules()

    def _init_modules(self):
        """Freeze the parameters of the first ``FREEZE_AT`` stages and all BN layers."""
        for i in range(1, (self.FREEZE_AT + 1)):
            if (i == 1):
                # "Stage 1" here means the stem convs plus layer1.
                print('freeze : {}'.format(('stage%d' % i)))
                freeze_params(getattr(self, 'conv1'))
                freeze_params(getattr(self, 'conv2'))
                freeze_params(getattr(self, 'layer1'))
            else:
                print('freeze : {}'.format(('stage%d' % i)))
                freeze_params(getattr(self, ('stage%d' % i)))
        # Put every BatchNorm into eval mode so running stats stay fixed.
        self.freeze(self)

    def freeze(self, m):
        """Recursively set all BatchNorm2d children of *m* to eval mode."""
        for (i, k) in m.named_children():
            if isinstance(k, nn.BatchNorm2d):
                k.eval()
            else:
                self.freeze(k)

    def train(self, mode=True):
        """Switch train/eval mode, then re-apply freezing.

        Re-runs the same freeze logic as ``_init_modules`` so that calling
        ``.train()`` never un-freezes the frozen stages or their BN layers.
        """
        self.training = mode
        for (i, k) in self.named_children():
            k.train(mode)
        for i in range(1, (self.FREEZE_AT + 1)):
            if (i == 1):
                print('freeze : {}'.format(('stage%d' % i)))
                freeze_params(getattr(self, 'conv1'))
                freeze_params(getattr(self, 'conv2'))
                freeze_params(getattr(self, 'layer1'))
            else:
                print('freeze : {}'.format(('stage%d' % i)))
                freeze_params(getattr(self, ('stage%d' % i)))
        self.freeze(self)

    def _make_head(self, pre_stage_channels):
        """Build the classification-style head.

        Each branch output goes through a Bottleneck "increase" module, the
        branches are then merged top-down via stride-2 downsample convs, and a
        final 1x1 conv lifts the result to 2048 channels.
        Returns ``(incre_modules, downsamp_modules, final_layer)``.
        """
        head_block = Bottleneck
        head_channels = [32, 64, 128, 256]
        incre_modules = []
        for (i, channels) in enumerate(pre_stage_channels):
            incre_module = self._make_layer(head_block, channels, head_channels[i], 1, stride=1)
            incre_modules.append(incre_module)
        incre_modules = nn.ModuleList(incre_modules)
        downsamp_modules = []
        for i in range((len(pre_stage_channels) - 1)):
            in_channels = (head_channels[i] * head_block.expansion)
            out_channels = (head_channels[(i + 1)] * head_block.expansion)
            downsamp_module = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))
            downsamp_modules.append(downsamp_module)
        downsamp_modules = nn.ModuleList(downsamp_modules)
        final_layer = nn.Sequential(nn.Conv2d(in_channels=(head_channels[3] * head_block.expansion), out_channels=2048, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(2048, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))
        return (incre_modules, downsamp_modules, final_layer)

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch transition modules between consecutive stages.

        Existing branches get a 3x3 conv only when their channel count
        changes (``None`` otherwise); each new branch is created from the
        lowest-resolution previous branch via chained stride-2 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i], momentum=BN_MOMENTUM), nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(((i + 1) - num_branches_pre)):
                    inchannels = num_channels_pre_layer[(- 1)]
                    # Only the last conv in the chain switches to the target channels.
                    outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
                    conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), nn.BatchNorm2d(outchannels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample/reshape channels."""
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
        """Build one HRNet stage as a sequence of ``HighResolutionModule``s.

        Returns ``(stage, num_inchannels)`` where ``num_inchannels`` are the
        output channel counts reported by the last module.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # Only the very last module of a non-multi-scale stage drops extra scales.
            if ((not multi_scale_output) and (i == (num_modules - 1))):
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
            num_inchannels = modules[(- 1)].get_num_inchannels()
        return (nn.Sequential(*modules), num_inchannels)

    def forward(self, x):
        """Run the backbone; input is zero-padded so H and W are multiples of 32."""
        (h, w) = x.shape[(- 2):]
        if ((h % 32) != 0):
            h_padding = (32 - (h % 32))
        else:
            h_padding = 0
        if ((w % 32) != 0):
            w_padding = (32 - (w % 32))
        else:
            w_padding = 0
        x = torch.nn.functional.pad(x, [0, w_padding, 0, h_padding], mode='constant', value=0)
        # Frozen prefix runs under no_grad to save memory; see _init_modules.
        # NOTE(review): this control flow assumes FREEZE_AT == 2 in practice —
        # with FREEZE_AT < 2 the stage-2 outputs (y_list) would never be
        # computed before stage 3 consumes them. Confirm against the config.
        with torch.no_grad():
            assert (self.FREEZE_AT <= 2)
            for i in range(1, (self.FREEZE_AT + 1)):
                if (i == 1):
                    x = self.conv1(x)
                    x = self.bn1(x)
                    x = self.relu(x)
                    x = self.conv2(x)
                    x = self.bn2(x)
                    x = self.relu(x)
                    x = self.layer1(x)
                else:
                    x_list = []
                    for i in range(self.stage2_cfg['NUM_BRANCHES']):
                        if (self.transition1[i] is not None):
                            x_list.append(self.transition1[i](x))
                        else:
                            x_list.append(x)
                    y_list = self.stage2(x_list)
        # Trainable remainder: stages 3 and 4 with their transitions.
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if (self.transition2[i] is not None):
                # New branches are derived from the lowest-resolution output.
                x_list.append(self.transition2[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if (self.transition3[i] is not None):
                x_list.append(self.transition3[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)
        # Head: fuse all branch outputs top-down into a single 2048ch map.
        y = self.incre_modules[0](y_list[0])
        for i in range(len(self.downsamp_modules)):
            y = (self.incre_modules[(i + 1)](y_list[(i + 1)]) + self.downsamp_modules[i](y))
        y = self.final_layer(y)
        return y
class SampleNormalizeCLIMixin(NormalizeCLIMixin, intnormcli.CLIMixin):
    """CLI mixin for normalizers that are applied image-by-image.

    Unlike population-based normalizers, ``fit`` is a no-op here; each image
    is normalized independently when the directory is processed.
    """

    def fit(self, images: ImageSeq, /, masks: MaskSeqOrNone=None, *, modality: intnormt.Modality=intnormt.Modality.T1, **kwargs: typing.Any) -> None:
        """No-op: sample-based normalization needs no population-level fitting."""
        return None

    def process_directories(self, image_dir: intnormt.PathLike, /, mask_dir: (intnormt.PathLike | None)=None, *, modality: intnormt.Modality=intnormt.Modality.T1, ext: str='nii*', return_normalized_and_masks: bool=False, **kwargs: typing.Any) -> (tuple[(ImageSeq, MaskSeqOrNone)] | None):
        """Normalize every image found in *image_dir* (optionally masked).

        When ``return_normalized_and_masks`` is True, returns the list of
        normalized images together with the (possibly None) masks;
        otherwise returns None.
        """
        logger.debug('Grabbing images')
        (images, masks) = intnormio.gather_images_and_masks(image_dir, mask_dir, ext=ext)
        self.fit(images, masks, modality=modality, **kwargs)
        if return_normalized_and_masks:
            normalized: list[intnormt.ImageLike] = []
            n_images = len(images)
            # zip_with_nones pairs each image with its mask, or with None.
            zipped = intnormio.zip_with_nones(images, masks)
            for (i, (image, mask)) in enumerate(zipped, 1):
                logger.info(f'Normalizing image {i}/{n_images}')
                normalized.append(self(image, mask, modality=modality))
            return (normalized, masks)
        return None

    def plot_histogram_from_args(self, args: argparse.Namespace, /, normalized: ImageSeq, masks: MaskSeqOrNone=None) -> None:
        """Save a histogram plot of the normalized images to ``hist.pdf``.

        The plot is written into ``args.output_dir`` when given, otherwise
        next to the input images in ``args.image_dir``.
        """
        import matplotlib.pyplot as plt
        import intensity_normalization.plot.histogram as intnormhist
        if (args.output_dir is None):
            output = (pathlib.Path(args.image_dir) / 'hist.pdf')
        else:
            output = (pathlib.Path(args.output_dir) / 'hist.pdf')
        hp = intnormhist.HistogramPlotter(title=self.fullname())
        _ = hp(normalized, masks)
        plt.savefig(output)

    def call_from_argparse_args(self, args: argparse.Namespace, /, *, use_masks_in_plot: bool=True, **kwargs: typing.Any) -> None:
        """CLI entry point: normalize a directory, save results, optionally plot.

        Output filenames mirror the input filenames with this normalizer's
        name appended; additional info is saved via ``save_additional_info``.
        """
        out = self.process_directories(args.image_dir, args.mask_dir, modality=intnormt.Modality.from_string(args.modality), ext=args.extension, return_normalized_and_masks=True)
        assert (out is not None)
        (normalized, masks) = out
        assert isinstance(normalized, list)
        image_filenames = intnormio.glob_ext(args.image_dir, ext=args.extension)
        output_filenames = [self.append_name_to_file(fn, args.output_dir) for fn in image_filenames]
        n_images = len(normalized)
        assert (n_images == len(output_filenames))
        for (i, (norm_image, fn)) in enumerate(zip(normalized, output_filenames), 1):
            logger.info(f'Saving normalized image: {fn} ({i}/{n_images})')
            norm_image.view(mioi.Image).to_filename(fn)
        self.save_additional_info(args, normalized=normalized, masks=masks, image_filenames=image_filenames)
        if args.plot_histogram:
            _masks = (masks if use_masks_in_plot else None)
            self.plot_histogram_from_args(args, normalized, _masks)
class WarmupLinearSchedule(_LRSchedule):
    """Learning-rate schedule: linear warmup to 1.0, then linear decay to 0.0.

    ``progress`` is the fraction of total training completed, in [0, 1];
    ``self.warmup`` is the fraction spent warming up.
    """
    warn_t_total = True

    def get_lr_(self, progress):
        """Return the LR multiplier for the given training *progress*."""
        if progress < self.warmup:
            # Ramp linearly from 0 at the start to 1 at the end of warmup.
            return progress / self.warmup
        # Decay linearly from 1 (end of warmup) down to 0 at progress == 1,
        # clamped so the multiplier never goes negative.
        decayed = (progress - 1.0) / (self.warmup - 1.0)
        return decayed if decayed > 0.0 else 0.0
class DB2(nn.Module):
    """Decoder fusion block: merges a skip feature with an upsampled deeper feature.

    The deeper feature ``z`` is bilinearly upsampled to the skip feature's
    size, fused with it via concat + convs plus a 1x1 shortcut of ``z``,
    and then residually refined. Returns the same fused tensor twice.
    """

    def __init__(self, inplanes, outplanes):
        super(DB2, self).__init__()
        # 1x1 projection of the (outplanes-channel) deep feature for the residual add.
        self.short_cut = nn.Conv2d(outplanes, outplanes, kernel_size=1, stride=1, padding=0)
        # Fusion path: concat(x, z) -> two conv-BN-ReLU blocks.
        self.conv = nn.Sequential(
            nn.Conv2d((inplanes + outplanes), outplanes, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(outplanes),
            nn.ReLU(inplace=True),
            nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(outplanes),
            nn.ReLU(inplace=True),
        )
        # Refinement path applied residually after the shortcut add.
        self.conv2 = nn.Sequential(
            nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(outplanes),
            nn.ReLU(inplace=True),
            nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(outplanes),
            nn.ReLU(inplace=True),
        )

    def forward(self, x, z):
        """Fuse skip feature *x* with deeper feature *z*; returns (out, out)."""
        z = F.interpolate(z, size=x.size()[2:], mode='bilinear', align_corners=True)
        fused = self.conv(torch.cat((x, z), 1)) + self.short_cut(z)
        out = fused + self.conv2(fused)
        return (out, out)

    def initialize(self):
        """Apply the project-wide weight initialization to this module."""
        weight_init(self)
def build_estimator(logits, probs, labels, mode):
    """Build a ``tf.estimator.EstimatorSpec`` for the given mode.

    PREDICT: exports probabilities and raw logits.
    EVAL: sigmoid cross-entropy loss plus an AUC metric.
    TRAIN: same loss minimized with Adam (global LEARNING_RATE / EPSILON).
    """
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Inference path: no labels available, just export outputs.
        predictions = {'probs': probs, 'logits': logits}
        export_outputs = {'prediction': tf.estimator.export.PredictOutput(predictions)}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions, export_outputs=export_outputs)
    # Shared by EVAL and TRAIN: binary cross-entropy on the logits.
    labels = tf.cast(labels, tf.float32)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))
    auc = tf.metrics.auc(labels, probs, name='auc_op')
    metrics = {'auc': auc}
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    # Only TRAIN remains at this point.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE, epsilon=EPSILON)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def check_submodules():
    """Check that every transformers submodule is declared in the main init.

    Compares the discovered submodules against the keys of
    ``transformers._import_structure`` (ignoring ``IGNORE_SUBMODULES``) and
    raises ``ValueError`` listing any submodule that is missing.
    """
    from transformers.utils import direct_transformers_import
    # Import the local checkout directly so we validate the working tree,
    # not an installed copy.
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if (module not in IGNORE_SUBMODULES) and (module not in transformers._import_structure.keys())
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join((f'- {module}' for module in module_not_registered))
        # Fixed typo in the user-facing message: "registed" -> "registered".
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.'
        )
def TrainForceField():
    """Train energy/dipole force-field networks on several molecule sets.

    Each ``if 0:`` / ``if 1:`` guard below is a manually toggled experiment
    block (research-script style); only the ``if 1:`` blocks execute. Every
    block follows the same recipe: load an ``MSet``, configure the global
    ``PARAMS`` dict, build a ``MolDigester`` + ``TensorMolData_BP_Direct_EE``
    dataset, then train (or continue training) a ``TFMolManage`` network.
    """
    # Experiment (disabled): chemspider9 force set, short run.
    # NOTE(review): hidden1/2/3 and HiddenLayers are both set here — which one
    # the network type honors depends on TFMolManage; confirm before re-enabling.
    if 0:
        a = MSet('chemspider9_force_cleaned')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['hidden1'] = 1000
        PARAMS['hidden2'] = 1000
        PARAMS['hidden3'] = 1000
        PARAMS['learning_rate'] = 0.001
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 101
        PARAMS['batch_size'] = 28
        PARAMS['test_freq'] = 2
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 0.05
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [200, 200, 200]
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE')
        manager.Train(maxstep=101)
    # Experiment (disabled): water set, long run, narrow Erf width (0.2).
    if 0:
        a = MSet('H2O_augmented_more_cutoff5_rimp2_force_dipole')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 1101
        PARAMS['batch_size'] = 1000
        PARAMS['test_freq'] = 10
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [200, 200, 200]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 4.0
        PARAMS['Erf_Width'] = 0.2
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 100
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE')
        manager.Train()
    # Experiment (disabled): resume training from a previously saved manager.
    if 0:
        a = MSet('H2O_augmented_more_cutoff5_rimp2_force_dipole')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 0.0001
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 2
        PARAMS['batch_size'] = 1000
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [200, 200, 200]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 4.4
        PARAMS['EECutoffOff'] = 15.0
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('Mol_H2O_augmented_more_cutoff5_rimp2_force_dipole_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_EE_1', tset, False, 'fc_sqdiff_BP_Direct_EE')
        manager.Continue_Training(target='All')
    # Experiment (disabled): water set with wider radial grid (Rc=8.0, 64 Rs).
    if 0:
        a = MSet('H2O_augmented_more_cutoff5_rimp2_force_dipole')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 901
        PARAMS['batch_size'] = 1000
        PARAMS['test_freq'] = 10
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [200, 200, 200]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 7.0
        PARAMS['AN1_r_Rc'] = 8.0
        PARAMS['AN1_num_r_Rs'] = 64
        PARAMS['Erf_Width'] = 0.4
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 100
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE')
        manager.Train()
    # Experiment (ENABLED): water set with shorter radial cutoff (Rc=5.0).
    if 1:
        a = MSet('H2O_augmented_more_cutoff5_rimp2_force_dipole')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 901
        PARAMS['batch_size'] = 1000
        PARAMS['test_freq'] = 10
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [200, 200, 200]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 7.0
        PARAMS['AN1_r_Rc'] = 5.0
        PARAMS['Erf_Width'] = 0.4
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 100
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE')
        manager.Train()
    # Experiment (disabled): same as Rc=8.0 run but with the ChargeEncode network.
    if 0:
        a = MSet('H2O_augmented_more_cutoff5_rimp2_force_dipole')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 901
        PARAMS['batch_size'] = 1000
        PARAMS['test_freq'] = 10
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [200, 200, 200]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 7.0
        PARAMS['AN1_r_Rc'] = 8.0
        PARAMS['AN1_num_r_Rs'] = 64
        PARAMS['Erf_Width'] = 0.4
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 100
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE_ChargeEncode')
        manager.Train()
    # Experiment (disabled): larger water set with wider 512-unit hidden layers.
    if 0:
        a = MSet('H2O_augmented_more_rimp2_force_dipole')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 901
        PARAMS['batch_size'] = 1000
        PARAMS['test_freq'] = 10
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [512, 512, 512]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 7.0
        PARAMS['AN1_r_Rc'] = 8.0
        PARAMS['AN1_num_r_Rs'] = 64
        PARAMS['Erf_Width'] = 0.4
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 100
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE')
        manager.Train()
    # Experiment (ENABLED): water "bowl" dataset, same hyperparameters as above.
    if 1:
        a = MSet('H2O_bowl02_rimp2_force_dipole')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 901
        PARAMS['batch_size'] = 1000
        PARAMS['test_freq'] = 10
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [200, 200, 200]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 7.0
        PARAMS['AN1_r_Rc'] = 8.0
        PARAMS['AN1_num_r_Rs'] = 64
        PARAMS['Erf_Width'] = 0.4
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 100
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE')
        manager.Train()
    # Experiment (disabled): chemspider9 metadynamics set, wide 1000-unit layers,
    # ChargeEncode network, early dipole/energy switch (epoch 10).
    if 0:
        a = MSet('chemspider9_metady_force')
        a.Load()
        TreatedAtoms = a.AtomTypes()
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 101
        PARAMS['batch_size'] = 35
        PARAMS['test_freq'] = 2
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['GradScaler'] = 1.0
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'relu'
        PARAMS['HiddenLayers'] = [1000, 1000, 1000]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 7.0
        PARAMS['Erf_Width'] = 0.4
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 10
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE_ChargeEncode')
        manager.Train()
class LucernHammer(BasePolearm):
    """A lucern hammer: an iron polearm weighing 150, dealing d6 damage, no hit bonus."""

    def __init__(self):
        # All stats are fixed for this weapon type; damage parsed from dice notation.
        super().__init__(
            'lucern hammer',
            weight=150,
            damage=D.Dice.from_str('d6'),
            material=M.Iron,
            hit=0,
        )
def cleanup_dir(dir):
    """Recursively delete *dir* if it exists; a missing directory is a no-op.

    Note: the whole directory tree is removed, including the directory
    itself (``shutil.rmtree``), not just its contents.
    """
    if os.path.exists(dir):
        logging.info(f'Deleting directory: {dir}')
        shutil.rmtree(dir)
        # Log success only when something was actually removed; the original
        # logged unconditionally (and claimed only "contents" were deleted),
        # which was misleading for paths that never existed.
        logging.info(f'Deleted directory: {dir}')
class TorchImagenetLayerExtractor(BaseFeatureExtractor):
    """Feature extractor reading layer activations from an ImageNet-pretrained torch model.

    Wraps a slideflow ``Features`` interface around a torchvision-style model
    and exposes it for tile batches (uint8 tensors) or whole slides (``sf.WSI``).
    """

    def __init__(self, model_name, tile_px, device=None, **kwargs):
        """Build the pretrained model and the uint8 -> normalized-float transform.

        Parameters
        ----------
        model_name : str
            Architecture name understood by ``ModelParams``.
        tile_px : int
            Tile size, in pixels, the extractor operates on.
        device : optional
            Torch device override; auto-detected when None.
        **kwargs
            Forwarded to ``Features.from_model`` (e.g. ``layers``, ``pooling``).
        """
        super().__init__(backend='torch')
        from ..torch import ModelParams, Features
        from .. import torch_utils
        from torchvision import transforms
        self.device = torch_utils.get_device(device)
        _hp = ModelParams(tile_px=tile_px, model=model_name, include_top=False, hidden_layers=0)
        model = _hp.build_model(num_classes=1, pretrain='imagenet').to(self.device)
        self.model_name = model_name
        self.ftrs = Features.from_model(model, tile_px=tile_px, **kwargs)
        self.tag = ((model_name + '_') + '-'.join(self.ftrs.layers))
        self.num_features = self.ftrs.num_features
        self._tile_px = tile_px
        # uint8 [0, 255] -> float [0, 1], then standard ImageNet normalization.
        all_transforms = [transforms.Lambda((lambda x: (x / 255.0))), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
        self.transform = transforms.Compose(all_transforms)
        self.preprocess_kwargs = dict(standardize=False)

    # Fixed broken property definitions: the getters lacked @property and the
    # setter decorators were mangled ("_precision.setter" / "_last.setter"),
    # which raised NameError at class-definition time.
    @property
    def mixed_precision(self):
        """Whether the underlying Features interface runs in mixed precision."""
        return self.ftrs.mixed_precision

    @mixed_precision.setter
    def mixed_precision(self, value):
        self.ftrs.mixed_precision = value

    @property
    def channels_last(self):
        """Whether the underlying Features interface uses channels-last memory format."""
        return self.ftrs.channels_last

    @channels_last.setter
    def channels_last(self, value):
        self.ftrs.channels_last = value

    def __repr__(self):
        return str(self)

    def __str__(self):
        return '<TorchImagenetLayerExtractor model={} layers={} n_features={}>'.format(self.model_name, self.ftrs.layers, self.num_features)

    def __call__(self, obj, **kwargs):
        """Extract features from a slide (``sf.WSI``) or a uint8 image batch."""
        if isinstance(obj, sf.WSI):
            grid = features_from_slide(self, obj, **kwargs)
            return np.ma.masked_where((grid == sf.heatmap.MASK), grid)
        elif kwargs:
            raise ValueError(f'{self.__class__.__name__} does not accept keyword arguments when extracting features from a batch of images.')
        else:
            import torch
            assert (obj.dtype == torch.uint8)
            obj = self.transform(obj).to(self.device)
            return self.ftrs._predict(obj)

    def dump_config(self):
        """Return a JSON-serializable config that can rebuild this extractor."""
        return {'class': 'slideflow.model.extractors.TorchImagenetLayerExtractor', 'kwargs': {'model_name': self.model_name, 'tile_px': self._tile_px, 'layers': self.ftrs.layers, 'pooling': self.ftrs._pooling}}
def _test_annotation_registration():
    """End-to-end check of annotation-stub registration for the fake module.

    Captures references to fakelib's functions/methods, removes fakelib from
    ``sys.modules``, verifies that registering the annotations directory only
    records specs (not handlers), then restores the module and verifies that
    compiling handlers finally registers every captured callable.
    The order of operations below is load-bearing — do not reorder.
    """
    import fakelib
    # Grab direct references before the module is evicted from sys.modules.
    fakelib_class = fakelib.OnlyPresentSoThatHandlersCanBeRegistered
    fakelib_method = fakelib_class.method_for_method_stub_presence
    fakelib_method_a = fakelib_class.method_a
    fakelib_method_b = fakelib_class.method_b
    fakelib_function = fakelib.function_for_function_stub_presence
    # Simulate the module not being imported yet.
    sys.modules.pop(fakelib.__name__)
    if (sys.version_info >= (3, 8)):
        # Positional-only parameter syntax only exists on 3.8+.
        fakelib_posonly_function = fakelib.fun_for_testing_posonlyarg
    else:
        fakelib_posonly_function = None
    non_fakelib_module_name = 'non_fakelib_module'
    # Precondition: nothing registered before the annotations directory is scanned.
    for module_name in [fakelib.__name__, non_fakelib_module_name]:
        assert (module_name not in REGISTERED_FUNCTION_SPECS)
        assert (module_name not in REGISTERED_CLASS_SPECS)
    for fun in [fakelib_method, fakelib_method_a, fakelib_method_b, fakelib_function, fakelib_posonly_function]:
        if (fun is None):
            continue
        assert (fun not in REGISTERED_HANDLER_BY_FUNCTION)
    register_annotations_directory(os.path.dirname(__file__))
    # Registering the directory records specs for both modules...
    for module_name in [fakelib.__name__, non_fakelib_module_name]:
        assert (module_name in REGISTERED_FUNCTION_SPECS)
        assert (module_name in REGISTERED_CLASS_SPECS)
    # ...but no handlers yet, since fakelib is not in sys.modules.
    for fun in [fakelib_method, fakelib_method_a, fakelib_method_b, fakelib_function, fakelib_posonly_function]:
        if (fun is None):
            continue
        assert (fun not in REGISTERED_HANDLER_BY_FUNCTION)
    # Restore the module and compile handlers; now every callable is registered.
    sys.modules[fakelib.__name__] = fakelib
    compile_and_register_handlers_for_module(fakelib)
    for fun in [fakelib_method, fakelib_method_a, fakelib_method_b, fakelib_function, fakelib_posonly_function]:
        if (fun is None):
            continue
        assert (fun in REGISTERED_HANDLER_BY_FUNCTION), ('%s not in there' % fun)
def getbatch():
    """Block until an item is available on the shared ``stack`` queue, then pop it (FIFO).

    Note: ``stack`` is a module-level list shared with the producer; this
    consumer polls it. The original implementation spun in a hot
    ``continue`` loop, burning a full CPU core while the queue was empty —
    a short sleep between polls fixes that without changing semantics.
    """
    import time
    while True:
        if stack:
            return stack.pop(0)
        # Yield the CPU briefly instead of busy-waiting.
        time.sleep(0.001)
class Cutpaste_Dataset(Dataset):
    """Dataset that eagerly applies CutPaste augmentation to a list of image files.

    All images are loaded, transformed and augmented up-front in a
    multiprocessing pool, so ``__getitem__`` is a pure cache lookup.
    Each item is the list of tensors produced by the CutPaste transform
    (original plus augmented variants).
    """

    def __init__(self, files: List, config: Namespace):
        self.files = files
        # When True, rescale tensors from [0, 1] to [-1, 1].
        self.center = config.center
        self.cutpaste_transform = CutPaste(type=config.cutpaste_type)
        # For localization a fixed 32x32 crop is used; otherwise the full image size.
        self.crop_size = ((32, 32) if config.localization else (config.image_size, config.image_size))
        self.transforms = T.Compose([T.Resize((config.image_size, config.image_size), T.InterpolationMode.LANCZOS), T.CenterCrop(config.image_size), T.RandomCrop(self.crop_size)])
        self.to_tensor = T.ToTensor()
        # Preload every file in parallel; results are cached for __getitem__.
        with Pool(cpu_count()) as pool:
            self.preload = pool.map(partial(self.load_file), files)

    def load_file(self, file):
        """Load one image, apply resize/crop transforms and CutPaste augmentation.

        Returns a list of tensors (one per CutPaste output), optionally
        recentered to [-1, 1].
        """
        image = Image.open(file)
        image = self.transforms(image)
        # CutPaste returns multiple variants of the image.
        image = self.cutpaste_transform(image)
        image = [self.to_tensor(i) for i in image]
        if self.center:
            image = [((i - 0.5) * 2) for i in image]
        return image

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx) -> List[Tensor]:
        # Annotation corrected: load_file returns a list of tensors, not one Tensor.
        return self.preload[idx]
@unittest.skipIf((torch.cuda.device_count() < 2), 'test requires 2 GPUs')
class TestBMUF(unittest.TestCase):
    """Multi-GPU tests for BMUF (Blockwise Model Update Filtering) training.

    Restored the ``@unittest.skipIf`` decorator: the skip condition had been
    left as a bare tuple expression with no effect, so the GPU-requiring
    tests would run (and fail) on machines without 2 GPUs.
    """

    def bmuf_process(self, cfg, args, iterations):
        """Spawn one training process per GPU and collect per-rank results."""
        results = Manager().dict()
        torch.multiprocessing.spawn(fn=functools.partial(single_gpu_training, cfg, args), args=(iterations, results), nprocs=args.distributed_world_size, join=True)
        return results

    def test_bmuf_sync(self):
        """After one iteration, both ranks must hold identical parameters."""
        (cfg, args) = setup_args()
        iterations = 1
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync(self):
        """Ranks stay in sync through the warmup phase."""
        (cfg, args) = setup_args()
        args.warmup_iterations = 20
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync_bmuf_sync(self):
        """Ranks stay in sync through warmup followed by periodic BMUF syncs."""
        (cfg, args) = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        cfg.bmuf.global_sync_iter = args.global_sync_iter
        iterations = 25
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_single_gpu_bmuf(self):
        """BMUF degenerates gracefully to single-GPU training."""
        (cfg, args) = setup_args()
        args.distributed_world_size = 1
        args.warmup_iterations = 5
        cfg.distributed_training.distributed_world_size = args.distributed_world_size
        cfg.bmuf.distributed_world_size = args.distributed_world_size
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 1)

    def assertAlmostEqual(self, t1, t2):
        """Tensor-aware near-equality: same shape and max abs diff < 1e-4."""
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)
def main():
args = parse_args()
local_rank = args.local_rank
if (local_rank != (- 1)):
msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
batch_size = args.per_device_train_batch_size
assert ((batch_size % WORLD_SIZE) == 0), f'--batch-size {batch_size} must be multiple of WORLD_SIZE'
assert (torch.cuda.device_count() > local_rank), 'insufficient CUDA devices for DDP command'
torch.cuda.set_device(local_rank)
dist.init_process_group(backend=('nccl' if dist.is_nccl_available() else 'gloo'))
send_example_telemetry('run_clm_no_trainer', args)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
if (args.calibration_dataset_name is not None):
if ('wiki' in args.calibration_dataset_name):
raw_datasets = load_dataset('wikitext', args.calibration_dataset_name, args.dataset_config_name)
else:
raw_datasets = load_dataset(args.calibration_dataset_name, args.dataset_config_name)
if ('validation' not in raw_datasets.keys()):
raw_datasets['validation'] = load_dataset(args.calibration_dataset_name, args.dataset_config_name, split=f'train[:{args.validation_split_percentage}%]')
raw_datasets['train'] = load_dataset(args.calibration_dataset_name, args.dataset_config_name, split=f'train[{args.validation_split_percentage}%:]')
else:
data_files = {}
dataset_args = {}
if (args.train_file is not None):
data_files['train'] = args.train_file
if (args.validation_file is not None):
data_files['validation'] = args.validation_file
extension = args.train_file.split('.')[(- 1)]
if (extension == 'txt'):
extension = 'text'
dataset_args['keep_linebreaks'] = (not args.no_keep_linebreaks)
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
if ('validation' not in raw_datasets.keys()):
raw_datasets['validation'] = load_dataset(extension, data_files=data_files, split=f'train[:{args.validation_split_percentage}%]', **dataset_args)
raw_datasets['train'] = load_dataset(extension, data_files=data_files, split=f'train[{args.validation_split_percentage}%:]', **dataset_args)
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name, torchscript=True)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path, torchscript=True, trust_remote_code=args.trust_remote_code)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
is_llama = bool(('llama' in args.model_name_or_path))
is_t5 = bool(('t5' in args.model_name_or_path))
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=(not args.use_slow_tokenizer))
elif args.model_name_or_path:
if is_llama:
tokenizer = transformers.LlamaTokenizer.from_pretrained(args.model_name_or_path)
else:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer), trust_remote_code=True)
else:
raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
if args.model_name_or_path:
if is_t5:
model = T5ForConditionalGeneration.from_pretrained(args.model_name_or_path, config=config)
else:
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, trust_remote_code=args.trust_remote_code, low_cpu_mem_usage=args.low_cpu_mem_usage)
else:
logger.info('Training new model from scratch')
model = AutoModelForCausalLM.from_config(config)
embedding_size = model.get_input_embeddings().weight.shape[0]
if (len(tokenizer) > embedding_size):
model.resize_token_embeddings(len(tokenizer))
if (local_rank != (- 1)):
device = torch.device('cuda', local_rank)
model = model.to(device)
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
column_names = raw_datasets['train'].column_names
text_column_name = ('text' if ('text' in column_names) else column_names[0])
def tokenize_function(examples):
return tokenizer(examples[text_column_name], max_length=args.max_length, truncation=True)
if (RANK in {(- 1), 0}):
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not args.overwrite_cache), desc='Running tokenizer on dataset')
tokenized_datasets.set_format(type='torch', columns=['input_ids'])
if (args.block_size is None):
block_size = tokenizer.model_max_length
if (block_size > 1024):
logger.warning('The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can override this default with `--block_size xxx`.')
block_size = 1024
else:
if (args.block_size > tokenizer.model_max_length):
logger.warning(f'The block_size passed ({args.block_size}) is larger than the maximum length for the model({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.')
block_size = min(args.block_size, tokenizer.model_max_length)
def group_texts(examples):
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
if (total_length >= block_size):
total_length = ((total_length // block_size) * block_size)
result = {k: [t[i:(i + block_size)] for i in range(0, total_length, block_size)] for (k, t) in concatenated_examples.items()}
result['labels'] = result['input_ids'].copy()
return result
if (RANK in {(- 1), 0}):
lm_datasets = tokenized_datasets.map(group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=(not args.overwrite_cache), desc=f'Grouping texts in chunks of {block_size}')
train_dataset = lm_datasets['train']
train_dataset = train_dataset.shuffle(seed=42).select(range(128))
total_batch_size = args.per_device_train_batch_size
if (local_rank != (- 1)):
total_batch_size *= WORLD_SIZE
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.per_device_train_batch_size, sampler=train_sampler)
else:
train_dataloader = DataLoader(train_dataset, shuffle=False, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size)
logger.info('***** Running pruning *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
logger.info(f' Total train/prune batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
if (not args.auto_config):
pruning_configs = [{'pruning_type': 'sparse_gpt', 'op_names': ['.*'], 'excluded_op_names': ['lm_head', 'embed_out']}]
else:
pruning_configs = []
auto_slim_configs = parse_auto_slim_config(model, ffn2_sparsity=args.target_sparsity, mha_sparsity=args.target_sparsity, pruning_type='sparse_gpt', pattern=args.pruning_pattern)
pruning_configs += auto_slim_configs
configs = WeightPruningConfig(pruning_configs, target_sparsity=args.target_sparsity, pattern=args.pruning_pattern)
device = args.device
if (device != 'cpu'):
device = ('cuda:' + str(device))
if args.do_prune:
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
use_cache = model.config.use_cache
model.config.use_cache = False
pruning = prepare_pruning(model, configs, dataloader=train_dataloader, device=device)
model.config.use_cache = use_cache
if (args.output_dir is not None):
output_dir = args.output_dir
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
logger.info(f'The model has been exported to {output_dir}')
if (device != 'cpu'):
model = model.to(device)
logger.info(f'***** Evaluation in GPU mode. *****')
else:
logger.info(f'***** Evaluation in CPU mode. *****')
model.eval()
model_name = args.model_name_or_path
dtype = 'float32'
if args.eval_fp16:
if (hasattr(model, 'config') and (model.config.torch_dtype is torch.bfloat16)):
dtype = 'bfloat16'
else:
dtype = 'float16'
model_args = f'pretrained={model_name},tokenizer={model_name},dtype={dtype},use_accelerate={args.use_accelerate}'
eval_batch = args.per_device_eval_batch_size
user_model = (None if args.use_accelerate else model)
results = evaluate(model='hf-causal', model_args=model_args, user_model=user_model, batch_size=eval_batch, tasks=args.tasks, device=device) |
def load_kins_json(json_file, image_root, dataset_name=None):
    """Load a KINS-style (COCO-format, amodal + visible masks) annotation json.

    Args:
        json_file: path to the COCO-format annotation json.
        image_root: directory containing the referenced images.
        dataset_name: registered dataset name; when given, class metadata is
            attached to the MetadataCatalog and category ids may be remapped.

    Returns:
        list[dict]: one record per image in the standard dataset-dict format,
        with per-instance 'segmentation' and 'visible_mask' entries.
    """
    from pycocotools.coco import COCO
    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    # COCO() prints progress to stdout while indexing; silence it.
    with contextlib.redirect_stdout(io.StringIO()):
        kins_api = COCO(json_file)
    if (timer.seconds() > 1):
        logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
    id_map = None
    if (dataset_name is not None):
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(kins_api.getCatIds())
        cats = kins_api.loadCats(cat_ids)
        thing_classes = [str(c['name']) for c in sorted(cats, key=(lambda x: x['id']))]
        meta.thing_classes = thing_classes
        # Warn when category ids are not already contiguous starting at 1;
        # a contiguous [0, #classes) remapping is applied below either way.
        if (not ((min(cat_ids) == 1) and (max(cat_ids) == len(cat_ids)))):
            if ('kins' not in dataset_name):
                logger.warning("\nCategory ids in annotations are not in [1, #categories]! We'll apply a mapping for you.\n")
        id_map = {v: i for (i, v) in enumerate(cat_ids)}
        meta.thing_dataset_id_to_contiguous_id = id_map
    img_ids = sorted(list(kins_api.imgs.keys()))
    imgs = kins_api.loadImgs(img_ids)
    # anns[i] is the annotation list for imgs[i] (both ordered by img_id).
    anns = [kins_api.imgToAnns[img_id] for img_id in img_ids]
    if ('minival' not in json_file):
        # Annotation ids must be globally unique (minival is exempted).
        ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
        assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique!".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    logger.info('Loaded {} images in COCO format from {}'.format(len(imgs_anns), json_file))
    dataset_dicts = []
    ann_keys = ['iscrowd', 'bbox', 'category_id']
    num_instances_without_valid_segmentation = 0
    num_instances_without_valid_visible_segmentation = 0
    # Dataset names ending in 'visible' switch the primary mask to the inmodal one.
    logger.info('detecting visible: {}'.format(dataset_name.endswith('visible')))
    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        record['file_name'] = os.path.join(image_root, img_dict['file_name'])
        record['height'] = img_dict['height']
        record['width'] = img_dict['width']
        image_id = record['image_id'] = img_dict['id']
        objs = []
        for anno in anno_dict_list:
            # Crowd annotations are not used; force the flag off.
            anno['iscrowd'] = 0
            assert (anno['image_id'] == image_id)
            assert (anno.get('ignore', 0) == 0)
            obj = {key: anno[key] for key in ann_keys if (key in anno)}
            segm = (anno.get('inmodal_seg', None) if dataset_name.endswith('visible') else anno.get('segmentation', None))
            if segm:
                # dict => RLE, kept as-is; list => polygons, where degenerate
                # ones (odd coordinate count or < 3 points) are dropped.
                if (not isinstance(segm, dict)):
                    segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
                    if (len(segm) == 0):
                        num_instances_without_valid_segmentation += 1
                        continue
                obj['segmentation'] = segm
            # NOTE(review): if 'inmodal_seg' is missing, vis_segm is None and
            # the polygon filter below raises TypeError — this assumes every
            # annotation carries an inmodal mask; confirm against the dataset.
            vis_segm = anno.get('inmodal_seg', None)
            if (not isinstance(vis_segm, dict)):
                vis_segm = [poly for poly in vis_segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
                if (len(vis_segm) == 0):
                    num_instances_without_valid_visible_segmentation += 1
                    continue
            obj['visible_mask'] = vis_segm
            obj['bbox_mode'] = BoxMode.XYWH_ABS
            if id_map:
                obj['category_id'] = id_map[obj['category_id']]
            objs.append(obj)
        record['annotations'] = objs
        dataset_dicts.append(record)
    if (num_instances_without_valid_segmentation > 0):
        logger.warn('Filtered out {} instances without valid segmentation. There might be issues in your dataset generation process.'.format(num_instances_without_valid_segmentation))
    if (num_instances_without_valid_visible_segmentation > 0):
        logger.warn('Filtered out {} instances without valid visible segmentation. There might be issues in your dataset generation process.'.format(num_instances_without_valid_visible_segmentation))
    return dataset_dicts
class AbsTensor():
    """Abstract tensor: a shape whose dims are ints and/or z3 symbols, plus a dtype."""

    def __init__(self, shape: List[Union[(int, z3.ExprRef)]], dtype: DType):
        assert isinstance(shape, (list, tuple)), f'Shape must be a list/tuple, but got {shape}'
        self.shape = list(shape)
        self.dtype = DType(dtype)

    def downcast_rank(self):
        """Return a rank-only copy: same ndims and dtype, every dim unknown."""
        return AbsTensor(shape=([None] * self.ndims), dtype=self.dtype)

    def __hash__(self) -> int:
        return hash((tuple(self.shape), self.dtype))

    def __repr__(self) -> str:
        return f'AbsTensor<{self.dtype.short()}>{str(self.shape)}'

    def pretty(self) -> str:
        return f'{self.dtype.short()}{self.shape}'

    def weak_compare(self, other: 'AbsTensor') -> bool:
        """Equal dtype and rank; symbolic dims match anything."""
        if ((self.dtype != other.dtype) or (self.ndims != other.ndims)):
            return False
        for (l, r) in zip(self.shape, other.shape):
            if (isinstance(l, z3.ExprRef) or isinstance(r, z3.ExprRef)):
                continue
            if (l != r):
                return False
        return True

    def strong_compare(self, other: 'AbsTensor') -> bool:
        """Exact shape and dtype equality."""
        return ((self.shape == other.shape) and (self.dtype == other.dtype))

    def __eq__(self, other: 'AbsTensor') -> bool:
        return (isinstance(other, AbsTensor) and self.strong_compare(other))

    def ge_zero(self):
        """Constraints dim >= 0 for symbolic dims; concrete dims are checked eagerly."""
        ret = []
        for s in self.shape:
            if isinstance(s, z3.ExprRef):
                ret.append(nnsmith_ge(s, 0))
            else:
                ConstraintCheck.ge(s, 0)
        return ret

    def sym_gt_conc_ge_zero(self):
        """Symbolic dims must be > 0; concrete dims only need >= 0."""
        ret = []
        for s in self.shape:
            if isinstance(s, z3.ExprRef):
                ret.append(nnsmith_gt(s, 0))
            else:
                ConstraintCheck.ge(s, 0)
        return ret

    def gt_zero(self):
        """Constraints dim > 0 for symbolic dims; concrete dims are checked eagerly."""
        ret = []
        for s in self.shape:
            if isinstance(s, z3.ExprRef):
                ret.append(nnsmith_gt(s, 0))
            else:
                ConstraintCheck.gt(s, 0)
        return ret

    def eq(self, other):
        """Element-wise shape-equality constraints against `other` (same rank required)."""
        SanityCheck.eq(self.ndims, other.ndims)
        ret = []
        for i in range(self.ndims):
            if (isinstance(self.shape[i], z3.ExprRef) or isinstance(other.shape[i], z3.ExprRef)):
                ret.append(nnsmith_eq(self.shape[i], other.shape[i]))
            else:
                ConstraintCheck.eq(self.shape[i], other.shape[i])
        return ret

    def torch(self):
        import torch
        return torch.Size(self.shape)

    def constains_symbol(self) -> bool:
        # (sic) historical name kept for API compatibility.
        return any((isinstance(s, z3.ExprRef) for s in self.shape))

    def nelement(self):
        if (len(self.shape) == 0):
            return 1
        return reduce((lambda x, y: nnsmith_mul(x, y)), self.shape, 1)

    def nbytes(self) -> int:
        return (self.nelement() * self.dtype.sizeof())

    def deepcopy(self):
        return AbsTensor(shape=list(self.shape), dtype=self.dtype)

    @property
    def ndims(self):
        # Fix: the class reads `self.ndims` as an attribute everywhere
        # (downcast_rank, weak_compare, eq, htype); without @property those
        # expressions would operate on a bound method, not the rank.
        return len(self.shape)

    def is_concrete(self) -> bool:
        """True iff every dim is a concrete int (no z3 symbols)."""
        return all((isinstance(s, int) for s in self.shape))

    def htype(self):
        """Hashable (dtype, rank) pair."""
        return (self.dtype, self.ndims)
@pytest.fixture(scope='module')
def sconv2dlstm_hidden_reset_zero_instance():
    """Module-scoped fixture: SConv2dLSTM with hidden-state init and 'zero' reset.

    NOTE(review): the original first line was the bare `(scope='module')`
    (invalid syntax) — restored as a pytest fixture decorator; confirm.
    """
    return snn.SConv2dLSTM(1, 8, 3, init_hidden=True, reset_mechanism='zero')
class MinFrontExtractor(FrontExtractorOp):
    """Front extractor mapping the framework 'Min' op onto ReduceMin."""

    op = 'Min'
    enabled = True

    # Fix: `extract` takes `cls` but had no @classmethod (decorator lost),
    # so calling it on the class would have passed the node as `cls`.
    @classmethod
    def extract(cls, node: Node):
        # Propagate the keep_dims flag straight from the protobuf attribute.
        ReduceMin.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b})
        return cls.enabled
def GetArgs():
    """Parse, echo (to stderr) and validate the command-line arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Prune pronunciation candidates based on soft-counts from lattice-alignmentoutputs, and a reference lexicon. Basically, for each word we sort all pronunciationcadidates according to their soft-counts, and then select the top r * N candidates(For words in the reference lexicon, N = # pron variants given by the referencelexicon; For oov words, N = avg. # pron variants per word in the reference lexicon).r is a user-specified constant, like 2.',
        epilog='See steps/dict/learn_lexicon_greedy.sh for example')
    arg_parser.add_argument(
        '--r', type=float, default='2.0',
        help='a user-specified ratio parameter which determines how manypronunciation candidates we want to keep for each word.')
    arg_parser.add_argument(
        'pron_stats', metavar='<pron-stats>', type=str,
        help='File containing soft-counts of all pronounciation candidates; each line must be <soft-counts> <word> <phones>')
    arg_parser.add_argument(
        'ref_lexicon', metavar='<ref-lexicon>', type=str,
        help='Reference lexicon file, where we obtain # pron variants foreach word, based on which we prune the pron candidates.')
    arg_parser.add_argument(
        'pruned_prons', metavar='<pruned-prons>', type=str,
        help='A file in lexicon format, which contains prons we want toprune away from the pron_stats file.')
    # Log the exact invocation for reproducibility.
    print(' '.join(sys.argv), file=sys.stderr)
    return CheckArgs(arg_parser.parse_args())
def gen_iterator(out_path, dataset, gen_p):
    """Iterate `dataset` and export one generated mesh per sample under `out_path`.

    The generator is stored in the module-global `gen` (shared with other
    helpers in this module).
    """
    global gen
    gen = gen_p
    if (not os.path.exists(out_path)):
        os.makedirs(out_path)
    print(out_path)
    loader = dataset.get_loader(shuffle=True)
    for (i, data) in tqdm(enumerate(loader)):
        # Export path mirrors the last two components of the sample's source path.
        path = os.path.normpath(data['path'][0])
        export_path = (out_path + '/generation/{}/{}/'.format(path.split(os.sep)[(- 2)], path.split(os.sep)[(- 1)]))
        if os.path.exists(export_path):
            # Resume support: skip samples whose result file already exists.
            if os.path.exists((export_path + 'gifs_result.obj')):
                print('Path exists - skip! {}'.format(export_path))
                continue
        else:
            os.makedirs(export_path)
        (vs, fs, duration) = gen.generate_mesh(data)
        # Clean degenerate faces before writing the OBJ.
        mesh = trimesh.Trimesh(vs, fs)
        mesh.remove_degenerate_faces()
        mesh.export((export_path + 'gifs_result.obj'))
        print('duration', duration)
def get_oracle_score(ground_truth, predicted_answers, qid_list=None, mute=False):
    """Compute the oracle exact-match score over a set of question ids.

    Args:
        ground_truth: mapping qid -> gold answer entry.
        predicted_answers: mapping qid -> predicted answer string.
        qid_list: qids to evaluate; defaults to all of `ground_truth`.
        mute: suppress the warning for qids missing from the predictions.

    Returns:
        dict with 'oracle_exact_match' (percentage), 'common' (answered qids),
        'denominator', 'pred_len' and 'gold_len'.
    """
    exact_match = common = 0
    if qid_list is None:
        qid_list = ground_truth.keys()
    for qid in qid_list:
        if qid not in predicted_answers:
            # Unanswered questions score 0 but still count in the denominator.
            if not mute:
                message = 'Irrelavant question {} will receive score 0.'.format(qid)
                print(message, file=sys.stderr)
            continue
        common += 1
        prediction = normalize_answer(predicted_answers[qid])
        ground_truths = get_ground_truths(ground_truth[qid])
        em_for_this_question = has_exact_match(ground_truths, prediction)
        exact_match += int(em_for_this_question)
    # Fix: guard against an empty qid list (previously ZeroDivisionError).
    denominator = len(qid_list)
    exact_match = ((100.0 * exact_match) / denominator) if denominator else 0.0
    return {'oracle_exact_match': exact_match, 'common': common, 'denominator': denominator, 'pred_len': len(predicted_answers), 'gold_len': len(ground_truth)}
class inconv(nn.Module):
    """Input stage of the network: a single double_conv on the raw input."""

    def __init__(self, in_ch, out_ch):
        super(inconv, self).__init__()
        self.conv = double_conv(in_ch, out_ch)

    def forward(self, x):
        return self.conv(x)
class AdversarialTopkErrorRate(TopkErrorRate):
    """Top-k error rate measured on adversarially perturbed inputs."""

    def __init__(self, model, adversary=None, k=1):
        super().__init__(model, k)
        # Default to the identity perturbation when no adversary is supplied.
        self.adversary = adversary if adversary else (lambda x, y: x)

    def update(self, inputs, labels):
        """Perturb the batch with the adversary, then score it as usual."""
        perturbed = self.adversary(inputs, labels)
        return super().update(perturbed, labels)
class DataSet(object):
    """Minimal in-memory image/label dataset with epoch-aware mini-batching.

    Real images arrive as (N, H, W, 1) arrays and are flattened to (N, H*W)
    float32 rescaled to [0, 1]. With fake_data=True no real data is stored.
    """

    def __init__(self, images, labels, fake_data=False):
        if fake_data:
            self._num_examples = 10000
        else:
            assert (images.shape[0] == labels.shape[0]), ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Only single-channel images are supported.
            assert (images.shape[3] == 1)
            images = images.reshape(images.shape[0], (images.shape[1] * images.shape[2]))
            # Normalize pixel values from [0, 255] to [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, (1.0 / 255.0))
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    # Fix: these accessors are read as attributes (e.g. ds.images) everywhere
    # this class is conventionally used; the @property decorators had been lost.
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples, reshuffling at each epoch end."""
        if fake_data:
            # Fix: xrange (Python 2 only) replaced with range.
            fake_image = [1.0 for _ in range(784)]
            fake_label = 0
            return ([fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)])
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if (self._index_in_epoch > self._num_examples):
            # Epoch finished: reshuffle the data and restart from the beginning.
            self._epochs_completed += 1
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            start = 0
            self._index_in_epoch = batch_size
            assert (batch_size <= self._num_examples)
        end = self._index_in_epoch
        return (self._images[start:end], self._labels[start:end])
def export_animated_mesh(output_path):
    """Export the 'Armature' object and its first child to glTF (.glb) or FBX.

    The export format is chosen from the file extension of `output_path`;
    any other extension aborts the process with exit code 1.
    """
    # Make sure the destination directory exists before exporting.
    output_dir = os.path.dirname(output_path)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir, exist_ok=True)
    # Select exactly the armature and its mesh child for a selection-only export.
    bpy.ops.object.select_all(action='DESELECT')
    armature = bpy.data.objects['Armature']
    armature.select_set(True)
    armature.children[0].select_set(True)
    if output_path.endswith('.glb'):
        print('Exporting to glTF binary (.glb)')
        bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=True, export_morph=False)
    elif output_path.endswith('.fbx'):
        print('Exporting to FBX binary (.fbx)')
        bpy.ops.export_scene.fbx(filepath=output_path, use_selection=True, add_leaf_bones=False)
    else:
        print(('ERROR: Unsupported export format: ' + output_path))
        sys.exit(1)
    return
@pattern_registry(pattern_type='MergedEmbeddingbag')
class MergedEmbeddingbag(Pattern):
    """Fuse the Split/Gather/.../Loop embedding-lookup subgraph into one node.

    NOTE(review): the original first line read `_registry(pattern_type=...)`,
    which is not valid as written — restored as the registry decorator;
    confirm the exact decorator name against the project's pattern registry.
    """

    def __call__(self, model):
        """Search `model` for the pattern and replace all matches with a single
        MergedEmbeddingbag node; returns the (possibly rewritten) model."""
        pattern_mapping_config = {'MergedEmbeddingbag': [{'patterns': {'in': [[(0, 'Split'), (1, 'Squeeze'), (2, 'Shape'), (3, 'Gather'), (5, 'Unsqueeze'), (6, 'Concat'), (7, 'Slice'), (8, 'Shape'), (9, 'Gather'), (10, 'Loop')], [(), (4, 'Gather'), (6, 'Concat')]]}}]}
        pattern = pattern_mapping_config['MergedEmbeddingbag'][0]['patterns']['in']
        patterns_nodes_name = util.search_pattern(pattern, model)
        if (len(patterns_nodes_name) != 0):
            # Anchor nodes shared across all matches: the Split feeding the
            # loops and the Gather providing the indices tensor.
            split_idx = model.get_node_id(patterns_nodes_name[0][0])
            split_node = model.nodes[split_idx]
            gather_idx = model.get_node_id(patterns_nodes_name[0][4])
            gather_node = model.nodes[gather_idx]
            match_nodes_name = []
            merged_embeddingbag_inputs = []
            merged_embeddingbag_outputs = []
            merged_embeddingbag_inputs.append(gather_node.input_tensors[0])
            merged_embeddingbag_inputs.append(split_node.input_tensors[0])
            for pattern_nodes_name in patterns_nodes_name:
                # Each match contributes one embedding-table weight (input of
                # its Loop node) and one output tensor.
                loop_idx = model.get_node_id(pattern_nodes_name[10])
                loop_node = model.nodes[loop_idx]
                weight = loop_node.input_tensors[0]
                merged_embeddingbag_inputs.append(weight)
                loop_output = loop_node.output_tensors[0]
                merged_embeddingbag_outputs.append(loop_output)
                # Mark every matched node (minus the trailing sentinel entry)
                # for removal.
                for idx in range((len(pattern_nodes_name) - 1)):
                    match_nodes_name.append(pattern_nodes_name[idx])
            merged_embeddingbag_node_name = ('MergedEmbeddingbag_' + split_node.name)
            merged_embeddingbag_node = util.construct_node(merged_embeddingbag_node_name, 'MergedEmbeddingbag', merged_embeddingbag_inputs, merged_embeddingbag_outputs)
            model.remove_nodes(match_nodes_name)
            model.insert_nodes(split_idx, [merged_embeddingbag_node])
        return model
class BasicBlock(nn.Module):
    """Two-conv residual block with per-conv dilation and optional downsampling."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=(1, 1), residual=True, BatchNorm=None):
        super(BasicBlock, self).__init__()
        # Main branch: conv-bn-relu, conv-bn (second ReLU applied after the add).
        self.conv1 = conv3x3(inplanes, planes, stride, padding=dilation[0], dilation=dilation[0])
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, padding=dilation[1], dilation=dilation[1])
        self.bn2 = BatchNorm(planes)
        self.downsample = downsample
        self.stride = stride
        # When False, the skip connection is dropped entirely.
        self.residual = residual

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.residual:
            out += shortcut
        return self.relu(out)
def load_network(model, network_label, epoch, iteration, args):
    # Load checkpoint weights for `model` from args.load_dir (filename derived
    # from network label, model type, epoch and iteration), then move it to GPU.
    dataset = args.data_path.split(os.sep)[(- 1)]
    save_filename = '{0}_net_{1}_{2}_{3}.pth'.format(network_label, args.model, epoch, iteration)
    save_path = osp.join(args.load_dir, save_filename)
    model_state = torch.load(save_path)
    # Checkpoints may be either a raw state_dict or a wrapper dict.
    if ('state_dict' in model_state):
        model.load_state_dict(model_state['state_dict'])
    else:
        model.load_state_dict(model_state)
    # NOTE(review): this rebuilt `model_state` dict is never used or saved in
    # this function (and moves the model to CPU as a side effect) — looks like
    # leftover from a save_network twin; confirm before removing.
    model_state = {'state_dict': model.cpu().state_dict(), 'epoch': epoch, 'iteration': iteration, 'model': args.model, 'color_space': args.color_space, 'batch_size': args.batch_size, 'dataset': dataset, 'image_size': args.image_size}
    # NOTE(review): `device_id=` was removed from Module.cuda in modern PyTorch
    # (use `model.cuda(args.gpu)`); also the log prints args.load, not `iteration`.
    model.cuda(device_id=args.gpu)
    print('Loaded {0} from epoch: {1} itr: {2}'.format(network_label, epoch, args.load))
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=(- 1)):
    """Build a LambdaLR schedule: linear warmup followed by cosine decay.

    The LR multiplier ramps linearly from 0 to 1 over `num_warmup_steps`,
    then follows a cosine wave (`num_cycles` full cycles over the remaining
    steps), clamped below at 0.
    """
    def lr_lambda(step):
        # Warmup phase: linear ramp (max(1, ...) guards num_warmup_steps == 0).
        if step < num_warmup_steps:
            return float(step) / float(max(1, num_warmup_steps))
        # Decay phase: fraction of post-warmup training completed.
        progress = float(step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        cosine = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + cosine))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def eval_basemodel_precision_recall(pred_fn, source_fn, rel_topk, obj_topk, num_last_eval_points=4000):
    """Load predictions and source events, group by time, and print precision/recall.

    Only the most recent `num_last_eval_points` predictions are evaluated,
    at the given relation/object top-k cutoffs.
    """
    predictions = file_uri_reader_processor(pred_fn)[-num_last_eval_points:]
    events = file_uri_reader_processor(source_fn)['data']
    grouped = group_pred_data_in_time(predictions, events)
    metrics = get_precision_recall(grouped, rel_topk=rel_topk, obj_topk=obj_topk)
    print(metrics)
@add_start_docstrings('The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.', SEGFORMER_START_DOCSTRING)
class SegformerModel(SegformerPreTrainedModel):
    """Headless SegFormer encoder wrapper.

    NOTE(review): the decorator lines were garbled (`_start_docstrings`, etc.);
    restored as the standard transformers docstring decorators — confirm.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = SegformerEncoder(config)
        # Initialize weights and apply any final processing.
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; `heads_to_prune` maps layer index -> head list."""
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format('(batch_size, sequence_length)'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, BaseModelOutput)]:
        # Fall back to config defaults when flags are not given explicitly.
        output_attentions = (output_attentions if (output_attentions is not None) else self.config.output_attentions)
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        encoder_outputs = self.encoder(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        if (not return_dict):
            # Tuple output: (last_hidden_state, *optional extras).
            return ((sequence_output,) + encoder_outputs[1:])
        return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
class Expr(object):
    """Base class for expression nodes carrying source-position metadata.

    `original` is a (name, index) pair pointing back at the construct this
    expression was derived from, or None when there is no origin.
    """

    def __init__(self, line=None, statement=False, original=None):
        self.line = line
        self.statement = statement
        self.original = original

    def copyargs(self):
        """Return constructor kwargs that reproduce this node's metadata."""
        return dict(line=self.line, statement=self.statement, original=self.original)

    def replace_original(self, d):
        """Remap the origin name through `d`, keeping the index untouched."""
        if not self.original:
            return
        name = self.original[0]
        if name in d:
            self.original = (d[name], self.original[1])

    def expr_original(self, s):
        """Append a `{name, index}` origin annotation to `s` when one exists."""
        if not self.original:
            return s
        return '%s{%s, %d}' % (s, self.original[0], self.original[1])
@BACKBONE_REGISTRY.register()
def build_resnet_backbone(cfg, input_shape):
    """Build a ResNet backbone from a detectron2-style config.

    NOTE(review): the original first line read `_REGISTRY.register()`, which is
    not a decorator; restored as `@BACKBONE_REGISTRY.register()` — confirm.

    Args:
        cfg: config node providing MODEL.RESNETS.* / MODEL.BACKBONE.* options.
        input_shape: ShapeSpec-like object supplying the input channel count.

    Returns:
        A ResNet instance with the first `freeze_at` stages frozen.
    """
    # Stem (conv1 + pooling).
    norm = cfg.MODEL.RESNETS.NORM
    stem = BasicStem(in_channels=input_shape.channels, out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, norm=norm)
    # Remaining architecture hyper-parameters.
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = (num_groups * width_per_group)
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    assert (res5_dilation in {1, 2}), 'res5_dilation cannot be {}.'.format(res5_dilation)
    num_blocks_per_stage = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
    if (depth in [18, 34]):
        # R18/R34 use BasicBlock and support none of the bottleneck-only options.
        assert (out_channels == 64), 'Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34'
        assert (not any(deform_on_per_stage)), 'MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34'
        assert (res5_dilation == 1), 'Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34'
        assert (num_groups == 1), 'Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34'
    stages = []
    # Build only up to the deepest requested output stage.
    out_stage_idx = [{'res2': 2, 'res3': 3, 'res4': 4, 'res5': 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for (idx, stage_idx) in enumerate(range(2, (max_stage_idx + 1))):
        # res5 may trade its stride for dilation (DeepLab-style).
        dilation = (res5_dilation if (stage_idx == 5) else 1)
        first_stride = (1 if ((idx == 0) or ((stage_idx == 5) and (dilation == 2))) else 2)
        stage_kargs = {'num_blocks': num_blocks_per_stage[idx], 'stride_per_block': ([first_stride] + ([1] * (num_blocks_per_stage[idx] - 1))), 'in_channels': in_channels, 'out_channels': out_channels, 'norm': norm}
        if (depth in [18, 34]):
            stage_kargs['block_class'] = BasicBlock
        else:
            stage_kargs['bottleneck_channels'] = bottleneck_channels
            stage_kargs['stride_in_1x1'] = stride_in_1x1
            stage_kargs['dilation'] = dilation
            stage_kargs['num_groups'] = num_groups
            if deform_on_per_stage[idx]:
                stage_kargs['block_class'] = DeformBottleneckBlock
                stage_kargs['deform_modulated'] = deform_modulated
                stage_kargs['deform_num_groups'] = deform_num_groups
            else:
                stage_kargs['block_class'] = BottleneckBlock
        blocks = ResNet.make_stage(**stage_kargs)
        # Each successive stage doubles the channel width.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features).freeze(freeze_at)
class ResInitBlock(nn.Module):
    """ResNet stem: 7x7 stride-2 conv block followed by a 3x3 stride-2 max-pool."""

    def __init__(self, in_channels, out_channels):
        super(ResInitBlock, self).__init__()
        self.conv = conv7x7_block(in_channels=in_channels, out_channels=out_channels, stride=2)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        # Overall spatial reduction: 4x (2x from the conv, 2x from the pool).
        return self.pool(self.conv(x))
def remove_unused_nodes_(gm: GraphModule, lint_and_recompile: bool=True):
    """Erase dead graph nodes (nodes with no users) from `gm` in place.

    Fix: iterate in reverse topological order so erasing a dead node can
    expose — and remove in the same pass — its now-unused producers; the
    previous forward iteration left chains of dead nodes partially removed.
    Placeholders and outputs are always kept to preserve the call signature.
    """
    graph = gm.graph
    for node in reversed(graph.nodes):
        if (not node.users) and (node.op not in ['placeholder', 'output']):
            graph.erase_node(node)
    if lint_and_recompile:
        graph.lint()
        gm.recompile()
def get_mlp(features, activate):
    """Build an MLP whose Linear layers are sized by consecutive `features` entries.

    `activate` may be an nn.Module class or its name as a string (e.g. 'ReLU');
    an activation instance follows every Linear layer, including the last.
    """
    if isinstance(activate, str):
        activate = getattr(nn, activate)
    layers = []
    for in_dim, out_dim in zip(features, features[1:]):
        layers.extend((nn.Linear(in_dim, out_dim), activate()))
    return nn.Sequential(*layers)
def is_float(numStr):
    """Return True iff `numStr` is a plain decimal literal like '1.5', '-0.25', '+3.0'.

    Integers ('3'), scientific notation ('1e5') and malformed signs ('--1.5')
    are rejected. Fix: the previous version lstrip'ed *all* leading '-'/'+'
    characters before matching a pattern that itself allowed a sign, so
    strings such as '--1.5' or '-+1.5' were wrongly accepted; the try/except
    around the match (which cannot raise here) was removed as well.
    """
    text = str(numStr).strip()
    # One optional sign, digits, a mandatory dot, digits.
    return re.match(r'^[-+]?[0-9]+\.[0-9]+$', text) is not None
@require_vision
@require_torch
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """Integration test for the pretrained clip-italian dual encoder.

    NOTE(review): the decorator lines were garbled (`_vision`/`_torch`);
    restored as the transformers `require_vision`/`require_torch` skips.
    """

    def test_inference(self):
        model = VisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        # NOTE(review): the fixture name was garbled to '.png' — restored to
        # the standard COCO sample used across the test suite; confirm.
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='pt')
        outputs = model(**inputs)
        # logits_per_image: (num_images, num_texts); logits_per_text is its transpose.
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]))
        expected_logits = torch.tensor([[1.2284727, 0.3104122]])
        self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=0.001))
class TestNMTMedium(ExampleConfigTest, EncoderDecoderTests):
    """Runs the shared encoder-decoder test suite against the medium NMT example config."""

    def _config_path(self):
        # Path of the example YAML this test case exercises.
        return os.path.join(EXAMPLE_CONFIG_DIR, 'nmt_medium.yml')
def ltr_collate(batch):
    """Collate a list of samples into batched tensors (LTR variant of the
    default PyTorch collate), recursing into TensorDict/TensorList/maps/sequences.

    Fix: `collections.Mapping` and `collections.Sequence` aliases were removed
    in Python 3.10 — the abstract base classes live in `collections.abc`.
    """
    error_msg = 'batch must contain tensors, numbers, dicts or lists; found {}'
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _check_use_shared_memory():
            # Stack directly into shared memory so DataLoader workers avoid a copy.
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
        elem = batch[0]
        if (elem_type.__name__ == 'ndarray'):
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if (elem.shape == ()):
            # numpy scalars: route through the matching torch tensor constructor.
            # NOTE(review): `numpy_type_map` was removed from newer torch
            # versions of torch.utils.data.dataloader — confirm torch pin.
            py_type = (float if elem.dtype.name.startswith('float') else int)
            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], TensorDict):
        return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]})
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], TensorList):
        transposed = zip(*batch)
        return TensorList([ltr_collate(samples) for samples in transposed])
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [ltr_collate(samples) for samples in transposed]
    elif (batch[0] is None):
        return batch
    raise TypeError(error_msg.format(type(batch[0])))
class SimulationRobotAction(AbstractAction):
    """Container for one simulated robot action: arm, gripper and mobile-base commands."""

    def __init__(self, arm_cmd=None, gripper_cmd=None, mobile_base_cmd=None, code=None, error=False):
        self.arm_cmd = arm_cmd
        self.gripper_cmd = gripper_cmd
        self.mobile_base_cmd = mobile_base_cmd
        self.error = error
        self.code = code

    # Fix: the method takes `cls` but had no @classmethod, so calling it on
    # the class would have failed with a missing argument.
    @classmethod
    def getDescription(cls):
        # Names of the command channels this action type carries.
        # NOTE(review): 'mobile_cmd' here vs attribute 'mobile_base_cmd' —
        # confirm the mismatch is intentional.
        return ('arm_cmd', 'gripper_cmd', 'mobile_cmd')
def check_type(param, param_name: str, typ, typ_name: str=None):
    """Validate that `param` is an instance of `typ` and return it unchanged.

    Raises:
        ValueError: naming the parameter and the expected type (shown as
            `typ_name` when given, else `str(typ)`) on failure.
    """
    if isinstance(param, typ):
        return param
    shown = typ_name if typ_name is not None else str(typ)
    raise ValueError(f"'{param_name}' should be of type `{shown}`, got {type(param)}.")
class TimerCollection():
    """A named collection of TimerStat objects with a global on/off switch."""

    def __init__(self):
        self._timers = collections.defaultdict(TimerStat)
        self._enabled = True

    def disable(self):
        """Stop recording; `record` hands out no-op contexts."""
        self._enabled = False

    def enable(self):
        """Resume recording."""
        self._enabled = True

    def reset(self):
        """Clear the accumulated statistics of every timer."""
        for timer in self._timers.values():
            timer.reset()

    def record(self, key):
        """Return a timing context for `key`, or a null context when disabled."""
        return self._timers[key] if self._enabled else _nullcontext()

    def stats(self, mean=True, last=False):
        """Summarize every timer that has recorded at least one sample."""
        summary = {}
        for name, timer in self._timers.items():
            if timer.count <= 0:
                continue
            if mean:
                summary['mean_%s_s' % name] = timer.mean
            if last:
                summary['last_%s_s' % name] = timer.last
        return summary
def train(args, logger, dataloader, model, classifier1, classifier2, criterion1, criterion2, optimizer, epoch):
    """Train `model` for one epoch with within-view and across-view classifier losses.

    Returns:
        tuple of running averages: (total loss, CE total, within-view CE,
        across-view CE, MSE) for the epoch.
    """
    losses = AverageMeter()
    losses_mse = AverageMeter()
    losses_cet = AverageMeter()
    losses_cet_across = AverageMeter()
    losses_cet_within = AverageMeter()
    model.train()
    if args.mse:
        criterion_mse = torch.nn.MSELoss().cuda()
    # Only the backbone is trained; the classifiers stay in eval mode here.
    classifier1.eval()
    classifier2.eval()
    for (i, (indice, input1, input2, label1, label2)) in enumerate(dataloader):
        # View 1: the equivariant transform is applied to the *input* image.
        input1 = eqv_transform_if_needed(args, dataloader, indice, input1.cuda(non_blocking=True))
        label1 = label1.cuda(non_blocking=True)
        featmap1 = model(input1)
        # View 2: the same transform is applied to the *feature map* instead.
        input2 = input2.cuda(non_blocking=True)
        label2 = label2.cuda(non_blocking=True)
        featmap2 = eqv_transform_if_needed(args, dataloader, indice, model(input2))
        (B, C, _) = featmap1.size()[:3]
        if (i == 0):
            logger.info('Batch input size : {}'.format(list(input1.shape)))
            logger.info('Batch label size : {}'.format(list(label1.shape)))
            logger.info('Batch feature size : {}\n'.format(list(featmap1.shape)))
        if (args.metric_train == 'cosine'):
            # L2-normalize the channel dim so classifier scores act as cosine similarity.
            featmap1 = F.normalize(featmap1, dim=1, p=2)
            featmap2 = F.normalize(featmap2, dim=1, p=2)
        # Across-view CE: each view's features scored by the *other* view's classifier.
        (featmap12_processed, label12_processed) = (featmap1, label2.flatten())
        (featmap21_processed, label21_processed) = (featmap2, label1.flatten())
        output12 = feature_flatten(classifier2(featmap12_processed))
        output21 = feature_flatten(classifier1(featmap21_processed))
        loss12 = criterion2(output12, label12_processed)
        loss21 = criterion1(output21, label21_processed)
        loss_across = ((loss12 + loss21) / 2.0)
        losses_cet_across.update(loss_across.item(), B)
        # Within-view CE: each view scored by its own classifier.
        (featmap11_processed, label11_processed) = (featmap1, label1.flatten())
        (featmap22_processed, label22_processed) = (featmap2, label2.flatten())
        output11 = feature_flatten(classifier1(featmap11_processed))
        output22 = feature_flatten(classifier2(featmap22_processed))
        loss11 = criterion1(output11, label11_processed)
        loss22 = criterion2(output22, label22_processed)
        loss_within = ((loss11 + loss22) / 2.0)
        losses_cet_within.update(loss_within.item(), B)
        loss = ((loss_across + loss_within) / 2.0)
        losses_cet.update(loss.item(), B)
        if args.mse:
            # Optional consistency term pulling the two feature maps together.
            loss_mse = criterion_mse(featmap1, featmap2)
            losses_mse.update(loss_mse.item(), B)
            loss = ((loss + loss_mse) / 2.0)
        losses.update(loss.item(), B)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if ((i % 200) == 0):
            logger.info('{0} / {1}\t'.format(i, len(dataloader)))
    return (losses.avg, losses_cet.avg, losses_cet_within.avg, losses_cet_across.avg, losses_mse.avg)
class Evaluator(object):
    """Base benchmark evaluator.

    Pairs ground-truth and tracker-result files for every sequence of a
    benchmark, runs the subclass-provided evaluation, aggregates per-sequence
    metrics into an overall result, and renders a summary table.
    """

    def __init__(self):
        pass

    def run(self, benchmark_name=None, gt_dir=None, res_dir=None, save_pkl=None, eval_mode='train', seqmaps_dir='seqmaps'):
        """Evaluate all sequences of a benchmark.

        Parameters
        ----------
        benchmark_name: benchmark identifier used in messages and pickle names.
        gt_dir: directory containing '<mode>/<seq>/gt/gt.txt' ground truth.
        res_dir: directory containing '<seq>.txt' tracker result files.
        save_pkl: optional directory where result pickles are written.
        eval_mode: one of 'train', 'test', 'all'.
        seqmaps_dir: directory holding sequence-map files.

        Returns (Overall_Results, results); raises if any gt/result file
        is missing.
        """
        start_time = time.time()
        self.benchmark_gt_dir = gt_dir
        self.seq_file = '{}-{}.txt'.format(benchmark_name, eval_mode)
        self.benchmark_name = benchmark_name
        self.seqmaps_dir = seqmaps_dir
        self.mode = eval_mode
        self.datadir = os.path.join(gt_dir, self.mode)
        error_traceback = ''
        # BUG FIX: the message previously interpolated an undefined name `s`,
        # which would raise NameError instead of the intended AssertionError.
        assert (self.mode in ['train', 'test', 'all']), ('mode: %s not valid ' % self.mode)
        print(('Evaluating Benchmark: %s' % self.benchmark_name))
        self.sequences = os.listdir(self.datadir)
        self.gtfiles = []
        self.tsfiles = []
        for seq in self.sequences:
            gtf = os.path.join(self.benchmark_gt_dir, self.mode, seq, 'gt/gt.txt')
            if path.exists(gtf):
                self.gtfiles.append(gtf)
            else:
                raise Exception(('Ground Truth %s missing' % gtf))
            tsf = os.path.join(res_dir, ('%s.txt' % seq))
            # BUG FIX: this previously re-tested `gtf`, so a missing result
            # file was silently appended instead of raising.
            if path.exists(tsf):
                self.tsfiles.append(tsf)
            else:
                raise Exception(('Result file %s missing' % tsf))
        print('Found {} ground truth files and {} test files.'.format(len(self.gtfiles), len(self.tsfiles)))
        print(self.tsfiles)
        self.MULTIPROCESSING = False
        MAX_NR_CORES = 10
        if self.MULTIPROCESSING:
            self.NR_CORES = np.minimum(MAX_NR_CORES, len(self.tsfiles))
        try:
            results = self.eval()
            # Best-effort aggregation: metrics that cannot be summed across
            # sequences are skipped rather than aborting the evaluation.
            results_attributes = self.Overall_Results.metrics.keys()
            for attr in results_attributes:
                try:
                    self.Overall_Results.__dict__[attr] = sum((obj.__dict__[attr] for obj in self.results))
                except Exception:
                    pass
            # Cached attributes carry their own aggregation function.
            cache_attributes = self.Overall_Results.cache_dict.keys()
            for attr in cache_attributes:
                try:
                    self.Overall_Results.__dict__[attr] = self.Overall_Results.cache_dict[attr]['func']([obj.__dict__[attr] for obj in self.results])
                except Exception:
                    pass
            print('evaluation successful')
            for res in self.results:
                res.compute_clearmot()
            self.Overall_Results.compute_clearmot()
            self.accumulate_df(type='mail')
            self.failed = False
            error = None
        except Exception:
            print(str(traceback.format_exc()))
            print('<br> Evaluation failed! <br>')
            error_traceback += str(traceback.format_exc())
            self.failed = True
            self.summary = None
        end_time = time.time()
        self.duration = ((end_time - start_time) / 60.0)
        if self.failed:
            # Extract segments wrapped in <exc>...<!exc> markers, if any.
            startExc = error_traceback.split('<exc>')
            error_traceback = [m.split('<!exc>')[0] for m in startExc[1:]]
            error = ''
            for err in error_traceback:
                error += ('Error: %s' % err)
            print('Error Message', error)
            self.error = error
            print(('ERROR %s' % error))
        print('Evaluation Finished')
        print('Your Results')
        print(self.render_summary())
        if save_pkl:
            self.Overall_Results.save_dict(os.path.join(save_pkl, ('%s-%s-overall.pkl' % (self.benchmark_name, self.mode))))
            for res in self.results:
                res.save_dict(os.path.join(save_pkl, ('%s-%s-%s.pkl' % (self.benchmark_name, self.mode, res.seqName))))
            print('Successfully save results')
        return (self.Overall_Results, self.results)

    def eval(self):
        """Run the actual per-sequence evaluation; implemented by subclasses."""
        raise NotImplementedError

    def accumulate_df(self, type=None):
        """Concatenate per-sequence dataframes plus the overall row into
        self.summary (sorted by index)."""
        for (k, res) in enumerate(self.results):
            res.to_dataframe(display_name=True, type=type)
            if (k == 0):
                summary = res.df
            else:
                summary = summary.append(res.df)
        summary = summary.sort_index()
        self.Overall_Results.to_dataframe(display_name=True, type=type)
        self.summary = summary.append(self.Overall_Results.df)

    def render_summary(self, buf=None):
        """Render self.summary as a left-justified string (or write to buf)."""
        output = self.summary.to_string(buf=buf, formatters=self.Overall_Results.formatters, justify='left')
        return output
def get_item_iterator(d):
    """Return an iterator over (key, value) pairs of *d*.

    Portable across Python 2 (`iteritems`) and Python 3 (`iter(d.items())`);
    raises RuntimeError on any other major version.
    """
    assert isinstance(d, dict)
    major = sys.version_info[0]
    if major == 2:
        pairs = d.iteritems()
        assert hasattr(pairs, 'next')
    elif major == 3:
        pairs = iter(d.items())
        assert hasattr(pairs, '__next__')
    else:
        raise RuntimeError('Only python 2 and 3 supported.')
    assert hasattr(pairs, '__iter__')
    return pairs
def _conv_bn_relu(x, num_filters: int, bn_train: bool=True):
    """Apply a 3x3 same-padded conv -> batch norm -> ReLU block to *x*.

    bn_train controls the `training` flag passed to batch normalization.
    """
    conv_out = Conv2D(num_filters, kernel_size=(3, 3), padding='same', **_CONV)(x)
    normed = BatchNormalization()(conv_out, training=bn_train)
    return Activation('relu')(normed)
def info(msg, *args, **kwargs):
    """Log *msg* at INFO level.

    Dict messages are pretty-printed and logged line by line so multi-line
    output stays aligned; anything else is forwarded to the logger unchanged.
    """
    if isinstance(msg, dict):
        # IDIOM FIX: the index from enumerate() was discarded; iterate the
        # lines directly.
        for line in _pretty_dict(msg).split('\n'):
            Logger().get_logger().info(line, *args, **kwargs)
    else:
        Logger().get_logger().info(msg, *args, **kwargs)
def plot_density(p, n_pts=1000, range_lim=0.7, figsize=(7, 7), title=None, ax=None):
    """Plot the 2-D density of distribution *p* on a regular grid.

    p: object exposing log_prob(points) over the grid returned by setup_grid.
    n_pts: grid resolution per axis.
    range_lim: half-width of the square plotting domain.
    ax: optional matplotlib axes; a new single-axes figure is created if None.
    """
    if (ax is None):
        (_, ax) = plt.subplots(1, 1, figsize=figsize)
    # assumes setup_grid returns a tensor indexable as [i, j, coord] after the
    # transpose below -- TODO confirm against setup_grid's definition.
    xy = setup_grid(range_lim=range_lim, n_pts=n_pts)
    ij = xy.transpose(0, 1)
    # Plot extents are read from the grid corners (first/last rows/cols).
    (left, right, down, up) = (ij[(0, 0, 0)], ij[((- 1), 0, 0)], ij[(0, 0, 1)], ij[(0, (- 1), 1)])
    # Evaluate the density (exp of log-prob) on the grid, on CPU for imshow.
    data_p = torch.exp(p.log_prob(xy)).cpu().data
    vmax = data_p.max()
    ax.imshow(data_p, cmap=plt.cm.viridis, vmin=0, vmax=vmax, extent=(left, right, down, up), origin='lower')
    ax.axis('image')
    ax.grid(False)
    ax.set_xlim(left, right)
    ax.set_ylim(down, up)
    ax.set_xlabel('Source dim. 1')
    ax.set_ylabel('Source dim. 2')
    if (title is not None):
        ax.set_title(title)
class _BaseNormalization(Layer):
    """Shared base for normalization layers: provides moment computation."""

    def _moments(self, x, axes):
        """Return (mean, variance) of *x* over *axes*, keeping reduced dims."""
        mean = K.mean(x, axis=axes, keepdims=True)
        variance = K.var(x, axis=axes, keepdims=True)
        return (mean, variance)
def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs):
    """Serialize *obj* to a string, a path, or a writable file-like object.

    With file=None the serialized string/bytes is returned. With a path the
    data is written through FileClient. With a file object the handler
    writes into it directly. The format comes from *file_format* or is
    inferred from the path's extension.
    """
    if isinstance(file, Path):
        file = str(file)
    if (file_format is None):
        if is_str(file):
            # Infer the format from the filename extension.
            file_format = file.split('.')[-1]
        elif (file is None):
            raise ValueError('file_format must be specified since file is None')
    if (file_format not in file_handlers):
        raise TypeError(f'Unsupported format: {file_format}')
    handler = file_handlers[file_format]
    if (file is None):
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        file_client = FileClient.infer_client(file_client_args, file)
        # Text-like handlers go through StringIO/put_text; binary handlers
        # through BytesIO/put.
        buffer_cls = StringIO if handler.str_like else BytesIO
        with buffer_cls() as f:
            handler.dump_to_fileobj(obj, f, **kwargs)
            payload = f.getvalue()
        if handler.str_like:
            file_client.put_text(payload, file)
        else:
            file_client.put(payload, file)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def get_trainer(trainer):
    """Resolve a trainer class from its name.

    Raises ValueError for unknown trainer names.
    """
    if trainer == 'ContrastiveLossTrainer':
        return ContrastiveLossTrainer
    if trainer == 'HardestContrastiveLossTrainer':
        return HardestContrastiveLossTrainer
    if trainer == 'TripletLossTrainer':
        return TripletLossTrainer
    if trainer == 'HardestTripletLossTrainer':
        return HardestTripletLossTrainer
    raise ValueError(f'Trainer {trainer} not found')
def test_conv_ds():
    """A depthwise-separable conv must be >9x cheaper than a full conv
    for a 5x5 kernel on a (60, 31, 16) input with 16 output channels."""
    kernel = (5, 5)
    in_shape = (60, 31, 16)
    out_ch = 16
    full_cost = stats.compute_conv2d(*in_shape, out_ch, *kernel)
    ds_cost = stats.compute_conv2d_ds(*in_shape, out_ch, *kernel)
    assert (full_cost / ds_cost) > 9.0
def test_count_intersections():
    """Rays passing through / starting inside / missing a unit cube should
    report 2, 1 and 0 surface intersections respectively."""
    box = o3d.t.geometry.TriangleMesh.from_legacy(o3d.geometry.TriangleMesh.create_box())
    scene = o3d.t.geometry.RaycastingScene()
    scene.add_triangles(box)
    # Each row is (origin xyz, direction xyz).
    rays = o3d.core.Tensor([[0.5, 0.5, (- 1), 0, 0, 1], [0.5, 0.5, 0.5, 0, 0, 1], [10, 10, 10, 1, 0, 0]], dtype=o3d.core.float32)
    counts = scene.count_intersections(rays)
    np.testing.assert_equal(counts.numpy(), [2, 1, 0])
class StableDiffusionXLPipelineOutput(BaseOutput):
    """Output container for a Stable Diffusion XL pipeline.

    images holds the generated images, either as a list of PIL images or as
    a numpy array.
    """
    images: Union[(List[PIL.Image.Image], np.ndarray)]
def float2bitstr(f):
    """Return the 32-bit IEEE-754 (big-endian, single precision) binary
    string representation of float *f*."""
    packed = struct.pack('>f', f)
    bits = [format(byte, '08b') for byte in packed]
    return ''.join(bits)
class WarpCTC(chainer.Chain):
    """Chainer CTC loss layer backed by warp-ctc.

    Projects encoder outputs to the output vocabulary and computes the CTC
    loss against unpadded label sequences.
    """
    def __init__(self, odim, eprojs, dropout_rate):
        """odim: output vocabulary size; eprojs: encoder projection dim;
        dropout_rate: dropout applied to encoder outputs before projection."""
        super(WarpCTC, self).__init__()
        self.dropout_rate = dropout_rate
        self.loss = None
        with self.init_scope():
            self.ctc_lo = L.Linear(eprojs, odim)
    def __call__(self, hs, ys):
        """Compute the CTC loss for a batch of encoder outputs *hs* and
        label sequences *ys* (both lists of variable-length arrays)."""
        self.loss = None
        ilens = [x.shape[0] for x in hs]
        olens = [x.shape[0] for x in ys]
        # Pad, apply dropout, and project each frame to the output vocab.
        y_hat = self.ctc_lo(F.dropout(F.pad_sequence(hs), ratio=self.dropout_rate), n_batch_axes=2)
        # warp-ctc expects time-major input: (T, B, odim).
        y_hat = y_hat.transpose(1, 0, 2)
        logging.info(((self.__class__.__name__ + ' input lengths: ') + str(ilens)))
        logging.info(((self.__class__.__name__ + ' output lengths: ') + str(olens)))
        # Imported lazily so the module loads without chainer_ctc installed.
        from chainer_ctc.warpctc import ctc as warp_ctc
        # Labels must live on the CPU for warp-ctc.
        self.loss = warp_ctc(y_hat, ilens, [cuda.to_cpu(y.data) for y in ys])[0]
        logging.info(('ctc loss:' + str(self.loss.data)))
        return self.loss
    def log_softmax(self, hs):
        """Return frame-level log-softmax over the output vocabulary."""
        y_hat = self.ctc_lo(F.pad_sequence(hs), n_batch_axes=2)
        return F.log_softmax(y_hat.reshape((- 1), y_hat.shape[(- 1)])).reshape(y_hat.shape)
    def argmax(self, hs_pad):
        """Return the greedy (argmax) label at each frame."""
        return F.argmax(self.ctc_lo(F.pad_sequence(hs_pad), n_batch_axes=2), axis=(- 1))
def cal_model_parm_nums(model):
    """Return the total number of scalar elements across all parameters
    of *model*."""
    return sum(param.nelement() for param in model.parameters())
class classifier(nn.Module):
    """Small classification head: ReLU followed by a 16 -> out_channel
    linear projection."""

    def __init__(self, in_channel=1, out_channel=10):
        super(classifier, self).__init__()
        # in_channel is kept for interface compatibility; the head always
        # consumes 16-dimensional features.
        self.fc5 = nn.Sequential(nn.ReLU(), nn.Linear(16, out_channel))

    def forward(self, x):
        """Map 16-dim features *x* to class scores."""
        return self.fc5(x)
def _completion_threshold(instance, full_key, truncate_key, regen_key, num_samples):
    """Mean completion log-prob minus the mean over regenerated samples.

    full_key / truncate_key select the full response and its truncated
    prefix; the prefix length marks where the completion's token log-probs
    begin. Regenerated samples with empty token lists are skipped.
    """
    prefix_len = len(instance[truncate_key]['choices'][0]['logprobs']['token_logprobs'])
    completion_logprobs = instance[full_key]['choices'][0]['logprobs']['token_logprobs'][prefix_len:]
    completion_mean = np.mean(completion_logprobs)
    # Per-sample mean log-prob of each regenerated completion.
    regen_means = []
    for i in range(num_samples):
        token_logprobs = instance[regen_key]['choices'][i]['logprobs']['token_logprobs']
        if len(token_logprobs) != 0:
            regen_means.append(sum(token_logprobs) / len(token_logprobs))
    return (completion_mean - np.mean(regen_means))


def get_ratio_avgk(instance, num_samples=20):
    """Return (original_th, gen_th) detection thresholds.

    Each threshold compares a response completion's mean token log-prob
    against the average of *num_samples* regenerated completions. The two
    identical computations were previously duplicated inline; they now share
    the _completion_threshold helper.
    """
    original_th = _completion_threshold(instance, 'original_human_response', 'original_human_response_truncate', 'gold_gen_regen', num_samples)
    gen_th = _completion_threshold(instance, 'original_gen_response', 'original_gen_response_truncate', 'gen_completion_regen', num_samples)
    return (original_th, gen_th)
('AGENT_22')
class AGENT_22(BaseAgent):
    """Agent configuration: CNN policy with EXTRACTOR_5 features (64-dim)
    feeding three 64-unit layers for both policy and value heads."""
    # Policy type and feature extractor are resolved by the agent framework.
    type = PolicyType.CNN
    features_extractor_class = EXTRACTOR_5
    features_extractor_kwargs = dict(features_dim=64)
    # Separate 3x64 MLP heads for policy (pi) and value function (vf).
    net_arch = [dict(pi=[64, 64, 64], vf=[64, 64, 64])]
    activation_fn = nn.ReLU
class ChangePointKernel(Kernel):
    """Change-point kernel: switches between its two operand kernels at
    `location` along input dimension `dimension`, with transition sharpness
    controlled by `steepness`.

    NOTE(review): several methods read `o.param_vector`, `o.latex`,
    `o.depth`, `o.effective_params` as attributes (no call) -- presumably
    these are properties on operand kernels; confirm against the Kernel base.
    """
    def __init__(self, dimension=None, location=None, steepness=None, operands=None):
        # NOTE(review): this assert dereferences `operands` before the
        # `operands is None` default below, making the None branch
        # unreachable (len(None) raises TypeError) -- confirm intent.
        assert (len(operands) == 2)
        self.dimension = dimension
        self.location = location
        self.steepness = steepness
        if (operands is None):
            self.operands = []
        else:
            self.operands = operands
    def is_stationary(self):
        # A change point makes covariance depend on absolute input location.
        return False
    def sf(self):
        """Scale factor is undefined for a non-stationary kernel."""
        raise RuntimeError('Cannot ask for scale factor of non-stationary kernel')
    def arity(self):
        # Binary operator: exactly two operand kernels.
        return 2
    def gpml_function(self):
        return '{}'
    def id(self):
        return 'CP'
    def param_vector(self):
        """Concatenate [location, steepness] with operand parameter vectors."""
        return np.concatenate(([np.array([self.location, self.steepness])] + [o.param_vector for o in self.operands]))
    def latex(self):
        return (('{\\sc CP}\\left( ' + ' , '.join([o.latex for o in self.operands])) + ' \\right)')
    def syntax(self):
        return ((((colored('CP( ', self.depth) + self.operands[0].syntax) + colored(', ', self.depth)) + self.operands[1].syntax) + colored(' )', self.depth))
    def is_operator(self):
        return True
    def is_abelian(self):
        # Operand order matters (before/after the change point).
        return False
    def effective_params(self):
        # location + steepness plus the operands' own parameters.
        return (2 + sum([o.effective_params for o in self.operands]))
    def depth(self):
        return (max([o.depth for o in self.operands]) + 1)
    def copy(self):
        """Deep copy: operands are copied recursively."""
        return ChangePointKernel(dimension=self.dimension, location=self.location, steepness=self.steepness, operands=[o.copy() for o in self.operands])
    def initialise_params(self, sd=1, data_shape=None):
        """Randomly initialise unset parameters from the data range, then
        recurse into the operands."""
        if (self.location is None):
            # Place the change point uniformly within the data range.
            self.location = np.random.uniform(data_shape['x_min'][self.dimension], data_shape['x_max'][self.dimension])
        if (self.steepness is None):
            # Steepness scaled inversely to the (log) data range.
            self.steepness = np.random.normal(loc=(3.3 - np.log((data_shape['x_max'][self.dimension] - data_shape['x_min'][self.dimension]))), scale=1)
        for o in self.operands:
            o.initialise_params(sd=sd, data_shape=data_shape)
    def __repr__(self):
        return ('ChangePointKernel(dimension=%s, location=%s, steepness=%s, operands=%s)' % (self.dimension, self.location, self.steepness, (('[ ' + ', '.join([o.__repr__() for o in self.operands])) + ' ]')))
    def pretty_print(self):
        return ((((colored(('CP(dim=%s, loc=%s, steep=%s, ' % (self.dimension, format_if_possible('%1.1f', self.location), format_if_possible('%1.1f', self.steepness))), self.depth) + self.operands[0].pretty_print()) + colored(', ', self.depth)) + self.operands[1].pretty_print()) + colored(')', self.depth))
    def load_param_vector(self, params):
        """Inverse of param_vector: consume [location, steepness] then split
        the remainder across the operands."""
        self.location = params[0]
        self.steepness = params[1]
        start = 2
        for o in self.operands:
            end = (start + o.num_params)
            o.load_param_vector(params[start:end])
            start = end
    def get_gpml_expression(self, dimensions):
        # GPML uses 1-based dimension indexing.
        return ('{, {%s, %s}}' % ((self.dimension + 1), ', '.join((o.get_gpml_expression(dimensions=dimensions) for o in self.operands))))
    def multiply_by_const(self, sf):
        # The scale factor is pushed into both operands.
        for o in self.operands:
            o.multiply_by_const(sf=sf)
    def out_of_bounds(self, constraints):
        """True if location leaves the data range, steepness is too small
        for the range, or any operand is itself out of bounds."""
        return ((self.location < constraints['x_min'][self.dimension]) or (self.location > constraints['x_max'][self.dimension]) or (self.steepness < ((- np.log((constraints['x_max'][self.dimension] - constraints['x_min'][self.dimension]))) + 2.3)) or any([o.out_of_bounds(constraints) for o in self.operands]))
class AttributeCommand(BaseCLICommand):
    """CLI command that runs feature attribution on one or more sentences."""
    _name = 'attribute'
    _help = 'Perform feature attribution on one or multiple sentences'
    _dataclasses = AttributeArgs

    # BUG FIX: `run` was defined without `self`/`cls`, so calling it on an
    # instance would have passed the instance as `args`. Declaring it a
    # static method matches the existing `run(args)` signature.
    @staticmethod
    def run(args: AttributeArgs):
        """Dispatch the attribution with the parsed CLI arguments."""
        attribute(args.input_texts, args.generated_texts, args)
def configure_experiment(problems: dict, n_run: int):
    """Build the job list for a benchmark experiment.

    For every (run, problem) pair, one Job each is created for NSGAII, GDE3
    and SMPSO, all capped at 25000 evaluations. Operator objects are built
    per job so no state is shared between jobs.
    """
    max_evaluations = 25000
    jobs = []
    for run in range(n_run):
        for (problem_tag, problem) in problems.items():
            mutation_prob = (1.0 / problem.number_of_variables)
            nsgaii = NSGAII(problem=problem, population_size=100, offspring_population_size=100, mutation=PolynomialMutation(probability=mutation_prob, distribution_index=20), crossover=SBXCrossover(probability=1.0, distribution_index=20), termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations))
            jobs.append(Job(algorithm=nsgaii, algorithm_tag='NSGAII', problem_tag=problem_tag, run=run))
            gde3 = GDE3(problem=problem, population_size=100, cr=0.5, f=0.5, termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations))
            jobs.append(Job(algorithm=gde3, algorithm_tag='GDE3', problem_tag=problem_tag, run=run))
            smpso = SMPSO(problem=problem, swarm_size=100, mutation=PolynomialMutation(probability=mutation_prob, distribution_index=20), leaders=CrowdingDistanceArchive(100), termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations))
            jobs.append(Job(algorithm=smpso, algorithm_tag='SMPSO', problem_tag=problem_tag, run=run))
    return jobs
('ner_tag')
class NerTagIndexer(TokenIndexer[int]):
    """TokenIndexer that represents tokens by their NER tag (token.ent_type_)."""

    def __init__(self, namespace: str='ner_tags') -> None:
        self._namespace = namespace

    def count_vocab_items(self, token: Token, counter: Dict[(str, Dict[(str, int)])]):
        """Count the token's NER tag; missing/empty tags count as 'NONE'."""
        tag = token.ent_type_
        if (not tag):
            tag = 'NONE'
        counter[self._namespace][tag] += 1

    def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary, index_name: str) -> Dict[(str, List[int])]:
        """Map each token's NER tag to its vocabulary index.

        BUG FIX: previously only `None` tags fell back to 'NONE', while
        count_vocab_items treated any falsy tag (e.g. the empty string spaCy
        uses for untagged tokens) as 'NONE', sending those tags out of
        vocabulary. The two methods now use the same falsy check.
        """
        tags = [('NONE' if (not token.ent_type_) else token.ent_type_) for token in tokens]
        return {index_name: [vocabulary.get_token_index(tag, self._namespace) for tag in tags]}

    def get_padding_token(self) -> int:
        """Padding index used for this indexer."""
        return 0

    def get_padding_lengths(self, token: int) -> Dict[(str, int)]:
        """Single-int tags need no extra padding dimensions."""
        return {}

    def pad_token_sequence(self, tokens: Dict[(str, List[int])], desired_num_tokens: Dict[(str, int)], padding_lengths: Dict[(str, int)]) -> Dict[(str, List[int])]:
        """Pad each tag-index list to the desired number of tokens."""
        return {key: pad_sequence_to_length(val, desired_num_tokens[key]) for (key, val) in tokens.items()}
def get_rank():
    """Return the distributed rank from whichever backend is active."""
    backend = dist_c10d if _use_c10d[0] else dist_no_c10d
    return backend.get_rank()
def time(solver, nccl):
    """Attach per-layer forward/backward timers plus total and allreduce
    timers to *solver*, logging them every `display` iterations.

    NOTE(review): this function shadows the stdlib `time` name if that
    module is imported bare in this file -- confirm naming is intentional.
    """
    fprop = []
    bprop = []
    total = caffe.Timer()
    allrd = caffe.Timer()
    # One timer per layer for the forward and backward passes.
    for _ in range(len(solver.net.layers)):
        fprop.append(caffe.Timer())
        bprop.append(caffe.Timer())
    display = solver.param.display
    def show_time():
        # Log accumulated per-layer timings at the solver's display interval.
        if ((solver.iter % display) == 0):
            s = '\n'
            for i in range(len(solver.net.layers)):
                s += ('forw %3d %8s ' % (i, solver.net._layer_names[i]))
                s += (': %.2f\n' % fprop[i].ms)
            # Backward timings are reported in reverse layer order.
            for i in range((len(solver.net.layers) - 1), (- 1), (- 1)):
                s += ('back %3d %8s ' % (i, solver.net._layer_names[i]))
                s += (': %.2f\n' % bprop[i].ms)
            s += ('solver total: %.2f\n' % total.ms)
            s += ('allreduce: %.2f\n' % allrd.ms)
            caffe.log(s)
    # Start/stop the per-layer timers around each layer's computation.
    solver.net.before_forward((lambda layer: fprop[layer].start()))
    solver.net.after_forward((lambda layer: fprop[layer].stop()))
    solver.net.before_backward((lambda layer: bprop[layer].start()))
    solver.net.after_backward((lambda layer: bprop[layer].stop()))
    # Total spans the solver step; allreduce spans the nccl callback, which
    # is registered between these two bracketing callbacks.
    solver.add_callback((lambda : total.start()), (lambda : (total.stop(), allrd.start())))
    solver.add_callback(nccl)
    solver.add_callback((lambda : ''), (lambda : (allrd.stop(), show_time())))
class PolyBlock5a(nn.Module):
    """Reduction block with two parallel branches (max-pool and 3x3 conv,
    192 -> 192 channels) whose outputs are combined by Concurrent."""

    def __init__(self):
        super(PolyBlock5a, self).__init__()
        branches = Concurrent()
        branches.add_module('branch1', MaxPoolBranch())
        branches.add_module('branch2', Conv3x3Branch(in_channels=192, out_channels=192))
        self.branches = branches

    def forward(self, x):
        """Run *x* through both branches."""
        return self.branches(x)
class TFCvtConvEmbeddings(tf.keras.layers.Layer):
    """Convolutional patch embedding layer for CvT.

    Projects pixel values into patch embeddings via a strided convolution,
    then layer-normalizes the embeddings.
    """
    def __init__(self, config: CvtConfig, patch_size: int, embed_dim: int, stride: int, padding: int, **kwargs):
        """patch_size/stride/padding configure the projection conv;
        embed_dim is the output channel count."""
        super().__init__(**kwargs)
        # Explicit zero padding, since the conv itself uses padding='valid'.
        self.padding = tf.keras.layers.ZeroPadding2D(padding=padding)
        self.patch_size = (patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size))
        self.projection = tf.keras.layers.Conv2D(filters=embed_dim, kernel_size=patch_size, strides=stride, padding='valid', data_format='channels_last', kernel_initializer=get_initializer(config.initializer_range), name='projection')
        self.normalization = tf.keras.layers.LayerNormalization(epsilon=1e-05, name='normalization')
    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        """Embed pixel_values; accepts a raw tensor or a dict with a
        'pixel_values' entry. Returns a (B, H, W, C) tensor."""
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values['pixel_values']
        pixel_values = self.projection(self.padding(pixel_values))
        (batch_size, height, width, num_channels) = shape_list(pixel_values)
        hidden_size = (height * width)
        # Flatten spatial dims so LayerNorm sees (B, H*W, C), then restore
        # the spatial layout afterwards.
        pixel_values = tf.reshape(pixel_values, shape=(batch_size, hidden_size, num_channels))
        pixel_values = self.normalization(pixel_values)
        pixel_values = tf.reshape(pixel_values, shape=(batch_size, height, width, num_channels))
        return pixel_values
def test_digits_euclidean_naive_init():
    """A mixture selection warm-started with the first five cached picks must
    reproduce the cached euclidean ranking/gains from position 5 onward."""
    facility = FacilityLocationSelection(100)
    graphcut = GraphCutSelection(100)
    mixture = MixtureSelection(100, [facility, graphcut], [1.0, 0.3], metric='euclidean', optimizer='naive', initial_subset=digits_euclidean_ranking[:5])
    mixture.fit(X_digits)
    assert_array_equal(mixture.ranking[:20], digits_euclidean_ranking[5:25])
    assert_array_almost_equal(mixture.gains[:20], digits_euclidean_gains[5:25], 4)
    assert_array_almost_equal(mixture.subset, X_digits[mixture.ranking])
class PromptCap(nn.Module):
    """Prompt-guided image captioning wrapper around an OFA checkpoint."""
    def __init__(self, ckpt='vqascore/promptcap-coco-vqa'):
        """Load tokenizer and model from *ckpt* and build the image
        preprocessing pipeline (RGB, resize to 480, normalize to [-1, 1])."""
        super().__init__()
        self.tokenizer = OFATokenizer.from_pretrained(ckpt)
        self.model = OFAModel.from_pretrained(ckpt, use_cache=True)
        self.model.eval()
        (mean, std) = ([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        resolution = 480
        self.patch_resize_transform = transforms.Compose([(lambda image: image.convert('RGB')), transforms.Resize((resolution, resolution), transforms.InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
    def caption(self, prompt, image, num_beams=5, no_repeat_ngram_size=3, max_new_tokens=100, **generator_args):
        """Generate a caption for *image* (a file path) conditioned on
        *prompt*; decoding options are forwarded to model.generate."""
        image = Image.open(image)
        image = self.patch_resize_transform(image)
        # Add a batch dimension and move to the model's device.
        image = image.unsqueeze(0)
        image = image.to(self.model.device)
        prompt = self.tokenizer(prompt, return_tensors='pt').input_ids
        prompt = prompt.to(self.model.device)
        with torch.no_grad():
            gen = self.model.generate(prompt, patch_images=image, num_beams=num_beams, no_repeat_ngram_size=no_repeat_ngram_size, max_new_tokens=max_new_tokens, **generator_args)
        # Return the first (only) decoded sequence, stripped of whitespace.
        return self.tokenizer.batch_decode(gen, skip_special_tokens=True)[0].strip()
# BUG FIX: the decorator's `@` had been lost, leaving a bare statement whose
# returned decorator was discarded, so the coder was never registered.
@_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
    """YOLO bbox coder.

    Encodes a gt box relative to an anchor box as (center offset within a
    stride cell in [eps, 1-eps], log width/height ratio) and decodes the
    inverse transform.
    """

    def __init__(self, eps=1e-06):
        # BUG FIX: was `super(BaseBBoxCoder, self).__init__()`, which starts
        # the MRO lookup *after* the direct parent and skips its initializer.
        super(YOLOBBoxCoder, self).__init__()
        # Clamp bound keeping log/center targets finite.
        self.eps = eps

    def encode(self, bboxes, gt_bboxes, stride):
        """Encode gt_bboxes w.r.t. anchor bboxes at the given stride.

        Both inputs are (..., 4) in (x1, y1, x2, y2); returns targets as
        (..., 4) in (cx, cy, log w-ratio, log h-ratio).
        """
        assert (bboxes.size(0) == gt_bboxes.size(0))
        assert (bboxes.size((- 1)) == gt_bboxes.size((- 1)) == 4)
        x_center_gt = ((gt_bboxes[(..., 0)] + gt_bboxes[(..., 2)]) * 0.5)
        y_center_gt = ((gt_bboxes[(..., 1)] + gt_bboxes[(..., 3)]) * 0.5)
        w_gt = (gt_bboxes[(..., 2)] - gt_bboxes[(..., 0)])
        h_gt = (gt_bboxes[(..., 3)] - gt_bboxes[(..., 1)])
        x_center = ((bboxes[(..., 0)] + bboxes[(..., 2)]) * 0.5)
        y_center = ((bboxes[(..., 1)] + bboxes[(..., 3)]) * 0.5)
        w = (bboxes[(..., 2)] - bboxes[(..., 0)])
        h = (bboxes[(..., 3)] - bboxes[(..., 1)])
        # Size targets are log ratios, clamped away from zero.
        w_target = torch.log((w_gt / w).clamp(min=self.eps))
        h_target = torch.log((h_gt / h).clamp(min=self.eps))
        # Center targets live in the open unit interval of the stride cell.
        x_center_target = (((x_center_gt - x_center) / stride) + 0.5).clamp(self.eps, (1 - self.eps))
        y_center_target = (((y_center_gt - y_center) / stride) + 0.5).clamp(self.eps, (1 - self.eps))
        encoded_bboxes = torch.stack([x_center_target, y_center_target, w_target, h_target], dim=(- 1))
        return encoded_bboxes

    def decode(self, bboxes, pred_bboxes, stride):
        """Decode predicted offsets back into (x1, y1, x2, y2) boxes."""
        assert (pred_bboxes.size(0) == bboxes.size(0))
        assert (pred_bboxes.size((- 1)) == bboxes.size((- 1)) == 4)
        x_center = ((bboxes[(..., 0)] + bboxes[(..., 2)]) * 0.5)
        y_center = ((bboxes[(..., 1)] + bboxes[(..., 3)]) * 0.5)
        w = (bboxes[(..., 2)] - bboxes[(..., 0)])
        h = (bboxes[(..., 3)] - bboxes[(..., 1)])
        # Invert the encode transform: cell offset -> absolute center,
        # exp(log-ratio) -> absolute size.
        x_center_pred = (((pred_bboxes[(..., 0)] - 0.5) * stride) + x_center)
        y_center_pred = (((pred_bboxes[(..., 1)] - 0.5) * stride) + y_center)
        w_pred = (torch.exp(pred_bboxes[(..., 2)]) * w)
        h_pred = (torch.exp(pred_bboxes[(..., 3)]) * h)
        decoded_bboxes = torch.stack(((x_center_pred - (w_pred / 2)), (y_center_pred - (h_pred / 2)), (x_center_pred + (w_pred / 2)), (y_center_pred + (h_pred / 2))), dim=(- 1))
        return decoded_bboxes
class TheanoCurves(CurvesManifold, TheanoShapes):
    """Curves manifold backed by Theano shape computations.

    NOTE(review): only TheanoShapes is initialized here; CurvesManifold's
    initializer (if any) is deliberately skipped -- confirm against the
    base classes.
    """

    def __init__(self, *args, **kwargs):
        TheanoShapes.__init__(self, *args, **kwargs)
class DenseDetector(nn.Module):
    """Base class for dense (single-stage) detectors.

    Handles image normalization/batching, backbone feature extraction, head
    prediction, and shared decoding/visualization utilities. Subclasses
    implement forward_training (and forward_inference).
    """

    def __init__(self, backbone: Backbone, head: nn.Module, head_in_features: Optional[List[str]]=None, *, pixel_mean, pixel_std):
        """
        backbone: feature extractor producing named feature maps.
        head: module applied to the selected feature maps.
        head_in_features: backbone output names fed to the head; defaults to
            all outputs ordered by increasing stride.
        pixel_mean / pixel_std: per-channel normalization constants.
        """
        super().__init__()
        self.backbone = backbone
        self.head = head
        if (head_in_features is None):
            shapes = self.backbone.output_shape()
            # Use every backbone output, finest stride first.
            self.head_in_features = sorted(shapes.keys(), key=(lambda x: shapes[x].stride))
        else:
            self.head_in_features = head_in_features
        # Stored as (C, 1, 1) buffers so they broadcast over images; not
        # persisted in checkpoints (persistent=False).
        self.register_buffer('pixel_mean', torch.tensor(pixel_mean).view((- 1), 1, 1), False)
        self.register_buffer('pixel_std', torch.tensor(pixel_std).view((- 1), 1, 1), False)

    @property
    def device(self):
        """Device the model's buffers live on.

        BUG FIX: this must be a property -- `forward` accesses it as
        `self.device` (attribute style); as a plain method that expression
        yielded a bound method, not a device.
        """
        return self.pixel_mean.device

    def _move_to_current_device(self, x):
        # Move x to the same device as the model's buffers.
        return move_device_like(x, self.pixel_mean)

    def forward(self, batched_inputs: List[Dict[(str, Tensor)]]):
        """Run the detector on a batch of input dicts.

        In training mode returns the subclass's loss dict; in inference mode
        returns per-image {'instances': Instances} results rescaled to each
        input's original height/width.
        """
        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)
        features = [features[f] for f in self.head_in_features]
        predictions = self.head(features)
        if self.training:
            assert (not torch.jit.is_scripting()), 'Not supported'
            assert ('instances' in batched_inputs[0]), 'Instance annotations are missing in training!'
            gt_instances = [x['instances'].to(self.device) for x in batched_inputs]
            return self.forward_training(images, features, predictions, gt_instances)
        else:
            results = self.forward_inference(images, features, predictions)
            if torch.jit.is_scripting():
                return results
            processed_results = []
            for (results_per_image, input_per_image, image_size) in zip(results, batched_inputs, images.image_sizes):
                # Rescale predictions to the caller-requested output size.
                height = input_per_image.get('height', image_size[0])
                width = input_per_image.get('width', image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({'instances': r})
            return processed_results

    def forward_training(self, images, features, predictions, gt_instances):
        """Compute training losses; implemented by subclasses."""
        raise NotImplementedError()

    def preprocess_image(self, batched_inputs: List[Dict[(str, Tensor)]]):
        """Normalize, pad and batch the input images into an ImageList."""
        images = [self._move_to_current_device(x['image']) for x in batched_inputs]
        images = [((x - self.pixel_mean) / self.pixel_std) for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility, padding_constraints=self.backbone.padding_constraints)
        return images

    def _transpose_dense_predictions(self, predictions: List[List[Tensor]], dims_per_anchor: List[int]) -> List[List[Tensor]]:
        """Permute each (N, A*K, H, W) prediction map to (N, HWA, K)."""
        assert (len(predictions) == len(dims_per_anchor))
        res: List[List[Tensor]] = []
        for (pred, dim_per_anchor) in zip(predictions, dims_per_anchor):
            pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred]
            res.append(pred)
        return res

    def _ema_update(self, name: str, value: float, initial_value: float, momentum: float=0.9):
        """Update and return an exponential moving average stored as
        attribute *name* (seeded with initial_value on first use)."""
        if hasattr(self, name):
            old = getattr(self, name)
        else:
            old = initial_value
        new = ((old * momentum) + (value * (1 - momentum)))
        setattr(self, name, new)
        return new

    def _decode_per_level_predictions(self, anchors: Boxes, pred_scores: Tensor, pred_deltas: Tensor, score_thresh: float, topk_candidates: int, image_size: Tuple[(int, int)]) -> Instances:
        """Threshold scores, keep the top-k candidates, and decode their
        box deltas against the matching anchors for one feature level."""
        keep_idxs = (pred_scores > score_thresh)
        pred_scores = pred_scores[keep_idxs]
        topk_idxs = torch.nonzero(keep_idxs)
        topk_idxs_size = topk_idxs.shape[0]
        # Under tracing the size may be a Tensor; handle both cases.
        if isinstance(topk_idxs_size, Tensor):
            num_topk = torch.clamp(topk_idxs_size, max=topk_candidates)
        else:
            num_topk = min(topk_idxs_size, topk_candidates)
        (pred_scores, idxs) = pred_scores.topk(num_topk)
        topk_idxs = topk_idxs[idxs]
        (anchor_idxs, classes_idxs) = topk_idxs.unbind(dim=1)
        pred_boxes = self.box2box_transform.apply_deltas(pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs])
        return Instances(image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs)

    def _decode_multi_level_predictions(self, anchors: List[Boxes], pred_scores: List[Tensor], pred_deltas: List[Tensor], score_thresh: float, topk_candidates: int, image_size: Tuple[(int, int)]) -> Instances:
        """Decode every feature level and concatenate the results."""
        predictions = [self._decode_per_level_predictions(anchors_i, box_cls_i, box_reg_i, score_thresh, topk_candidates, image_size) for (box_cls_i, box_reg_i, anchors_i) in zip(pred_scores, pred_deltas, anchors)]
        return predictions[0].cat(predictions)

    def visualize_training(self, batched_inputs, results):
        """Log a side-by-side GT vs. top-scoring-predictions image for the
        first element of the batch to the event storage."""
        from detectron2.utils.visualizer import Visualizer
        assert (len(batched_inputs) == len(results)), 'Cannot visualize inputs and results of different sizes'
        storage = get_event_storage()
        max_boxes = 20
        image_index = 0
        img = batched_inputs[image_index]['image']
        img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
        v_gt = Visualizer(img, None)
        v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]['instances'].gt_boxes)
        anno_img = v_gt.get_image()
        processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])
        predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()
        v_pred = Visualizer(img, None)
        v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])
        prop_img = v_pred.get_image()
        # Stack GT on top of predictions and convert HWC -> CHW for storage.
        vis_img = np.vstack((anno_img, prop_img))
        vis_img = vis_img.transpose(2, 0, 1)
        vis_name = f'Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results'
        storage.put_image(vis_name, vis_img)
def test_score_hlr_sampler_empty_pred():
    """ScoreHLRSampler must stay consistent (inds match bboxes) for empty
    proposals, empty ground truth, and the regular case."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False)
    context = _context_for_ohem()
    sampler = ScoreHLRSampler(num=10, pos_fraction=0.5, context=context, neg_pos_ub=(- 1), add_gt_as_proposals=True)
    gt_bboxes_ignore = torch.Tensor([])
    feats = [torch.rand(1, 256, int((2 ** i)), int((2 ** i))) for i in [6, 5, 4, 3, 2]]
    # Case 1: no proposals at all.
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels)
    (sample_result, _) = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert (len(sample_result.neg_inds) == 0)
    assert (len(sample_result.pos_bboxes) == len(sample_result.pos_inds))
    assert (len(sample_result.neg_bboxes) == len(sample_result.neg_inds))
    # Case 2: proposals but no ground truth.
    bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.LongTensor([])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels)
    (sample_result, _) = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert (len(sample_result.pos_inds) == 0)
    assert (len(sample_result.pos_bboxes) == len(sample_result.pos_inds))
    assert (len(sample_result.neg_bboxes) == len(sample_result.neg_inds))
    # Case 3: proposals exactly matching the ground truth.
    bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels)
    (sample_result, _) = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert (len(sample_result.pos_bboxes) == len(sample_result.pos_inds))
    assert (len(sample_result.neg_bboxes) == len(sample_result.neg_inds))
def plan_and_preprocess(task_string, processes_lowres=default_num_threads, processes_fullres=3, no_preprocessing=False):
    """Plan 3D and 2D nnU-Net experiments for *task_string* and optionally
    run preprocessing plus per-slice class bookkeeping.

    processes_lowres / processes_fullres: worker counts handed to the 3D
    planner's preprocessing; the 2D planner uses only processes_fullres.
    no_preprocessing: plan only, skip preprocessing and postprocessing steps.
    """
    from nnunet.experiment_planning.experiment_planner_baseline_2DUNet import ExperimentPlanner2D
    from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
    preprocessing_output_dir_this_task_train = join(preprocessing_output_dir, task_string)
    cropped_out_dir = join(nnUNet_cropped_data, task_string)
    maybe_mkdir_p(preprocessing_output_dir_this_task_train)
    # Dataset metadata must accompany the preprocessed data.
    shutil.copy(join(cropped_out_dir, 'dataset_properties.pkl'), preprocessing_output_dir_this_task_train)
    shutil.copy(join(nnUNet_raw_data, task_string, 'dataset.json'), preprocessing_output_dir_this_task_train)
    # 3D planning (and preprocessing, unless disabled).
    exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if (not no_preprocessing):
        exp_planner.run_preprocessing((processes_lowres, processes_fullres))
    # 2D planning (and preprocessing, unless disabled).
    exp_planner = ExperimentPlanner2D(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if (not no_preprocessing):
        exp_planner.run_preprocessing(processes_fullres)
    if (not no_preprocessing):
        p = Pool(default_num_threads)
        # Every 'stage*' subfolder produced by preprocessing gets per-slice
        # class info added to its npz/pkl pairs.
        stages = [i for i in subdirs(preprocessing_output_dir_this_task_train, join=True, sort=True) if (i.split('/')[(- 1)].find('stage') != (- 1))]
        for s in stages:
            print(s.split('/')[(- 1)])
            list_of_npz_files = subfiles(s, True, None, '.npz', True)
            list_of_pkl_files = [(i[:(- 4)] + '.pkl') for i in list_of_npz_files]
            all_classes = []
            for pk in list_of_pkl_files:
                with open(pk, 'rb') as f:
                    props = pickle.load(f)
                all_classes_tmp = np.array(props['classes'])
                # Negative entries (e.g. the outside-mask label) are dropped.
                all_classes.append(all_classes_tmp[(all_classes_tmp >= 0)])
            p.map(add_classes_in_slice_info, zip(list_of_npz_files, list_of_pkl_files, all_classes))
        p.close()
        p.join()
class WN18RRProcessor(BaseProcessor):
    """Processor for the WN18RR knowledge-graph dataset; all behavior comes
    from BaseProcessor."""

    def __init__(self, node_lut, relation_lut):
        # node_lut / relation_lut: lookup tables forwarded to the base class.
        super().__init__(node_lut, relation_lut)
def cluster_feat(dataset_json_file, tar_path):
    """Load per-sample layer features from *tar_path* and pool them.

    For each wav entry in the dataset JSON, loads its saved feature array
    (shape assumed (num_layer, T, feat_dim) -- TODO confirm) and time-averages
    it per layer; the extra final row holds the average over all layers.

    Returns (all_feat, all_label) where all_feat has shape
    (num_layer + 1, num_sample, feat_dim) and all_label is a list of ints
    parsed from each filename.
    """
    with open(dataset_json_file, 'r') as fp:
        data_json = json.load(fp)
    data = data_json['data']
    num_sample = len(data)
    for (idx, entry) in enumerate(data):
        wav = entry['wav']
        if (idx == 0):
            # Peek at the first sample to size the output array.
            # NOTE(review): this file is loaded again as cur_rep below, so
            # sample 0 is read twice.
            cur_sample = np.load((((tar_path + '/') + wav.split('/')[(- 1)][:(- 3)]) + 'npy'))
            num_layer = cur_sample.shape[0]
            feat_dim = cur_sample.shape[(- 1)]
            print('number of layers {:d} feat dim {:d}'.format(num_layer, feat_dim))
            all_feat = np.zeros(((num_layer + 1), num_sample, feat_dim))
            all_label = []
        cur_rep = np.load((((tar_path + '/') + wav.split('/')[(- 1)][:(- 3)]) + 'npy'))
        # Time-average each layer's features.
        for layer in range(cur_rep.shape[0]):
            all_feat[(layer, idx)] = np.mean(cur_rep[layer], axis=0)
        # Final row: average over layers, then over time.
        all_feat[((- 1), idx)] = np.mean(np.mean(cur_rep, axis=0), axis=0)
        # Label is parsed from the filename, e.g. '...-<label>.<ext>' --
        # presumably dataset-specific; TODO confirm the naming convention.
        cur_label = int(wav.split('.')[(- 2)].split('-')[(- 1)])
        all_label.append(cur_label)
    assert (all_feat[0].shape[0] == len(all_label))
    return (all_feat, all_label)
def construct_dataloaders(dataset, defs, data_path='~/data', shuffle=True, normalize=True):
    """Build ``(loss_fn, trainloader, validloader)`` for a named dataset.

    Classification datasets pair with ``Classification()``; the BSDS
    restoration variants pair with ``PSNR()``.  An unrecognised ``dataset``
    leaves the locals unbound and fails with ``NameError`` below, matching
    the historical behaviour.
    """
    root = os.path.expanduser(data_path)
    if dataset == 'CIFAR10':
        trainset, validset = _build_cifar10(root, defs.augmentations, normalize)
        loss_fn = Classification()
    elif dataset == 'CIFAR100':
        trainset, validset = _build_cifar100(root, defs.augmentations, normalize)
        loss_fn = Classification()
    elif dataset == 'MNIST':
        trainset, validset = _build_mnist(root, defs.augmentations, normalize)
        loss_fn = Classification()
    elif dataset == 'MNIST_GRAY':
        trainset, validset = _build_mnist_gray(root, defs.augmentations, normalize)
        loss_fn = Classification()
    elif dataset == 'ImageNet':
        trainset, validset = _build_imagenet(root, defs.augmentations, normalize)
        loss_fn = Classification()
    elif dataset == 'BSDS-SR':
        trainset, validset = _build_bsds_sr(root, defs.augmentations, normalize, upscale_factor=3, RGB=True)
        loss_fn = PSNR()
    elif dataset == 'BSDS-DN':
        trainset, validset = _build_bsds_dn(root, defs.augmentations, normalize, noise_level=25 / 255, RGB=False)
        loss_fn = PSNR()
    elif dataset == 'BSDS-RGB':
        trainset, validset = _build_bsds_dn(root, defs.augmentations, normalize, noise_level=25 / 255, RGB=True)
        loss_fn = PSNR()

    # Worker count is capped by both the torch thread budget and the module
    # limit; single-threaded builds load data in-process.
    num_workers = 0
    if MULTITHREAD_DATAPROCESSING and torch.get_num_threads() > 1:
        num_workers = min(torch.get_num_threads(), MULTITHREAD_DATAPROCESSING)

    batch = min(defs.batch_size, len(trainset))
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch, shuffle=shuffle, drop_last=True,
        num_workers=num_workers, pin_memory=PIN_MEMORY)
    # NOTE(review): the valid loader's batch size is also capped by
    # len(trainset), not len(validset) -- preserved from the original.
    validloader = torch.utils.data.DataLoader(
        validset, batch_size=batch, shuffle=False, drop_last=False,
        num_workers=num_workers, pin_memory=PIN_MEMORY)
    return (loss_fn, trainloader, validloader)
def build(args, image_set, activated_class_ids, with_support=True):
    """Construct a few-shot ``DetectionDataset`` for the selected benchmark.

    Args:
        args: namespace carrying ``dataset_file``, ``fewshot_seed``,
            ``num_shots`` and ``cache_mode``.
        image_set: must be ``'fewshot'``.
        activated_class_ids: class ids to activate; sorted in place.
        with_support: whether the dataset should return support examples.

    Returns:
        A ``DetectionDataset`` over the seed/shot-specific annotation file.

    Raises:
        ValueError: if ``args.dataset_file`` is not a supported benchmark.
    """
    assert image_set == 'fewshot'
    activated_class_ids.sort()
    # dataset_file -> (fewshot annotation root, image dir parts under root.parent)
    dataset_roots = {
        'coco_base': ('data/coco_fewshot', ('coco', 'train2017')),
        'voc_base1': ('data/voc_fewshot_split1', ('voc', 'images')),
        'voc_base2': ('data/voc_fewshot_split2', ('voc', 'images')),
        'voc_base3': ('data/voc_fewshot_split3', ('voc', 'images')),
    }
    try:
        root_dir, img_parts = dataset_roots[args.dataset_file]
    except KeyError:
        # Previously a bare `raise ValueError` with no message.
        raise ValueError(f'Unsupported dataset_file: {args.dataset_file!r}') from None
    root = Path(root_dir)
    img_folder = root.parent.joinpath(*img_parts)
    ann_file = root / f'seed{args.fewshot_seed}' / f'{args.num_shots}shot.json'
    return DetectionDataset(args, img_folder, str(ann_file),
                            transforms=make_transforms(),
                            support_transforms=make_support_transforms(),
                            return_masks=False,
                            activated_class_ids=activated_class_ids,
                            with_support=with_support,
                            cache_mode=args.cache_mode,
                            local_rank=get_local_rank(),
                            local_size=get_local_size())
class ChineseCLIPModelTester():
    """Combines the text and vision model testers for ChineseCLIP tests."""

    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        text_kwargs = text_kwargs if text_kwargs is not None else {}
        vision_kwargs = vision_kwargs if vision_kwargs is not None else {}
        self.parent = parent
        self.text_model_tester = ChineseCLIPTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = ChineseCLIPVisionModelTester(parent, **vision_kwargs)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        # The text tester returns extra tensors we do not need here; the
        # vision tester is still invoked for its pixel inputs.
        (_, input_ids, token_type_ids, attention_mask, _, __, ___) = self.text_model_tester.prepare_config_and_inputs()
        (_, pixel_values) = self.vision_model_tester.prepare_config_and_inputs()
        # The combined config is rebuilt from both sub-testers.
        return (self.get_config(), input_ids, token_type_ids, attention_mask, pixel_values)

    def get_config(self):
        return ChineseCLIPConfig.from_text_vision_configs(
            self.text_model_tester.get_config(),
            self.vision_model_tester.get_config(),
            projection_dim=64,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values):
        model = ChineseCLIPModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask, token_type_ids)
        # Logit matrices are (images x texts) and its transpose shape.
        expected_img = (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        expected_txt = (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        self.parent.assertEqual(result.logits_per_image.shape, expected_img)
        self.parent.assertEqual(result.logits_per_text.shape, expected_txt)

    def prepare_config_and_inputs_for_common(self):
        (config, input_ids, token_type_ids, attention_mask, pixel_values) = self.prepare_config_and_inputs()
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'pixel_values': pixel_values,
            'return_loss': True,
        }
        return (config, inputs_dict)
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Assemble an rlpyt SAC experiment from a config key and train it.

    The base config selected by ``config_key`` is overlaid with the variant
    stored in ``log_dir``; hardware placement comes from the affinity code.
    """
    affinity = affinity_from_code(slot_affinity_code)
    # Base config + per-run variant loaded from the log directory.
    config = update_config(configs[config_key], load_variant(log_dir))
    sampler = SerialSampler(
        EnvCls=gym_make,
        env_kwargs=config['env'],
        CollectorCls=CpuResetCollector,
        eval_env_kwargs=config['env'],
        **config['sampler'],
    )
    algo = SAC(optim_kwargs=config['optim'], **config['algo'])
    agent = SacAgent(**config['agent'])
    runner = MinibatchRlEval(
        algo=algo,
        agent=agent,
        sampler=sampler,
        affinity=affinity,
        **config['runner'],
    )
    experiment_name = 'sac_' + config['env']['id']
    with logger_context(log_dir, run_ID, experiment_name, config):
        runner.train()
class ReformerForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Instantiating it raises an informative error via ``requires_backends``.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fail fast with the standard missing-backend message.
        requires_backends(self, ['torch'])
class CityscapesInstanceEvaluator(CityscapesEvaluator):
    """Evaluate instance segmentation using the official cityscapesscripts.

    ``process`` dumps per-image predictions (one txt + one mask png per
    instance) into ``self._temp_dir`` in the format the cityscapes tooling
    expects; ``evaluate`` then runs the official evaluator over them.
    """

    def process(self, inputs, outputs):
        # Imported lazily so the evaluator can be constructed without
        # cityscapesscripts installed.
        from cityscapesscripts.helpers.labels import name2label
        for (input, output) in zip(inputs, outputs):
            file_name = input['file_name']
            basename = os.path.splitext(os.path.basename(file_name))[0]
            # One txt file per image, listing "<mask png> <class id> <score>".
            pred_txt = os.path.join(self._temp_dir, (basename + '_pred.txt'))
            output = output['instances'].to(self._cpu_device)
            num_instances = len(output)
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = output.pred_classes[i]
                    # Map the model's class index to the cityscapes label id.
                    classes = self._metadata.thing_classes[pred_class]
                    class_id = name2label[classes].id
                    score = output.scores[i]
                    # Binary mask saved as 0/255 png next to the txt file.
                    mask = output.pred_masks[i].numpy().astype('uint8')
                    png_filename = os.path.join(self._temp_dir, (basename + '_{}_{}.png'.format(i, classes)))
                    Image.fromarray((mask * 255)).save(png_filename)
                    fout.write('{} {} {}\n'.format(os.path.basename(png_filename), class_id, score))

    def evaluate(self):
        """Run the official cityscapes instance-level evaluation.

        Returns:
            OrderedDict with ``segm/AP`` and ``segm/AP50`` in percent, or
            ``None`` on non-main ranks (only rank 0 evaluates).
        """
        comm.synchronize()
        if (comm.get_rank() > 0):
            return
        import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
        self._logger.info('Evaluating results under {} ...'.format(self._temp_dir))
        # The cityscapes evaluator is configured through module-level args;
        # set every field we rely on before invoking it.
        cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
        cityscapes_eval.args.predictionWalk = None
        cityscapes_eval.args.JSONOutput = False
        cityscapes_eval.args.colorized = False
        cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, 'gtInstances.json')
        gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
        groundTruthImgList = glob.glob(os.path.join(gt_dir, '*', '*_gtFine_instanceIds.png'))
        assert len(groundTruthImgList), 'Cannot find any ground truth images to use for evaluation. Searched for: {}'.format(cityscapes_eval.args.groundTruthSearch)
        predictionImgList = []
        for gt in groundTruthImgList:
            # Match each ground-truth image to the prediction txt produced
            # by process() above.
            predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
        results = cityscapes_eval.evaluateImgLists(predictionImgList, groundTruthImgList, cityscapes_eval.args)['averages']
        ret = OrderedDict()
        ret['segm'] = {'AP': (results['allAp'] * 100), 'AP50': (results['allAp50%'] * 100)}
        # Remove the temp prediction files now that scores are computed.
        self._working_dir.cleanup()
        return ret
def eta(time_points, remaining_works, regression_points_used=200):
    """Estimate time-to-completion by averaging two predictors.

    The linear-regression predictor only sees the most recent
    ``regression_points_used`` samples; the lookback predictor sees the full
    history.
    """
    times = np.asarray(time_points)
    works = np.asarray(remaining_works)
    recent_times = times[-regression_points_used:]
    recent_works = works[-regression_points_used:]
    estimates = [
        eta_linear_regression_shifted(recent_times, recent_works),
        eta_lookback(times, works),
    ]
    return np.mean(estimates)
def freq_transit(q, rho=1.0, **kwargs):
    """Transit frequency as a fraction of the zero-impact maximum.

    Scales ``fmax_transit0(rho)`` by ``sin(pi * q) ** 1.5``; extra keyword
    arguments are accepted and ignored for interface compatibility.
    """
    peak = fmax_transit0(rho=rho)
    return peak * np.sin(np.pi * q) ** 1.5
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
    """Render target vs. predicted gate values into a numpy image.

    Draws both series as scatter plots on one axis, rasterises the figure,
    and returns the pixel array from ``save_figure_to_numpy``.
    """
    fig, ax = plt.subplots(figsize=(12, 3))
    # Same styling as before: green '+' for targets, red '.' for predictions.
    for values, color, marker, label in (
            (gate_targets, 'green', '+', 'target'),
            (gate_outputs, 'red', '.', 'predicted')):
        ax.scatter(range(len(values)), values, alpha=0.5, color=color,
                   marker=marker, s=1, label=label)
    plt.xlabel('Frames (Green target, Red predicted)')
    plt.ylabel('Gate State')
    plt.tight_layout()
    fig.canvas.draw()
    image = save_figure_to_numpy(fig)
    plt.close()
    return image
class Swinv2PreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Instantiating it raises an informative error via ``requires_backends``.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fail fast with the standard missing-backend message.
        requires_backends(self, ['torch'])
def load_imagenet_family_model(type, folder, checkpoint, device, dataset_dir, num_classes, load_temp=False, model_params=None):
    """Build a 224px ImageNet-family model and load a checkpoint onto it.

    Args:
        type: model family identifier passed to ``build_model224``.
            (Name shadows the builtin; kept for caller compatibility.)
        folder: base folder containing saved checkpoints.
        checkpoint: checkpoint identifier passed to ``get_filename``.
        device: map_location for ``torch.load``.
        dataset_dir: dataset subdirectory used to locate the state dict.
        num_classes: number of output classes for the model head.
        load_temp: forwarded to ``get_filename``.
        model_params: optional extra kwargs for ``build_model224``.

    Returns:
        The model with the checkpoint's state dict loaded.
    """
    # Bug fix: the original splatted ``**model_params`` directly, so calling
    # with the default ``model_params=None`` raised TypeError.
    if model_params is None:
        model_params = {}
    model, model_folder_post, _ = build_model224(type, num_classes, **model_params)
    state_dict_file = get_filename(folder, f'{dataset_dir}/{model_folder_post}', checkpoint, load_temp)
    state_dict = torch.load(state_dict_file, map_location=device)
    model.load_state_dict(state_dict)
    return model
def _segm_pvtv2(name, backbone_name, num_classes, output_stride, pretrained_backbone):
    """Build a DeepLab segmentation model on a PVTv2-b2 backbone.

    Args:
        name: head architecture; only ``'deeplabv3plus'`` is supported.
        backbone_name: unused; kept for signature compatibility with the
            sibling ``_segm_*`` builders.
        num_classes: number of segmentation classes.
        output_stride: 8 selects wider ASPP dilations, otherwise the default.
        pretrained_backbone: load local PVTv2-b2 weights when True.

    Raises:
        ValueError: if ``name`` is not a supported head.
    """
    aspp_dilate = [12, 24, 36] if output_stride == 8 else [6, 12, 18]
    backbone = pvt_v2_b2()
    if pretrained_backbone:
        path = './pretrained_pth/pvt_v2_b2.pth'
        save_model = torch.load(path)
        model_dict = backbone.state_dict()
        # Keep only the checkpoint entries whose keys the backbone declares.
        state_dict = {k: v for k, v in save_model.items() if k in model_dict}
        model_dict.update(state_dict)
        backbone.load_state_dict(model_dict)
    inplanes = 512
    low_level_planes = 64
    if name != 'deeplabv3plus':
        # Bug fix: the original fell through with ``classifier`` undefined,
        # crashing with NameError on the constructor call below.
        raise ValueError(f'Unsupported segmentation head: {name!r}')
    classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
    model = DeepLabV3(backbone, classifier)
    return model
class MapSeqsToReactants():
    """Decode sequences of reactant indices back into SMILES strings.

    Stop and padding symbols are dropped; every remaining index is looked up
    in the inverted reactant table.
    """

    def __init__(self, json_reactants_to_id_path=None):
        smi_to_id = mchef_config.get_reactant_smi_to_reactant_id_dict(json_reactants_to_id_path)
        # Invert the SMILES -> id table so ids decode back to SMILES.
        self.reactant_id_to_smi_dict = {idx: smi for smi, idx in smi_to_id.items()}
        self.stop_sym_idx = mchef_config.get_num_graphs(json_reactants_to_id_path)
        self.pad_idx = mchef_config.PAD_VALUE

    def __call__(self, array_of_symbol_indices):
        special = {self.stop_sym_idx, self.pad_idx}
        return [self.reactant_id_to_smi_dict[idx]
                for idx in array_of_symbol_indices
                if idx not in special]
def reset_cfg(cfg, args):
    """Overlay truthy command-line arguments onto ``cfg`` in place.

    Falsy values (None, '', 0, empty list) leave the corresponding config
    entry untouched -- each override is opt-in, as in the original if-chain.
    """
    # (value, config node, attribute) triples, in the original order.
    overrides = (
        (args.root, cfg.DATASET, 'ROOT'),
        (args.output_dir, cfg, 'OUTPUT_DIR'),
        (args.resume, cfg, 'RESUME'),
        (args.seed, cfg, 'SEED'),
        (args.source_domains, cfg.DATASET, 'SOURCE_DOMAINS'),
        (args.target_domains, cfg.DATASET, 'TARGET_DOMAINS'),
        (args.transforms, cfg.INPUT, 'TRANSFORMS'),
        (args.trainer, cfg.TRAINER, 'NAME'),
        (args.backbone, cfg.MODEL.BACKBONE, 'NAME'),
        (args.head, cfg.MODEL.HEAD, 'NAME'),
    )
    for value, node, key in overrides:
        if value:
            setattr(node, key, value)
def wait_for_session_and_get_session(self, master, config=None, max_wait_secs=float('Inf')):
    """Create a session for ``master`` and block until the model is ready.

    Repeatedly builds a session, runs the local init op, and checks model
    readiness; a not-ready session is closed and the loop retries after
    ``self._recovery_wait_secs`` until ``max_wait_secs`` elapses.

    Args:
        master: target string for the session.
        config: optional session configuration.
        max_wait_secs: total time budget; ``None`` means wait forever.

    Returns:
        The ready session; it is also stashed under ``'sess'`` in the
        process-global dict.

    Raises:
        errors.DeadlineExceededError: if the model never becomes ready
            within ``max_wait_secs``.
    """
    # Diagnostic counter: how many sessions this process has created.
    global_dict = common_util.GlobalDict()
    if ('session_creation_count' not in global_dict):
        global_dict['session_creation_count'] = 0
    global_dict['session_creation_count'] += 1
    self._target = master
    if (max_wait_secs is None):
        max_wait_secs = float('Inf')
    timer = session_manager._CountDownTimer(max_wait_secs)
    while True:
        sess = session.Session(self._target, graph=self._graph, config=config)
        not_ready_msg = None
        not_ready_local_msg = None
        # Local init must succeed before global readiness is even checked.
        (local_init_success, not_ready_local_msg) = self._try_run_local_init_op(sess)
        if local_init_success:
            (is_ready, not_ready_msg) = self._model_ready(sess)
            if is_ready:
                # Publish the ready session for other in-process consumers.
                global_dict = common_util.GlobalDict()
                global_dict['sess'] = sess
                return sess
        # Not ready: discard this session before sleeping and retrying.
        self._safe_close(sess)
        # NOTE(review): despite the name, this value is in *seconds*, not
        # milliseconds (timer.secs_remaining() minus the next sleep).
        remaining_ms_after_wait = (timer.secs_remaining() - self._recovery_wait_secs)
        if (remaining_ms_after_wait < 0):
            raise errors.DeadlineExceededError(None, None, ('Session was not ready after waiting %d secs.' % (max_wait_secs,)))
        logger.info('Waiting for model to be ready. Ready_for_local_init_op: %s, ready: %s', not_ready_local_msg, not_ready_msg)
        time.sleep(self._recovery_wait_secs)
class TestAPI(unittest.TestCase):
    """End-to-end smoke tests for the Magpie training API."""

    def _load_labels(self):
        # The label set lives next to the data dir with a '.labels' suffix.
        with io.open(DATA_DIR + '.labels', 'r') as f:
            return {line.rstrip('\n') for line in f}

    def _check_predictions(self, model, labels):
        # Every prediction must name a known label with a probability in [0, 1].
        predictions = model.predict_from_text('Black holes are cool!')
        assert (len(predictions) == len(labels))
        for (lab, val) in predictions:
            assert (lab in labels)
            assert (0 <= val <= 1)

    def test_cnn_train(self):
        labels = self._load_labels()
        model = Magpie()
        model.init_word_vectors(DATA_DIR, vec_dim=100)
        history = model.train(DATA_DIR, labels, nn_model='cnn', test_ratio=0.3, epochs=3)
        assert (history is not None)
        self._check_predictions(model, labels)

    def test_rnn_batch_train(self):
        labels = self._load_labels()
        model = Magpie()
        model.init_word_vectors(DATA_DIR, vec_dim=100)
        history = model.batch_train(DATA_DIR, labels, nn_model='rnn', epochs=3)
        assert (history is not None)
        self._check_predictions(model, labels)
def fid_inception_v3():
    """Build the InceptionV3 variant used for FID computation.

    Starts from a vanilla 1008-class InceptionV3 without aux logits, swaps
    the mixed blocks for the FID-specific implementations, then loads the
    published FID weights.
    """
    inception = _inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
    # FID-specific replacements for the mixed blocks, keyed by attribute name.
    replacements = {
        'Mixed_5b': FIDInceptionA(192, pool_features=32),
        'Mixed_5c': FIDInceptionA(256, pool_features=64),
        'Mixed_5d': FIDInceptionA(288, pool_features=64),
        'Mixed_6b': FIDInceptionC(768, channels_7x7=128),
        'Mixed_6c': FIDInceptionC(768, channels_7x7=160),
        'Mixed_6d': FIDInceptionC(768, channels_7x7=160),
        'Mixed_6e': FIDInceptionC(768, channels_7x7=192),
        'Mixed_7b': FIDInceptionE_1(1280),
        'Mixed_7c': FIDInceptionE_2(2048),
    }
    for attr, module in replacements.items():
        setattr(inception, attr, module)
    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    inception.load_state_dict(state_dict)
    return inception
def save_data(data, out_path, darknet_label_path):
    """Serialise ``data`` as JSON to ``out_path`` and ``darknet_label_path``.

    Bug fix: the original looped over ``data`` and re-wrote the *entire*
    dataset to the same ``darknet_label_path`` file once per group (the loop
    variable was never used), so each iteration overwrote the last.  The net
    file contents are preserved here with a single write; the file is only
    created when ``data`` is non-empty, matching the zero-iteration case.

    NOTE(review): each group was possibly meant to get its own darknet label
    file -- confirm the intended per-group output against the caller.
    """
    with open(out_path, 'w+') as f:
        json.dump(data, f)
    if data:
        with open(os.path.join(darknet_label_path), 'w+') as f:
            json.dump(data, f)
def compose(base_map, next_map):
    """Compose two axis-tagged affine maps ``x -> a*x + b``.

    Each map is a tuple ``(axis, a, b)``.  Applying ``base_map`` after
    ``next_map`` yields ``a1*(a2*x + b2) + b1``.  A ``None`` axis acts as a
    wildcard; otherwise the axes must agree.

    Raises:
        AxisMismatchException: when both axes are set and differ.
    """
    axis1, scale1, offset1 = base_map
    axis2, scale2, offset2 = next_map
    if axis1 is None:
        axis = axis2
    elif axis2 is None or axis1 == axis2:
        axis = axis1
    else:
        raise AxisMismatchException
    return (axis, scale1 * scale2, scale1 * offset2 + offset1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.