code stringlengths 101 5.91M |
|---|
class UNetMidBlock2D(nn.Module):
    """UNet middle block: one ResNet block followed by ``num_layers`` pairs of
    (optional) self-attention + ResNet block, all at a constant channel width.

    Args:
        in_channels: Channel count of the incoming (and outgoing) feature map.
        temb_channels: Channel count of the time embedding passed to each resnet.
        dropout: Dropout probability inside each ``ResnetBlock2D``.
        num_layers: Number of (attention, resnet) pairs appended after the first resnet.
        resnet_eps, resnet_time_scale_shift, resnet_act_fn, resnet_groups,
        resnet_pre_norm, output_scale_factor: Forwarded to every ``ResnetBlock2D``
            (``output_scale_factor`` also to ``Attention``).
        attn_groups: GroupNorm group count for attention; defaults to
            ``resnet_groups`` unless spatial time-scale-shift is used.
        add_attention: If False, the attention slots hold ``None`` and are skipped.
        attention_head_dim: Per-head dimension; ``None`` falls back to ``in_channels``
            (i.e. a single head).
    """

    def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, attn_groups: Optional[int]=None, resnet_pre_norm: bool=True, add_attention: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0):
        super().__init__()
        # Fall back to a sane group count if the caller explicitly passed None.
        resnet_groups = (resnet_groups if (resnet_groups is not None) else min((in_channels // 4), 32))
        self.add_attention = add_attention
        if (attn_groups is None):
            # With 'spatial' time-scale-shift the attention block normalizes via
            # SpatialNorm (spatial_norm_dim below), so GroupNorm is disabled.
            attn_groups = (resnet_groups if (resnet_time_scale_shift == 'default') else None)
        # The block always has (num_layers + 1) resnets and num_layers attention slots.
        resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)]
        attentions = []
        if (attention_head_dim is None):
            # logger.warn is a deprecated alias; use logger.warning.
            logger.warning(f'It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}.')
            attention_head_dim = in_channels
        for _ in range(num_layers):
            if self.add_attention:
                attentions.append(Attention(in_channels, heads=(in_channels // attention_head_dim), dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=attn_groups, spatial_norm_dim=(temb_channels if (resnet_time_scale_shift == 'spatial') else None), residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True))
            else:
                attentions.append(None)
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:
        """Run resnet[0], then alternate (attention?, resnet) pairs."""
        hidden_states = self.resnets[0](hidden_states, temb)
        for (attn, resnet) in zip(self.attentions, self.resnets[1:]):
            if (attn is not None):
                hidden_states = attn(hidden_states, temb=temb)
            hidden_states = resnet(hidden_states, temb)
        return hidden_states
def normalize(x, stats):
    """Standardize ``x`` using the supplied statistics.

    Returns ``x`` unchanged when ``stats`` is None; otherwise returns
    ``(x - stats.mean) / stats.std``.
    """
    if stats is None:
        return x
    centered = x - stats.mean
    return centered / stats.std
def get_file_size(filename):
    """Return the size of ``filename`` in mebibytes (bytes / 1024**2)."""
    byte_count = os.path.getsize(filename)
    return byte_count / float(1024 ** 2)
def main():
    """CLI entry point: parse dataset/sampling options, run ``generate`` and
    log progress at whole-percent increments."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_loader', type=str)
    parser.add_argument('dataset_path', type=str)
    parser.add_argument('--video_names', type=str, nargs='+', help='Only generate training data for a subset of videos. If not set, will include all videos in dataset_path.')
    parser.add_argument('--num_train_samples', type=int, help='How many training samples to generate')
    parser.add_argument('--num_eval_samples', type=int, help='How many evaluation samples to generate')
    parser.add_argument('--temp_dir', type=str, help='Where to store temporary intermediate results')
    parser.add_argument('--work_dir', type=str, help='Root folder for all experiments')
    parser.add_argument('--num_process', type=int, help='How many worker processes')
    parser.add_argument('--random_seed', type=int, help='Optional random seed for deterministic results')
    # Shared flag groups registered by external helpers.
    add_resizing_arguments(parser)
    add_image_processing_arguments(parser)
    args = parser.parse_args()
    # ``generate`` yields fractional progress in [0, 1]; only log when the
    # integer percentage changes to avoid log spam.
    last_progress = None
    for progress in generate(**vars(args)):
        prog_percent = int((progress * 100))
        if (prog_percent != last_progress):
            logger.info(f'Generating training data: {prog_percent}% done')
            last_progress = prog_percent
def format_str_one(v, float_prec=6, int_pad=1):
    """Format a single value for display.

    One-element tensors are unwrapped first.  Floats are printed with
    ``float_prec`` decimals, ints are zero-padded to ``int_pad`` digits
    (skipped when ``int_pad`` is falsy); everything else goes through ``str``.
    """
    if isinstance(v, torch.Tensor) and v.numel() == 1:
        v = v.item()
    if isinstance(v, float):
        return f'{v:.{float_prec}f}'
    if isinstance(v, int) and int_pad:
        return f'{v:0{int_pad}d}'
    return str(v)
def create_vocabulary_from_data(datasets: DatasetDict, word_delimiter_token: Optional[str]=None, unk_token: Optional[str]=None, pad_token: Optional[str]=None):
    """Build a character-level vocabulary (char -> id) from the
    ``target_text`` column of every split in ``datasets``.

    Optionally replaces the literal space character with
    ``word_delimiter_token`` and appends ``unk_token`` / ``pad_token`` at the
    end of the vocabulary.
    """
    def extract_all_chars(batch):
        # Join the whole batch into one string and collect its unique characters.
        all_text = ' '.join(batch['target_text'])
        vocab = list(set(all_text))
        return {'vocab': [vocab], 'all_text': [all_text]}
    # batch_size=-1 maps each split as a single batch, so every split yields
    # exactly one vocab row.
    vocabs = datasets.map(extract_all_chars, batched=True, batch_size=(- 1), keep_in_memory=True, remove_columns=datasets['train'].column_names)
    # Union of the per-split character sets.
    vocab_set = functools.reduce((lambda vocab_1, vocab_2: (set(vocab_1['vocab'][0]) | set(vocab_2['vocab'][0]))), vocabs.values())
    # Deterministic ids: characters sorted, then enumerated.
    vocab_dict = {v: k for (k, v) in enumerate(sorted(list(vocab_set)))}
    if (word_delimiter_token is not None):
        # Re-key the space character to the explicit delimiter token.
        vocab_dict[word_delimiter_token] = vocab_dict[' ']
        del vocab_dict[' ']
    if (unk_token is not None):
        vocab_dict[unk_token] = len(vocab_dict)
    if (pad_token is not None):
        vocab_dict[pad_token] = len(vocab_dict)
    return vocab_dict
def train_val_test_generate(dataframe, model_params):
    """Build padded train/val/test windows from ``dataframe`` with 'NO3' as the
    target, then add a trailing axis to the targets so they are 3-D.

    Returns:
        Tuple of (x windows, y windows with shape (..., 1), sample lengths,
        before-window lengths) as produced by ``pad_all_cases``.
    """
    (train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples) = pad_all_cases(dataframe, dataframe['NO3'].values, model_params, model_params['min_before'], model_params['max_before'], model_params['min_after'], model_params['max_after'], model_params['output_length'])
    # Models downstream expect a 3-D target tensor (samples, time, 1).
    train_val_test_y = np.expand_dims(train_val_test_y, axis=2)
    return (train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples)
def main():
    """CLI tool: average the parameters of several checkpoints into one.

    Either an explicit list of checkpoint paths is averaged, or (with
    --num-epoch-checkpoints / --num-update-checkpoints) the last N checkpoints
    found under the single input directory are selected first.
    """
    parser = argparse.ArgumentParser(description='Tool to average the params of input checkpoints to produce a new checkpoint')
    parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.')
    parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.')
    num_group = parser.add_mutually_exclusive_group()
    num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, and average last this many of them.')
    num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, and average last this many of them.')
    parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, when using --num-update-checkpoints, this will set an upper bound on which update to usee.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500')
    args = parser.parse_args()
    print(args)
    num = None
    is_update_based = False
    # Update-based selection takes precedence over epoch-based selection.
    if (args.num_update_checkpoints is not None):
        num = args.num_update_checkpoints
        is_update_based = True
    elif (args.num_epoch_checkpoints is not None):
        num = args.num_epoch_checkpoints
    # Validate flag combinations (the mutually-exclusive group already blocks
    # passing both num flags; these asserts document the contract explicitly).
    assert ((args.checkpoint_upper_bound is None) or ((args.num_epoch_checkpoints is not None) or (args.num_update_checkpoints is not None))), '--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints'
    assert ((args.num_epoch_checkpoints is None) or (args.num_update_checkpoints is None)), 'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'
    if (num is not None):
        # Resolve the directory in args.inputs to the last `num` checkpoints.
        args.inputs = last_n_checkpoints(args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound)
        print('averaging checkpoints: ', args.inputs)
    new_state = average_checkpoints(args.inputs)
    with PathManager.open(args.output, 'wb') as f:
        torch.save(new_state, f)
    print('Finished writing averaged checkpoint to {}'.format(args.output))
def plot_dimension_interval(ax, cmap, dim_: int, x_box: List[float], y_box: List[float], x_color='#FFB570', y_color='#67AB9F', label=False):
    """Draw one horizontal interval per box on row ``dim_`` of ``ax``.

    The X interval is drawn thicker (lw=10) under the thinner Y interval
    (lw=7).  When ``label`` is True, legend labels (and ``cmap``) are attached
    so only one call per figure should pass label=True.

    NOTE(review): ``cmap`` is only forwarded in the labeled branch even though
    explicit colors are also given — confirm this asymmetry is intended.
    """
    if (not label):
        ax.hlines(dim_, x_box[0], x_box[1], x_color, lw=10)
        ax.hlines(dim_, y_box[0], y_box[1], y_color, lw=7)
    else:
        ax.hlines(dim_, x_box[0], x_box[1], x_color, lw=10, label='X Box Intervals', cmap=cmap)
        ax.hlines(dim_, y_box[0], y_box[1], y_color, lw=7, label='Y Box Intervals', cmap=cmap)
def register_all_cityscapes(root='datasets'):
    """Register instance- and semantic-segmentation variants of every raw
    Cityscapes split with Detectron2's Dataset/Metadata catalogs.

    Args:
        root: Directory that the relative image/ground-truth dirs are joined to.
    """
    for (key, (image_dir, gt_dir)) in _RAW_CITYSCAPES_SPLITS.items():
        meta = _get_builtin_metadata('cityscapes')
        image_dir = os.path.join(root, image_dir)
        gt_dir = os.path.join(root, gt_dir)
        inst_key = key.format(task='instance_seg')
        # x=image_dir / y=gt_dir default args pin the current loop values,
        # avoiding the classic late-binding closure bug.
        DatasetCatalog.register(inst_key, (lambda x=image_dir, y=gt_dir: load_cityscapes_instances(x, y, from_json=True, to_polygons=True)))
        MetadataCatalog.get(inst_key).set(image_dir=image_dir, gt_dir=gt_dir, evaluator_type='cityscapes', **meta)
        sem_key = key.format(task='sem_seg')
        DatasetCatalog.register(sem_key, (lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)))
        MetadataCatalog.get(sem_key).set(image_dir=image_dir, gt_dir=gt_dir, evaluator_type='sem_seg', **meta)
class Boco():
    """Minimal named component; callers are expected to attach a
    ``computeLoss`` callable before ``validate`` is invoked."""

    def __init__(self, name):
        self.name = name

    def validate(self):
        """Assert that a loss function has been attached.

        Uses ``getattr`` so a missing attribute raises the intended
        AssertionError (with its explanatory message) instead of an
        AttributeError that would hide the message.
        """
        assert getattr(self, 'computeLoss', None), 'You need to specify a function to compute the loss'
def get_video_loader(use_petrel_backend: bool=True, enable_mc: bool=True, conf_path: str=None):
    """Return a closure that opens a video path with decord's ``VideoReader``.

    When the petrel client is importable and requested, paths containing
    's3:' are first fetched through the client into an in-memory buffer.

    Args:
        use_petrel_backend: Use the petrel storage client when available.
        enable_mc: Enable memcached in the petrel client.
        conf_path: Optional petrel client config path.
    """
    if (petrel_backend_imported and use_petrel_backend):
        _client = Client(conf_path=conf_path, enable_mc=enable_mc)
    else:
        _client = None
    def _loader(video_path):
        # Remote objects are downloaded into a BytesIO so VideoReader can seek.
        if ((_client is not None) and ('s3:' in video_path)):
            video_path = io.BytesIO(_client.get(video_path))
        # Single decoding thread on CPU context 0.
        vr = VideoReader(video_path, num_threads=1, ctx=cpu(0))
        return vr
    return _loader
def create_split_mesh(cat_desc, edge_length_threshold, overwrite=False, start_threshold=None):
    """Create split-mesh data for all template models of a ShapeNet category.

    Args:
        cat_desc: Human-readable category description, resolved to a cat id.
        edge_length_threshold: Target threshold for the mesh config.
        overwrite: Regenerate data that already exists.
        start_threshold: Optional coarser threshold whose config is used as
            the initial state for incremental splitting.
    """
    # Imports are local because these project modules are only needed here.
    from shapenet.core import cat_desc_to_id
    from shapenet.core.meshes.config import get_mesh_config
    from template_ffd.templates.ids import get_template_ids
    cat_id = cat_desc_to_id(cat_desc)
    example_ids = get_template_ids(cat_id)
    config = get_mesh_config(edge_length_threshold)
    init = (None if (start_threshold is None) else get_mesh_config(start_threshold))
    config.create_cat_data(cat_id, example_ids, overwrite, init)
class LevelsFilter(logging.Filter):
    """Logging filter that passes only records whose level is in ``levels``.

    Args:
        levels: Iterable of level *names* (e.g. ``['INFO', 'ERROR']``),
            resolved to numeric levels via the ``logging`` module.
    """

    def __init__(self, levels):
        # Initialize the base Filter so its name-based filtering state exists.
        super().__init__()
        self.levels = [getattr(logging, level) for level in levels]

    def filter(self, record):
        """Return True iff the record's numeric level is one of ours."""
        return (record.levelno in self.levels)
def reorder_image(img, input_order='HWC'):
    """Return ``img`` in HWC layout.

    A 2-D image gains a trailing channel axis; a CHW image is transposed to
    HWC; an HWC image is returned unchanged.

    Raises:
        ValueError: If ``input_order`` is neither 'HWC' nor 'CHW'.
    """
    if input_order not in ('HWC', 'CHW'):
        raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'")
    # Grayscale input: just add the channel dimension, regardless of order.
    if len(img.shape) == 2:
        return img[..., None]
    if input_order == 'CHW':
        return img.transpose(1, 2, 0)
    return img
def update_config_from_file(filename, base_cfg=None):
    """Load a YAML experiment config and merge it into a config object.

    Args:
        filename: Path to the YAML file.
        base_cfg: Config to update in place; when None the module-level
            global ``cfg`` is updated instead.
    """
    exp_config = None
    with open(filename) as f:
        # edict gives attribute-style access to the parsed mapping.
        exp_config = edict(yaml.safe_load(f))
    if (base_cfg is not None):
        _update_config(base_cfg, exp_config)
    else:
        _update_config(cfg, exp_config)
class ADFF(nn.Module):
    """Adaptive dense feature fusion over a 5-level feature pyramid.

    For each target level j in {5, 4, 3, 2, 1}, every input level i is
    resized to level j's spatial size by a dedicated ``_UpProjection``
    (``upsample_scale{i}to{j}``); the five results are concatenated along the
    channel axis and fused by a 3x3 conv + BatchNorm (``conv_scale{j}`` /
    ``bn_scale{j}``).  The channel budget halves from one target level to the
    next (concat input 1280 -> 640 -> ...; fused output 1024 -> 512 -> ...).
    """

    def __init__(self, block_channel, adff_num_features=1280, rpd_num_features=2048):
        super(ADFF, self).__init__()
        # The fused output width starts at half the requested rpd width.
        rpd_num_features = rpd_num_features // 2
        print('block_channel:', block_channel)
        # Register per-level modules in the same order as the original
        # hand-unrolled code: target level 5 first, then 4, 3, 2, 1; within a
        # target level the five upsamplers, then conv, then bn.
        for target in range(5, 0, -1):
            for src in range(1, 6):
                setattr(self, 'upsample_scale{}to{}'.format(src, target), _UpProjection(num_input_features=block_channel[src - 1], num_output_features=(adff_num_features // 5)))
            setattr(self, 'conv_scale{}'.format(target), nn.Conv2d(adff_num_features, rpd_num_features, kernel_size=3, stride=1, padding=1, bias=False))
            setattr(self, 'bn_scale{}'.format(target), nn.BatchNorm2d(rpd_num_features))
            # Each finer target level works with half the channels.
            adff_num_features = adff_num_features // 2
            rpd_num_features = rpd_num_features // 2

    def forward(self, feature_pyramid):
        """Fuse the 5-level ``feature_pyramid``; return [scale1, ..., scale5]
        fused maps (finest target first)."""
        spatial_sizes = [[level.size(2), level.size(3)] for level in feature_pyramid]
        fused_feature_pyramid = []
        for target in range(1, 6):
            # Resize every input level to this target level's spatial size.
            resized = [getattr(self, 'upsample_scale{}to{}'.format(src, target))(feature_pyramid[src - 1], spatial_sizes[target - 1]) for src in range(1, 6)]
            merged = torch.cat(resized, 1)
            conv = getattr(self, 'conv_scale{}'.format(target))
            bn = getattr(self, 'bn_scale{}'.format(target))
            fused_feature_pyramid.append(F.relu(bn(conv(merged))))
        return fused_feature_pyramid
def __median_wilcoxon_to_latex(indicator_name: str, wilcoxon_data: pd.DataFrame, caption: str, label):
    """Render a standalone LaTeX document with a Median(IQR) + Wilcoxon table
    for one quality indicator.

    Each cell shows median(IQR) for (problem, algorithm); all algorithms
    except the last carry the Wilcoxon sign (+/-/approx) vs. the reference,
    the best cell per problem is shaded dark gray and the second best light
    gray, and a final row summarizes +/approx/- counts per algorithm.

    Args:
        indicator_name: Indicator to filter ``wilcoxon_data`` on.
        wilcoxon_data: Long-format frame with columns Indicator, Problem,
            Algorithm, Median, IQR, TestResult.
        caption: Table caption.
        label: Table label (NOTE(review): shadowed by the comprehension
            variable below — harmless in Python 3, but confusing).
    """
    indicator_data = wilcoxon_data[(wilcoxon_data['Indicator'] == indicator_name)]
    problems = pd.unique(indicator_data['Problem'])
    algorithms = pd.unique(indicator_data['Algorithm'])
    num_columns = len(algorithms)
    columns = algorithms
    alignment = 'c'
    # One left column for problem names, then one centered column per algorithm.
    col_format = '{}|{}'.format(alignment, (alignment * num_columns))
    column_labels = ['\\textbf{{{0}}}'.format(label.replace('_', '\\_')) for label in columns]
    output = io.StringIO()
    # Standalone document preamble with the two shading colors.
    output.write('\\documentclass{article}\n')
    output.write('\\usepackage[utf8]{inputenc}\n')
    output.write('\\usepackage{tabularx}\n')
    output.write('\\usepackage{colortbl}\n')
    output.write('\\usepackage[table*]{xcolor}\n')
    output.write('\\xdefinecolor{gray95}{gray}{0.65}\n')
    output.write('\\xdefinecolor{gray25}{gray}{0.8}\n')
    output.write('\\title{Median and Wilcoxon}\n')
    output.write('\\author{}\n')
    output.write('\\begin{document}\n')
    output.write('\\maketitle\n')
    output.write('\\section{Table}\n')
    output.write('\\begin{table}[!htp]\n')
    output.write(' \\caption{{{}}}\n'.format(caption))
    output.write(' \\label{{{}}}\n'.format(label))
    output.write(' \\centering\n')
    output.write(' \\begin{tiny}\n')
    output.write((' \\begin{tabular}{%s}\n' % col_format))
    output.write(' & {} \\\\\\hline\n'.format(' & '.join(column_labels)))
    # counters[algo] = [#better(+), #approx, #worse(-)] accumulated over problems.
    counters = {}
    for algorithm in algorithms:
        counters[algorithm] = [0, 0, 0]
    for problem in problems:
        values = []
        for algorithm in algorithms:
            row = indicator_data[((indicator_data['Problem'] == problem) & (indicator_data['Algorithm'] == algorithm))]
            value = '{:.2e}({:.2e})'.format(row['Median'].tolist()[0], row['IQR'].tolist()[0])
            # The last algorithm is the reference: it carries no test marker.
            if (algorithm != algorithms[(- 1)]):
                if (row['TestResult'].tolist()[0] == '-'):
                    value = '{{{}-}}'.format(value)
                    counters[algorithm][2] = (counters[algorithm][2] + 1)
                elif (row['TestResult'].tolist()[0] == '+'):
                    value = '{{{}+}}'.format(value)
                    counters[algorithm][0] = (counters[algorithm][0] + 1)
                else:
                    value = '{{{}\\approx}}'.format(value)
                    counters[algorithm][1] = (counters[algorithm][1] + 1)
            values.append(value)
        # Rank cells by (median, IQR); direction depends on the indicator.
        medians = indicator_data[(indicator_data['Problem'] == problem)]['Median']
        iqrs = indicator_data[(indicator_data['Problem'] == problem)]['IQR']
        pairs = list(zip(medians, iqrs))
        indexes = sorted(range(len(pairs)), key=(lambda x: pairs[x]))
        if check_minimization(indicator_name):
            best = indexes[0]
            second_best = indexes[1]
        else:
            best = indexes[(- 1)]
            second_best = indexes[(- 2)]
        values[best] = ('\\cellcolor{gray95} ' + values[best])
        values[second_best] = ('\\cellcolor{gray25} ' + values[second_best])
        # Narrow makebox keeps the exponent sign from widening the column.
        output.write('\\textbf{{{0}}} & ${1}$ \\\\\n'.format(problem, ' $ & $ '.join([str(val).replace('e-', 'e\\makebox[0.1cm]{-}').replace('e+', 'e\\makebox[0.1cm]{+}') for val in values])))
    # Summary row of +/approx/- counts, excluding the reference algorithm.
    counter_summary = []
    for algorithm in algorithms[:(- 1)]:
        counter_summary.append(counters[algorithm])
    output.write(' \\hline\n')
    output.write('\\textbf{{{0}}} & ${1}$ \\\\\n'.format('$+/\\approx/-$', ' $ & $ '.join([((((str(val[0]) + '/') + str(val[1])) + '/') + str(val[2])) for val in counter_summary])))
    output.write(' \\end{tabular}\n')
    output.write(' \\end{tiny}\n')
    output.write('\\end{table}\n')
    output.write('\\end{document}')
    return output.getvalue()
class TFFlaubertForTokenClassification():
    """Import-guard placeholder used when TensorFlow is not installed.

    Every entry point immediately calls ``requires_tf``, which fails fast
    with an informative "TensorFlow required" error.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def ResNet50(include_top=False, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs):
    """Build a ResNet-50 variant via the shared ``ResNet`` constructor.

    NOTE(review): conv5 uses stride1=1 with dilation=2 (and conv2 uses
    stride1=1), i.e. a dilated, reduced-stride variant rather than the stock
    ResNet-50 — presumably for dense-prediction backbones; confirm.
    """
    def stack_fn(x):
        # Four residual stages with 3/4/6/3 blocks (standard ResNet-50 depths).
        x = stack1(x, 64, 3, stride1=1, name='conv2')
        x = stack1(x, 128, 4, name='conv3')
        x = stack1(x, 256, 6, name='conv4')
        x = stack1(x, 512, 3, name='conv5', stride1=1, dilation=2)
        return x
    return ResNet(stack_fn, False, True, 'resnet50', include_top, weights, input_tensor, input_shape, pooling, classes, **kwargs)
def maybe_dict_from_checkpoint(ckpt_path=None, ckpt_dict=None):
    """Return ``ckpt_dict`` if given, otherwise load it from ``ckpt_path``.

    At least one of the two arguments must be provided.
    """
    assert not ((ckpt_path is None) and (ckpt_dict is None))
    if ckpt_dict is not None:
        return ckpt_dict
    return load_dict_from_checkpoint(ckpt_path)
class IntLinear(nn.Module):
    """Linear layer trained with quantization noise.

    During training, a random subset of the weights is replaced by its
    int-quantized emulation (straight-through), so the network becomes robust
    to post-training integer quantization; at evaluation the full weight
    matrix is quantized.

    Args:
        in_features / out_features / bias: as in ``nn.Linear``.
        p: fraction of weights that receive quantization noise in training.
        update_step: re-estimate scale/zero-point every this many forwards.
        bits: quantization bit-width.
        method: observer passed to ``emulate_int`` (e.g. 'histogram').
    """
    def __init__(self, in_features, out_features, bias=True, p=0, update_step=3000, bits=8, method='histogram'):
        super(IntLinear, self).__init__()
        self.in_features = int(in_features)
        self.out_features = int(out_features)
        self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.chosen_bias = bias
        if self.chosen_bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        # Forward-pass counter driving the periodic scale/zero-point refresh.
        self.counter = 0
    def reset_parameters(self):
        """Xavier-init the weight and zero the bias."""
        nn.init.xavier_uniform_(self.weight)
        if self.chosen_bias:
            nn.init.constant_(self.bias, 0.0)
        return
    def forward(self, input):
        # At eval time p=1 so the mask below stays all-zero and every weight
        # takes its quantized value (full quantization).
        p = (self.p if self.training else 1)
        # Periodically drop the cached quantization params so emulate_int
        # re-estimates them; counter==0 also initializes them on first call.
        if ((self.counter % self.update_step) == 0):
            self.scale = None
            self.zero_point = None
        self.counter += 1
        (weight_quantized, self.scale, self.zero_point) = emulate_int(self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point)
        # mask==1 marks weights that keep their float value (noise suppressed).
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_((1 - p))
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
        # Clamp floats to the representable quantization range, then add the
        # detached noise: straight-through estimator — gradients flow only to
        # the float weights.
        clamp_low = ((- self.scale) * self.zero_point)
        clamp_high = (self.scale * (((2 ** self.bits) - 1) - self.zero_point))
        weight = (torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach())
        output = F.linear(input, weight, self.bias)
        return output
    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}'.format(self.in_features, self.out_features, (self.bias is not None), self.p, self.bits, self.method)
def hook_adapavgpool2d(m, x, y):
    """FLOPs-counting hook for ``nn.AdaptiveAvgPool2d``.

    Estimates the adaptive kernel as ceil(input_size / output_size) per spatial
    dim and charges one op per kernel element per output element.
    """
    inp = x[0]
    out_hw = _pair(m.output_size)
    # Per-dimension kernel extent, rounded up, multiplied across dims.
    kernel = torch.Tensor(list(inp.size()[2:])) / torch.Tensor(out_hw)
    flops_per_element = torch.prod(torch.ceil(kernel)).item()
    return int(flops_per_element * y.numel())
def set_seed(seed):
    """Seed Python's, NumPy's and TensorFlow's RNGs for reproducibility.

    Args:
        seed: Any integer; folded into the 32-bit range NumPy accepts.
    """
    import random
    import tensorflow as tf
    # Original line was the incomplete statement `seed %=` (SyntaxError).
    # np.random.seed requires a value in [0, 2**32); fold larger seeds in.
    seed %= 2 ** 32
    random.seed(seed)
    np.random.seed(seed)
    tf.compat.v1.set_random_seed(seed)
    print(('using seed %s' % str(seed)))
class ContinuousMctsPolicies(AbstractMctsPolicies):
    """Placeholder MCTS policy bundle for continuous action spaces.

    Wires RAVE scoring, progressive widening and most-visited extraction into
    the base class, then immediately raises: the implementation is unfinished
    and constructing this class always fails.
    """
    def __init__(self, sample=None, initialize=None, score_c=1.0, pw_C=1.0, pw_alpha=0.25, *args, **kwargs):
        super(ContinuousMctsPolicies, self).__init__(*args, score=ContinuousRaveScore(score_c), widen=ProgressiveWiden(pw_C, pw_alpha), extract=MostVisitedExtract(), sample=sample, initialize=initialize, **kwargs)
        # Deliberate guard: class is not usable yet.
        raise NotImplementedError('this does not yet exist')
class UP(TDAgent):
    """Universal-Portfolio-style agent (Cover-like — confirm against source).

    Maintains a fixed population of random constant-rebalanced portfolios and
    outputs their wealth-weighted average each period.

    Attributes:
        eval_points: Number of candidate portfolios sampled on the simplex.
        leverage: Stretch factor pulling candidates away from the uniform 1/m
            portfolio (floored at 1/m).
        W: (eval_points x m) candidate-portfolio matrix; lazily initialized.
    """
    def __init__(self, eval_points=10000, leverage=1.0, W=None):
        super(UP, self).__init__()
        self.eval_points = eval_points
        self.leverage = leverage
        self.W = W
    def init_portfolio(self, X):
        """Sample the candidate portfolios and reset their wealths to 1."""
        # m = number of assets; sample eval_points points on the (m-1)-simplex.
        m = X.shape[1]
        self.W = np.matrix(mc_simplex((m - 1), self.eval_points))
        # S holds the running (cumulative) wealth of each candidate portfolio.
        self.S = np.matrix(np.ones(self.W.shape[0])).T
        # Linearly stretch candidates away from the uniform portfolio 1/m
        # according to the requested leverage (leverage >= 1/m enforced).
        leverage = max(self.leverage, (1.0 / m))
        stretch = ((leverage - (1.0 / m)) / (1.0 - (1.0 / m)))
        self.W = (((self.W - (1.0 / m)) * stretch) + (1.0 / m))
    def decide_by_history(self, x, last_b):
        """Update candidate wealths with the latest relative prices and return
        the normalized wealth-weighted average portfolio (1-D array)."""
        x = self.get_last_rpv(x)
        x = np.reshape(x, (1, x.size))
        if (self.W is None):
            self.init_portfolio(x)
        # Multiply each candidate's wealth by its one-period return W @ x.
        self.S = np.multiply(self.S, (self.W * np.matrix(x).T))
        # Wealth-weighted combination of candidates, normalized to sum to 1.
        b = (self.W.T * self.S)
        pv = (b / np.sum(b))
        pvn = np.ravel(pv)
        return pvn
def makeGraph(root):
    """Build a directed graph from a tree rooted at ``root``.

    Node ids are assigned via ``get_name``; children are added recursively by
    ``add_to_graph``, which also classifies nodes into the good/bad/mid lists.

    Returns:
        Tuple (graph, good nodes, bad nodes, mid nodes); the root is 'mid'.
    """
    g = nx.DiGraph()
    nid = 0
    (name, nid) = get_name(root, nid)
    g.add_node(name)
    good = []
    bad = []
    # The root itself is classified as 'mid'.
    mid = [name]
    for child in root.children:
        # add_to_graph returns the next free node id.
        nid = add_to_graph(g, name, child, good, bad, mid, nid)
    return (g, good, bad, mid)
def get_model():
    """Build a small Keras CNN for binary classification of 80x80 RGB images.

    Two conv/pool stages (64 filters each) followed by a 64-unit dense layer
    and a single sigmoid output.
    """
    model = Sequential()
    model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(80, 80, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    # Single sigmoid unit -> probability for the positive class.
    model.add(Dense(1, activation='sigmoid'))
    return model
class SemiNet(torch.nn.Module):
    """Six-conv CNN for 10-way classification of 32x32 inputs (e.g. CIFAR-10).

    Three stages (32->64, 128->128, 256->256 channels), each ending in 2x2
    max-pooling, followed by three fully-connected layers (4096->1024->512->10).
    The GroupNorm layers (GN1-GN3) and the ``taskcla`` argument are registered
    for interface compatibility but are not used by ``forward``.
    """

    def __init__(self, in_channels=3, taskcla=None):
        super(SemiNet, self).__init__()
        # Stage 1
        self.conv1 = torch.nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1)
        self.GN1 = torch.nn.GroupNorm(32, 32)
        self.BN1 = torch.nn.BatchNorm2d(32)
        self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        # Stage 2
        self.conv3 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.GN2 = torch.nn.GroupNorm(32, 128)
        self.BN2 = torch.nn.BatchNorm2d(128)
        self.conv4 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        # Stage 3
        self.conv5 = torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.GN3 = torch.nn.GroupNorm(32, 256)
        self.BN3 = torch.nn.BatchNorm2d(256)
        self.conv6 = torch.nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        # Shared pooling / activation / regularization modules.
        self.maxpool = torch.nn.MaxPool2d(2)
        self.relu = torch.nn.ReLU()
        self.drop1 = torch.nn.Dropout(0.05)
        self.drop2 = torch.nn.Dropout(0.1)
        self.drop3 = torch.nn.Dropout(0.1)
        # Classifier head; 4096 = 256 channels * 4 * 4 for 32x32 inputs.
        self.fc1 = torch.nn.Linear(4096, 1024)
        self.fc2 = torch.nn.Linear(1024, 512)
        self.fc3 = torch.nn.Linear(512, 10)

    def forward(self, x):
        # Stage 1: conv1(+BN) -> conv2 -> pool
        out = self.relu(self.BN1(self.conv1(x)))
        out = self.maxpool(self.relu(self.conv2(out)))
        # Stage 2: conv3(+BN) -> conv4 -> pool -> dropout
        out = self.relu(self.BN2(self.conv3(out)))
        out = self.drop1(self.maxpool(self.relu(self.conv4(out))))
        # Stage 3: conv5(+BN) -> conv6 -> pool
        out = self.relu(self.BN3(self.conv5(out)))
        out = self.maxpool(self.relu(self.conv6(out)))
        # Flatten to (batch, 4096) and run the classifier head.
        out = out.view(x.size(0), -1)
        out = self.drop2(self.relu(self.fc1(out)))
        out = self.drop3(self.relu(self.fc2(out)))
        return self.fc3(out)
def unset_hook(f: Callable[[Any], Any]) -> Callable[[Any], Any]:
    """Decorator: after running ``f``, mark the attribution model as unhooked.

    The original body had a bare ``(f)`` expression — a no-op that was almost
    certainly meant to be ``@functools.wraps(f)``; applying wraps preserves
    the wrapped function's name/docstring for introspection.
    """
    from functools import wraps

    @wraps(f)
    def unset_hook_wrapper(self, **kwargs):
        f(self, **kwargs)
        # Side effect: record that the model's hooks have been removed.
        self.attribution_model.is_hooked = False
    return unset_hook_wrapper
def gen_convs(inchannel, outchannel, bn=False):
    """Lazily yield a 3x3 same-padding conv, optionally a BatchNorm over its
    output channels, and finally an in-place ReLU."""
    yield nn.Conv2d(inchannel, outchannel, 3, padding=1)
    if bn:
        yield nn.BatchNorm2d(outchannel)
    yield nn.ReLU(inplace=True)
def lazily_load_dataset(corpus_type, opt):
    """Generator that yields the serialized dataset for one corpus split.

    Being a generator defers the (potentially large) ``torch.load`` until the
    caller actually iterates.

    Args:
        corpus_type: Either 'train' or 'valid'.
        opt: Options object; ``opt.data`` is the dataset path prefix.
    """
    assert (corpus_type in ['train', 'valid'])
    def _lazy_dataset_loader(pt_file, corpus_type):
        dataset = torch.load(pt_file)
        logger.info(('Loading %s dataset from %s, number of examples: %d' % (corpus_type, pt_file, len(dataset))))
        return dataset
    # Datasets are stored as '<prefix>.<split>.pt'.
    pt = (((opt.data + '.') + corpus_type) + '.pt')
    (yield _lazy_dataset_loader(pt, corpus_type))
class LlamaTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for LLaMA-family models.

    Fixes vs. the previous revision:
    - ``no_prefix_space_tokens``, ``vocab_size``, ``bos_token_id`` and
      ``eos_token_id`` are now ``@property``: the class itself consumes them
      as attributes (``range(self.vocab_size)``, ``[self.bos_token_id]``),
      which would otherwise operate on bound methods and raise TypeError.
    - The prefix-space check used ``startswith('')`` (always True, making the
      set always empty); restored the sentencepiece word-boundary marker '▁'.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, add_bos_token=True, add_eos_token=False, decode_with_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.decode_with_prefix_space = decode_with_prefix_space
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Lazily-computed cache for no_prefix_space_tokens.
        self._no_prefix_space_tokens = None

    @property
    def no_prefix_space_tokens(self):
        """Ids of tokens that do NOT start with the '▁' word-boundary marker."""
        if (self._no_prefix_space_tokens is None):
            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
            # NOTE(review): this collects *ids* while _maybe_add_prefix_space
            # tests a *token string* against the set — kept as-is for
            # compatibility with the upstream implementation; confirm intent.
            self._no_prefix_space_tokens = {i for (i, tok) in enumerate(vocab) if (not tok.startswith('▁'))}
        return self._no_prefix_space_tokens

    @property
    def vocab_size(self):
        """Size of the underlying sentencepiece vocabulary."""
        return self.sp_model.get_piece_size()

    @property
    def bos_token_id(self) -> Optional[int]:
        return self.sp_model.bos_id()

    @property
    def eos_token_id(self) -> Optional[int]:
        return self.sp_model.eos_id()

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def _maybe_add_prefix_space(self, tokens, decoded):
        """Prepend a space unless the first token is known to need none."""
        if (tokens and (tokens[0] not in self.no_prefix_space_tokens)):
            return (' ' + decoded)
        else:
            return decoded

    def convert_tokens_to_string(self, tokens):
        """Decode tokens to text, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            if (token in self.all_special_tokens):
                # Flush accumulated ordinary tokens, then emit the special one.
                if (not prev_is_special):
                    out_string += ' '
                out_string += (self.sp_model.decode(current_sub_tokens) + token)
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy (or serialize) the sentencepiece model into ``save_directory``."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            # Original file is gone: dump the in-memory model proto instead.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Optionally wrap sequences with BOS / EOS ids."""
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []
        output = (bos_token_ids + token_ids_0)
        if (token_ids_1 is not None):
            output = (output + token_ids_1)
        if self.add_eos_token:
            output = (output + [self.eos_token_id])
        return output

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """All-zero type ids (LLaMA does not use token-type embeddings)."""
        eos = [self.eos_token_id]
        if (token_ids_1 is None):
            return (len((token_ids_0 + eos)) * [0])
        return (len((((token_ids_0 + eos) + token_ids_1) + eos)) * [0])
class PPM(nn.ModuleList):
    """Pyramid Pooling Module.

    Pools the input at several scales, projects each pooled map with a
    1x1 ConvModule, and upsamples every branch back to the input's
    spatial resolution.
    """

    def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, act_cfg, align_corners, **kwargs):
        super(PPM, self).__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # One (adaptive-pool -> 1x1 conv) branch per pyramid scale.
        for scale in pool_scales:
            branch = nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                ConvModule(self.in_channels, self.channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, **kwargs),
            )
            self.append(branch)

    def forward(self, x):
        """Run every pyramid branch and return the upsampled outputs."""
        target_size = x.size()[2:]
        return [
            resize(branch(x), size=target_size, mode='bilinear', align_corners=self.align_corners)
            for branch in self
        ]
def get_connected_molecules(molecules):
    """Return the subset of *molecules* for which is_connected() holds."""
    return [mol for mol in molecules if is_connected(mol)]
def test_parse_dimensions():
    """parse_dimensions should turn 'WxH' strings into integer tuples."""
    cases = {'3x3': (3, 3), '4x2': (4, 2)}
    for text, expected in cases.items():
        result = parse_dimensions(text)
        assert result == expected, (result, '!=', expected)
# NOTE(review): _materialize is defined elsewhere in this file; it appears to
# load/register the 'tensorflow' backend used by the NHWC classes below --
# confirm its contract before relying on this comment.
_materialize('tensorflow')
class NHWCConv2dValidPad(NHWCConv2d):
    """NHWC 2-D convolution specialized to 'VALID' (no) padding.

    Thin wrapper that forwards all shape parameters to NHWCConv2d and pins
    the padding mode to 'VALID'; parameters may be concrete ints or
    symbolic z3 expressions.
    """

    def __init__(self, out_channels: Union[(int, z3.ExprRef)], stride: Union[(int, z3.ExprRef)], dilation_h: Union[(int, z3.ExprRef)], dilation_w: Union[(int, z3.ExprRef)]):
        super().__init__(out_channels, stride, dilation_h, dilation_w, 'VALID')
def set_log_dir(root_dir, exp_name):
    """Create a timestamped experiment directory tree under *root_dir*.

    Layout: <root_dir>/<exp_name>_<timestamp>/{Model,Log,Samples}.

    Returns:
        dict with keys 'prefix', 'ckpt_path', 'log_path' and 'sample_path'
        pointing at the created directories.
    """
    os.makedirs(root_dir, exist_ok=True)
    # datetime.now().astimezone() yields local-timezone time with the
    # standard library alone, replacing the third-party dateutil
    # (dateutil.tz.tzlocal()) dependency the original used; the formatted
    # timestamp is identical.
    timestamp = datetime.now().astimezone().strftime('%Y_%m_%d_%H_%M_%S')
    prefix = os.path.join(root_dir, exp_name) + '_' + timestamp
    os.makedirs(prefix)
    path_dict = {'prefix': prefix}
    for key, subdir in (('ckpt_path', 'Model'), ('log_path', 'Log'), ('sample_path', 'Samples')):
        sub_path = os.path.join(prefix, subdir)
        os.makedirs(sub_path)
        path_dict[key] = sub_path
    return path_dict
def load_config(path='configs/default.yaml') -> dict:
    """Parse the YAML configuration file at *path* into a dict."""
    with open(path, 'r') as ymlfile:
        return yaml.safe_load(ymlfile)
def handle_signal(signum, frame) -> None:
    """Signal handler: exit immediately while already waiting, otherwise
    request a graceful abort via the module-level `abort` flag."""
    global abort
    log.info(f'Received interrupt signal {signum}')
    if waiting:
        sys.exit(1)
    abort = True
def parse_args():
    """Build and parse the command-line arguments for detector training."""
    ap = argparse.ArgumentParser(description='Train a detector')
    ap.add_argument('config', help='train config file path')
    ap.add_argument('--work_dir', help='the dir to save logs and models')
    ap.add_argument('--resume_from', help='the checkpoint file to resume from')
    ap.add_argument('--validate', action='store_true', help='whether to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are mutually exclusive ways of selecting devices.
    gpu_group = ap.add_mutually_exclusive_group()
    gpu_group.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
    gpu_group.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
    ap.add_argument('--seed', type=int, default=None, help='random seed')
    ap.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    ap.add_argument('--local_rank', type=int, default=0)
    ap.add_argument('--autoscale-lr', action='store_true', help='automatically scale lr with the number of gpus')
    args = ap.parse_args()
    # Make --local_rank visible through the environment when not already set.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def grad(y, x, create_graph=True, keepdim=False):
    """Gradient of each last-dimension component of *y* w.r.t. *x*.

    Args:
        y: tensor of shape (N, Ny) or (Ny,); must be connected to x in the
           autograd graph.
        x: tensor whose last dimension has size Nx.
        create_graph: keep the graph so higher-order derivatives work.
        keepdim: if True, keep singleton leading dims in the result shape.

    Returns:
        The per-component gradients concatenated along the last dimension
        and reshaped so the trailing dimension is Nx, with leading dims
        drawn from (N, Ny) (singletons dropped unless keepdim).
    """
    # N is the batch size when y is 2-D, otherwise treated as 1.
    N = (y.size(0) if (len(y.size()) == 2) else 1)
    Ny = y.size((- 1))
    Nx = x.size((- 1))
    # Ones as grad_outputs: each call below extracts d y_i / d x exactly.
    z = torch.ones_like(y[(..., 0)])
    dy = []
    for i in range(Ny):
        dy.append(torch.autograd.grad(y[(..., i)], x, grad_outputs=z, create_graph=create_graph)[0])
    # Build the leading shape from (N, Ny); keepdim retains singleton dims,
    # otherwise dims of size 1 are squeezed out.
    shape = np.array([N, Ny])[(2 - len(y.size())):]
    shape = (list(shape) if keepdim else list(shape[(shape > 1)]))
    return torch.cat(dy, dim=(- 1)).view((shape + [Nx]))
def is_time(token):
    """True when *token* is nothing but a clock time like '3:30pm' or '12:45AM'."""
    remainder = re.sub('(\\d)*(\\d):(\\d\\d)([aA][mM]|[pP][Mm])', '', token).strip()
    return remainder == ''
class TaskDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
    """Seq2seq collator for multi-task batches.

    Pops the per-example 'task' (and optional 'labels_list') fields before
    delegating padding to DataCollatorForSeq2Seq, then re-attaches them at
    the batch level. All examples in a batch must share the same task.
    """

    def check_uniqueness(self, samples):
        # Every example in the batch must come from a single task.
        assert (len(np.unique(samples)) == 1)

    def __call__(self, features):
        # Fix: removed leftover debug prints ('#### COLLATOR DEBUG ...')
        # that spammed stdout on every batch.
        # 'task'/'labels_list' are not tensor fields, so strip them before
        # the parent collator pads the remaining keys.
        tasks = [d.pop('task') for d in features]
        labels_list_exist = 'labels_list' in features[0]
        if labels_list_exist:
            labels_list = [d.pop('labels_list') for d in features]
        self.check_uniqueness(tasks)
        output = super().__call__(features)
        output['task'] = tasks[0]
        if labels_list_exist:
            output['labels_list'] = labels_list
        return output
def get_permutations(num_images):
    """All ordered index pairs (i, j) with i != j for *num_images* images."""
    pairs = [(i, j) for i in range(num_images) for j in range(num_images) if i != j]
    return np.array(pairs)
class BN(PlainNetBasicBlockClass):
    """BatchNorm2d wrapped as a PlainNet basic block.

    The block is channel-preserving: in_channels always equals out_channels.
    """

    def __init__(self, out_channels=None, copy_from=None, no_create=False, **kwargs):
        """Create a BN block, either fresh or by adopting an existing
        nn.BatchNorm2d via *copy_from*. With no_create=True only metadata
        is recorded and no module is instantiated."""
        super(BN, self).__init__(**kwargs)
        self.no_create = no_create
        if copy_from is not None:
            assert isinstance(copy_from, nn.BatchNorm2d)
            self.in_channels = copy_from.weight.shape[0]
            self.out_channels = copy_from.weight.shape[0]
            # out_channels, when given, must agree with the adopted module.
            assert (out_channels is None) or (out_channels == self.out_channels)
            self.netblock = copy_from
        else:
            self.in_channels = out_channels
            self.out_channels = out_channels
            if no_create:
                return
            else:
                self.netblock = nn.BatchNorm2d(num_features=self.out_channels)

    def forward(self, x):
        return self.netblock(x)

    def __str__(self):
        return 'BN({})'.format(self.out_channels)

    def __repr__(self):
        return 'BN({}|{})'.format(self.block_name, self.out_channels)

    def get_output_resolution(self, input_resolution):
        # BatchNorm leaves the spatial resolution unchanged.
        return input_resolution

    def get_FLOPs(self, input_resolution):
        # One op per element: H * W * C.
        return ((input_resolution ** 2) * self.out_channels)

    def get_model_size(self):
        return self.out_channels

    def set_in_channels(self, c):
        """Re-target the block to *c* channels, rebuilding the module in
        trainable state unless no_create is set."""
        self.in_channels = c
        self.out_channels = c
        if not self.no_create:
            self.netblock = nn.BatchNorm2d(num_features=self.out_channels)
            self.netblock.train()
            self.netblock.requires_grad_(True)

    @classmethod
    def create_from_str(cls, s, no_create=False, **kwargs):
        """Parse a 'BN(name|channels)' string; return (block, remainder).

        Fix: this factory takes *cls* but was missing the @classmethod
        decorator, so calling BN.create_from_str(s) bound the string to
        *cls* and failed with a TypeError.
        """
        assert BN.is_instance_from_str(s)
        idx = _get_right_parentheses_index_(s)
        assert (idx is not None)
        param_str = s[len('BN('):idx]
        tmp_idx = param_str.find('|')
        if tmp_idx < 0:
            # No explicit block name: generate a unique one.
            tmp_block_name = 'uuid{}'.format(uuid.uuid4().hex)
        else:
            tmp_block_name = param_str[0:tmp_idx]
            param_str = param_str[(tmp_idx + 1):]
        out_channels = int(param_str)
        return (BN(out_channels=out_channels, block_name=tmp_block_name, no_create=no_create), s[(idx + 1):])
class SeparableConv2d_same(nn.Module):
    """Depthwise-separable convolution with explicit 'same'-style padding.

    A depthwise KxK conv (groups == in channels) followed by a 1x1
    pointwise conv; the input is padded by fixed_padding (defined
    elsewhere) before the depthwise stage instead of using built-in
    conv padding.
    """

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
        super(SeparableConv2d_same, self).__init__()
        # Depthwise stage: one filter per input channel, no implicit padding.
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation, groups=inplanes, bias=bias)
        # Pointwise stage: 1x1 conv mixing channels.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        padded = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
        depthwise_out = self.conv1(padded)
        return self.pointwise(depthwise_out)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that adds or removes a dataset RGB mean (EDSR-style).

    With sign=-1 it subtracts rgb_mean (scaled by rgb_range) and divides by
    rgb_std; with sign=+1 it applies the inverse shift. The layer's
    parameters are frozen.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=(- 1)):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # Identity weight scaled by 1/std, bias = sign * range * mean / std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = ((sign * rgb_range) * torch.Tensor(rgb_mean))
        self.bias.data.div_(std)
        # Fix: the original did `self.requires_grad = False`, which only
        # creates a module attribute and leaves weight/bias trainable.
        # Freeze the actual parameters instead.
        for p in self.parameters():
            p.requires_grad = False
class ResNetConvBackbone(nn.Module):
    """Feature extractor that runs a vanilla ResNet up to the flattened
    pooled features, i.e. everything except the final fc layer."""

    def __init__(self, num_layers: int, pretrained: bool) -> None:
        super(ResNetConvBackbone, self).__init__()
        self.resnet = get_vanilla_resnet_model(num_layers, pretrained)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the ResNet stem, stages 1-4 and global average pooling."""
        net = self.resnet
        for stage in (net.conv1, net.bn1, net.relu, net.maxpool,
                      net.layer1, net.layer2, net.layer3, net.layer4,
                      net.avgpool):
            x = stage(x)
        return torch.flatten(x, 1)
def get_bt_sentences(train_path):
    """Load the back-translation augmentation mapping stored alongside
    *train_path* (train_aug_bt_data.pkl in the same directory)."""
    pkl_path = Path(train_path).parent / 'train_aug_bt_data.pkl'
    return get_backtrans_data_dict(pkl_path, train_path)
def create_data_loader(root_dir, batch_size, nproc):
    """Build train/val DataLoaders over an ImageFolder dataset.

    When the SANITY_CHECK env var equals '1', tiny fixed-size train/val
    splits of 2 * nproc * batch_size samples each are used; otherwise an
    80/20 random split. Returns (train_loader, val_loader,
    (train_size, val_size)).
    """
    dataset = ImageFolder(os.path.realpath(root_dir), transforms.Compose([
        transforms.Resize(256),
        transforms.ColorJitter(),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.Resize(128),
        transforms.ToTensor(),
    ]))
    n_total = len(dataset)
    if os.environ.get('SANITY_CHECK') == '1':
        train_size = 2 * nproc * batch_size
        val_size = 2 * nproc * batch_size
        test_size = n_total - train_size - val_size
        train_set, val_set, _ = torch.utils.data.random_split(dataset, [train_size, val_size, test_size])
    else:
        train_size = int(n_total * 0.8)
        val_size = n_total - train_size
        train_set, val_set = torch.utils.data.random_split(dataset, [train_size, val_size])
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=0)
    return (train_loader, val_loader, (train_size, val_size))
def make_module(sym_model, ctx, input_desc):
    """Bind an MXNet (symbol, args, auxs) triple into an inference-only
    Module on the given context."""
    assert (isinstance(sym_model, tuple) and isinstance(sym_model[0], mx.symbol.Symbol))
    symnet, args, auxs = sym_model
    data_names = [d.name for d in input_desc]
    mod = mx.module.module.Module(symbol=symnet, data_names=data_names, label_names=None, context=ctx)
    mod.bind(input_desc, for_training=False)
    mod.set_params(args, auxs, allow_missing=True)
    return mod
class ANY():
    """Wildcard helper for assertions: compares equal to any instance of
    the wrapped type, e.g. ANY(int) == 7 is True."""

    def __init__(self, _type):
        self._expected_type = _type

    def __eq__(self, other):
        # Equality is simply an isinstance check against the wrapped type.
        return isinstance(other, self._expected_type)

    def __repr__(self):
        return f'ANY({self._expected_type.__name__})'
def main(_):
    """Train a 3-D DescriptorNet on voxelized shapes of FLAGS.category.

    Recreates fresh log/sample/checkpoint directories, loads and
    mean-centers the voxel data, then alternates Langevin synthesis and
    descriptor updates for FLAGS.num_epochs, periodically dumping samples,
    checkpoints and loss curves.
    """
    RANDOM_SEED = 66
    np.random.seed(RANDOM_SEED)
    output_dir = os.path.join(FLAGS.output_dir, FLAGS.category)
    sample_dir = os.path.join(output_dir, 'synthesis')
    log_dir = os.path.join(output_dir, 'log')
    model_dir = os.path.join(output_dir, 'checkpoints')
    # Start every run from empty log/sample/checkpoint directories.
    if tf.gfile.Exists(log_dir):
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)
    if tf.gfile.Exists(sample_dir):
        tf.gfile.DeleteRecursively(sample_dir)
    tf.gfile.MakeDirs(sample_dir)
    if tf.gfile.Exists(model_dir):
        tf.gfile.DeleteRecursively(model_dir)
    tf.gfile.MakeDirs(model_dir)
    train_data = data_io.getObj(FLAGS.data_path, FLAGS.category, cube_len=FLAGS.cube_len, num_voxels=FLAGS.train_size, low_bound=0, up_bound=1)
    data_io.saveVoxelsToMat(train_data, ('%s/observed_data.mat' % output_dir), cmin=0, cmax=1)
    # Mean-center the voxels and append a trailing channel dimension.
    voxel_mean = train_data.mean()
    train_data = (train_data - voxel_mean)
    train_data = train_data[(..., np.newaxis)]
    FLAGS.num_batches = int(math.ceil((len(train_data) / FLAGS.batch_size)))
    print('Reading voxel data {}, shape: {}'.format(FLAGS.category, train_data.shape))
    print(('min: %.4f\tmax: %.4f\tmean: %.4f' % (train_data.min(), train_data.max(), voxel_mean)))
    net = DescriptorNet3D(FLAGS)
    net.build_model()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Persistent pool of synthesized samples, refined across epochs.
        sample_size = (FLAGS.sample_batch * FLAGS.num_batches)
        sample_voxels = np.random.randn(sample_size, FLAGS.cube_len, FLAGS.cube_len, FLAGS.cube_len, 1)
        saver = tf.train.Saver(max_to_keep=50)
        des_loss_epoch = []
        rec_err_epoch = []
        plt.ion()
        for epoch in range(FLAGS.num_epochs):
            d_grad_vec = []
            des_loss_vec = []
            rec_err_vec = []
            start_time = time.time()
            sess.run(net.reset_grads)
            for i in range(FLAGS.num_batches):
                obs_data = train_data[(i * FLAGS.batch_size):min(len(train_data), ((i + 1) * FLAGS.batch_size))]
                syn_data = sample_voxels[(i * FLAGS.sample_batch):((i + 1) * FLAGS.sample_batch)]
                # Langevin sampling: the noisy variant is used for the
                # first 100 epochs, the noise-free one afterwards.
                if (epoch < 100):
                    syn = sess.run(net.langevin_descriptor_noise, feed_dict={net.syn: syn_data})
                else:
                    syn = sess.run(net.langevin_descriptor, feed_dict={net.syn: syn_data})
                # update_d_grads runs per batch; only its first two fetches
                # (grads, loss) are kept.
                (des_grads, des_loss) = sess.run([net.des_grads, net.des_loss, net.update_d_grads], feed_dict={net.obs: obs_data, net.syn: syn})[:2]
                d_grad_vec.append(des_grads)
                des_loss_vec.append(des_loss)
                rec_err = sess.run(net.recon_err, feed_dict={net.obs: obs_data, net.syn: syn})
                rec_err_vec.append(rec_err)
                # Write the refreshed samples back into the pool.
                sample_voxels[(i * FLAGS.sample_batch):((i + 1) * FLAGS.sample_batch)] = syn
            # Apply the per-batch descriptor updates once per epoch.
            sess.run(net.apply_d_grads)
            (d_grad_mean, des_loss_mean, rec_err_mean) = (float(np.mean(d_grad_vec)), float(np.mean(des_loss_vec)), float(np.mean(rec_err_vec)))
            des_loss_epoch.append(des_loss_mean)
            rec_err_epoch.append(rec_err_mean)
            end_time = time.time()
            print(('Epoch #%d, descriptor loss: %.4f, descriptor SSD weight: %.4f, Avg MSE: %4.4f, time: %.2fs' % (epoch, des_loss_mean, d_grad_mean, rec_err_mean, (end_time - start_time))))
            if ((epoch % FLAGS.log_step) == 0):
                # Periodically dump de-centered samples, a checkpoint and
                # updated loss curves.
                if (not os.path.exists(sample_dir)):
                    os.makedirs(sample_dir)
                data_io.saveVoxelsToMat((sample_voxels + voxel_mean), ('%s/sample%04d.mat' % (sample_dir, epoch)), cmin=0, cmax=1)
                if (not os.path.exists(model_dir)):
                    os.makedirs(model_dir)
                saver.save(sess, ('%s/%s' % (model_dir, 'net.ckpt')), global_step=epoch)
                if (not os.path.exists(log_dir)):
                    os.makedirs(log_dir)
                plt.figure(1)
                data_io.draw_graph(plt, des_loss_epoch, 'des_loss', log_dir, 'r')
                plt.figure(2)
                data_io.draw_graph(plt, rec_err_epoch, 'recon_error', log_dir, 'b')
def find_similar_token(token, tokens):
    """Return the entry of *tokens* equal to *token* after stripping a
    trailing two-digit '-NN' suffix; None when no entry matches."""
    stripped = re.sub('-\\d\\d$', '', token)
    for candidate in tokens:
        if stripped == candidate:
            return candidate
    return None
class ConvertCommand(BaseTransformersCLICommand):
    """CLI command that converts original (mostly TensorFlow) checkpoints
    into Transformers PyTorch checkpoints."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the 'convert' subcommand and its arguments on *parser*.

        Fix: this hook takes no self/cls and is invoked on the class, so it
        must be a @staticmethod (the decorator was missing).
        """
        train_parser = parser.add_parser('convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.')
        train_parser.add_argument('--model_type', type=str, required=True, help="Model's type.")
        train_parser.add_argument('--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
        train_parser.add_argument('--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
        train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
        train_parser.add_argument('--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.')
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger('transformers-cli/converting')
        self._logger.info(f'Loading model {model_type}')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the model-specific conversion script.

        Conversion modules are imported lazily so that only the selected
        model's (often heavy) dependencies are required.
        """
        if (self._model_type == 'albert'):
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif (self._model_type == 'bert'):
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif (self._model_type == 'funnel'):
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif (self._model_type == 't5'):
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif (self._model_type == 'gpt'):
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif (self._model_type == 'transfo_xl'):
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            # transfo_xl accepts either a checkpoint or a dataset file;
            # decide by the path name.
            if ('ckpt' in self._tf_checkpoint.lower()):
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ''
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ''
            convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE)
        elif (self._model_type == 'gpt2'):
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif (self._model_type == 'xlnet'):
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_xlnet_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name)
        elif (self._model_type == 'xlm'):
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import convert_xlm_checkpoint_to_pytorch
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif (self._model_type == 'lxmert'):
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import convert_lxmert_checkpoint_to_pytorch
            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif (self._model_type == 'rembert'):
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import convert_rembert_tf_checkpoint_to_pytorch
            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            # Fix: the message previously omitted albert, funnel and
            # rembert, which the branches above do handle.
            raise ValueError('--model_type should be selected in the list [albert, bert, funnel, t5, gpt, transfo_xl, gpt2, xlnet, xlm, lxmert, rembert]')
class SimilarityClient():
    """Thin gRPC client wrapper around a similarity/recall service stub."""

    def __init__(self, stub):
        self.stub = stub

    def search(self, id, k):
        """Fetch the top-*k* candidate items for user *id*.

        Returns the candidate list, or None when the RPC fails.
        """
        request = recall_pb2.Query(userID=id, k=k)
        try:
            candidates = self.stub.searchCandidates(request)
            return candidates.candidate
        except Exception as e:
            logging.warning('RPC failed:{}'.format(e))
            return

    def getMetrics(self):
        """Fetch and log the server-side metrics; no-op on RPC failure."""
        try:
            msg = self.stub.getMetrics(recall_pb2.ServerMessage())
        except Exception as e:
            logging.warning('RPC failed:{}'.format(e))
            # Fix: the original fell through and read the unbound `msg`
            # here, raising UnboundLocalError whenever the RPC failed.
            return
        logging.info(('Got metrics: ' + msg.str))

    def resetMetrics(self):
        """Ask the server to reset its metrics counters."""
        try:
            self.stub.resetMetrics(recall_pb2.ServerMessage())
        except Exception as e:
            logging.warning('RPC failed:{}'.format(e))
def load_model_from_config(config, sd):
    """Instantiate a model from *config*, load state dict *sd*
    (non-strict), and return it on the GPU in eval mode."""
    model = instantiate_from_config(config)
    model.load_state_dict(sd, strict=False)
    return model.cuda().eval()
class LowRankAdapter(nn.Module):
    """Bottleneck adapter built from two low-rank linear maps: project the
    input down by config.reduction_factor, apply the configured
    non-linearity, then project back up to the input dimension."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.input_dim = config.input_dim
        self.down_sample_size = self.input_dim // config.reduction_factor
        self.activation = Activations(config.non_linearity.lower())
        self.down_sampler = LowRankLinear(self.input_dim, self.down_sample_size, w_init=config.low_rank_w_init, rank=config.low_rank_rank)
        self.up_sampler = LowRankLinear(self.down_sample_size, self.input_dim, w_init=config.low_rank_w_init, rank=config.low_rank_rank)

    def forward(self, x):
        """Down-project, activate, up-project."""
        return self.up_sampler(self.activation(self.down_sampler(x)))
def relevant_clauses(clauses, object_pair_list, converter):
    """Filter *clauses*, keeping those containing at least one literal
    whose triple mentions a pair from *object_pair_list* in either order."""

    def _mentions_pair(clause):
        for literal in clause:
            triple = converter.num2triple(abs(literal))
            pair = [triple[1], triple[2]]
            if pair in object_pair_list or pair[::-1] in object_pair_list:
                return True
        return False

    return [clause for clause in clauses if _mentions_pair(clause)]
class Option(NetOption):
    """Training options assembled from a HOCON config file plus CLI
    overrides (gpu, weight decay, lr, quantization bit-widths, ...)."""

    def __init__(self, conf_path, args):
        super(Option, self).__init__()
        self.conf = ConfigFactory.parse_file(conf_path)
        # --- general / environment options ---
        self.save_path = self.conf['save_path']
        self.dataPath = self.conf['dataPath']
        self.dataset = self.conf['dataset']
        self.nGPU = self.conf['nGPU']
        self.GPU = self.conf['GPU']
        self.visible_devices = args.gpu
        # --- network / data-loading options ---
        self.network = self.conf['network']
        self.nThreads = self.conf['nThreads']
        # --- optimization options (CLI values override the config file) ---
        self.nEpochs = self.conf['nEpochs']
        self.batchSize = self.conf['batchSize']
        self.momentum = self.conf['momentum']
        if args.wd is None:
            self.weightDecay = float(self.conf['weightDecay'])
        else:
            self.weightDecay = float(args.wd)
        self.opt_type = self.conf['opt_type']
        self.warmup_epochs = self.conf['warmup_epochs']
        if args.lrs is None:
            self.lr_S = self.conf['lr_S']
        else:
            self.lr_S = args.lrs
        self.lrPolicy_S = self.conf['lrPolicy_S']
        self.step_S = self.conf['step_S']
        self.decayRate_S = self.conf['decayRate_S']
        # --- quantization bit-widths (weights / activations) ---
        if args.qw is None:
            self.qw = self.conf['qw']
        else:
            self.qw = args.qw
        if args.qa is None:
            self.qa = self.conf['qa']
        else:
            self.qa = args.qa
        self.experimentID = f"autorecon_{self.network}_qwqa_{self.qw}_{self.qa}_threshold_{args.passing_threshold}_kd_scale_{args.kd_scale}_ce_scale_{args.ce_scale}_lrS_{self.lr_S}_WD_{self.weightDecay}_id_{self.conf['experimentID']}"
        self.nClasses = self.conf['nClasses']
        # --- distillation / generator options ---
        self.temperature = self.conf['temperature']
        self.alpha = self.conf['alpha']
        self.ce_scale = args.ce_scale
        self.kd_scale = args.kd_scale
        self.latent_dim = self.conf['latent_dim']
        self.img_size = self.conf['img_size']
        self.channels = self.conf['channels']
        self.lr_G = self.conf['lr_G']
        self.lrPolicy_G = self.conf['lrPolicy_G']
        self.step_G = self.conf['step_G']
        self.decayRate_G = self.conf['decayRate_G']
        self.b1 = self.conf['b1']
        self.b2 = self.conf['b2']
        self.save = args.save
        self.passing_threshold = args.passing_threshold
        self.passing_threshold_first = args.passing_threshold
        self.threshold_decay_rate = args.threshold_decay_rate
        self.threshold_decay_ep = args.threshold_decay_ep
        self.alpha_iter = args.alpha_iter
        self.adalr = args.adalr

    def set_save_path(self):
        """Derive the run-specific log directory and create it on disk."""
        # NOTE(review): `self.lr` is never assigned in this class (only
        # lr_S / lr_G are); unless NetOption defines it, this line raises
        # AttributeError. It likely should read self.lr_S -- confirm with
        # the base class before changing.
        self.save_path = (self.save_path + 'log_{}_bs{:d}_lr{:.4f}_TELCNN_baseline_opt{}_qw{:d}_qa{:d}_epoch{}_{}/'.format(self.dataset, self.batchSize, self.lr, self.opt_type, self.qw, self.qa, self.nEpochs, self.experimentID))
        if os.path.exists(self.save_path):
            print('{} file exist!'.format(self.save_path))
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

    def paramscheck(self, logger):
        """Log the torch version and derive nClasses from the dataset name."""
        logger.info('|===>The used PyTorch version is {}'.format(self.torch_version))
        if self.dataset in ['cifar10', 'mnist']:
            self.nClasses = 10
        elif self.dataset == 'cifar100':
            self.nClasses = 100
        # Fix: the original condition `(self.dataset == 'imagenet') or
        # 'thi_imgnet'` was always truthy (non-empty string literal), so
        # every dataset not caught above fell into this branch and the
        # imagenet100 branch below was dead code.
        elif self.dataset in ['imagenet', 'thi_imgnet']:
            self.nClasses = 1000
        elif self.dataset == 'imagenet100':
            self.nClasses = 100
def Align_LC(mjd, mjd2, data, data2, error, error2):
    """Align two light curves on their shared MJD epochs.

    Entries of the longer series are gathered at the positions matching the
    shorter series' epochs; epochs of the shorter series with no match are
    dropped. Returns (new_data, new_data2, new_mjd, new_error, new_error2).

    Fix: the loops used Python-2 `xrange`, a NameError on Python 3;
    replaced with `range`.
    """
    if len(data2) > len(data):
        # Series 1 is shorter: keep only its epochs that also appear in mjd2.
        new_data2 = []
        new_error2 = []
        new_mjd2 = []
        new_mjd = np.copy(mjd)
        new_error = np.copy(error)
        new_data = np.copy(data)
        count = 0
        for index in range(len(data)):
            where = np.where(mjd2 == mjd[index])
            if where[0].size > 0:
                new_data2.append(data2[where])
                new_error2.append(error2[where])
                new_mjd2.append(mjd2[where])
            else:
                # Epoch missing from series 2: drop it from series 1.
                # `count` compensates for indices shifting after deletions.
                new_mjd = np.delete(new_mjd, (index - count))
                new_error = np.delete(new_error, (index - count))
                new_data = np.delete(new_data, (index - count))
                count = (count + 1)
        new_data2 = np.asarray(new_data2).flatten()
        new_error2 = np.asarray(new_error2).flatten()
    else:
        # Series 2 is shorter (or equal length): mirror of the branch above.
        new_data = []
        new_error = []
        new_mjd = []
        new_mjd2 = np.copy(mjd2)
        new_error2 = np.copy(error2)
        new_data2 = np.copy(data2)
        count = 0
        for index in range(len(data2)):
            where = np.where(mjd == mjd2[index])
            if where[0].size > 0:
                new_data.append(data[where])
                new_error.append(error[where])
                new_mjd.append(mjd[where])
            else:
                new_mjd2 = np.delete(new_mjd2, (index - count))
                new_error2 = np.delete(new_error2, (index - count))
                new_data2 = np.delete(new_data2, (index - count))
                count = (count + 1)
        new_data = np.asarray(new_data).flatten()
        new_mjd = np.asarray(new_mjd).flatten()
        new_error = np.asarray(new_error).flatten()
    return (new_data, new_data2, new_mjd, new_error, new_error2)
def test_classifier(P, model, loader, criterion, steps, logger=None):
    """Meta-test a MAML-style model, reporting both the raw parameters and
    the EMA (moving-average) parameters.

    For each task in *loader*: run a first-order inner-loop adaptation on
    the task's train split (once from the raw weights, once from
    P.moving_average), evaluate on the test split, and accumulate
    loss/accuracy in a MetricLogger. Returns the EMA accuracy global
    average.
    """
    metric_logger = MetricLogger(delimiter=' ')
    if (logger is None):
        log_ = print
    else:
        log_ = logger.log
    # Remember the current train/eval mode so it can be restored on exit.
    mode = model.training
    model.eval()
    acc = 0.0
    acc_ema = 0.0
    # Inner-loop step size for the EMA branch; falls back to the fixed
    # inner_lr when no moving value is tracked on P.
    if hasattr(P, 'moving_inner_lr'):
        inner_step_ema = P.moving_inner_lr
    else:
        inner_step_ema = P.inner_lr
    for (n, batch) in enumerate(loader):
        # Stop once the evaluated-task budget is exceeded.
        if ((n * P.test_batch_size) > P.max_test_task):
            break
        (train_inputs, train_targets) = batch['train']
        train_inputs = train_inputs.to(device, non_blocking=True)
        train_targets = train_targets.to(device, non_blocking=True)
        (test_inputs, test_targets) = batch['test']
        test_inputs = test_inputs.to(device, non_blocking=True)
        test_targets = test_targets.to(device, non_blocking=True)
        for (task_idx, (train_input, train_target, test_input, test_target)) in enumerate(zip(train_inputs, train_targets, test_inputs, test_targets)):
            # First-order adaptation: once from the raw weights, once from
            # the moving-average weights.
            (params, loss_train) = maml_inner_adapt(model, criterion, train_input, train_target, P.inner_lr, P.inner_steps_test, first_order=True)
            (params_ema, loss_train_ema) = maml_inner_adapt(model, criterion, train_input, train_target, inner_step_ema, P.inner_steps_test, first_order=True, params=P.moving_average)
            with torch.no_grad():
                outputs_test = model(test_input, params=params)
                outputs_test_ema = model(test_input, params=params_ema)
            loss = criterion(outputs_test, test_target)
            loss_ema = criterion(outputs_test_ema, test_target)
            # For regression datasets a negated loss plays the role of
            # "accuracy" so that higher is still better.
            if (not P.regression):
                acc = accuracy(outputs_test, test_target, topk=(1,))[0].item()
                acc_ema = accuracy(outputs_test_ema, test_target, topk=(1,))[0].item()
            elif (P.dataset == 'shapenet'):
                acc = (- degree_loss(outputs_test, test_target).item())
                acc_ema = (- degree_loss(outputs_test_ema, test_target).item())
            elif (P.dataset == 'pose'):
                acc = (- loss.item())
                acc_ema = (- loss_ema.item())
            else:
                raise NotImplementedError()
            # '_ori' meters track the raw weights; unsuffixed meters track
            # the EMA weights.
            metric_logger.meters['loss_train_ori'].update(loss_train.item())
            metric_logger.meters['loss_ori'].update(loss.item())
            metric_logger.meters['acc_ori'].update(acc)
            metric_logger.meters['loss_train'].update(loss_train_ema.item())
            metric_logger.meters['loss'].update(loss_ema.item())
            metric_logger.meters['acc'].update(acc_ema)
    metric_logger.synchronize_between_processes()
    log_((' * [ %.3f] [LossOutEMA %.3f] [LossInEMA %.3f]' % (metric_logger.acc.global_avg, metric_logger.loss.global_avg, metric_logger.loss_train.global_avg)))
    log_((' * [ %.3f] [LossOut %.3f] [LossIn %.3f]' % (metric_logger.acc_ori.global_avg, metric_logger.loss_ori.global_avg, metric_logger.loss_train_ori.global_avg)))
    if (logger is not None):
        logger.scalar_summary('eval/acc', metric_logger.acc.global_avg, steps)
        logger.scalar_summary('eval/loss_test', metric_logger.loss.global_avg, steps)
        logger.scalar_summary('eval/loss_train', metric_logger.loss_train.global_avg, steps)
        logger.scalar_summary('eval/acc_ori', metric_logger.acc_ori.global_avg, steps)
        logger.scalar_summary('eval/loss_test_ori', metric_logger.loss_ori.global_avg, steps)
        logger.scalar_summary('eval/loss_train_ori', metric_logger.loss_train_ori.global_avg, steps)
    # Restore the caller's train/eval mode.
    model.train(mode)
    return metric_logger.acc.global_avg
def train(dataset='mnist', model_name='sl', batch_size=128, epochs=50, noise_ratio=0, asym=False, alpha=1.0, beta=1.0):
    """Train a model on a noisily-labelled dataset with a chosen
    robust-loss method.

    *model_name* selects the loss: ce, sl (symmetric CE using alpha/beta),
    lsr, joint, gce, boot_hard, boot_soft, forward, backward. Weights are
    checkpointed every epoch and a LoggerCallback records statistics.
    """
    print(('Dataset: %s, model: %s, batch: %s, epochs: %s, noise ratio: %s%%, asymmetric: %s, alpha: %s, beta: %s' % (dataset, model_name, batch_size, epochs, noise_ratio, asym, alpha, beta)))
    (X_train, y_train, y_train_clean, X_test, y_test) = get_data(dataset, noise_ratio, asym=asym, random_shuffle=False)
    n_images = X_train.shape[0]
    image_shape = X_train.shape[1:]
    num_classes = y_train.shape[1]
    print('n_images', n_images, 'num_classes', num_classes, 'image_shape:', image_shape)
    # Identity noise-transition matrix, used by the forward/backward losses.
    P = np.eye(num_classes)
    model = get_model(dataset, input_tensor=None, input_shape=image_shape, num_classes=num_classes)
    # cifar-100 uses stronger weight decay than the other datasets.
    if (dataset == 'cifar-100'):
        optimizer = SGD(lr=0.1, decay=0.005, momentum=0.9)
    else:
        optimizer = SGD(lr=0.1, decay=0.0001, momentum=0.9)
    # Select the training loss for the requested method.
    if (model_name == 'ce'):
        loss = cross_entropy
    elif (model_name == 'sl'):
        loss = symmetric_cross_entropy(alpha, beta)
    elif (model_name == 'lsr'):
        loss = lsr
    elif (model_name == 'joint'):
        loss = joint_optimization_loss
    elif (model_name == 'gce'):
        loss = generalized_cross_entropy
    elif (model_name == 'boot_hard'):
        loss = boot_hard
    elif (model_name == 'boot_soft'):
        loss = boot_soft
    elif (model_name == 'forward'):
        loss = forward(P)
    elif (model_name == 'backward'):
        loss = backward(P)
    else:
        print(('Model %s is unimplemented!' % model_name))
        exit(0)
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    # Checkpoint file name encodes method, dataset and noise level.
    if asym:
        model_save_file = ('model/asym_%s_%s_%s.{epoch:02d}.hdf5' % (model_name, dataset, noise_ratio))
    else:
        model_save_file = ('model/%s_%s_%s.{epoch:02d}.hdf5' % (model_name, dataset, noise_ratio))
    callbacks = []
    # NOTE(review): both branches build an identical ModelCheckpoint; the
    # split on model_name == 'sl' currently has no effect.
    if (model_name == 'sl'):
        cp_callback = ModelCheckpoint(model_save_file, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=True, period=1)
        callbacks.append(cp_callback)
    else:
        cp_callback = ModelCheckpoint(model_save_file, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=True, period=1)
        callbacks.append(cp_callback)
    lr_scheduler = get_lr_scheduler(dataset)
    callbacks.append(lr_scheduler)
    callbacks.append(SGDLearningRateTracker(model))
    log_callback = LoggerCallback(model, X_train, y_train, y_train_clean, X_test, y_test, dataset, model_name, noise_ratio, asym, epochs, alpha, beta)
    callbacks.append(log_callback)
    # Per-dataset augmentation: none for mnist/svhn, crops+flips for
    # cifar-10, rotations as well for everything else.
    if (dataset in ['mnist', 'svhn']):
        datagen = ImageDataGenerator()
    elif (dataset in ['cifar-10']):
        datagen = ImageDataGenerator(width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
    else:
        datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
    datagen.fit(X_train)
    model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), steps_per_epoch=(len(X_train) / batch_size), epochs=epochs, validation_data=(X_test, y_test), verbose=1, callbacks=callbacks)
def _shufflenetv2_mpncov(arch, pretrained, progress, *args, **kwargs):
    """Build a ShuffleNetV2-MPNCOV model, optionally loading the
    pretrained weights registered for *arch* in model_urls."""
    model = ShuffleNetV2_MPNCOV(*args, **kwargs)
    if not pretrained:
        return model
    model_url = model_urls[arch]
    if model_url is None:
        raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
    state_dict = load_state_dict_from_url(model_url, progress=progress)
    model.load_state_dict(state_dict)
    return model
class RandomGaussianNoise(object):
    """With probability *p*, add zero-mean Gaussian noise (variance drawn
    from *noise_variance*) to the image, leaving pixels where the mask is
    zero untouched."""

    def __init__(self, p=0.5, noise_variance=(0, 0.5)):
        super().__init__()
        self.p = p
        self.noise_variance = noise_variance

    def __call__(self, img_and_mask: Tuple[(np.ndarray, np.ndarray, np.ndarray)]) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
        img, _, mask = img_and_mask
        noised_image = img
        # Broadcast the 2-D mask across the 4 image channels.
        mask = np.stack([mask] * 4)
        if torch.rand(1) < self.p:
            low, high = self.noise_variance
            variance = low if low == high else random.uniform(low, high)
            noised_image = img + np.random.normal(0.0, variance, size=img.shape)
            # Restore the original values wherever the mask is zero.
            noised_image[mask == 0] = img[mask == 0]
        return (noised_image, img_and_mask[1], mask)
def validate(data_path, device, model, word2idx, entity2idx, model_name, return_hits_at_k):
    """Evaluate ``model`` on the QA pairs stored at ``data_path``.

    Returns ``(answers, accuracy)`` or, when ``return_hits_at_k`` is true,
    ``(answers, accuracy, hits@1, hits@5, hits@10)``.  Each entry of
    ``answers`` is a tab-separated "question<TAB>prediction<TAB>is_correct"
    line for every example that was scored successfully.
    """
    model.eval()
    data = process_text_file(data_path)
    answers = []
    data_gen = data_generator(data=data, word2ix=word2idx, entity2idx=entity2idx)
    total_correct = 0
    # BUG FIX: ``num_incorrect`` was never initialised, so the first wrong
    # prediction raised NameError, was swallowed by the blanket except below,
    # got mis-counted as a data error and was never appended to ``answers``.
    num_incorrect = 0
    error_count = 0
    hit_at_1 = 0
    hit_at_5 = 0
    hit_at_10 = 0
    candidates_with_scores = []
    writeCandidatesToFile = False  # debugging switch: dump top-200 candidates when True
    for i in tqdm(range(len(data))):
        try:
            d = next(data_gen)
            head = d[0].to(device)
            question = d[1].to(device)
            ans = d[2]
            ques_len = d[3].unsqueeze(0)
            tail_test = torch.tensor(ans, dtype=torch.long).to(device)
            scores = model.get_score_ranked(head=head, sentence=question, sent_len=ques_len)[0]
            # Push the head entity's score far down so it cannot be predicted.
            mask = torch.zeros(len(entity2idx)).to(device)
            mask[head] = 1
            new_scores = (scores - (mask * 99999))
            pred_ans = torch.argmax(new_scores).item()
            if (pred_ans == head.item()):
                print('Head and answer same')
                print(torch.max(new_scores))
                print(torch.min(new_scores))
            if writeCandidatesToFile:
                entry = {}
                entry['question'] = d[(- 1)]
                head_text = idx2entity[head.item()]
                entry['head'] = head_text
                (s, c) = torch.topk(new_scores, 200)
                s = s.cpu().detach().numpy()
                c = c.cpu().detach().numpy()
                cands = []
                for cand in c:
                    cands.append(idx2entity[cand])
                entry['scores'] = s
                entry['candidates'] = cands
                correct_ans = []
                for a in ans:
                    correct_ans.append(idx2entity[a])
                entry['answers'] = correct_ans
                candidates_with_scores.append(entry)
            if inTopk(new_scores, ans, 1):
                hit_at_1 += 1
            if inTopk(new_scores, ans, 5):
                hit_at_5 += 1
            if inTopk(new_scores, ans, 10):
                hit_at_10 += 1
            if (type(ans) is int):
                ans = [ans]
            is_correct = 0
            if (pred_ans in ans):
                total_correct += 1
                is_correct = 1
            else:
                num_incorrect += 1
            q_text = d[(- 1)]
            answers.append(((((q_text + '\t') + str(pred_ans)) + '\t') + str(is_correct)))
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt still works;
            # malformed examples are counted instead of aborting the evaluation.
            error_count += 1
    accuracy = (total_correct / len(data))
    if return_hits_at_k:
        return (answers, accuracy, (hit_at_1 / len(data)), (hit_at_5 / len(data)), (hit_at_10 / len(data)))
    else:
        return (answers, accuracy)
def test_global_context_block_1d():
    """Smoke-test GlobalContextBlock1D: the output shape must equal the input shape."""
    batch, channels, length = 10, 128, 7
    block = GlobalContextBlock1D(in_channels=channels, reduction=16)
    print(block)
    result = block(torch.randn(batch, channels, length))
    print(result.shape)
    assert result.shape == (batch, channels, length)
def clean_html_one_sample(sample):
    """Normalise one chat sample whose turns may contain HTML.

    Returns ``(sample, status)``: 0 on success, 1 when too few turns remain,
    2 on a role-order violation, 3 when a turn contains blocked words, and
    4 when the HTML cannot be converted to markdown.
    """
    roles = ['human', 'gpt']
    turns = sample['conversations']
    if len(turns) <= 1:
        return (sample, 1)
    # Drop a leading non-human turn so the dialogue starts with 'human'.
    if turns[0]['from'] != 'human':
        turns = sample['conversations'] = sample['conversations'][1:]
        if len(turns) <= 1:
            return (sample, 1)
    # Drop a trailing human turn so the dialogue ends with 'gpt'.
    if turns[-1]['from'] == 'human':
        turns = sample['conversations'] = sample['conversations'][:-1]
        if len(turns) <= 1:
            return (sample, 1)
    for i, turn in enumerate(turns):
        # Turns must strictly alternate human / gpt.
        if turn['from'] != roles[i % 2]:
            return (sample, 2)
        if contain_blocked_words(turn['value']):
            return (sample, 3)
        try:
            turn['value'] = html_to_markdown(turn['value'])
        except (bs4.builder.ParserRejectedMarkup, AssertionError):
            return (sample, 4)
    return (sample, 0)
class SetupArgs():
    """Plain container bundling a sampler class, its constructor args and a seed."""

    def __init__(self, sampler_cls, sampler_args, seed):
        # Straight field assignment; no validation is performed.
        self.sampler_cls, self.sampler_args, self.seed = sampler_cls, sampler_args, seed
class Bow(BaseBow):
    """A basic wooden bow: weight 30, d2 damage dice, +0 to-hit bonus."""

    def __init__(self):
        super().__init__('bow', weight=30, material=M.Wood, damage=D.Dice.from_str('d2'), hit=0)
def common_config(parser):
    """Register the command-line options shared by all experiments on ``parser``
    and return it.

    BUG FIX: argparse %-formats help strings when rendering ``--help``, so the
    bare '%' characters in the --percentage help crashed with ValueError; they
    are now escaped as '%%'.  Also fixed the 'Excecute' typos in two help texts.
    """
    parser.add_argument('--percentage', '-pc', type=int, default=100, help='%% of samples to use for %% experiment, defaults to 100 for no experiment')
    parser.add_argument('--shuffle', '-sh', type=str_to_bool, default=True, help='shuffle test set (after splitting)')
    parser.add_argument('--speed_benchmark', '-speed', type=str_to_bool, default=False, help='Execute speed benchmark')
    parser.add_argument('--space_benchmark', '-space', type=str_to_bool, default=False, help='Execute space benchmark')
    parser.add_argument('--gaussian-blur', '-gb', type=str_to_bool, default=False, help='Apply gaussian filtering on anomaly map')
    parser.add_argument('--sigma', '-s', type=float, default=4, help='sigma of gaussian filter')
    parser.add_argument('--seed', type=int, default=10, help='Random seed')
    parser.add_argument('--load_pretrained', '-lp', type=str_to_bool, default=False, help='Load encoder pretrained with CCD')
    parser.add_argument('--disable_wandb', '-dw', type=str_to_bool, default=False, help='disable wandb logging')
    parser.add_argument('--eval', '-ev', type=str_to_bool, default=False, help='Evaluation mode')
    parser.add_argument('--no_dice', type=str_to_bool, default=False, help='do not calculate dice (used to save inference time)')
    parser.add_argument('--restoration', '-res', type=str_to_bool, default=False, help='VAE restoration')
    parser.add_argument('--image_size', type=int, default=128, help='Image size')
    parser.add_argument('--img_channels', type=int, default=1, help='Image channels')
    parser.add_argument('--center', '-cnt', type=str_to_bool, default=False, help='Whether to center the samples to [-1,1] range.')
    # NOTE(review): option name '--stadardize' is misspelled but kept for CLI
    # backward compatibility.
    parser.add_argument('--stadardize', '-stad', type=str_to_bool, default=False, help='Whether to standardize the samples to N(0,1) dataset-wise.')
    parser.add_argument('--modality', '-mod', type=str, default='MRI', help='MRI sequence')
    parser.add_argument('--normal_split', '-ns', type=float, default=0.95, help='normal set split')
    parser.add_argument('--anomal_split', '-as', type=float, default=0.9, help='anomaly set split')
    parser.add_argument('--num_workers', type=int, default=4, help='Number of workers')
    parser.add_argument('--sequence', '-seq', type=str, default='t2', help='MRI sequence', choices=['t1', 't2'])
    parser.add_argument('--brats_t1', type=str_to_bool, default=True, help='True for BraTS T1, false for ATLAS')
    parser.add_argument('--slice_range', type=int, nargs='+', default=(0, 155), help='Lower and Upper slice index')
    parser.add_argument('--normalize', type=str_to_bool, default=False, help='Normalize images to 98th percentile and scale to [0,1]')
    parser.add_argument('--equalize_histogram', type=str_to_bool, default=False, help='Equalize histogram')
    parser.add_argument('--sup_devices', type=str_to_bool, default=False, help='Whether to include CXRs with support devices')
    parser.add_argument('--AP_only', type=str_to_bool, default=True, help='Whether to include only AP CXRs')
    parser.add_argument('--pathology', type=str, default='effusion', help='Pathology of test set.', choices=['enlarged', 'effusion', 'opacity'])
    parser.add_argument('--sex', type=str, default='both', help='Sex of patients', choices=['male', 'female', 'both'])
    parser.add_argument('--name_add', '-nam', type=str, default='', help='option to add to the name string')
    parser.add_argument('--log_frequency', '-lf', type=int, default=200, help='logging frequency')
    parser.add_argument('--val_frequency', '-vf', type=int, default=1000, help='validation frequency')
    parser.add_argument('--anom_val_frequency', '-avf', type=int, default=1000, help='Validation frequency on anomalous samples')
    parser.add_argument('--val_steps', type=int, default=100, help='validation steps')
    parser.add_argument('--num_images_log', '-nil', type=int, default=16, help='Number of images to log on wandb')
    parser.add_argument('--ssim_eval', '-ssim', type=str_to_bool, default=True, help='Whether to use SSIM on residual-based methods.')
    parser.add_argument('--get_images', '-img', type=str_to_bool, default=False)
    return parser
def get_texts(texts: List[str], already_processed: Optional[bool]=False, n_cpus: Optional[int]=None) -> List[List[str]]:
    """Tokenise ``texts`` in parallel and return one token list per document.

    When ``already_processed`` is false each document is first normalised via
    ``simple_preprocess`` and re-joined into a plain string.  ``n_cpus``
    defaults to the machine's CPU count.
    """
    workers = os.cpu_count() if n_cpus is None else n_cpus
    if already_processed:
        cleaned = texts
    else:
        cleaned = [' '.join(simple_preprocess(t)) for t in texts]
    return Tokenizer(n_cpus=workers).process_all(cleaned)
class SharedMemoryWriter(StorageWriter):
    """A ``StorageWriter`` that serialises a distributed checkpoint into a
    shared-memory segment (via ``shm_handler``) instead of on-disk files."""

    def __init__(self, shm_handler: SharedMemoryHandler) -> None:
        super().__init__()
        # Logical "file" name the current plan's items are attributed to;
        # set per-plan inside write_data.
        self.file_name = ''
        self.shm_handler = shm_handler
        # Holds 'no_shard_data' (from write_data) and 'dcp_metadata' (from finish).
        self.metadata: Dict[(str, object)] = {}

    def set_up_storage_writer(self, is_coordinator: bool) -> None:
        # Nothing to prepare: the shared-memory handler arrives ready-made.
        pass

    def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
        # Local plans are accepted unchanged.
        return plan

    def prepare_global_plan(self, global_plan: List[SavePlan]) -> List[SavePlan]:
        # Tag every rank's plan with a distinct '__<rank>_' storage prefix so
        # their write results cannot collide.
        new_plans = [dataclasses.replace(plan, storage_data=_StoragePrefix(f'__{i}_')) for (i, plan) in enumerate(global_plan)]
        return new_plans

    def write_data(self, plan: SavePlan, planner: SavePlanner) -> Future[List[WriteResult]]:
        """Write every item of ``plan`` into shared memory and return an
        already-completed Future holding the write results."""
        storage_plan: _StoragePrefix = plan.storage_data
        file_count = 0
        files = []
        # A single logical "file" receives all buckets of this plan.
        self.file_name = f'{storage_plan.prefix}{file_count}{DEFAULT_SUFFIX}'
        for bucket in plan.items:
            files.append((self.file_name, bucket))
        if self.shm_handler.no_checkpint_state():  # (sic) handler API spelling
            # First checkpoint: size and create the shared-memory segment.
            buffer_size = _get_buffer_size(files, planner)
            self.shm_handler.init_shared_memory(create=True, size=buffer_size)
        assert (self.shm_handler.shared_memory is not None)
        (write_results, no_shard_data) = _write_memory_from_list(shm=self.shm_handler.shared_memory, files=files, planner=planner)
        self.metadata['no_shard_data'] = no_shard_data
        fut: Future[List[WriteResult]] = Future()
        fut.set_result(write_results)
        return fut

    def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
        """Merge all ranks' write results into ``metadata`` and stash the full
        checkpoint metadata for later retrieval by a reader."""
        storage_md = dict()
        for wr_list in results:
            storage_md.update({wr.index: wr.storage_data for wr in wr_list})
        metadata.storage_data = storage_md
        self.metadata['dcp_metadata'] = metadata
def main(args, local_rank):
    """Train a seq2seq generator (optionally retrieval-augmented) on ``local_rank``.

    Builds vocabularies and the model selected by ``args.arch`` ('vanilla',
    'mem' or 'rg'), then runs the optimisation loop with gradient
    accumulation, periodic logging, dev/test BLEU evaluation, checkpointing
    and (for 'rg') retrieval-index rebuilds.  Rank 0 handles logging/saving
    in the multi-GPU case.
    """
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    vocabs = dict()
    vocabs['src'] = Vocab(args.src_vocab, 0, [BOS, EOS])
    vocabs['tgt'] = Vocab(args.tgt_vocab, 0, [BOS, EOS])
    if ((args.world_size == 1) or (dist.get_rank() == 0)):
        logger.info(args)
        for name in vocabs:
            logger.info('vocab %s, size %d, coverage %.3f', name, vocabs[name].size, vocabs[name].coverage)
    set_seed()
    torch.cuda.set_device(local_rank)
    device = torch.device('cuda', local_rank)
    if (args.arch == 'vanilla'):
        model = Generator(vocabs, args.embed_dim, args.ff_embed_dim, args.num_heads, args.dropout, args.enc_layers, args.dec_layers, args.label_smoothing)
    elif (args.arch == 'mem'):
        model = MemGenerator(vocabs, args.embed_dim, args.ff_embed_dim, args.num_heads, args.dropout, args.mem_dropout, args.enc_layers, args.dec_layers, args.mem_enc_layers, args.label_smoothing, args.use_mem_score)
    elif (args.arch == 'rg'):
        logger.info('start building model')
        logger.info('building retriever')
        retriever = Retriever.from_pretrained(args.num_retriever_heads, vocabs, args.retriever, args.nprobe, args.topk, local_rank, use_response_encoder=(args.rebuild_every > 0))
        logger.info('building retriever + generator')
        model = RetrieverGenerator(vocabs, retriever, args.share_encoder, args.embed_dim, args.ff_embed_dim, args.num_heads, args.dropout, args.mem_dropout, args.enc_layers, args.dec_layers, args.mem_enc_layers, args.label_smoothing)
    # BUG FIX: ``global_step`` is needed by the loop below regardless of how we
    # start; previously it was only assigned in the non-resume branch, so
    # resuming from a checkpoint crashed with NameError.
    global_step = 0
    if args.resume_ckpt:
        model.load_state_dict(torch.load(args.resume_ckpt)['model'])
    if (args.world_size > 1):
        # NOTE(review): the stray unary '+' suggests a base-seed constant was
        # lost here; as written this seeds with the rank alone -- confirm.
        set_seed(( + dist.get_rank()))
    model = model.to(device)
    # The retriever sub-module trains with a 10x smaller learning rate.
    retriever_params = [v for (k, v) in model.named_parameters() if k.startswith('retriever.')]
    other_params = [v for (k, v) in model.named_parameters() if (not k.startswith('retriever.'))]
    optimizer = Adam([{'params': retriever_params, 'lr': ((args.embed_dim ** (- 0.5)) * 0.1)}, {'params': other_params, 'lr': (args.embed_dim ** (- 0.5))}], betas=(0.9, 0.98), eps=1e-09)
    lr_schedule = get_inverse_sqrt_schedule_with_warmup(optimizer, args.warmup_steps, args.total_train_steps)
    train_data = DataLoader(vocabs, args.train_data, args.per_gpu_train_batch_size, for_train=True, rank=local_rank, num_replica=args.world_size)
    model.eval()
    (step, epoch) = (0, 0)
    tr_stat = Statistics()
    logger.info('start training')
    model.train()
    best_dev_bleu = 0.0
    while (global_step <= args.total_train_steps):
        for batch in train_data:
            batch = move_to_device(batch, device)
            if (args.arch == 'rg'):
                (loss, acc) = model(batch, update_mem_bias=(global_step > args.update_retriever_after))
            else:
                (loss, acc) = model(batch)
            tr_stat.update({'loss': (loss.item() * batch['tgt_num_tokens']), 'tokens': batch['tgt_num_tokens'], 'acc': acc})
            tr_stat.step()
            loss.backward()
            step += 1
            # Only update weights once per ``gradient_accumulation_steps`` micro-batches.
            if (not ((step % args.gradient_accumulation_steps) == ((- 1) % args.gradient_accumulation_steps))):
                continue
            if (args.world_size > 1):
                average_gradients(model)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            lr_schedule.step()
            optimizer.zero_grad()
            global_step += 1
            if ((args.world_size == 1) or (dist.get_rank() == 0)):
                if ((global_step % args.print_every) == ((- 1) % args.print_every)):
                    logger.info('epoch %d, step %d, loss %.3f, acc %.3f', epoch, global_step, (tr_stat['loss'] / tr_stat['tokens']), (tr_stat['acc'] / tr_stat['tokens']))
                    tr_stat = Statistics()
                if ((global_step % args.eval_every) == ((- 1) % args.eval_every)):
                    model.eval()
                    # Use a short decode budget until warmup has clearly passed.
                    max_time_step = (256 if (global_step > (2 * args.warmup_steps)) else 5)
                    bleus = []
                    for cur_dev_data in args.dev_data:
                        dev_data = DataLoader(vocabs, cur_dev_data, args.dev_batch_size, for_train=False)
                        bleu = validate(device, model, dev_data, beam_size=5, alpha=0.6, max_time_step=max_time_step)
                        bleus.append(bleu)
                    bleu = (sum(bleus) / len(bleus))
                    logger.info('epoch %d, step %d, dev bleu %.2f', epoch, global_step, bleu)
                    if (bleu > best_dev_bleu):
                        # New best dev BLEU: score the test sets and checkpoint.
                        testbleus = []
                        for cur_test_data in args.test_data:
                            test_data = DataLoader(vocabs, cur_test_data, args.dev_batch_size, for_train=False)
                            testbleu = validate(device, model, test_data, beam_size=5, alpha=0.6, max_time_step=max_time_step)
                            testbleus.append(testbleu)
                        testbleu = (sum(testbleus) / len(testbleus))
                        logger.info('epoch %d, step %d, test bleu %.2f', epoch, global_step, testbleu)
                        torch.save({'args': args, 'model': model.state_dict()}, ('%s/best.pt' % (args.ckpt,)))
                        if (not args.only_save_best):
                            torch.save({'args': args, 'model': model.state_dict()}, ('%s/epoch%d_batch%d_devbleu%.2f_testbleu%.2f' % (args.ckpt, epoch, global_step, bleu, testbleu)))
                        best_dev_bleu = bleu
                    model.train()
            if ((args.rebuild_every > 0) and ((global_step % args.rebuild_every) == ((- 1) % args.rebuild_every))):
                # Periodically rebuild the retrieval index: rank 0 writes it,
                # every rank reloads it after the barrier.
                model.retriever.drop_index()
                torch.cuda.empty_cache()
                next_index_dir = ('%s/batch%d' % (args.ckpt, global_step))
                if ((args.world_size == 1) or (dist.get_rank() == 0)):
                    model.retriever.rebuild_index(next_index_dir)
                    dist.barrier()
                else:
                    dist.barrier()
                model.retriever.update_index(next_index_dir, args.nprobe)
            if (global_step > args.total_train_steps):
                break
        epoch += 1
    logger.info('rank %d, finish training after %d steps', local_rank, global_step)
class TemporalCenterCrop(object):
    """Crop ``size`` frame indices centred on the middle of the clip.

    If the clip is shorter than ``size`` and ``padding`` is enabled, the crop
    is padded back up to ``size`` by cyclically repeating its indices
    ('loop'); any other ``pad_method`` additionally sorts the padded result.
    """

    def __init__(self, size, padding=True, pad_method='loop'):
        self.size = size
        self.padding = padding
        self.pad_method = pad_method

    def __call__(self, frame_indices):
        """Return the (possibly padded) list of cropped frame indices."""
        center_index = len(frame_indices) // 2
        begin_index = max(0, center_index - (self.size // 2))
        end_index = min(begin_index + self.size, len(frame_indices))
        out = list(frame_indices[begin_index:end_index])
        # BUG FIX: the two pad branches duplicated the same mutate-while-
        # iterating loop (differing only in a final sort), and an empty input
        # previously spun forever; empty crops are now returned as-is.
        if self.padding and out:
            base = list(out)
            i = 0
            # Cyclically repeat the cropped indices until the clip is full.
            while len(out) < self.size:
                out.append(base[i % len(base)])
                i += 1
            if self.pad_method != 'loop':
                out.sort()
        return out
# BUG FIX: the registration call below appeared on its own line, discarding
# the decorator it returns, so 'hf_gpt2_medium' was never registered as an
# architecture; restoring the '@' matches the standard registration pattern.
@_model_architecture('hf_gpt2', 'hf_gpt2_medium')
def hf_gpt2_medium(args):
    """GPT-2 Medium hyper-parameters: 1024-d embeddings, 16 heads, 24 layers."""
    args.embed_dim = getattr(args, 'embed_dim', 1024)
    args.num_attention_heads = getattr(args, 'num_attention_heads', 16)
    args.num_layers = getattr(args, 'num_layers', 24)
    default_architecture(args)
def get_paths(path):
    """Recursively collect leaf run directories under ``path``.

    A directory is a leaf when it contains the log file or any entry whose
    name mentions 'seed'; otherwise all of its children are searched.
    """
    entries = os.listdir(path)
    if LOG_FILE_NAME in entries or any('seed' in name for name in entries):
        return [path]
    collected = []
    for name in entries:
        collected.extend(get_paths(os.path.join(path, name)))
    return collected
# BUG FIX: the decorator line had lost its '@cuda.jit' head and read
# "('(float32[:], float32[:])', device=True, inline=True)" -- a SyntaxError
# (keyword arguments inside a tuple literal).  ``cuda.local.array`` below
# confirms this is a numba CUDA device function.
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
    """Write the rotated box's 4 corner coordinates into ``corners``.

    ``rbbox`` is (center_x, center_y, x_size, y_size, angle); ``corners`` is
    a flat length-8 buffer laid out as x0, y0, x1, y1, x2, y2, x3, y3.
    """
    angle = rbbox[4]
    a_cos = math.cos(angle)
    a_sin = math.sin(angle)
    center_x = rbbox[0]
    center_y = rbbox[1]
    x_d = rbbox[2]
    y_d = rbbox[3]
    # Axis-aligned half-extent corners, before rotation and translation.
    corners_x = cuda.local.array((4,), dtype=numba.float32)
    corners_y = cuda.local.array((4,), dtype=numba.float32)
    corners_x[0] = ((- x_d) / 2)
    corners_x[1] = ((- x_d) / 2)
    corners_x[2] = (x_d / 2)
    corners_x[3] = (x_d / 2)
    corners_y[0] = ((- y_d) / 2)
    corners_y[1] = (y_d / 2)
    corners_y[2] = (y_d / 2)
    corners_y[3] = ((- y_d) / 2)
    # Rotate each corner by ``angle`` and translate to the box centre.
    for i in range(4):
        corners[(2 * i)] = (((a_cos * corners_x[i]) + (a_sin * corners_y[i])) + center_x)
        corners[((2 * i) + 1)] = ((((- a_sin) * corners_x[i]) + (a_cos * corners_y[i])) + center_y)
def matrix_for_bone_from_parent(bone, ao):
    """Return ``bone``'s rest matrix expressed in its parent bone's local space.

    ``ao`` is the armature object; both matrices come from the armature data's
    rest pose (``matrix_local``).
    """
    eb1 = ao.data.bones[bone.name]
    E = eb1.matrix_local
    ebp = ao.data.bones[bone.name].parent
    E_p = ebp.matrix_local
    # BUG FIX: the original read "return (E_p.inverted() E)" -- the matrix
    # multiplication operator was missing (SyntaxError).  Blender >= 2.80
    # mathutils uses '@' for matrix products (older releases used '*').
    return E_p.inverted() @ E
class HiResCAM(BaseCAM):
    """HiResCAM: element-wise (gradient * activation) attribution, summed over
    the channel axis instead of globally averaged weights."""

    def __init__(self, model, target_layers, reshape_transform=None):
        super(HiResCAM, self).__init__(model, target_layers, reshape_transform)

    def get_cam_image(self, input_tensor, target_layer, target_category, activations, grads, eigen_smooth):
        """Return the CAM from per-element gradient-weighted activations."""
        weighted = grads * activations
        if not eigen_smooth:
            # Channel-wise sum preserves HiResCAM's faithfulness property.
            return weighted.sum(axis=1)
        print("Warning: HiResCAM's faithfulness guarantees do not hold if smoothing is applied")
        return get_2d_projection(weighted)
def dfscode_to_tensor(dfscode, feature_map):
    """Convert a DFS-code edge list into padded LongTensor fields.

    Each tensor has ``max_edges + 1`` slots prefilled with an out-of-range
    padding id; real edges fill the prefix and an end-of-sequence marker is
    written right after the last edge.
    """
    max_nodes = feature_map['max_nodes']
    max_edges = feature_map['max_edges']
    node_forward = feature_map['node_forward']
    edge_forward = feature_map['edge_forward']
    num_nodes_feat = len(node_forward)
    num_edges_feat = len(edge_forward)

    def _padded(fill):
        # One slot per possible edge plus one terminator slot.
        return fill * torch.ones(max_edges + 1, dtype=torch.long)

    dfscode_tensors = {
        't1': _padded(max_nodes + 1),
        't2': _padded(max_nodes + 1),
        'v1': _padded(num_nodes_feat + 1),
        'e': _padded(num_edges_feat + 1),
        'v2': _padded(num_nodes_feat + 1),
        'len': len(dfscode),
    }
    for i, code in enumerate(dfscode):
        dfscode_tensors['t1'][i] = int(code[0])
        dfscode_tensors['t2'][i] = int(code[1])
        dfscode_tensors['v1'][i] = int(node_forward[code[2]])
        dfscode_tensors['e'][i] = int(edge_forward[code[3]])
        dfscode_tensors['v2'][i] = int(node_forward[code[4]])
    # End-of-sequence marker directly after the last real edge.
    end = len(dfscode)
    dfscode_tensors['t1'][end] = max_nodes
    dfscode_tensors['t2'][end] = max_nodes
    dfscode_tensors['v1'][end] = num_nodes_feat
    dfscode_tensors['v2'][end] = num_nodes_feat
    dfscode_tensors['e'][end] = num_edges_feat
    return dfscode_tensors
class TensorFlowWrapper():
    """Wraps a TF-Hub sentence-embedding module behind a TF1 session and scores
    sentence pairs by angular similarity (1 - arccos of cosine similarity)."""

    def __init__(self, embedding_layer_hub_name: str) -> None:
        # Build a private, finalized graph so repeated construction cannot
        # leak ops into the default graph.
        g = tensorflow.Graph()
        with g.as_default():
            embedding_layer = tensorflow_hub.Module(embedding_layer_hub_name)
            self._sts_input1 = tensorflow.placeholder(tensorflow.string, shape=None)
            self._sts_input2 = tensorflow.placeholder(tensorflow.string, shape=None)
            # L2-normalise both embeddings so their dot product is a cosine.
            sts_encode1 = tensorflow.nn.l2_normalize(embedding_layer(self._sts_input1), axis=1)
            sts_encode2 = tensorflow.nn.l2_normalize(embedding_layer(self._sts_input2), axis=1)
            cosine_similarities = tensorflow.reduce_sum(tensorflow.multiply(sts_encode1, sts_encode2), axis=1)
            # Clip to [-1, 1] so acos stays well-defined under rounding error.
            clip_cosine_similarities = tensorflow.clip_by_value(cosine_similarities, (- 1.0), 1.0)
            self._sim_scores = (1.0 - tensorflow.acos(clip_cosine_similarities))
            init_op = tensorflow.group([tensorflow.global_variables_initializer(), tensorflow.tables_initializer()])
            g.finalize()
        self._session = tensorflow.Session(graph=g)
        self._session.run(init_op)

    def append_scores(self, sentence_pairs: pandas.DataFrame) -> None:
        """Add a 'score' column with similarity of 'sent_1'/'sent_2' columns
        (missing sentences are treated as empty strings)."""
        text_a = sentence_pairs['sent_1'].fillna('').tolist()
        text_b = sentence_pairs['sent_2'].fillna('').tolist()
        scores = self._session.run(self._sim_scores, feed_dict={self._sts_input1: text_a, self._sts_input2: text_b})
        sentence_pairs['score'] = scores

    def close(self):
        # Release the TF session; the wrapper is unusable afterwards.
        self._session.close()
def get_default_sess_config(mem_fraction=0.99):
    """Build a tf.ConfigProto using growth-enabled BFC GPU allocation capped at
    ``mem_fraction`` of GPU memory, with soft device placement enabled."""
    config = tf.ConfigProto()
    gpu_opts = config.gpu_options
    gpu_opts.per_process_gpu_memory_fraction = mem_fraction
    gpu_opts.allocator_type = 'BFC'
    gpu_opts.allow_growth = True
    config.allow_soft_placement = True
    return config
def early_stopping(loss, patience):
    """Return True (stop) when the trailing ``patience`` losses never decrease.

    Returns False while fewer than ``patience`` values have been recorded or
    whenever any loss in the window improves on its predecessor.
    """
    if len(loss) < patience:
        return False
    # Smallest positive float acts as the "no previous value" sentinel.
    previous = sys.float_info.min
    for current in loss[-patience:]:
        if current < previous:
            # Loss improved inside the window -> keep training.
            return False
        previous = current
    return True
class NatPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder (transformers dummy-object pattern) standing in
    for the real NatPreTrainedModel when its backend is unavailable."""
    # Backends that must be installed for the real class to be importable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError when torch is missing.
        requires_backends(self, ['torch'])
def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error):
    """Flag non-standard or deprecated C/C++ constructs on one source line.

    Reports via ``error(filename, linenum, category, confidence, message)``.
    Checks cover printf-format abuse, undefined escape sequences, storage-class
    ordering, uncommented #endif, inner-style forward declarations, the
    deprecated <?/>? operators, const string& members, and single-argument
    constructors that should be marked explicit.
    """
    # Use the raw line (strings/comments intact) for printf-format checks.
    line = clean_lines.lines[linenum]
    if Search('printf\\s*\\(.*".*%[-+ ]?\\d*q', line):
        error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.')
    if Search('printf\\s*\\(.*".*%\\d+\\$', line):
        error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.')
    # Collapse escaped backslashes so the escape-sequence check is simpler.
    line = line.replace('\\\\', '')
    if Search('("|\\\').*\\\\(%|\\[|\\(|{)', line):
        error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.')
    # Switch to the elided line (comments and strings stripped) for code checks.
    line = clean_lines.elided[linenum]
    if Search('\\b(const|volatile|void|char|short|int|long|float|double|signed|unsigned|schar|u?int8|u?int16|u?int32|u?int64)\\s+(register|static|extern|typedef)\\b', line):
        error(filename, linenum, 'build/storage_class', 5, 'Storage class (static, extern, typedef, etc) should be first.')
    if Match('\\s*#\\s*endif\\s*[^/\\s]+', line):
        error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.')
    if Match('\\s*class\\s+(\\w+\\s*::\\s*)+\\w+\\s*;', line):
        error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.')
    if Search('(\\w+|[+-]?\\d+(\\.\\d*)?)\\s*(<|>)\\?=?\\s*(\\w+|[+-]?\\d+)(\\.\\d*)?', line):
        error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.')
    if Search('^\\s*const\\s*string\\s*&\\s*\\w+\\s*;', line):
        error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use alternatives, such as pointers or simple constants.')
    # Constructor checks only apply inside a class body we have fully entered.
    classinfo = nesting_state.InnermostClass()
    if ((not classinfo) or (not classinfo.seen_open_brace)):
        return
    base_classname = classinfo.name.split('::')[(- 1)]
    # A constructor taking exactly one non-void, non-copy argument should be
    # declared explicit to avoid unintended implicit conversions.
    args = Match(('\\s+(?:inline\\s+)?%s\\s*\\(([^,()]+)\\)' % re.escape(base_classname)), line)
    if (args and (args.group(1) != 'void') and (not Match(('(const\\s+)?%s(\\s+const)?\\s*(?:<\\w+>\\s*)?&' % re.escape(base_classname)), args.group(1).strip()))):
        error(filename, linenum, 'runtime/explicit', 5, 'Single-argument constructors should be marked explicit.')
def ReadFile(filename, print_error=True):
    """Return the contents of ``filename``, or None if it cannot be read.

    On failure the error is optionally printed (controlled by ``print_error``)
    instead of being raised, matching the original best-effort contract.
    """
    try:
        # ``with`` replaces the manual open/try/finally/close dance and still
        # guarantees the handle is closed.
        with open(filename) as fp:
            return fp.read()
    except IOError:
        if print_error:
            print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
        return None
def adjust_upper_plane(x0, y0, x_minus, x_plus, y_minus, y_plus, lr=0.01, max_iter=100, print_info=True):
    """Adam-optimise four candidate points per element so the tangent plane at
    (x0, y0) stays an upper bound on tanh(x)*sigmoid(y) over the box
    [x_minus, x_plus] x [y_minus, y_plus].

    Each candidate starts at a different box quadrant's midpoint; per element
    the best (smallest plane-minus-function gap among qualification-valid
    iterates) point and gap are tracked and returned as
    (x1..x4 best, y1..y4 best, loss1..loss4 best).
    """
    x0 = x0.detach()
    y0 = y0.detach()
    # Four starting points: midpoints between (x0, y0) and each box corner side.
    x1 = ((x0 + x_minus) / 2).data.clone()
    y1 = ((y0 + y_minus) / 2).data.clone()
    x2 = ((x0 + x_minus) / 2).data.clone()
    y2 = ((y0 + y_plus) / 2).data.clone()
    x3 = ((x0 + x_plus) / 2).data.clone()
    y3 = ((y0 + y_plus) / 2).data.clone()
    x4 = ((x0 + x_plus) / 2).data.clone()
    y4 = ((y0 + y_minus) / 2).data.clone()
    x1.requires_grad = True
    y1.requires_grad = True
    x2.requires_grad = True
    y2.requires_grad = True
    x3.requires_grad = True
    y3.requires_grad = True
    x4.requires_grad = True
    y4.requires_grad = True
    # Plane coefficients z = a*x + b*y + c at the anchor point.
    (a, b, c) = plane(x0, y0)
    optimizer = optim.Adam([x1, y1, x2, y2, x3, y3, x4, y4], lr=lr)
    # Best-so-far trackers, initialised to a large sentinel loss of 1000.
    x1_best = torch.zeros(x_minus.shape, device=x_minus.device)
    y1_best = torch.zeros(x_minus.shape, device=x_minus.device)
    loss1_best = (torch.ones(x_minus.shape, device=x_minus.device) * 1000)
    x2_best = torch.zeros(x_minus.shape, device=x_minus.device)
    y2_best = torch.zeros(x_minus.shape, device=x_minus.device)
    loss2_best = (torch.ones(x_minus.shape, device=x_minus.device) * 1000)
    x3_best = torch.zeros(x_minus.shape, device=x_minus.device)
    y3_best = torch.zeros(x_minus.shape, device=x_minus.device)
    loss3_best = (torch.ones(x_minus.shape, device=x_minus.device) * 1000)
    x4_best = torch.zeros(x_minus.shape, device=x_minus.device)
    y4_best = torch.zeros(x_minus.shape, device=x_minus.device)
    loss4_best = (torch.ones(x_minus.shape, device=x_minus.device) * 1000)
    for i in range(max_iter):
        # Gap between the plane and the function at each candidate point.
        loss1 = ((((a * x1) + (b * y1)) + c) - (torch.tanh(x1) * torch.sigmoid(y1)))
        loss2 = ((((a * x2) + (b * y2)) + c) - (torch.tanh(x2) * torch.sigmoid(y2)))
        loss3 = ((((a * x3) + (b * y3)) + c) - (torch.tanh(x3) * torch.sigmoid(y3)))
        loss4 = ((((a * x4) + (b * y4)) + c) - (torch.tanh(x4) * torch.sigmoid(y4)))
        # Track the best valid iterate per element for each candidate.
        (qloss1, valid1) = qualification_loss_upper(x1, y1, x_minus, x_plus, y_minus, y_plus)
        best1 = ((loss1 < loss1_best) * valid1)
        x1_best[best1] = x1[best1]
        y1_best[best1] = y1[best1]
        loss1_best[best1] = loss1[best1]
        (qloss2, valid2) = qualification_loss_upper(x2, y2, x_minus, x_plus, y_minus, y_plus)
        best2 = ((loss2 < loss2_best) * valid2)
        x2_best[best2] = x2[best2]
        y2_best[best2] = y2[best2]
        loss2_best[best2] = loss2[best2]
        (qloss3, valid3) = qualification_loss_upper(x3, y3, x_minus, x_plus, y_minus, y_plus)
        best3 = ((loss3 < loss3_best) * valid3)
        x3_best[best3] = x3[best3]
        y3_best[best3] = y3[best3]
        loss3_best[best3] = loss3[best3]
        (qloss4, valid4) = qualification_loss_upper(x4, y4, x_minus, x_plus, y_minus, y_plus)
        best4 = ((loss4 < loss4_best) * valid4)
        x4_best[best4] = x4[best4]
        y4_best[best4] = y4[best4]
        loss4_best[best4] = loss4[best4]
        # Combined objective: gap (down-weighted when invalid) plus the
        # qualification penalty that pushes points back inside the valid region.
        loss = ((loss1 * (valid1.float() + 0.1)) + qloss1)
        loss = ((loss + (loss2 * (valid2.float() + 0.1))) + qloss2)
        loss = ((loss + (loss3 * (valid3.float() + 0.1))) + qloss3)
        loss = ((loss + (loss4 * (valid4.float() + 0.1))) + qloss4)
        loss = loss.mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if print_info:
            print(('1 adjust upper plane loss: %.4f' % loss.item()))
    return (x1_best, y1_best, x2_best, y2_best, x3_best, y3_best, x4_best, y4_best, loss1_best, loss2_best, loss3_best, loss4_best)
def parameter_net_file():
    """Write a throwaway prototxt defining a Python 'ParameterLayer' net and
    return its path; the caller is responsible for deleting the file."""
    prototxt = ("name: 'pythonnet' force_backward: true\n"
                " input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }\n"
                " layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'\n"
                " python_param { module: 'test_python_layer' layer: 'ParameterLayer' } }\n"
                " ")
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write(prototxt)
        return f.name
def _metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths) |
def nchw_to_nlc(x):
    """Reshape a (N, C, H, W) tensor to contiguous (N, H*W, C) layout."""
    assert x.dim() == 4
    n, c = x.shape[:2]
    return x.reshape(n, c, -1).permute(0, 2, 1).contiguous()
def shared_single(dim=2):
    """Return a theano shared variable holding a float32 zero tensor whose
    shape is ``dim`` singleton axes, i.e. (1,) * dim."""
    return theano.shared(numpy.zeros((1,) * dim, dtype='float32'))
def get_path_iterator(root, tsv, nshard, rank, audio_col_name):
    """Return ``(iterate, n)`` where ``iterate()`` yields (audio_path, None)
    pairs for this shard of the tsv manifest and ``n`` is the shard size.

    The manifest is read with QUOTE_NONE so audio paths containing quotes
    survive; paths are joined with ``root`` exactly once.
    """
    with open(tsv) as f:
        reader = csv.DictReader(f, delimiter='\t', quotechar=None, doublequote=False, lineterminator='\n', quoting=csv.QUOTE_NONE)
        subpaths = [op.join(root, e[audio_col_name]) for e in reader]
    (start, end) = get_shard_range(len(subpaths), nshard, rank)
    subpaths = subpaths[start:end]
    def iterate():
        for subpath in subpaths:
            # BUG FIX: ``subpaths`` entries are already joined with ``root``
            # above; joining again duplicated the prefix for relative roots.
            (yield (subpath, None))
    return (iterate, len(subpaths))
def main(args):
    """Run one CLIP experiment according to ``args.mode``: 'zs' zero-shot
    evaluation, 'fe' feature extraction to CSV, or 'ft' fine-tuning with Adam.
    Any other mode raises NotImplementedError."""
    (model, dataset) = (args.model, args.dataset)
    model_name = ClipModel.get_model_name_by_index(model)
    dataset_name = ImageTextData.get_data_name_by_index(dataset)
    # Per-run log file named after mode/model/dataset under ./log.
    args.log_file = (os.getcwd() + '/log/{}_{}_{}.txt'.format(args.mode, model_name, dataset_name))
    logger = get_logger(args.log_file, args.log_file)
    logger.info(args)
    clip = ClipModel(model, logger=logger)
    logger.info(f'Clip model {model_name} loaded')
    itdata = ImageTextData(dataset, root=args.root, preprocess=clip.preprocess)
    train_loader = torch.utils.data.DataLoader(itdata, batch_size=args.batchsize, shuffle=True)
    logger.info(f'Dataset {dataset_name} loaded')
    if (args.mode == 'zs'):
        # Zero-shot: evaluate the pretrained model as-is.
        (acc, res) = clip.evaluate(train_loader)
        logger.info('Results: {}'.format(res))
        logger.info('Accuracy: {:.2f}%'.format((acc * 100)))
    elif (args.mode == 'fe'):
        # Extract features and dump them to feat/<mode>_<model>_<dataset>.csv.
        res = clip.feature_extraction(train_loader)
        logger.info('Feature extracted!')
        if (not os.path.exists('feat')):
            os.makedirs('feat')
        feat_file = 'feat/{}_{}_{}.csv'.format(args.mode, model_name, dataset_name)
        np.savetxt(feat_file, res, fmt='%.4f')
    elif (args.mode == 'ft'):
        test_data = ImageTextData(args.test_data, root=args.root, preprocess=clip.preprocess)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batchsize, shuffle=False, drop_last=False)
        optimizer = optim.Adam(clip.model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), eps=args.eps, weight_decay=args.weight_decay)
        # NOTE(review): hard-coded absolute save path -- parameterise before reuse.
        best_acc = clip.finetune(train_loader, test_loader, optimizer, args.nepoch, save_path='/home/jindwang/mine/clipood/model/{}_{}_{}.pt'.format(args.mode, model_name, dataset_name))
        logger.info('Accuracy: {:.2f}%'.format((best_acc * 100)))
    else:
        raise NotImplementedError
def make_mlp_model(latent_dim, output_dim, num_layers, activation=tf.nn.relu, l2_regularizer_weight=0.01, bias_init_stddev=0.1):
    """Build a sonnet MLP with ``num_layers - 1`` hidden layers of
    ``latent_dim`` units followed by an ``output_dim`` output layer
    (no activation after the final layer).

    NOTE: ``l2_regularizer_weight`` is accepted but currently unused.
    """
    layer_sizes = [latent_dim for _ in range(num_layers - 1)] + [output_dim]
    initializers = {'w': tf.initializers.glorot_normal(), 'b': tf.initializers.truncated_normal(stddev=bias_init_stddev)}
    mlp = snt.nets.MLP(layer_sizes, activation=activation, initializers=initializers, activate_final=False)
    return snt.Sequential([mlp])
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 conv stem, four residual stages (64/128/256/512
    planes), 4x4 average pool and a linear classifier returning log-probs."""

    def __init__(self, block, num_blocks, num_classes=10, soft=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.soft = soft
        # Optional 2x spatial downsampling applied to the input in forward().
        self.downsample = nn.MaxPool2d(2)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear((512 * block.expansion), num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage strides; the rest keep stride 1.
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x, downsample_=True):
        """Return log-softmax class scores; optionally 2x-downsample ``x`` first."""
        if downsample_:
            x = self.downsample(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        if self.soft:
            # NOTE(review): softmax followed by log_softmax double-normalises
            # the logits; since both branches end in log_softmax this looks
            # unintentional -- confirm before relying on the 'soft' path.
            out = F.softmax(out, dim=1)
            return F.log_softmax(out, dim=1)
        else:
            return F.log_softmax(out, dim=1)

    def forward_oltl(self, x):
        """Variant forward pass (no input downsampling) returning both
        log-probabilities and probabilities."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return (F.log_softmax(out, dim=1), F.softmax(out, dim=1))
class VOTLTVideo(Video):
    """One VOT long-term benchmark sequence, carrying per-tracker per-frame
    confidence values alongside the usual trajectory data."""

    def __init__(self, name, root, video_dir, init_rect, img_names, gt_rect, load_img=False):
        super(VOTLTVideo, self).__init__(name, root, video_dir, init_rect, img_names, gt_rect, None, load_img)
        # Frames without annotation (NaN bbox) are marked with placeholder [0].
        self.gt_traj = [([0] if np.isnan(bbox[0]) else bbox) for bbox in self.gt_traj]
        if (not load_img):
            # Frames stay on disk; read one image just to record its size.
            img_name = os.path.join(root, self.img_names[0])
            img = np.array(Image.open(img_name), np.uint8)
            self.width = img.shape[1]
            self.height = img.shape[0]
        self.confidence = {}

    def load_tracker(self, path, tracker_names=None, store=True):
        """Load long-term trajectories and confidence values for each tracker
        under ``path``; cache them on the video when ``store`` is true.

        Returns the (trajectory, confidence) pair of the last tracker read.
        """
        if (not tracker_names):
            # Default to every tracker directory found under ``path``.
            tracker_names = [x.split('/')[(- 1)] for x in glob(path) if os.path.isdir(x)]
        if isinstance(tracker_names, str):
            tracker_names = [tracker_names]
        for name in tracker_names:
            traj_file = os.path.join(path, name, 'longterm', self.name, (self.name + '_001.txt'))
            with open(traj_file, 'r') as f:
                traj = [list(map(float, x.strip().split(','))) for x in f.readlines()]
            if store:
                self.pred_trajs[name] = traj
            confidence_file = os.path.join(path, name, 'longterm', self.name, (self.name + '_001_confidence.value'))
            with open(confidence_file, 'r') as f:
                score = [float(x.strip()) for x in f.readlines()[1:]]
                # The first frame has no confidence entry; pad with NaN.
                score.insert(0, float('nan'))
            if store:
                self.confidence[name] = score
        return (traj, score)
def MFM(x, name):
    """Max-Feature-Map activation: pairwise maximum over the two halves of the
    channel axis, halving the channel dimension of an NHWC tensor."""
    with tf.variable_scope(name):
        dims = x.get_shape().as_list()
        # Split the channels into 2 groups of c/2 and keep the element-wise max.
        grouped = tf.reshape(x, [-1, dims[1], dims[2], 2, dims[-1] // 2])
        return tf.reduce_max(grouped, axis=[3])
def zero_last_layer(encoder):
    """Zero the weights and bias of ``encoder.model_z[4]`` in place and
    return the encoder for chaining."""
    final = encoder.model_z[4]
    for tensor in (final.weight.data, final.bias.data):
        tensor.fill_(0.0)
    return encoder
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.