code stringlengths 101 5.91M |
|---|
class PrefetchDataset(torch.utils.data.Dataset):
    """Dataset wrapper that loads and pre-processes test images for inference.

    Wraps a COCO-style dataset so DataLoader workers can decode images and run
    per-scale pre-processing in parallel with model inference.
    """

    def __init__(self, opt, dataset, pre_process_func):
        # `dataset` must expose `images`, `coco.loadImgs` and `img_dir`
        # (COCO-style); `pre_process_func(image, scale[, calib])` returns a
        # (processed_image, meta) pair.
        self.images = dataset.images
        self.load_image_func = dataset.coco.loadImgs
        self.img_dir = dataset.img_dir
        self.pre_process_func = pre_process_func
        self.opt = opt

    def __getitem__(self, index):
        """Return (img_id, {'images': per-scale inputs, 'image': raw image, 'meta': per-scale meta})."""
        img_id = self.images[index]
        img_info = self.load_image_func(ids=[img_id])[0]
        img_path = os.path.join(self.img_dir, img_info['file_name'])
        image = cv2.imread(img_path)
        images, meta = {}, {}
        # BUG FIX: the original read the free variable `opt` instead of
        # `self.opt`, which only worked if a global `opt` happened to exist.
        for scale in self.opt.test_scales:
            if self.opt.task == 'ddd':
                # the 'ddd' task additionally needs the camera calibration
                images[scale], meta[scale] = self.pre_process_func(image, scale, img_info['calib'])
            else:
                images[scale], meta[scale] = self.pre_process_func(image, scale)
        return img_id, {'images': images, 'image': image, 'meta': meta}

    def __len__(self):
        return len(self.images)
def distribute_position_amplitude_data(data: PositionAmplitudeData) -> PositionAmplitudeData:
    """Distribute a PositionAmplitudeData instance across local devices.

    Walker data is sharded with the default distribution scheme, while the
    move metadata is replicated on every local device.
    """
    return PositionAmplitudeData(
        walker_data=default_distribute_data(data['walker_data']),
        move_metadata=replicate_all_local_devices(data['move_metadata']),
    )
def split_needed(next_el, current_types, last_type):
    """Decide whether the next citation element must start a new reference.

    Returns 'semicolon' when the element's misc text contains an explicit
    separator, 'repeated field' when its type already occurred (and is not a
    field that may legitimately repeat when adjacent), or None otherwise.
    """
    repeatable_if_adjacent = {'REPORTNUMBER', 'COLLABORATION'}
    next_type = 'ARXIV' if next_el.get('is_arxiv') else next_el['type']
    if ';' in next_el['misc_txt']:
        return 'semicolon'
    seen_non_repeatable = next_type in (current_types - repeatable_if_adjacent)
    repeated_non_adjacent = (last_type == next_type) and (next_type not in repeatable_if_adjacent)
    if seen_non_repeatable or repeated_non_adjacent:
        return 'repeated field'
    return None
class QNLI(AbstractTask):
    """GLUE QNLI task in seq2seq form (question/sentence pair -> '0'/'1').

    The test split maps to GLUE's validation split since official test labels
    are not public.
    """
    name = 'qnli'
    labels_list = ['0', '1']
    metric = [metrics.accuracy]
    metric_names = ['accuracy']
    split_to_data_split = {'train': 'train', 'validation': 'validation', 'test': 'validation'}

    def load_dataset(self, split):
        """Load the requested GLUE QNLI split."""
        return datasets.load_dataset('glue', 'qnli', split=split, script_version='master')

    def preprocessor(self, example, add_prefix=True):
        """Convert one example into prefixed source tokens and a label string."""
        source = ['question:', example['question'], 'sentence:', example['sentence']]
        target = [str(example['label'])]
        return self.seq2seq_format(source, target, add_prefix)
def get_transformer_hidden_size(model: transformers.PreTrainedModel):
    """Return the hidden (embedding) size of a supported transformer model.

    Supports GPT-2, OPT, T5 and LLaMA model classes; each stores its hidden
    size under a differently named config attribute.

    Raises:
        ValueError: if `model` is not one of the supported classes.
    """
    if isinstance(model, transformers.GPT2LMHeadModel):
        hidden_size_attr_name = 'n_embd'
    elif isinstance(model, transformers.OPTForCausalLM):
        hidden_size_attr_name = 'word_embed_proj_dim'
    elif isinstance(model, transformers.T5ForConditionalGeneration):
        hidden_size_attr_name = 'd_model'
    else:
        # Older transformers releases spelled the class 'LLaMAForCausalLM'.
        llama_cls = getattr(transformers, 'LLaMAForCausalLM' if hasattr(transformers, 'LLaMAForCausalLM') else 'LlamaForCausalLM')
        if isinstance(model, llama_cls):
            hidden_size_attr_name = 'hidden_size'
        else:
            raise ValueError(f'Unknown base_model type: {type(model)}')
    # BUG FIX: removed a stray, unused `from typing import Any, Mapping`
    # that sat in the middle of the function body.
    return getattr(model.config, hidden_size_attr_name)
# NOTE(review): this registration decorator was mangled to a bare `_model`
# statement in this copy; restored as `@register_model` (timm convention) —
# confirm against the original file.
@register_model
def tf_efficientnet_b0_ap(pretrained=False, **kwargs):
    """EfficientNet-B0 (AdvProp variant, TF-ported weights).

    Uses TF-compatible BatchNorm epsilon and 'same' padding so outputs match
    the original TensorFlow checkpoint.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet('tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model
class SyntacticMetric(Metric):
    """Computes syntactic statistics for summaries via a Stanford CoreNLP pipeline."""

    # Annotators required by `get_stats`; a fresh client is started per call.
    _ANNOTATORS = ['tokenize', 'ssplit', 'pos', 'lemma', 'parse']

    def __init__(self):
        pass

    def evaluate_example(self, summary, reference):
        """Return syntactic stats for a single summary (`reference` is unused)."""
        with CoreNLPClient(annotators=self._ANNOTATORS, timeout=30000, memory='16G') as client:
            return get_stats(client, summary)

    def evaluate_batch(self, summaries, references, aggregate=True):
        """Return stats for a batch of summaries.

        When `aggregate` is True the per-summary stats are accumulated in a
        Counter and averaged over the batch; otherwise a list of per-summary
        stats dicts is returned. `references` is unused.
        """
        with CoreNLPClient(annotators=self._ANNOTATORS, timeout=30000, memory='16G') as client:
            # BUG FIX: dropped a redundant Counter() that was created before
            # this branch and immediately overwritten.
            corpus_score_dict = Counter() if aggregate else []
            for count, summ in enumerate(summaries):
                print(count)
                stats = get_stats(client, summ)
                if aggregate:
                    corpus_score_dict.update(stats)
                else:
                    corpus_score_dict.append(stats)
        if aggregate:
            # Average the accumulated counts over the batch size.
            for key in corpus_score_dict.keys():
                corpus_score_dict[key] /= float(len(summaries))
        return corpus_score_dict

    def supports_multi_ref(self):
        return False
def to_tensor_slice_dataset(data, label, config):
    """Build a shuffled, repeated, batched tf.data pipeline from columnar data.

    Args:
        data: sequence of per-column arrays; continuous columns first, then
            categorical columns, in the order of CONTINUOUS_COLUMNS /
            CATEGORICAL_COLUMNS.
        label: array of labels aligned with the rows of `data`.
        config: dict with 'seed', 'no_of_epochs' and 'batch_size' keys.

    Returns:
        A tf.data.Dataset yielding (features_dict, label) batches.
    """
    import numpy as np  # hoisted: was re-imported on every loop iteration

    features = collections.OrderedDict()
    for i, name in enumerate(CONTINUOUS_COLUMNS):
        features[name] = data[i].astype(np.float32)
    offset = len(CONTINUOUS_COLUMNS)
    for j, name in enumerate(CATEGORICAL_COLUMNS):
        features[name] = data[j + offset].astype('int64')
    # NOTE: removed a dead per-row generator (`get_item`) and its companion
    # `output_types` dict — both were built but never used.
    dataset = tf.data.Dataset.from_tensor_slices((features, label))
    dataset = dataset.shuffle(buffer_size=20000, seed=config['seed'])
    dataset = dataset.repeat(config['no_of_epochs'])
    # NOTE(review): prefetch appears both before and after batch(); kept as-is
    # to preserve pipeline behavior, but the pre-batch prefetch looks redundant.
    dataset = dataset.prefetch(config['batch_size'])
    dataset = dataset.batch(config['batch_size'])
    dataset = dataset.prefetch(2)
    return dataset
class AgentGenerator():
    """Spawns and manages traffic-manager-driven vehicles around an ego vehicle in CARLA."""

    def __init__(self, args):
        # Connect to the simulator and the traffic manager.
        self.client = carla.Client(args.host, args.port)
        self.client.set_timeout(10.0)
        self.world = self.client.get_world()
        self.map = self.world.get_map()
        self.tm = self.client.get_trafficmanager(args.tm_port)
        self.tm.set_global_distance_to_leading_vehicle(1.0)
        self.tm.set_hybrid_physics_mode(args.hybrid)
        self.tm.set_random_device_seed(1)
        # Restrict blueprints to ordinary 4-wheel cars (exclude odd vehicles).
        blueprints = self.world.get_blueprint_library().filter(args.filterv)
        blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4]
        blueprints = [x for x in blueprints if not x.id.endswith(('isetta', 'carlacola', 'cybertruck', 't2'))]
        self.blueprints = blueprints
        self.agent_list = []
        self.minimum_distance = 15
        # Longitudinal offset (meters) per spawn slot; negative means behind the ego.
        self.spawn_offset = {'front': 15, 'front_left': 10, 'back_left': -10, 'front_right': 10, 'back_right': -10, 'back': -15, 'double_left': 0, 'double_right': 0}
        self.autopilot_enabled = False
        # Wait until the ego vehicle appears in the world.
        while True:
            actors = self.world.get_actors().filter('vehicle.*')
            # BUG FIX: the original tested `actors != None`, which is always
            # true; an empty actor list must keep waiting instead of raising
            # IndexError on `actors[0]`.
            if len(actors) > 0:
                self.ego = actors[0]
                print('find ego vehicle, id: {}'.format(self.ego.id))
                break
            print('wait for ego vehicle')
            time.sleep(1)

    def random_blueprint(self):
        """Pick a random vehicle blueprint with randomized color/driver id."""
        random.seed(int(time.time()))
        blueprint = random.choice(self.blueprints)
        if blueprint.has_attribute('color'):
            color = random.choice(blueprint.get_attribute('color').recommended_values)
            blueprint.set_attribute('color', color)
        if blueprint.has_attribute('driver_id'):
            driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
            blueprint.set_attribute('driver_id', driver_id)
        blueprint.set_attribute('role_name', 'autopilot')
        return blueprint

    def get_transform(self, waypoint):
        """Waypoint transform lifted 1m so the spawned car doesn't clip the road."""
        tf = waypoint.transform
        tf.location.z += 1
        return tf

    def spawn_agent(self, transform):
        """Spawn one agent at `transform`; return its actor id, or None on failure."""
        batch = [carla.command.SpawnActor(self.random_blueprint(), transform)]
        for response in self.client.apply_batch_sync(batch, False):
            if response.error:
                print('{}'.format(response.error))
                return None
            self.agent_list.append(response.actor_id)
        return self.agent_list[-1]

    def get_nearby_waypoint(self, waypoint, distance):
        """Waypoint `distance` meters ahead (positive) or behind (negative), or None."""
        nearby_wp = waypoint.next(distance) if distance >= 0 else waypoint.previous(abs(distance))
        return nearby_wp[-1] if nearby_wp else None

    def spawn_agent_at(self, location='front', given_offset=None):
        """Spawn an agent at a named slot relative to the ego vehicle.

        Returns the new actor id, or False when no valid spawn point exists.
        NOTE(review): the 'double_*' slots pass `given_offset` straight through;
        callers appear expected to supply it for those slots — confirm.
        """
        ego_waypoint = self.map.get_waypoint(self.ego.get_transform().location)
        offset = given_offset if given_offset else self.spawn_offset[location]
        spawn_point = None
        if location in ('front', 'back'):
            spawn_point = self.get_nearby_waypoint(ego_waypoint, offset)
        elif location in ('front_left', 'back_left'):
            spawn_point = ego_waypoint.get_left_lane()
            if spawn_point:
                spawn_point = self.get_nearby_waypoint(spawn_point, offset)
        elif location in ('front_right', 'back_right'):
            spawn_point = ego_waypoint.get_right_lane()
            if spawn_point:
                spawn_point = self.get_nearby_waypoint(spawn_point, offset)
        elif location == 'double_left':
            # Two lanes to the left; guard each hop since a lane may not exist.
            left = ego_waypoint.get_left_lane()
            spawn_point = left.get_left_lane() if left else None
            if spawn_point:
                spawn_point = self.get_nearby_waypoint(spawn_point, given_offset)
        elif location == 'double_right':
            # BUG FIX: original called the nonexistent `get_lane()`; two lanes
            # to the right mirrors the 'double_left' case.
            right = ego_waypoint.get_right_lane()
            spawn_point = right.get_right_lane() if right else None
            if spawn_point:
                spawn_point = self.get_nearby_waypoint(spawn_point, given_offset)
        if not spawn_point:
            print("can't find spawn location at ego's {}".format(location))
            return False
        actor = self.spawn_agent(self.get_transform(spawn_point))
        if actor is not None:
            print('succeed to spawn agent at {}'.format(self.get_transform(spawn_point)))
            # Push the slot further out so the next spawn keeps its distance.
            if self.spawn_offset[location] >= 0:
                self.spawn_offset[location] += self.minimum_distance
            else:
                self.spawn_offset[location] -= self.minimum_distance
        return actor

    def set_auto_lane_change(self, enable):
        """Enable/disable traffic-manager auto lane change for all spawned agents."""
        for actor in self.agent_list:
            self.tm.auto_lane_change(self.world.get_actor(actor), enable)

    def set_ignore_light_percentage(self, percentage):
        """Set the percentage of traffic lights each spawned agent may ignore."""
        for actor in self.agent_list:
            self.tm.ignore_lights_percentage(self.world.get_actor(actor), percentage)

    def set_speed_percentage_difference(self, actor, percentage):
        """Set one agent's percentage speed difference via the traffic manager."""
        actor = self.world.get_actor(actor)
        self.tm.vehicle_percentage_speed_difference(actor, percentage)

    def trigger_autopilot(self):
        """Toggle autopilot for all spawned agents."""
        port = self.tm.get_port()
        print('{} autopilot'.format('Disable' if self.autopilot_enabled else 'Enable'))
        for agent in self.agent_list:
            # BUG FIX: `~self.autopilot_enabled` is bitwise NOT (-1 / -2), not
            # a boolean toggle; use logical `not`.
            self.world.get_actor(agent).set_autopilot(not self.autopilot_enabled, port)
        self.autopilot_enabled = not self.autopilot_enabled

    def destory(self):
        """Destroy all spawned agents. (Misspelled name kept: it is the public API.)"""
        print('\ndestroying %d agents' % len(self.agent_list))
        self.client.apply_batch([carla.command.DestroyActor(x) for x in self.agent_list])
        time.sleep(0.5)
# NOTE(review): the registration decorator was mangled to a bare `_criterion(...)`
# call in this copy; restored to fairseq's `@register_criterion`.
@register_criterion('wav2vec', dataclass=Wav2VecCriterionConfig)
class Wav2vecCriterion(FairseqCriterion):
    """Criterion for wav2vec 2.0 training.

    Uses the InfoNCE contrastive loss when `infonce=True`, otherwise a
    (optionally weighted) binary cross-entropy objective, plus optional
    weighted extra losses reported by the model.
    """

    def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
        super().__init__(task)
        self.infonce = infonce
        self.loss_weights = loss_weights
        self.log_keys = [] if log_keys is None else log_keys

    def forward(self, model, sample, reduce=True):
        """Compute the loss for `sample`.

        Returns:
            (loss, sample_size, logging_output)
        """
        net_output = model(**sample['net_input'])
        logits = model.get_logits(net_output).float()
        target = model.get_targets(sample, net_output)
        # Optional per-target weights (only used by the BCE objective).
        weights = None
        if hasattr(model, 'get_target_weights') and not self.infonce:
            weights = model.get_target_weights(target, net_output)
            if torch.is_tensor(weights):
                weights = weights.float()
        losses = []
        if self.infonce:
            loss = F.cross_entropy(logits, target, reduction='sum' if reduce else 'none')
        else:
            loss = F.binary_cross_entropy_with_logits(logits, target.float(), weights, reduction='sum' if reduce else 'none')
        # For InfoNCE every target entry counts; for BCE only positive targets do.
        sample_size = target.numel() if self.infonce else target.long().sum().item()
        losses.append(loss.detach().clone())
        if self.loss_weights is not None:
            assert hasattr(model, 'get_extra_losses')
            extra_losses = model.get_extra_losses(net_output)
            if torch.is_tensor(extra_losses):
                extra_losses = [extra_losses]
            # A single weight broadcasts across all extra losses.
            if len(self.loss_weights) == 1 and len(extra_losses) != 1:
                self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
            assert len(extra_losses) == len(self.loss_weights), f'{len(extra_losses)}, {len(self.loss_weights)}'
            for p, coef in zip(extra_losses, self.loss_weights):
                if coef != 0 and p is not None:
                    p = (coef * p.float()) * sample_size
                    loss += p
                    losses.append(p)
        logging_output = {'loss': loss.item() if reduce else loss, 'ntokens': sample_size, 'nsentences': sample['id'].numel(), 'sample_size': sample_size}
        for lk in self.log_keys:
            if lk in net_output:
                logging_output[lk] = float(net_output[lk])
        if len(losses) > 1:
            for i, l in enumerate(losses):
                logging_output[f'loss_{i}'] = l.item()
        if self.infonce:
            with torch.no_grad():
                if logits.numel() == 0:
                    corr = 0
                    count = 0
                else:
                    assert logits.dim() > 1, logits.shape
                    # Positive candidate is at index 0: count rows where it is
                    # the unique argmax (argmax/argmin ties are excluded).
                    # Renamed from `max`/`min` to avoid shadowing the builtins.
                    is_max = logits.argmax(-1) == 0
                    is_min = logits.argmin(-1) == 0
                    both = is_max & is_min
                    corr = is_max.long().sum().item() - both.long().sum().item()
                    count = is_max.numel()
                logging_output['correct'] = corr
                logging_output['count'] = count
        return (loss, sample_size, logging_output)

    # NOTE(review): upstream fairseq declares this method (and
    # `logging_outputs_can_be_summed`) as @staticmethod; the decorators were
    # missing in this copy (like the class decorator) and have been restored.
    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data-parallel training."""
        loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))
        ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))
        nsentences = utils.item(sum(log.get('nsentences', 0) for log in logging_outputs))
        sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))
        # Losses are reported in bits (divided by log 2).
        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_scalar('ntokens', ntokens)
        metrics.log_scalar('nsentences', nsentences)
        correct = sum(log.get('correct', 0) for log in logging_outputs)
        metrics.log_scalar('_correct', correct)
        total = sum(log.get('count', 0) for log in logging_outputs)
        metrics.log_scalar('_total', total)
        if total > 0:
            metrics.log_derived('accuracy', lambda meters: safe_round(meters['_correct'].sum / meters['_total'].sum, 5) if meters['_total'].sum > 0 else float('nan'))
        builtin_keys = {'loss', 'ntokens', 'nsentences', 'sample_size', 'correct', 'count'}
        # Any extra logged keys are averaged (or normalized like losses).
        for k in logging_outputs[0]:
            if k not in builtin_keys:
                val = sum(log.get(k, 0) for log in logging_outputs)
                if k.startswith('loss'):
                    metrics.log_scalar(k, val / sample_size / math.log(2), sample_size, round=3)
                else:
                    metrics.log_scalar(k, val / len(logging_outputs), round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Whether `forward`'s logging outputs may be summed across workers."""
        return False
def main():
    """Train and/or evaluate a language model with the HF Trainer.

    Returns a dict of evaluation results (empty when --do_eval is not set).
    """
    # Parse the three dataclass argument groups from the command line.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Fail fast on inconsistent argument combinations.
    if ((data_args.eval_data_file is None) and training_args.do_eval):
        raise ValueError('Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file or remove the --do_eval argument.')
    if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
    # Only the main process (rank -1 or 0) logs at INFO; other ranks log WARN.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (training_args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool((training_args.local_rank != (- 1))), training_args.fp16)
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    # Resolve the model config: explicit --config_name, else the model's own,
    # else a fresh config of the requested model type.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    config.max_seq_length = 512
    # Resolve the tokenizer; training a tokenizer from scratch is unsupported here.
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,and load it from here, using --tokenizer_name')
    # Load pretrained weights when a model path is given; otherwise train from scratch.
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir)
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)
    # Make the embedding matrix match the tokenizer's vocabulary size.
    model.resize_token_embeddings(len(tokenizer))
    # Masked-LM-only architectures must be run with --mlm.
    if ((config.model_type in ['bert', 'roberta', 'distilbert', 'camembert']) and (not data_args.mlm)):
        raise ValueError('BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the--mlm flag (masked language modeling).')
    # Clamp block size to the tokenizer's maximum input length.
    if (data_args.block_size <= 0):
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    train_dataset = (get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None)
    eval_dataset = (get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir) if training_args.do_eval else None)
    # XLNet uses permutation language modeling; everything else uses (M)LM collation.
    if (config.model_type == 'xlnet'):
        data_collator = DataCollatorForPermutationLanguageModeling(tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length)
    else:
        data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)
    trainer = Trainer(model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True)
    if training_args.do_train:
        # Resume from the checkpoint directory only when the path is a local dir.
        model_path = (model_args.model_name_or_path if ((model_args.model_name_or_path is not None) and os.path.isdir(model_args.model_name_or_path)) else None)
        trainer.train(model_path=model_path)
        trainer.save_model()
        # Only the main process writes the tokenizer files.
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        # Perplexity is exp of the average eval loss.
        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                    writer.write(('%s = %s\n' % (key, str(result[key]))))
        results.update(result)
    return results
class VectorType(LaVarType):
    """Variable type describing a (rows x 1) column vector in the LA type system."""

    def __init__(self, rows=0, desc=None, element_type=ScalarType(), symbol=None, dynamic=DynamicTypeEnum.DYN_INVALID, rows_ir=None):
        LaVarType.__init__(self, VarTypeEnum.VECTOR, desc, element_type, symbol, dynamic=dynamic)
        self.rows = rows        # row count (int or symbolic)
        self.rows_ir = rows_ir  # IR node describing the row dimension, if any
        self.cols = 1           # a vector always has exactly one column
        self.sparse = False

    def get_signature(self):
        """Return a string uniquely identifying this vector type."""
        if not self.element_type:
            return 'vector,rows:{}'.format(self.rows)
        return 'vector,rows:{},ele_type:{}'.format(self.rows, self.element_type.get_signature())

    def is_integer_element(self):
        """True when the element type is integral."""
        return self.element_type.is_integer_element()

    def get_json_content(self):
        """Serialize this type description to a JSON string."""
        return '{{"type": "vector", "is_int":"{}", "element":{}, "rows":"{}"}}'.format(self.is_integer_element(), self.element_type.get_json_content(), self.rows)
class FlipTensor(nn.Module):
    """Flip a tensor (or one indexed item of the input) along a fixed dimension.

    With `dim=None` the module is a no-op. With `item_index` set, only
    `x[item_index]` is flipped in place and the container is returned.
    """

    def __init__(self, dim=(- 2), item_index=None):
        super(FlipTensor, self).__init__()
        self.dim = dim
        self.item_index = item_index

    def flip(self, x):
        """Apply the configured flip to `x`."""
        if self.dim is None:
            return x
        if self.item_index is not None:
            # Flip just the selected item, mutating the container.
            x[self.item_index] = x[self.item_index].flip(dims=[self.dim])
            return x
        return x.flip(dims=[self.dim])

    def forward(self, x):
        return self.flip(x)
def interpolate(dist, length, mode, max_curvature, origin_x, origin_y, origin_yaw):
    """Interpolate a pose at parameter `dist` along an 'S'/'L'/'R' path segment.

    'S' is a straight segment; 'L'/'R' are left/right arcs of curvature
    `max_curvature`. Returns (x, y, yaw, direction) where direction is +1 for
    a forward segment (length > 0) and -1 otherwise.
    """
    if mode == 'S':
        # Straight: advance dist/max_curvature along the current heading.
        step = dist / max_curvature
        x = origin_x + step * math.cos(origin_yaw)
        y = origin_y + step * math.sin(origin_yaw)
        yaw = origin_yaw
    else:
        # Arc displacement in the segment-local frame.
        local_dx = math.sin(dist) / max_curvature
        local_dy = 0.0
        yaw = None
        if mode == 'L':
            local_dy = (1.0 - math.cos(dist)) / max_curvature
            yaw = origin_yaw + dist
        elif mode == 'R':
            local_dy = (1.0 - math.cos(dist)) / (-max_curvature)
            yaw = origin_yaw - dist
        # Rotate the local displacement back into the global frame.
        cos_t = math.cos(-origin_yaw)
        sin_t = math.sin(-origin_yaw)
        x = origin_x + cos_t * local_dx + sin_t * local_dy
        y = origin_y + (-sin_t) * local_dx + cos_t * local_dy
    direction = 1 if length > 0.0 else -1
    return (x, y, yaw, direction)
def cycle(dataloader, distributed=False):
    """Yield (images, targets) pairs from `dataloader` forever.

    After each full pass, the epoch counter advances; in distributed mode the
    sampler is told the new epoch so shuffling differs between passes.
    """
    epoch = 0
    while True:
        for batch in dataloader:
            images, targets = batch
            yield (images, targets)
        epoch += 1
        if distributed:
            dataloader.sampler.set_epoch(epoch)
def plot(deephyperedges_directory, MLP_directory, deepsets_directory, metric, dataset):
    """Plot raw and smoothed training curves for three models on one figure.

    Each input path is a CSV (TensorBoard export style) with 'Step' and
    'Value' columns. The figure is saved to images/paper/<dataset>/<metric>.png.
    """
    def _read_curve(csv_path):
        # Returns parallel lists of step and value floats from one CSV.
        frame = pd.read_csv(csv_path)
        steps, values = [], []
        for (_, row) in frame.iterrows():
            steps.append(float(row['Step']))
            values.append(float(row['Value']))
        return steps, values

    x, y = _read_curve(deephyperedges_directory)
    x_mlp, y_mlp = _read_curve(MLP_directory)
    x_ds, y_ds = _read_curve(deepsets_directory)
    sns.set()
    # BUG FIX: colors were string literals like '(0.0, 0.0, 0.7, 0.2)', which
    # matplotlib cannot parse; use real RGBA tuples.
    ds_normal = (0.0, 0.0, 0.7, 0.2)
    ds_smoothed = (0.0, 0.0, 0.7, 1)
    dh_normal = (0.0, 0.7, 0.0, 0.2)
    dh_smoothed = (0.0, 0.7, 0.0, 1)
    mlp_normal = (0.7, 0.2, 0.1, 0.2)
    mlp_smoothed = (0.7, 0.2, 0.1, 1)
    plt.gca().set_prop_cycle(color=[mlp_normal, ds_normal, dh_normal, mlp_smoothed, ds_smoothed, dh_smoothed])
    # Faint raw curves first, then the smoothed versions in matching colors.
    plt.plot(x_mlp, y_mlp)
    plt.plot(x_ds, y_ds)
    plt.plot(x, y)
    plt.plot(x_mlp, smooth(y_mlp, 0.8))
    plt.plot(x_ds, smooth(y_ds, 0.8))
    plt.plot(x, smooth(y, 0.8))
    # BUG FIX: 'bottom right' is not a valid matplotlib legend location;
    # the valid spelling is 'lower right'.
    plt.legend(['_nolegend_', '_nolegend_', '_nolegend_', 'MLP + TAS Walks', 'Deep Sets + SAT Walks', 'Deep Hyperedges'], loc='lower right')
    plt.savefig((((('images/paper/' + dataset) + '/') + metric) + '.png'), dpi=300)
    plt.show()
def test_boradcast_data(model_parallel_size):
    """Exercise mpu/data_utils tensor broadcasting across a model-parallel group.

    NOTE(review): the 'boradcast' typo is kept — it is the test's public name
    and also appears in the printed message.
    """
    if (torch.distributed.get_rank() == 0):
        print('> testing boradcast_data with model parallel size {} ...'.format(model_parallel_size))
    mpu.initialize_model_parallel(model_parallel_size)
    # Seed by data-parallel rank so ranks within one model-parallel group
    # generate identical reference tensors.
    torch.manual_seed((1234 + mpu.get_data_parallel_rank()))
    model_parallel_size = mpu.get_model_parallel_world_size()
    # Expected tensor shapes keyed by name.
    key_size_t = {'key1': [7, 11], 'key2': [8, 2, 1], 'key3': [13], 'key4': [5, 1, 2], 'key5': [5, 12]}
    keys = list(key_size_t.keys())
    data = {}
    data_t = {}
    for key in key_size_t:
        data[key] = torch.LongTensor(size=key_size_t[key]).random_(0, 1000)
        data_t[key] = data[key].clone()
    # 'keyX' is a float tensor and is deliberately excluded from `keys`.
    data['keyX'] = torch.FloatTensor(size=(5,)).random_(0, 1000)
    data_t['keyX'] = data['keyX'].clone()
    # Only model-parallel rank 0 holds the source data; others receive it.
    if (mpu.get_model_parallel_rank() != 0):
        data = None
    data_utils._check_data_types(keys, data_t, torch.int64)
    (key_size, key_numel, total_numel) = data_utils._build_key_size_numel_dictionaries(keys, data)
    # Verify the broadcast size/numel bookkeeping matches the expected shapes.
    for key in keys:
        assert (key_size[key] == key_size_t[key])
    total_numel_t = 0
    for key in keys:
        target_size = functools.reduce(operator.mul, key_size_t[key], 1)
        assert (key_numel[key] == target_size)
        total_numel_t += target_size
    assert (total_numel == total_numel_t)
    # Broadcast and check every rank received tensors identical to the source.
    data_b = data_utils.broadcast_data(keys, data, torch.int64)
    for key in keys:
        tensor = data_t[key].cuda()
        assert (data_b[key].sub(tensor).abs().max() == 0)
    # Reset distributed state for subsequent tests.
    mpu.destroy_model_parallel()
    torch.distributed.barrier()
    if (torch.distributed.get_rank() == 0):
        print('>> passed the test :-)')
def test_corpus_czech(recwarn):
    """Building a Czech corpus warns twice and falls back to the basic 'cs' model."""
    docs = pd.Series([
        'Holka modrooka nesedavej tam',
        'Holka modrooka nesedavej u potoka',
        'podemele tvoje oci',
        'vezme li te bude skoda',
        'V potoce je hastrmanek',
        'V potoce je velka voda',
        'V potoce se voda toci',
        'zataha te za copanek',
    ])
    corpus = tn.Corpus(docs, lang='cs')
    assert len(corpus.documents) == 8
    tokenized = corpus.tokenized()
    # Exactly two warnings: missing model, then the basic-model fallback.
    assert len(recwarn) == 2
    assert tokenized.sum().n > 8
    first_warning = recwarn.pop(UserWarning)
    assert str(first_warning.message) == "Language model 'cs' is not yet installed."
    second_warning = recwarn.pop(UserWarning)
    assert str(second_warning.message) == "Using basic 'cs' language model."
def attention(q, k, v, d_k, mask=None, dropout=None):
    """Scaled dot-product attention.

    Args:
        q, k, v: query/key/value tensors; q and k share the last dim `d_k`.
        d_k: key dimensionality used for the 1/sqrt(d_k) scaling.
        mask: optional tensor, broadcastable to the score matrix after
            `unsqueeze(1)`; positions where mask == 0 are excluded.
        dropout: optional dropout module applied to the attention weights.

    Returns:
        softmax(q @ k^T / sqrt(d_k)) @ v, with masking/dropout applied.
    """
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        mask = mask.unsqueeze(1)  # broadcast across the head dimension
        # BUG FIX: masked positions were filled with -0.0, which leaves them
        # with substantial weight after softmax; fill with a large negative
        # value so they receive (numerically) zero attention.
        scores = scores.masked_fill(mask == 0, -1e9)
    scores = F.softmax(scores, dim=-1)
    if dropout is not None:
        scores = dropout(scores)
    output = torch.matmul(scores, v)
    return output
class BidirectionalLSTM(nn.Module):
    """A bidirectional LSTM followed by a linear projection.

    Maps (batch, seq, input_size) to (batch, seq, output_size); the linear
    layer consumes the concatenated forward/backward hidden states.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True, batch_first=True)
        # Bidirectional output has 2 * hidden_size features per timestep.
        self.linear = nn.Linear(hidden_size * 2, output_size)

    def forward(self, input):
        # Compact weights for efficient (multi-GPU) execution.
        self.rnn.flatten_parameters()
        recurrent, _ = self.rnn(input)
        return self.linear(recurrent)
def show_doc_from_name(mod_name, ft_name: str, doc_string: bool=True, arg_comments: dict=None, alt_doc_string: str=''):
    """Show documentation for the element `ft_name` inside module `mod_name`.

    Args:
        mod_name: name of the module to import.
        ft_name: dotted path of the element inside the module ('func' or 'Class.method').
        doc_string: whether to include the element's docstring.
        arg_comments: optional mapping of argument name -> comment.
        alt_doc_string: replacement docstring text.

    Raises:
        AssertionError: if any component of `ft_name` cannot be resolved.
    """
    # BUG FIX: mutable default `{}` replaced with a None sentinel.
    if arg_comments is None:
        arg_comments = {}
    mod = import_mod(mod_name)
    splits = str.split(ft_name, '.')
    # BUG FIX: assertion messages previously used `print(...)` (which
    # evaluates to None); pass the message string directly so failures
    # actually show it.
    assert hasattr(mod, splits[0]), f"Module {mod_name} doesn't have a function named {splits[0]}."
    elt = getattr(mod, splits[0])
    for i, split in enumerate(splits[1:]):
        assert hasattr(elt, split), f"Class {'.'.join(splits[:(i + 1)])} doesn't have a function named {split}."
        elt = getattr(elt, split)
    show_doc(elt, doc_string, ft_name, arg_comments, alt_doc_string)
class RealmConfig(PretrainedConfig):
    """Configuration class for REALM retriever/reader models.

    Defaults mirror the upstream google/realm checkpoints.
    """

    model_type = 'realm'

    # BUG FIX: `num_block_records` had no default value (a syntax error in
    # this copy); restored the upstream default of 13353718.
    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=0.001, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, searcher_seq_len=64, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common BERT-style encoder hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader-specific hyper-parameters.
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Searcher/retriever-specific hyper-parameters.
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
        self.searcher_seq_len = searcher_seq_len
class StyleGAN2Discriminator(nn.Module):
    """StyleGAN2 discriminator network.

    Builds resolution blocks from `resolution` down to the initial resolution,
    supporting 'skip' and 'resnet' style architectures, and records a mapping
    from PyTorch parameter names to original TF variable names for checkpoint
    conversion (`pth_to_tf_var_mapping`).
    """

    def __init__(self, resolution, image_channels=3, label_size=0, architecture='resnet', use_wscale=True, minibatch_std_group_size=4, minibatch_std_channels=1, fmaps_base=(32 << 10), fmaps_max=512):
        super().__init__()
        # Validate the requested configuration against module-level whitelists.
        if (resolution not in _RESOLUTIONS_ALLOWED):
            raise ValueError(f'''Invalid resolution: `{resolution}`!
Resolutions allowed: {_RESOLUTIONS_ALLOWED}.''')
        if (architecture not in _ARCHITECTURES_ALLOWED):
            raise ValueError(f'''Invalid architecture: `{architecture}`!
Architectures allowed: {_ARCHITECTURES_ALLOWED}.''')
        self.init_res = _INIT_RES
        self.init_res_log2 = int(np.log2(self.init_res))
        self.resolution = resolution
        self.final_res_log2 = int(np.log2(self.resolution))
        self.image_channels = image_channels
        self.label_size = label_size
        self.architecture = architecture
        self.use_wscale = use_wscale
        self.minibatch_std_group_size = minibatch_std_group_size
        self.minibatch_std_channels = minibatch_std_channels
        self.fmaps_base = fmaps_base
        self.fmaps_max = fmaps_max
        # PyTorch parameter name -> TF variable name, filled as blocks are built.
        self.pth_to_tf_var_mapping = {}
        # Build blocks from the highest resolution down to the initial one.
        for res_log2 in range(self.final_res_log2, (self.init_res_log2 - 1), (- 1)):
            res = (2 ** res_log2)
            block_idx = (self.final_res_log2 - res_log2)
            # FromRGB input layer: only at the top block, except the 'skip'
            # architecture which has one at every resolution.
            if ((res_log2 == self.final_res_log2) or (self.architecture == 'skip')):
                self.add_module(f'input{block_idx}', ConvBlock(in_channels=self.image_channels, out_channels=self.get_nf(res), kernel_size=1, use_wscale=self.use_wscale))
                self.pth_to_tf_var_mapping[f'input{block_idx}.weight'] = f'{res}x{res}/FromRGB/weight'
                self.pth_to_tf_var_mapping[f'input{block_idx}.bias'] = f'{res}x{res}/FromRGB/bias'
            # Intermediate resolutions: conv + downsampling conv (plus a
            # 1x1 skip projection for the 'resnet' architecture).
            if (res != self.init_res):
                self.add_module(f'layer{(2 * block_idx)}', ConvBlock(in_channels=self.get_nf(res), out_channels=self.get_nf(res), use_wscale=self.use_wscale))
                tf_layer0_name = 'Conv0'
                self.add_module(f'layer{((2 * block_idx) + 1)}', ConvBlock(in_channels=self.get_nf(res), out_channels=self.get_nf((res // 2)), scale_factor=2, use_wscale=self.use_wscale))
                tf_layer1_name = 'Conv1_down'
                if (self.architecture == 'resnet'):
                    layer_name = f'skip_layer{block_idx}'
                    self.add_module(layer_name, ConvBlock(in_channels=self.get_nf(res), out_channels=self.get_nf((res // 2)), kernel_size=1, add_bias=False, scale_factor=2, use_wscale=self.use_wscale, activation_type='linear'))
                    self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'{res}x{res}/Skip/weight'
            # Lowest resolution: minibatch-stddev conv followed by a dense layer.
            else:
                self.add_module(f'layer{(2 * block_idx)}', ConvBlock(in_channels=self.get_nf(res), out_channels=self.get_nf(res), use_wscale=self.use_wscale, minibatch_std_group_size=minibatch_std_group_size, minibatch_std_channels=minibatch_std_channels))
                tf_layer0_name = 'Conv'
                self.add_module(f'layer{((2 * block_idx) + 1)}', DenseBlock(in_channels=((self.get_nf(res) * res) * res), out_channels=self.get_nf((res // 2)), use_wscale=self.use_wscale))
                tf_layer1_name = 'Dense0'
            self.pth_to_tf_var_mapping[f'layer{(2 * block_idx)}.weight'] = f'{res}x{res}/{tf_layer0_name}/weight'
            self.pth_to_tf_var_mapping[f'layer{(2 * block_idx)}.bias'] = f'{res}x{res}/{tf_layer0_name}/bias'
            self.pth_to_tf_var_mapping[f'layer{((2 * block_idx) + 1)}.weight'] = f'{res}x{res}/{tf_layer1_name}/weight'
            self.pth_to_tf_var_mapping[f'layer{((2 * block_idx) + 1)}.bias'] = f'{res}x{res}/{tf_layer1_name}/bias'
        # Final output dense layer (after the loop, reusing the last block_idx).
        self.add_module(f'layer{((2 * block_idx) + 2)}', DenseBlock(in_channels=self.get_nf((res // 2)), out_channels=max(self.label_size, 1), use_wscale=self.use_wscale, activation_type='linear'))
        self.pth_to_tf_var_mapping[f'layer{((2 * block_idx) + 2)}.weight'] = f'Output/weight'
        self.pth_to_tf_var_mapping[f'layer{((2 * block_idx) + 2)}.bias'] = f'Output/bias'
        if (self.architecture == 'skip'):
            self.downsample = DownsamplingLayer()

    def get_nf(self, res):
        """Number of feature maps for resolution `res`, capped at fmaps_max."""
        return min((self.fmaps_base // res), self.fmaps_max)

    def forward(self, image, label=None, **_unused_kwargs):
        """Score `image` (optionally conditioned on one-hot `label`)."""
        expected_shape = (self.image_channels, self.resolution, self.resolution)
        if ((image.ndim != 4) or (image.shape[1:] != expected_shape)):
            raise ValueError(f'''The input tensor should be with shape [batch_size, channel, height, width], where `channel` equals to {self.image_channels}, `height`, `width` equal to {self.resolution}!
But `{image.shape}` is received!''')
        if self.label_size:
            if (label is None):
                raise ValueError(f'Model requires an additional label (with size {self.label_size}) as inputs, but no label is received!')
            batch_size = image.shape[0]
            if ((label.ndim != 2) or (label.shape != (batch_size, self.label_size))):
                raise ValueError(f'''Input label should be with shape [batch_size, label_size], where `batch_size` equals to that of images ({image.shape[0]}) and `label_size` equals to {self.label_size}!
But `{label.shape}` is received!''')
        x = self.input0(image)
        # Run the resolution blocks from high to low resolution.
        for res_log2 in range(self.final_res_log2, (self.init_res_log2 - 1), (- 1)):
            block_idx = (self.final_res_log2 - res_log2)
            # 'skip' architecture: feed a downsampled image into every block.
            if ((self.architecture == 'skip') and (block_idx > 0)):
                image = self.downsample(image)
                x = (x + self.__getattr__(f'input{block_idx}')(image))
            # 'resnet' architecture: compute the residual branch first.
            if ((self.architecture == 'resnet') and (res_log2 != self.init_res_log2)):
                residual = self.__getattr__(f'skip_layer{block_idx}')(x)
            x = self.__getattr__(f'layer{(2 * block_idx)}')(x)
            x = self.__getattr__(f'layer{((2 * block_idx) + 1)}')(x)
            if ((self.architecture == 'resnet') and (res_log2 != self.init_res_log2)):
                x = ((x + residual) / np.sqrt(2.0))
        # Final dense layer produces the score(s).
        x = self.__getattr__(f'layer{((2 * block_idx) + 2)}')(x)
        if self.label_size:
            # Conditional output: project onto the provided one-hot label.
            x = torch.sum((x * label), dim=1, keepdim=True)
        return x
def init_quantize_config(model, quantize_recipe=None):
    """Create a fresh QuantizeConfig for `model`, optionally applying a recipe.

    Asserts that no quantize_config already exists in the global config,
    marks the model's quantizable layers, and returns the new config.
    """
    assert ('quantize_config' not in global_config), 'quantize_config has been unexpectedly created. Please check your QAT workflow'
    qat_config = QuantizeConfig()
    config_quantizable_layers(model)
    if quantize_recipe:
        qat_config.add_quantize_recipe(quantize_recipe)
    return qat_config
@_register
class Pruner:
    """Configuration holder describing one pruning schedule.

    NOTE(review): the original source had a bare `_register` expression
    statement above the class, which evaluates the name and discards it;
    that is almost certainly a stripped `@_register` decorator, restored here.

    All constructor arguments are stored as attributes and read directly
    by the pruning driver; no behavior lives in this class itself.
    """

    def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None, target_sparsity=None, update_frequency=1, method='per_tensor', prune_type='basic_magnitude', start_step=None, end_step=None, update_frequency_on_step=None, prune_domain=None, sparsity_decay_type=None, pattern='tile_pattern_1x1', names=None, extra_excluded_names=None, parameters=None):
        self.start_epoch = start_epoch
        self.end_epoch = end_epoch
        # Original code assigned update_frequency twice; one assignment suffices.
        self.update_frequency = update_frequency
        self.target_sparsity = target_sparsity
        self.initial_sparsity = initial_sparsity
        self.start_step = start_step
        self.end_step = end_step
        self.update_frequency_on_step = update_frequency_on_step
        self.prune_domain = prune_domain
        self.sparsity_decay_type = sparsity_decay_type
        self.extra_excluded_names = extra_excluded_names
        self.pattern = pattern
        self.prune_type = prune_type
        self.method = method
        self.names = names
        self.parameters = parameters
def randreg_equation(n, reg, d_min=2, d_max=3, seed=None):
    """Build a tensor-network contraction from a random `reg`-regular graph.

    Each graph edge becomes a shared index between its two endpoint tensors,
    with a random dimension in [d_min, d_max]. Returns
    (inputs, output, shapes, size_dict) in einsum-style form.
    """
    import networkx as nx
    graph = nx.random_regular_graph(reg, n, seed=seed)
    inputs = [[] for _ in range(n)]
    for edge_idx, (u, v) in enumerate(graph.edges):
        symbol = get_symbol(edge_idx)
        inputs[u].append(symbol)
        inputs[v].append(symbol)
    rng = random.Random(seed)
    size_dict = {get_symbol(i): rng.randint(d_min, d_max) for i in range(len(graph.edges))}
    output = []
    shapes = [tuple(size_dict[sym] for sym in term) for term in inputs]
    return (inputs, output, shapes, size_dict)
class STP_Base_Net(torch.nn.Module):
    """Base encoder/decoder network for single-trajectory prediction.

    Subclasses must override `forward`; this base class provides the history
    encoder (`LSTM_Encoder`) and the future-trajectory decoder (`decode`).
    """

    def __init__(self, args):
        super(STP_Base_Net, self).__init__()
        self.args = args
        # Embed 2-D (x, y) positions before the recurrent encoder.
        self.ip_emb = torch.nn.Linear(2, self.args['input_embedding_size'])
        self.enc_rnn = torch.nn.GRU(self.args['input_embedding_size'], self.args['encoder_size'], 1, batch_first=True)
        self.dyn_emb = torch.nn.Linear(self.args['encoder_size'], self.args['dyn_embedding_size'])
        # Decoder consumes a 2*encoder_size context vector per timestep.
        self.dec_rnn = torch.nn.LSTM((2 * self.args['encoder_size']), self.args['decoder_size'], 2, batch_first=True)
        self.op = torch.nn.Linear(self.args['decoder_size'], 2)
        self.leaky_relu = torch.nn.LeakyReLU(0.1)

    def LSTM_Encoder(self, Hist):
        """Encode a (batch, seq, 2) history into a (batch, dyn_embedding_size) vector."""
        (_, Hist_Enc) = self.enc_rnn(self.leaky_relu(self.ip_emb(Hist)))
        # GRU hidden state is (num_layers, batch, hidden); drop the layer dim.
        Hist_Enc = self.leaky_relu(self.dyn_emb(self.leaky_relu(Hist_Enc.view(Hist_Enc.shape[1], Hist_Enc.shape[2]))))
        return Hist_Enc

    def forward(self, data_pyg):
        """Subclass responsibility."""
        # Fixed: original read `raiseNotImplementedError(...)` (missing space),
        # which raised NameError instead of NotImplementedError.
        raise NotImplementedError('forward is not implemented in STP_Base_Net!')

    def decode(self, enc):
        """Decode an encoding of shape (batch, 2*encoder_size) into (batch, out_length, 2)."""
        enc = enc.unsqueeze(1)
        # Repeat the context for every future timestep.
        enc = enc.repeat(1, self.args['out_length'], 1)
        (h_dec, _) = self.dec_rnn(enc)
        fut_pred = self.op(h_dec)
        return fut_pred
class BaseOptions:
    """Defines, gathers, prints, saves, and loads command-line options.

    Subclasses are expected to set `self.isTrain` before `parse()` is called.
    """

    def __init__(self):
        self.initialized = False

    def initialize(self, parser):
        """Register all base options on `parser` and return it."""
        parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--model', type=str, default='pix2pix', help='which model to use')
        parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
        parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=('resize_and_crop', 'crop', 'scale_width', 'scale_width_and_crop', 'scale_shortside', 'scale_shortside_and_crop', 'fixed', 'none'))
        parser.add_argument('--load_size', type=int, default=1024, help='Scale images to this size. The final image will be cropped to --crop_size.')
        parser.add_argument('--crop_size', type=int, default=512, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        # Fixed help-string typo: it referred to a non-existent flag
        # `--contain_dopntcare_label`; the real flag is `--contain_dontcare_label`.
        parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dontcare_label.')
        parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        parser.add_argument('--dataroot', type=str, default='/data/cityscapes/')
        parser.add_argument('--label_dir', type=str, default='/data/cityscapes/')
        parser.add_argument('--image_dir', type=str, default='/data/cityscapes/')
        parser.add_argument('--dataset_mode_source', type=str, default='custom')
        parser.add_argument('--dataroot_source', type=str, default='/data/cityscapes/')
        parser.add_argument('--label_dir_source', type=str, default='/data/cityscapes/')
        parser.add_argument('--image_dir_source', type=str, default='/data/cityscapes/')
        parser.add_argument('--dataset_mode_target', type=str, default='cityscapes')
        parser.add_argument('--dataroot_target', type=str, default='/data/cityscapes/')
        parser.add_argument('--label_dir_target', type=str, default='/data/cityscapes/')
        parser.add_argument('--image_dir_target', type=str, default='/data/cityscapes/')
        parser.add_argument('--dataset_mode', type=str, default='custom')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
        parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data')
        parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
        parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster')
        parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache')
        parser.add_argument('--display_winsize', type=int, default=400, help='display window size')
        parser.add_argument('--netG', type=str, default='spade', help='selects model to use for netG (pix2pixhd | spade)')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
        parser.add_argument('--z_dim', type=int, default=256, help='dimension of the latent z vector')
        parser.add_argument('--no_instance', action='store_true', help='if specified, do *not* add instance map as input')
        parser.add_argument('--nef', type=int, default=16, help='# of encoder filters in the first conv layer')
        parser.add_argument('--use_vae', action='store_true', help='enable training with an image encoder.')
        parser.add_argument('--eval_losses_dir', type=str, default='.', help='dir to save evaluation losses')
        parser.add_argument('--eval_spade', action='store_true', help='when eval SPADE, input should be gtFinePred')
        parser.add_argument('--rec_save_suffix', type=str, default='leftImg8bitRec')
        parser.add_argument('--eval_output_dir', type=str, default='outputs', help='dir to save evaluation outputs')
        parser.add_argument('--vae_test', action='store_true', help='if specified, no reparametric')
        self.initialized = True
        return parser

    def gather_options(self):
        """Parse options, letting the chosen model/dataset add their own options."""
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        # First pass: learn which model/dataset is requested.
        (opt, unknown) = parser.parse_known_args()
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        # NOTE(review): self.isTrain is set by a subclass — confirm before parse().
        parser = model_option_setter(parser, self.isTrain)
        dataset_mode = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_mode)
        parser = dataset_option_setter(parser, self.isTrain)
        # Second pass with the model/dataset-specific options registered.
        (opt, unknown) = parser.parse_known_args()
        if opt.load_from_opt_file:
            parser = self.update_options_from_file(parser, opt)
        opt = parser.parse_args()
        self.parser = parser
        return opt

    def print_options(self, opt):
        """Print all options, marking values that differ from their defaults."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)

    def option_file_path(self, opt, makedir=False):
        """Return the path prefix (no extension) of the options file."""
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        if makedir:
            util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt')
        return file_name

    def save_options(self, opt):
        """Persist options as both readable text and a pickle."""
        file_name = self.option_file_path(opt, makedir=True)
        with open((file_name + '.txt'), 'wt') as opt_file:
            for (k, v) in sorted(vars(opt).items()):
                comment = ''
                default = self.parser.get_default(k)
                if (v != default):
                    comment = ('\t[default: %s]' % str(default))
                opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
        with open((file_name + '.pkl'), 'wb') as opt_file:
            pickle.dump(opt, opt_file)

    def update_options_from_file(self, parser, opt):
        """Override parser defaults with values loaded from a saved options file."""
        new_opt = self.load_options(opt)
        for (k, v) in sorted(vars(opt).items()):
            if (hasattr(new_opt, k) and (v != getattr(new_opt, k))):
                new_val = getattr(new_opt, k)
                parser.set_defaults(**{k: new_val})
        return parser

    def load_options(self, opt):
        """Load a previously pickled options namespace."""
        file_name = self.option_file_path(opt, makedir=False)
        new_opt = pickle.load(open((file_name + '.pkl'), 'rb'))
        return new_opt

    def parse(self, save=False):
        """Gather, post-process (semantic_nc, gpu_ids), and return options."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        self.print_options(opt)
        if opt.isTrain:
            self.save_options(opt)
        # Total input channels: labels + optional dontcare + optional instance map.
        opt.semantic_nc = ((opt.label_nc + (1 if opt.contain_dontcare_label else 0)) + (0 if opt.no_instance else 1))
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)  # renamed from `id` to avoid shadowing the builtin
            if (gpu_id >= 0):
                opt.gpu_ids.append(gpu_id)
        if (len(opt.gpu_ids) > 0):
            torch.cuda.set_device(opt.gpu_ids[0])
        assert ((len(opt.gpu_ids) == 0) or ((opt.batchSize % len(opt.gpu_ids)) == 0)), ('Batch size %d is wrong. It must be a multiple of # GPUs %d.' % (opt.batchSize, len(opt.gpu_ids)))
        self.opt = opt
        return self.opt
def parse_args():
    """Build and parse the LT-NCF command-line arguments.

    Returns:
        argparse.Namespace with all training/evaluation hyperparameters.
    """
    parser = argparse.ArgumentParser(description='Go LT-NCF')
    parser.add_argument('--bpr_batch', type=int, default=2048, help='the batch size for bpr loss training procedure')
    parser.add_argument('--recdim', type=int, default=64, help='the embedding size of LT-NCF')
    parser.add_argument('--layer', type=int, default=3, help='the layer num of LT-NCF')
    parser.add_argument('--lr', type=float, default=0.001, help='the learning rate')
    parser.add_argument('--lr_time', type=float, default=0.0001, help='the learning rate')
    parser.add_argument('--decay', type=float, default=0.0001, help='the weight decay for l2 normalizaton')
    parser.add_argument('--dropout', type=int, default=0, help='using the dropout or not')
    parser.add_argument('--keepprob', type=float, default=0.6, help='the batch size for bpr loss training procedure')
    parser.add_argument('--a_fold', type=int, default=100, help='the fold num used to split large adj matrix, like gowalla')
    parser.add_argument('--testbatch', type=int, default=100, help='the batch size of users for testing')
    parser.add_argument('--dataset', type=str, default='gowalla', help='available datasets: [lastfm, gowalla, yelp2018, amazon-book]')
    parser.add_argument('--path', type=str, default='./checkpoints', help='path to save weights')
    parser.add_argument('--topks', nargs='?', default='[20]', help=' test list, e.g. [10,20,30,40,50]')
    parser.add_argument('--tensorboard', type=int, default=1, help='enable tensorboard')
    parser.add_argument('--comment', type=str, default='lt-ncf')
    parser.add_argument('--load', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--multicore', type=int, default=0, help='whether we use multiprocessing or not in test')
    parser.add_argument('--pretrain', type=int, default=0, help='whether we use pretrained weight or not')
    parser.add_argument('--seed', type=int, default=2020, help='random seed')
    parser.add_argument('--model', type=str, default='ltocf', help='rec-model, support [mf, lgn, ltocf, ltocf2, ltocf1]')
    # Default was the string '4'; argparse coerced it via type=int, but an
    # actual int is clearer and avoids relying on that behavior.
    parser.add_argument('--timesplit', type=int, default=4, help='split time e.g. timesplit=4 -> #T=3, timesplit=3 -> #T=2')
    parser.add_argument('--gpuid', type=int, default=0, help='Please give a value for gpu id')
    parser.add_argument('--solver', type=str, default='euler', help='ode solver: [dopri5, euler, rk4, adaptive_heun, bosh3, explicit_adams, implicit_adams]')
    # SECURITY NOTE: type=eval executes arbitrary expressions from the command
    # line; kept for backward compatibility, but a safer bool parser is advised.
    parser.add_argument('--adjoint', type=eval, default=False, choices=[True, False])
    parser.add_argument('--learnable_time', type=eval, default=False, choices=[True, False])
    parser.add_argument('--dual_res', type=eval, default=False, choices=[True, False])
    parser.add_argument('--parallel', type=eval, default=False, choices=[True, False])
    parser.add_argument('--rtol', type=float, default=1e-07, help='rtol')
    parser.add_argument('--atol', type=float, default=1e-09, help='atol')
    parser.add_argument('--pretrained_file', type=str, default='ltocf')
    parser.add_argument('--K', type=float, default=4, help='final integral time K')
    return parser.parse_args()
def _check_is_aligned(df, id_col, dt_col):
res = (len(set(df.groupby(id_col).apply((lambda df: hash(str(df[dt_col].values)))))) == 1)
return res |
class MLP_G(nn.Module):
    """MLP generator: Linear -> BatchNorm1d -> activation per hidden layer,
    followed by a final Linear projection to `noutput`.

    Args:
        ninput: input feature dimension.
        noutput: output feature dimension.
        layers: hidden sizes as a dash-separated string, e.g. '300-300'.
        activation: activation module shared across hidden layers.
        gpu: kept for interface compatibility (unused here).
    """

    def __init__(self, ninput, noutput, layers, activation=nn.ReLU(), gpu=True):
        super(MLP_G, self).__init__()
        self.ninput = ninput
        self.noutput = noutput
        layer_sizes = ([ninput] + [int(x) for x in layers.split('-')])
        self.layers = []
        for i in range((len(layer_sizes) - 1)):
            layer = nn.Linear(layer_sizes[i], layer_sizes[(i + 1)])
            self.layers.append(layer)
            self.add_module(('layer' + str((i + 1))), layer)
            bn = nn.BatchNorm1d(layer_sizes[(i + 1)], eps=1e-05, momentum=0.1)
            self.layers.append(bn)
            self.add_module(('bn' + str((i + 1))), bn)
            self.layers.append(activation)
            self.add_module(('activation' + str((i + 1))), activation)
        layer = nn.Linear(layer_sizes[(- 1)], noutput)
        self.layers.append(layer)
        # NOTE: the module name index uses len(self.layers) (kept as-is so
        # existing state_dict keys remain valid).
        self.add_module(('layer' + str(len(self.layers))), layer)
        self.init_weights()

    def forward(self, x):
        """Apply all layers (linear/BN/activation) in sequence."""
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
        return x

    def init_weights(self):
        """Initialize Linear/BN weights to N(0, 0.02) and biases to zero.

        Layers without `weight`/`bias` attributes (e.g. activations) are
        skipped; the previous bare `except:` is narrowed to AttributeError so
        real errors are no longer silently swallowed.
        """
        init_std = 0.02
        for layer in self.layers:
            try:
                layer.weight.data.normal_(0, init_std)
                layer.bias.data.fill_(0)
            except AttributeError:
                pass
def get_time_gap(s, e):
    """Return the elapsed time between two unix timestamps as a readable string."""
    start = datetime.datetime.fromtimestamp(s)
    end = datetime.datetime.fromtimestamp(e)
    return str(end - start)
def test_pointers(msg):
    """Exercise the bound module `m`'s void*, null-char*, and unique_ptr returns."""
    # Snapshot live UserType instances so we can assert no leaks afterwards.
    living_before = ConstructorStats.get(UserType).alive()
    # 4660 == 0x1234: the module round-trips an opaque void* payload.
    assert (m.get_void_ptr_value(m.return_void_ptr()) == 4660)
    # A UserType temporary can also be passed where a void* is expected.
    assert m.get_void_ptr_value(UserType())
    # The temporary above must have been destroyed: no instance leak.
    assert (ConstructorStats.get(UserType).alive() == living_before)
    # Arbitrary Python objects (a list) must be rejected for the capsule arg.
    with pytest.raises(TypeError) as excinfo:
        m.get_void_ptr_value([1, 2, 3])
    assert (msg(excinfo.value) == '\n    get_void_ptr_value(): incompatible function arguments. The following argument types are supported:\n        1. (arg0: capsule) -> int\n\n    Invoked with: [1, 2, 3]\n    ')
    # A null const char* comes back as None, but passing None onward is valid.
    assert (m.return_null_str() is None)
    assert (m.get_null_str_value(m.return_null_str()) is not None)
    # unique_ptr return: ownership transferred to Python, repr names the type.
    ptr = m.return_unique_ptr()
    assert ('StringList' in repr(ptr))
    assert (m.print_opaque_list(ptr) == 'Opaque list: [some value]')
class Product(MergeOperator):
    """Merge operator that combines encodings by element-wise product."""

    def __call__(self, base_encoding, side_encoding, additional_encodings=None):
        """Return base_encoding * side_encoding (* each additional encoding).

        `additional_encodings` now defaults to None instead of a shared
        mutable default list (a classic Python pitfall); None is treated
        as "no extra encodings".
        """
        merged_encoding = (base_encoding * side_encoding)
        for add_encoding in (additional_encodings or []):
            merged_encoding *= add_encoding
        return merged_encoding
class TrainEpocher(Epocher):
    """Semi-supervised training epocher.

    Each batch computes a supervised loss on labeled data plus a weighted
    consistency regularization between an unlabeled batch and an affinely
    transformed view of it (the transform is replayed on the logits via a
    shared random seed).
    """

    def __init__(self, *, model: Union[(Model, nn.Module)], optimizer: T_optim, labeled_loader: T_loader, unlabeled_loader: T_loader, sup_criterion: T_loss, num_batches: int, cur_epoch=0, device='cpu', train_with_two_stage: bool=False, disable_bn_track_for_unlabeled_data: bool=False, **kwargs) -> None:
        super().__init__(model, num_batches=num_batches, cur_epoch=cur_epoch, device=device)
        self._optimizer = optimizer
        self._labeled_loader = labeled_loader
        self._unlabeled_loader = unlabeled_loader
        self._sup_criterion = sup_criterion
        # Random flip applied identically to images and (later) to logits.
        self._affine_transformer = TensorRandomFlip(axis=[1, 2], threshold=0.8)
        # Two-stage: forward labeled and unlabeled batches in separate passes.
        self.train_with_two_stage = train_with_two_stage
        logger.opt(depth=1).trace('{} set to be using {} stage training', self.__class__.__name__, ('two' if self.train_with_two_stage else 'single'))
        self._disable_bn = disable_bn_track_for_unlabeled_data
        if self._disable_bn:
            logger.debug('{} set to disable bn tracking', self.__class__.__name__)

    def _init(self, *, reg_weight: float, **kwargs):
        # Weight on the regularization term; may be a WeightScheduler.
        self._reg_weight = reg_weight

    def _assertion(self):
        """Both loaders' datasets must use non-free (replayable) transforms."""
        labeled_set = get_dataset(self._labeled_loader)
        labeled_transform = labeled_set.transforms
        assert (labeled_transform._total_freedom is False)
        if (self._unlabeled_loader is not None):
            unlabeled_set = get_dataset(self._unlabeled_loader)
            unlabeled_transform = unlabeled_set.transforms
            assert (unlabeled_transform._total_freedom is False)

    def _configure_meters(self, meters: MeterInterface) -> MeterInterface:
        C = self.num_classes
        # Report dice for all foreground classes (skip background class 0).
        report_axis = list(range(1, C))
        meters.register_meter('lr', AverageValueListMeter())
        meters.register_meter('reg_weight', AverageValueMeter())
        meters.register_meter('sup_loss', AverageValueMeter())
        meters.register_meter('reg_loss', AverageValueMeter())
        meters.register_meter('sup_dice', UniversalDice(C, report_axises=report_axis))
        return meters

    def _run(self, *args, **kwargs):
        self.meters['lr'].add(get_lrs_from_optimizer(self._optimizer))
        assert self._model.training, self._model.training
        return self._run_semi(*args, **kwargs)

    def _set_model_state(self, model) -> None:
        model.train()

    def _run_semi(self, *args, **kwargs) -> EpochResultDict:
        """One epoch over zipped labeled/unlabeled loaders."""
        for (self.cur_batch_num, labeled_data, unlabeled_data) in zip(self._indicator, self._labeled_loader, self._unlabeled_loader):
            # Fresh per-batch seed so image and logit flips can be replayed
            # identically. The original upper bound was `int(.0)` == 0, which
            # made the seed constant; restored to a wide range.
            seed = random.randint(0, int(1e7))
            ((labeled_image, _), labeled_target, labeled_filename, _, label_group) = self._unzip_data(labeled_data, self._device)
            ((unlabeled_image, unlabeled_image_cf), _, unlabeled_filename, unl_partition, unl_group) = self._unzip_data(unlabeled_data, self._device)
            with FixRandomSeed(seed):
                unlabeled_image_tf = torch.stack([self._affine_transformer(x) for x in unlabeled_image_cf], dim=0)
            assert (unlabeled_image_tf.shape == unlabeled_image.shape), (unlabeled_image_tf.shape, unlabeled_image.shape)
            (label_logits, unlabel_logits, unlabel_tf_logits) = self.forward_pass(labeled_image=labeled_image, unlabeled_image=unlabeled_image, unlabeled_image_tf=unlabeled_image_tf)
            # Replay the same flip on the unlabeled logits for consistency.
            with FixRandomSeed(seed):
                unlabel_logits_tf = torch.stack([self._affine_transformer(x) for x in unlabel_logits], dim=0)
            assert (unlabel_logits_tf.shape == unlabel_tf_logits.shape), (unlabel_logits_tf.shape, unlabel_tf_logits.shape)
            onehot_target = class2one_hot(labeled_target.squeeze(1), self.num_classes)
            sup_loss = self._sup_criterion(label_logits.softmax(1), onehot_target)
            reg_loss = self.regularization(unlabeled_tf_logits=unlabel_tf_logits, unlabeled_logits_tf=unlabel_logits_tf, seed=seed, unlabeled_image=unlabeled_image, unlabeled_image_tf=unlabeled_image_tf, label_group=unl_group, partition_group=unl_partition, unlabeled_filename=unlabeled_filename, labeled_filename=labeled_filename)
            _reg_weight = self._reg_weight
            if isinstance(self._reg_weight, WeightScheduler):
                _reg_weight = self._reg_weight.value
            self.meters['reg_weight'].add(_reg_weight)
            total_loss = (sup_loss + (_reg_weight * reg_loss))
            self._optimizer.zero_grad()
            total_loss.backward()
            self._optimizer.step()
            if self.on_master():
                with torch.no_grad():
                    self.meters['sup_loss'].add(sup_loss.item())
                    self.meters['sup_dice'].add(label_logits.max(1)[1], labeled_target.squeeze(1), group_name=label_group)
                    self.meters['reg_loss'].add(reg_loss.item())
                    report_dict = self.meters.tracking_status()
                    self._indicator.set_postfix_dict(report_dict)
        report_dict = self.meters.tracking_status(final=True)
        return report_dict

    def _forward_pass(self, labeled_image, unlabeled_image, unlabeled_image_tf):
        (n_l, n_unl) = (len(labeled_image), len(unlabeled_image))
        if (not self.train_with_two_stage):
            # Single pass over the concatenation of all three image batches.
            predict_logits = self._model(torch.cat([labeled_image, unlabeled_image, unlabeled_image_tf], dim=0))
            (label_logits, unlabel_logits, unlabel_tf_logits) = torch.split(predict_logits, [n_l, n_unl, n_unl], dim=0)
        else:
            label_logits = self._model(labeled_image)
            # Optionally freeze BN running stats while forwarding unlabeled data.
            bn_context = (_disable_tracking_bn_stats if self._disable_bn else nullcontext)
            with bn_context(self._model):
                (unlabel_logits, unlabel_tf_logits) = torch.split(self._model(torch.cat([unlabeled_image, unlabeled_image_tf], dim=0)), [n_unl, n_unl], dim=0)
        return (label_logits, unlabel_logits, unlabel_tf_logits)

    @staticmethod
    def _unzip_data(data, device):
        # Static method: callers invoke self._unzip_data(data, self._device).
        # Without @staticmethod (missing in the original), `self` would have
        # been bound to `data`, shifting every argument.
        ((image, target), (image_ct, target_ct), filename, partition, group) = preprocess_input_with_twice_transformation(data, device)
        return ((image, image_ct), target, filename, partition, group)

    def regularization(self, **kwargs):
        return self._regularization(**kwargs)

    def _regularization(self, **kwargs):
        # Base class contributes no regularization; subclasses override.
        return torch.tensor(0, dtype=torch.float, device=self._device)
def register_cdod_pascal_voc(name, dirname, split, year, class_names):
    """Register a cross-domain Pascal-VOC-style dataset with the catalogs."""
    def _loader():
        return load_cdod_voc_instances(dirname, split, class_names)
    DatasetCatalog.register(name, _loader)
    metadata = MetadataCatalog.get(name)
    metadata.set(thing_classes=list(class_names), dirname=dirname, year=year, split=split)
class RepVGGOur(nn.Module):
    """Thin wrapper around a single RepVGGBlock.

    The activation is SiLU when 'swish' appears in `additional_branches`,
    otherwise ReLU.
    """
    expansion: int = 1

    def __init__(self, inplanes, planes, stride=1, groups=1, kernel_size=3, se_block=True, additional_branches=None):
        super().__init__()
        # Default changed from a shared mutable list `[]` to None (same behavior).
        branches = additional_branches if additional_branches is not None else []
        activation = nn.SiLU() if ('swish' in branches) else nn.ReLU()
        self.block = RepVGGBlock(inplanes, planes, kernel_size, stride, padding=1, groups=groups, avg_pool=True, se_block=se_block, activation=activation)

    def forward(self, x):
        """Apply the wrapped RepVGG block."""
        return self.block(x)
class CustomSACPolicy(SACPolicy):
    """SAC policy fixed to a 256x256 MLP feature extractor."""

    def __init__(self, *args, **kwargs):
        super(CustomSACPolicy, self).__init__(*args, layers=[256, 256], feature_extraction='mlp', **kwargs)
def make_args_list(n_trials_from, n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps, data_dir, task, holdout_fraction, single_test_envs, hparams):
    """Enumerate one training-args dict per (trial, dataset, algorithm, test-env set, hparam seed)."""
    args_list = []
    for trial_seed in range(n_trials_from, n_trials_from + n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                n_envs = datasets.num_environments(dataset)
                if single_test_envs:
                    env_choices = [[i] for i in range(n_envs)]
                else:
                    env_choices = all_test_env_combinations(n_envs)
                for test_envs in env_choices:
                    for hparams_seed in range(n_hparams_from, n_hparams):
                        train_args = {
                            'dataset': dataset,
                            'algorithm': algorithm,
                            'test_envs': test_envs,
                            'holdout_fraction': holdout_fraction,
                            'hparams_seed': hparams_seed,
                            'data_dir': data_dir,
                            'task': task,
                            'trial_seed': trial_seed,
                            'seed': misc.seed_hash(dataset, algorithm, test_envs, hparams_seed, trial_seed),
                        }
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        args_list.append(train_args)
    return args_list
def _reset_library_root_logger() -> None:
    """Detach and drop the library's default logging handler, if installed."""
    global _default_handler
    with _lock:
        if not _default_handler:
            # Nothing installed: nothing to undo.
            return
        root_logger = _get_library_root_logger()
        root_logger.removeHandler(_default_handler)
        root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def convert_model(model, args):
    """Recursively replace pruned LGC/SFR submodules with their condensed variants."""
    for child_name in model._modules:
        child = model._modules[child_name]
        if not is_pruned(child):
            # Not pruned at this level: recurse into the submodule tree.
            convert_model(child, args)
            continue
        layer_kind = get_layer_info(child)
        if layer_kind == 'LGC':
            model._modules[child_name] = CondensingLGC(child)
        elif layer_kind == 'SFR':
            model._modules[child_name] = CondensingSFR(child)
        del child
class YolosModel(metaclass=DummyObject):
    """Placeholder emitted when torch is not installed.

    Instantiating it raises an informative backend error via
    `requires_backends` instead of a bare NameError/ImportError.
    """
    # Backends that must be available for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class PredictionLossGame(CooperativeGame):
    """Cooperative game valued by the negative prediction loss of a model extension.

    Players are features (or feature groups); evaluating a coalition S applies
    the extension with S as the feature mask and scores it against the label.
    """

    def __init__(self, extension, sample, label, loss, groups=None):
        # Promote a single sample / scalar label to batch form.
        if sample.ndim == 1:
            sample = sample[np.newaxis]
        if np.isscalar(label):
            label = np.array([label])
        # Cross-entropy needs integer class indices for 1-D (or single-column) targets.
        if loss is utils.crossentropyloss:
            if (label.ndim <= 1) or (label.shape[1] == 1):
                if np.issubdtype(label.dtype, np.floating):
                    label = label.astype(int)
        self.extension = extension
        self.sample = sample
        self.label = label
        self.loss = loss
        num_features = sample.shape[1]
        if groups is None:
            # One player per feature.
            self.players = num_features
            self.groups_matrix = None
        else:
            # Groups must exactly partition the feature indices.
            flat_inds = []
            for group in groups:
                flat_inds += list(group)
            assert np.all(np.sort(flat_inds) == np.arange(num_features))
            self.players = len(groups)
            self.groups_matrix = np.zeros((len(groups), num_features), dtype=bool)
            for row, group in enumerate(groups):
                self.groups_matrix[row, group] = True
        # Caches of sample/label repeated to the last-seen coalition batch size.
        self.sample_repeat = sample
        self.label_repeat = label

    def __call__(self, S):
        # Refresh the repeated caches only when the batch size of S changes.
        if len(S) != len(self.sample_repeat):
            self.sample_repeat = self.sample.repeat(len(S), 0)
            self.label_repeat = self.label.repeat(len(S), 0)
        batch_inputs = self.sample_repeat
        batch_labels = self.label_repeat
        if self.groups_matrix is not None:
            # Expand group-level coalitions to per-feature masks.
            S = np.matmul(S, self.groups_matrix)
        return -self.loss(self.extension(batch_inputs, S), batch_labels)
def write_sentences(write_path, premises, hypotheses, append=False):
    """Write premises followed by hypotheses to `write_path`, one per line.

    Args:
        write_path: destination file path.
        premises: iterable of premise strings (written first).
        hypotheses: iterable of hypothesis strings (written after premises).
        append: when True, append to the file instead of truncating it.

    The original duplicated the whole body across the append/overwrite
    branches; only the open mode actually differed.
    """
    print('Writing to {}\n'.format(write_path))
    mode = 'a' if append else 'w'
    with open(write_path, mode) as f:
        for p in premises:
            f.write(p)
            f.write('\n')
        for h in hypotheses:
            f.write(h)
            f.write('\n')
def InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs):
    """Build a 3D (volumetric) variant of the InceptionV3 architecture.

    Args:
        include_top: include the final global-pool + softmax classifier.
        weights: None, 'imagenet', or a path to a weights file.
            NOTE(review): 'imagenet' fetches weights for the original 2D
            InceptionV3 (WEIGHTS_PATH); loading them into this 3D graph may
            fail — confirm before relying on it.
        input_tensor: optional Keras tensor to use as the model input.
        input_shape: input shape tuple when `input_tensor` is not given.
        pooling: optional 'avg'/'max' global pooling when include_top=False.
        classes: number of output classes (only with include_top=True).

    Returns:
        A Keras Model instance.

    Raises:
        ValueError: on an invalid `weights` value, or `classes` != 1000 with
            imagenet weights and include_top=True.
    """
    global backend, layers, models, keras_utils
    (backend, layers, models, keras_utils) = get_submodules_from_kwargs(kwargs)
    if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
        raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000')
    if (input_tensor is None):
        img_input = layers.Input(shape=input_shape)
    elif (not backend.is_keras_tensor(input_tensor)):
        img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # 5D input (batch, [channels,] d, h, w [, channels]): channel axis is 1 for
    # channels_first, 4 for channels_last.
    if (backend.image_data_format() == 'channels_first'):
        channel_axis = 1
    else:
        channel_axis = 4
    # Stem.
    x = conv3d_bn(img_input, 32, 3, 3, 3, strides=(2, 2, 2), padding='same')
    x = conv3d_bn(x, 32, 3, 3, 3, padding='same')
    x = conv3d_bn(x, 64, 3, 3, 3, padding='same')
    x = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)
    x = conv3d_bn(x, 80, 1, 1, 1, padding='same')
    x = conv3d_bn(x, 192, 3, 3, 3, padding='same')
    x = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)
    # mixed0: 1x1 / 5x5 / double-3x3 / pool branches.
    branch1x1 = conv3d_bn(x, 64, 1, 1, 1)
    branch5x5 = conv3d_bn(x, 48, 1, 1, 1)
    branch5x5 = conv3d_bn(branch5x5, 64, 5, 5, 5)
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 32, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed0')
    # mixed1.
    branch1x1 = conv3d_bn(x, 64, 1, 1, 1)
    branch5x5 = conv3d_bn(x, 48, 1, 1, 1)
    branch5x5 = conv3d_bn(branch5x5, 64, 5, 5, 5)
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 64, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed1')
    # mixed2.
    branch1x1 = conv3d_bn(x, 64, 1, 1, 1)
    branch5x5 = conv3d_bn(x, 48, 1, 1, 1)
    branch5x5 = conv3d_bn(branch5x5, 64, 5, 5, 5)
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 64, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed2')
    # mixed3: strided reduction.
    branch3x3 = conv3d_bn(x, 384, 3, 3, 3, strides=(2, 2, 2), padding='same')
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3, strides=(2, 2, 2), padding='same')
    branch_pool = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
    # mixed4: factorized 7x7 branches.
    branch1x1 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7 = conv3d_bn(x, 128, 1, 1, 1)
    branch7x7 = conv3d_bn(branch7x7, 128, 1, 7, 1)
    branch7x7 = conv3d_bn(branch7x7, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(x, 128, 1, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 128, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 128, 1, 7, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 128, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
    # Fixed: this was AveragePooling2D with a 3-tuple pool size — invalid for
    # this all-3D network; every sibling branch uses AveragePooling3D.
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed4')
    # mixed5, mixed6.
    for i in range(2):
        branch1x1 = conv3d_bn(x, 192, 1, 1, 1)
        branch7x7 = conv3d_bn(x, 160, 1, 1, 1)
        branch7x7 = conv3d_bn(branch7x7, 160, 1, 7, 1)
        branch7x7 = conv3d_bn(branch7x7, 192, 7, 1, 1)
        branch7x7dbl = conv3d_bn(x, 160, 1, 1, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 160, 7, 1, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 160, 1, 7, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 160, 7, 1, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
        branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
        branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
        x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name=('mixed' + str((5 + i))))
    # mixed7.
    branch1x1 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7 = conv3d_bn(branch7x7, 192, 1, 7, 1)
    branch7x7 = conv3d_bn(branch7x7, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed7')
    # mixed8: strided reduction.
    branch3x3 = conv3d_bn(x, 192, 1, 1, 1)
    branch3x3 = conv3d_bn(branch3x3, 320, 3, 3, 3, strides=(2, 2, 2), padding='same')
    branch7x7x3 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7x3 = conv3d_bn(branch7x7x3, 192, 1, 7, 1)
    branch7x7x3 = conv3d_bn(branch7x7x3, 192, 7, 1, 1)
    branch7x7x3 = conv3d_bn(branch7x7x3, 192, 3, 3, 3, strides=(2, 2, 2), padding='same')
    branch_pool = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
    # mixed9, mixed10: expanded filter-bank blocks.
    for i in range(2):
        branch1x1 = conv3d_bn(x, 320, 1, 1, 1)
        branch3x3 = conv3d_bn(x, 384, 1, 1, 1)
        branch3x3_1 = conv3d_bn(branch3x3, 384, 1, 3, 1)
        branch3x3_2 = conv3d_bn(branch3x3, 384, 3, 1, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2], axis=channel_axis, name=('mixed9_' + str(i)))
        branch3x3dbl = conv3d_bn(x, 448, 1, 1, 1)
        branch3x3dbl = conv3d_bn(branch3x3dbl, 384, 3, 3, 3)
        branch3x3dbl_1 = conv3d_bn(branch3x3dbl, 384, 1, 3, 1)
        branch3x3dbl_2 = conv3d_bn(branch3x3dbl, 384, 3, 1, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
        branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
        branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
        x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name=('mixed' + str((9 + i))))
    # Head.
    if include_top:
        x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='predictions')(x)
    elif (pooling == 'avg'):
        x = layers.GlobalAveragePooling3D()(x)
    elif (pooling == 'max'):
        x = layers.GlobalMaxPooling3D()(x)
    if (input_tensor is not None):
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = models.Model(inputs, x, name='inception_v3')
    # Weight loading.
    if (weights == 'imagenet'):
        if include_top:
            weights_path = keras_utils.get_file('inception_v3_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models', file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = keras_utils.get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path)
    elif (weights is not None):
        model.load_weights(weights)
    return model
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule with a polynomial warmup phase.

    For the first `warmup_steps` steps the rate ramps from 0 up to
    `initial_learning_rate` following `(step / warmup_steps) ** power`;
    afterwards it delegates to `decay_schedule_fn(step - warmup_steps)`.
    """

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float=1.0, name: str=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp') as name:
            step_f = tf.cast(step, tf.float32)
            warmup_f = tf.cast(self.warmup_steps, tf.float32)
            fraction_done = step_f / warmup_f
            warmup_lr = self.initial_learning_rate * tf.math.pow(fraction_done, self.power)
            # During warmup use the ramp value, afterwards the wrapped schedule
            # (shifted so it starts from step 0 at the end of warmup).
            return tf.cond(
                step_f < warmup_f,
                lambda: warmup_lr,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        """Return a config dict so the schedule can be re-instantiated."""
        return {
            'initial_learning_rate': self.initial_learning_rate,
            'decay_schedule_fn': self.decay_schedule_fn,
            'warmup_steps': self.warmup_steps,
            'power': self.power,
            'name': self.name,
        }
class ContextBlock(nn.Module):
    """Global Context (GC) block: pools a single global context vector from
    the feature map and fuses it back via channel-wise add and/or multiply
    bottleneck transforms (GCNet-style).

    Args:
        inplanes: number of input channels.
        ratio: bottleneck ratio; transform hidden width is int(inplanes * ratio).
        pooling_type: 'att' for attention pooling (1x1 conv + softmax over
            spatial positions) or 'avg' for global average pooling.
        fusion_types: subset of ('channel_add', 'channel_mul') selecting
            which fusion branches are built.
    """

    def __init__(self, inplanes, ratio, pooling_type='att', fusion_types=('channel_add',)):
        super(ContextBlock, self).__init__()
        assert (pooling_type in ['avg', 'att'])
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([(f in valid_fusion_types) for f in fusion_types])
        assert (len(fusion_types) > 0), 'at least one fusion should be used'
        self.inplanes = inplanes
        self.ratio = ratio
        self.planes = int((inplanes * ratio))
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if (pooling_type == 'att'):
            # 1x1 conv produces one attention logit per spatial position.
            self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        if ('channel_add' in fusion_types):
            # Bottleneck transform: 1x1 conv -> LayerNorm -> ReLU -> 1x1 conv.
            self.channel_add_conv = nn.Sequential(nn.Conv2d(self.inplanes, self.planes, kernel_size=1), nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
        else:
            self.channel_add_conv = None
        if ('channel_mul' in fusion_types):
            self.channel_mul_conv = nn.Sequential(nn.Conv2d(self.inplanes, self.planes, kernel_size=1), nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the attention mask conv and apply `last_zero_init` to
        each fusion branch (helpers defined elsewhere in this project)."""
        if (self.pooling_type == 'att'):
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True
        if (self.channel_add_conv is not None):
            last_zero_init(self.channel_add_conv)
        if (self.channel_mul_conv is not None):
            last_zero_init(self.channel_mul_conv)

    def spatial_pool(self, x):
        """Pool x of shape (N, C, H, W) into a context tensor (N, C, 1, 1)."""
        (batch, channel, height, width) = x.size()
        if (self.pooling_type == 'att'):
            input_x = x
            # (N, 1, C, H*W): values to be aggregated.
            input_x = input_x.view(batch, channel, (height * width))
            input_x = input_x.unsqueeze(1)
            # (N, 1, H*W, 1): softmax-normalized attention over positions.
            context_mask = self.conv_mask(x)
            context_mask = context_mask.view(batch, 1, (height * width))
            context_mask = self.softmax(context_mask)
            context_mask = context_mask.unsqueeze((- 1))
            # Attention-weighted sum over all spatial positions.
            context = torch.matmul(input_x, context_mask)
            context = context.view(batch, channel, 1, 1)
        else:
            context = self.avg_pool(x)
        return context

    def forward(self, x):
        """Fuse the pooled global context into x (multiply branch first,
        then add branch); output has the same shape as x."""
        context = self.spatial_pool(x)
        out = x
        if (self.channel_mul_conv is not None):
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = (out * channel_mul_term)
        if (self.channel_add_conv is not None):
            channel_add_term = self.channel_add_conv(context)
            out = (out + channel_add_term)
        return out
class PlasmaArray():
    """Wrapper that moves a numpy array into an Arrow Plasma shared-memory
    store when the object is pickled, so forked workers can map the data
    instead of receiving a serialized copy.

    Small arrays are pickled as-is (`self.disable`); large ones are served
    by a spawned `plasma_store` subprocess.
    """

    # Arrays below this size (128 MB) are cheaper to pickle directly than
    # to serve through a plasma store.
    DISABLE_THRESHOLD_BYTES = 134217728

    def __init__(self, array):
        super().__init__()
        self.array = array
        # Bug fix: the right-hand operand of `<` was missing (SyntaxError);
        # restored the size threshold as a named constant.
        self.disable = (array.nbytes < self.DISABLE_THRESHOLD_BYTES)
        self.object_id = None
        self.path = None
        self._client = None
        self._server = None
        self._server_tmp = None
        self._plasma = None

    @property
    def plasma(self):
        # Bug fix: `plasma` (and `client` below) are read as attributes
        # throughout this class (`self.plasma is None`, `self.client.put`),
        # so they must be properties, not plain methods.
        if (self._plasma is None) and (not self.disable):
            self._plasma = plasma
        return self._plasma

    def start_server(self):
        """Spawn a plasma_store subprocess sized to hold this array."""
        if (self.plasma is None) or (self._server is not None):
            return
        assert self.object_id is None
        assert self.path is None
        self._server_tmp = tempfile.NamedTemporaryFile()
        self.path = self._server_tmp.name
        # 5% headroom over the raw array size for store metadata.
        self._server = subprocess.Popen(['plasma_store', '-m', str(int(1.05 * self.array.nbytes)), '-s', self.path])

    @property
    def client(self):
        # Lazily connect to the plasma store serving `self.path`.
        if self._client is None:
            assert self.path is not None
            self._client = self.plasma.connect(self.path, num_retries=200)
        return self._client

    def __getstate__(self):
        """On pickle, push the array into the plasma store and strip
        process-local handles from the state."""
        if self.plasma is None:
            return self.__dict__
        if self.object_id is None:
            self.start_server()
            self.object_id = self.client.put(self.array)
        state = self.__dict__.copy()
        del state['array']
        state['_client'] = None
        state['_server'] = None
        state['_server_tmp'] = None
        state['_plasma'] = None
        return state

    def __setstate__(self, state):
        """On unpickle, re-fetch the array from the plasma store."""
        self.__dict__.update(state)
        if self.plasma is None:
            return
        self.array = self.client.get(self.object_id)

    def __del__(self):
        # Only the process that started the server tears it down.
        if self._server is not None:
            self._server.kill()
            self._server = None
            self._server_tmp.close()
            self._server_tmp = None
class DebertaV2ForMaskedLM(metaclass=DummyObject):
    """Placeholder emitted when PyTorch is not installed; any attempt to
    instantiate it raises via `requires_backends`."""
    # Backends that must be available before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative error pointing at the missing 'torch' backend.
        requires_backends(self, ['torch'])
def getPathGS(algo, inputEvents, tthread, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity):
    """Build the latency-file path for a GrepSum experiment configuration
    rooted at the module-level FILE_FOLER directory."""
    relative = (
        f'/GrepSum/{algo}/threads = {tthread}/totalEvents = {inputEvents}/'
        f'{NUM_ITEMS}_{NUM_ACCESS}_{key_skewness}_{overlap_ratio}_{abort_ratio}_{isCyclic}_{complexity}.latency'
    )
    return FILE_FOLER + relative
class Precision_grt():
    """Precision@length metric counted only for events whose session
    position is >= `threshold` ("grt" = greater-than position filter).
    """

    def __init__(self, length=20, threshold=5):
        # length: recommendation-list cut-off (precision@length).
        # threshold: minimum session position before an event is scored.
        self.length = length
        self.threshold = threshold

    def init(self, train):
        """No statistics from the training data are needed."""
        return

    def reset(self):
        """Reset hit/test counters before a new evaluation run."""
        self.test = 0
        self.hit = 0

    def add(self, result, next_item, for_item=0, session=0, pop_bin=None, position=None):
        """Score one recommendation list (pandas Series of scores indexed by
        item id, sorted descending) against the true next item(s).

        Bug fix: `position` defaults to None and `None < int` raises
        TypeError on Python 3 (add_batch passes no position); a missing
        position now means "do not filter".
        """
        if (position is not None) and (position < self.threshold):
            return
        self.test += self.length
        self.hit += len(set(next_item) & set(result[:self.length].index))

    def add_multiple(self, result, next_items, for_item=0, session=0, position=None):
        """Score one list against multiple ground-truth items (normalized
        by the list length, counted as a single test)."""
        if (position is not None) and (position < self.threshold):
            return
        self.test += 1
        self.hit += (len(set(next_items) & set(result[:self.length].index)) / self.length)

    def add_batch(self, result, next_item):
        """Score a DataFrame with one score column per sample."""
        i = 0
        # Bug fix: DataFrame.iteritems() was removed in pandas 2.0;
        # DataFrame.items() is the drop-in replacement.
        for (part, series) in result.items():
            result.sort_values(part, ascending=False, inplace=True)
            self.add(series, next_item[i])
            i += 1

    def result(self):
        """Return (metric label, precision value = hit / test)."""
        return ((((('' + str(self.length)) + '>') + str(self.threshold)) + ': '), (self.hit / self.test))
def min_gt(seq: np.ndarray, val: Any) -> Any:
    """Return the smallest element of `seq` that is >= `val`.

    Falls back to `np.inf` when no element qualifies (including for an
    empty sequence). Note: despite the name, the comparison used is `>=`.
    """
    candidates = [item for item in seq if item >= val]
    return min(candidates) if candidates else np.inf
def C2D_Axial_ResNet50(**kwargs):
    """Build a ResNet-50 3D variant with no C3D/non-local blocks and axial
    self-attention inserted at the listed block indices of stages 2 and 3."""
    c3d_layers = [[], [], [], []]       # no temporal-conv (C3D) blocks
    nonlocal_layers = [[], [], [], []]  # no non-local blocks
    selfattn_layers = [[], [2, 3], [3, 4, 5], []]  # axial attention positions
    return ResNet503D(AP3D.APP3DC, c3d_layers, nonlocal_layers, selfattn_layers, **kwargs)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: builds DFANet, sets up (distributed) GPU
    execution, optionally resumes from a checkpoint, then evaluates on the
    Cityscapes train/val splits each epoch.

    Args:
        gpu: local GPU index for this process (None for CPU/DataParallel).
        ngpus_per_node: GPUs on this node; used to shard batch size and
            workers under DistributedDataParallel.
        args: parsed command-line namespace.
    """
    global best_mIoU
    args.gpu = gpu
    if args.gpu is not None:
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if (args.dist_url == 'env://') and (args.rank == (- 1)):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = (args.rank * ngpus_per_node) + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    if args.pretrained:
        print("=> using pre-trained model 'DFANet'")
        model = DFANet(pretrained=True, pretrained_backbone=False)
    else:
        print("=> creating model 'DFANet'")
        model = DFANet(pretrained=False, pretrained_backbone=True)
    if args.distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Shard batch size and workers across this node's processes.
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = torch.nn.DataParallel(model).cuda()
    # Class 19 is treated as the Cityscapes "ignore" label.
    criterion = nn.CrossEntropyLoss(ignore_index=19).cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    metric = IoU(20, ignore_index=19)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_mIoU = checkpoint['best_mIoU']
            if args.gpu is not None:
                # Move the stored best-metric tensor to this process's GPU.
                best_mIoU = best_mIoU.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    train_dataset = Cityscapes(args.data, split='train', mode='fine', target_type='semantic', transform=joint_transforms.Compose([joint_transforms.RandomHorizontalFlip(), joint_transforms.RandomSized(1024), joint_transforms.ToTensor(), joint_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(Cityscapes(args.data, split='val', mode='fine', target_type='semantic', transform=joint_transforms.Compose([joint_transforms.RandomHorizontalFlip(), joint_transforms.RandomSized(1024), joint_transforms.ToTensor(), joint_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        # Bug fix: this call was missing the `metric` argument that the
        # other validate(...) calls below pass.
        validate(val_loader, model, criterion, metric, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        # NOTE(review): only evaluation runs in this loop — no training
        # step is invoked per epoch; confirm this is intentional.
        (train_mIoU, train_loss) = validate(train_loader, model, criterion, metric, args)
        (val_mIoU, val_loss) = validate(val_loader, model, criterion, metric, args)
        print('Train mIoU: {}'.format(train_mIoU))
        print('Train Loss: {}'.format(train_loss))
        print('Val mIoU: {}'.format(val_mIoU))
        # Bug fix: the loss was printed under the 'Val mIoU' label.
        print('Val Loss: {}'.format(val_loss))
def flatten(l):
    """Recursively yield the atoms of an arbitrarily nested iterable.

    Bug fix: strings and bytes are now treated as atoms. They are
    iterable, so the original recursed into them; a 1-character string
    iterates to itself, which caused infinite recursion (RecursionError)
    on any input containing a string.
    """
    for el in l:
        if hasattr(el, '__iter__') and (not isinstance(el, (str, bytes))):
            yield from flatten(el)
        else:
            yield el
def _building_block_v1(inputs, filters, training, projection_shortcut, strides, data_format, bn):
    """Standard (v1, post-activation) ResNet building block: two 3x3 convs
    with an additive shortcut and ReLU after the addition.

    Args:
        inputs: input feature tensor.
        filters: filter count for both convolutions.
        training: whether batch norm runs in training mode.
        projection_shortcut: callable applied to `inputs` to match the
            shortcut's shape/stride, or None for an identity shortcut.
        strides: stride of the first convolution (downsamples when > 1).
        data_format: 'channels_first' or 'channels_last'.
        bn: if True, apply batch normalization after each convolution
            (and to the projected shortcut).

    Returns:
        The block's output tensor.
    """
    shortcut = inputs
    if (projection_shortcut is not None):
        shortcut = projection_shortcut(inputs)
        # Normalize the projected shortcut so both addends share statistics.
        if bn:
            shortcut = batch_norm(inputs=shortcut, training=training, data_format=data_format)
    inputs = conv2d_fixed_padding(inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format)
    if bn:
        inputs = batch_norm(inputs, training, data_format)
    inputs = tf.nn.relu(inputs)
    inputs = conv2d_fixed_padding(inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format)
    if bn:
        inputs = batch_norm(inputs, training, data_format)
    # Residual addition, then the block-final activation (v1 ordering).
    inputs += shortcut
    inputs = tf.nn.relu(inputs)
    return inputs
def _test_build_detectors(self, device):
    """Instantiate every detector config (minus EXCLUDED_FOLDERS) on `device`,
    reporting each config as its own subtest."""
    config_files = get_config_files(None, EXCLUDED_FOLDERS)
    self.assertGreater(len(config_files), 0)
    for config_file in config_files:
        with self.subTest(cfg_file=config_file):
            print('Testing {}...'.format(config_file))
            parsed = utils.load_config_from_file(config_file)
            create_model(parsed, device)
def get_encoding_dict(sentence_to_labels, original_file_path, aug_type, alpha):
    """Return a {sentence: encoding} dict, computing it once and caching it
    on disk (pickle) for this (file, aug_type, alpha) combination."""
    cache_path = get_encodings_path(original_file_path, aug_type, alpha)
    if not cache_path.exists():
        print(f'creating {cache_path}')
        # Encode every sentence with the module-level tokenizer/model.
        encodings = {
            sentence: get_encoding(sentence, tokenizer, model)
            for sentence in tqdm(sentence_to_labels.keys())
        }
        common.save_pickle(cache_path, encodings)
    return common.load_pickle(cache_path)
class TestLLaVA(unittest.TestCase):
    """Smoke tests for the LLaVA-Mistral multimodal causal LM."""

    @classmethod
    def setUpClass(cls):
        # Bug fix: unittest invokes setUpClass on the class itself, so it
        # must be a classmethod; the original plain method raised TypeError.
        cls.model = LlavaMistralForCausalLM.from_pretrained(MODEL_NAME, low_cpu_mem_usage=True)
        cls.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        cls.dummpy_input = {'input_ids': torch.tensor([[1, 1, 2, 2]]), 'labels': torch.tensor([[1, 1, 2, 2]]), 'attention_mask': torch.tensor([[1, 1, 1, 1]]), 'images': torch.randn([1, 3, 336, 336])}

    def test_forward(self):
        """A full forward pass returns a tensor loss."""
        output = self.model(**self.dummpy_input)
        self.assertTrue(isinstance(output['loss'], torch.Tensor))

    def test_input(self):
        """prepare_inputs_for_generation keeps input_ids as a tensor."""
        tmp = self.model.prepare_inputs_for_generation(**self.dummpy_input)
        self.assertTrue(isinstance(tmp['input_ids'], torch.Tensor))

    def test_init(self):
        """Vision modules/tokenizer can be initialized from a minimal config
        and images can be encoded into feature tokens."""
        class TestArgs():
            vision_tower = False
            mm_vision_select_layer = (- 2)
            mm_vision_select_feature = 'patch'
            pretrain_mm_mlp_adapter = None
            # Overrides the False above: use a tiny test CLIP tower.
            vision_tower = 'hf-internal-testing/tiny-random-clip'
            mm_use_im_patch_token = False
            mm_use_im_start_end = False
        model_args = TestArgs()
        self.model.model.initialize_vision_modules(model_args)
        self.assertTrue(isinstance(self.model, type(self.model)))
        self.model.initialize_vision_tokenizer(model_args, self.tokenizer)
        self.assertTrue(isinstance(self.tokenizer, type(self.tokenizer)))
        image_dummpy = torch.randn([1, 3, 30, 30])
        image_feature = self.model.encode_images(image_dummpy)
        self.assertTrue((image_feature.shape == torch.Size([1, 225, 16])))
def load_data(data_dir, batch_size, dev_ratio, device):
    """Read the corpus and wrap its splits in DataLoaders.

    Always returns a 4-tuple (train, dev, test, vocab); when dev_ratio <= 0
    no dev split is carved out and the test loader fills the dev slot.
    """
    (train_docs, test_docs, vocab) = read_dataset(data_dir)

    def make_loader(docs):
        # All loaders share batch size and keep the final partial batch.
        return DataLoader(DocDataset(docs, len(vocab), device), batch_size, drop_last=False, num_workers=0)

    if dev_ratio > 0:
        print('splitting train, dev datasets')
        (train_docs, dev_docs) = train_test_split(train_docs, test_size=dev_ratio, shuffle=True)
        print('train, dev, test', len(train_docs), len(dev_docs), len(test_docs))
        return (make_loader(train_docs), make_loader(dev_docs), make_loader(test_docs), vocab)
    print('train, test', len(train_docs), len(test_docs))
    train_loader = make_loader(train_docs)
    test_loader = make_loader(test_docs)
    return (train_loader, test_loader, test_loader, vocab)
def find_coref(ment, mentlist, person_names):
    """Collect mentions from `mentlist` that contain `ment`'s surface form
    as a whole-word (space-bounded) substring and whose top candidate
    entity is in `person_names`. Exact duplicates are excluded."""
    target = ment['mention'].lower()
    matches = []
    for other in mentlist:
        candidates = other['candidates']
        if (not candidates) or (candidates[0][0] not in person_names):
            continue
        text = other['mention'].lower()
        start = text.find(target)
        if (start == -1) or (text == target):
            continue
        end = start + len(target) - 1
        # Both ends must sit on a word boundary (string edge or a space).
        starts_on_boundary = (start == 0) or (text[start - 1] == ' ')
        ends_on_boundary = (end == len(text) - 1) or (text[end + 1] == ' ')
        if starts_on_boundary and ends_on_boundary:
            matches.append(other)
    return matches
class HandEggTouchSensorsEnv(ManipulateTouchSensorsEnv):
    """Shadow-hand egg-manipulation task with touch sensors enabled."""

    def __init__(self, target_position='random', target_rotation='xyz', reward_type='sparse'):
        # Target-position sampling box: (x, y, z) ranges around the palm.
        position_range = np.array([((- 0.04), 0.04), ((- 0.06), 0.02), (0.0, 0.06)])
        super().__init__(
            model_path=MANIPULATE_EGG_XML,
            target_position=target_position,
            target_rotation=target_rotation,
            target_position_range=position_range,
            reward_type=reward_type,
        )
class ElectraModelTester():
    """Fixture builder for ELECTRA model tests.

    Holds a deliberately tiny model configuration and provides
    create_and_check_* helpers that build each task head, run a forward
    pass, and assert the output shapes via the parent TestCase.
    """

    def __init__(self, parent):
        # `parent` is the owning unittest.TestCase; assertions route to it.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        # Tiny architecture so tests stay fast.
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Build a small ElectraConfig plus random input/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
            # Binary real/fake labels used by the pretraining discriminator.
            fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1)
        config = ElectraConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels)

    def create_and_check_electra_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
        """Base model: last hidden state has shape (batch, seq, hidden)."""
        model = ElectraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_electra_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
        """MLM head: logits over the vocabulary for every position."""
        model = ElectraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_electra_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
        """Token classification head: per-token logits over num_labels."""
        config.num_labels = self.num_labels
        model = ElectraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_electra_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
        """Pretraining discriminator: one real/fake logit per token."""
        config.num_labels = self.num_labels
        model = ElectraForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_electra_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
        """Sequence classification head: one logit vector per sequence."""
        config.num_labels = self.num_labels
        model = ElectraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_electra_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
        """QA head: start/end logits per position."""
        model = ElectraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_electra_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
        """Multiple choice head: inputs tiled to (batch, choices, seq)."""
        config.num_choices = self.num_choices
        model = ElectraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand((- 1), self.num_choices, (- 1)).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand((- 1), self.num_choices, (- 1)).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand((- 1), self.num_choices, (- 1)).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shared-test-mixin format."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return (config, inputs_dict)
def apply_sequential(inputs, modules):
    """Run `modules` over a list of 4-D feature maps.

    BatchNorm layers are applied jointly: every map is flattened and
    concatenated along the spatial axis so the normalization sees the whole
    list at once, then the result is split back to the original shapes.
    Any other module is simply mapped over the list element-wise.
    """
    for module in modules:
        if isinstance(module, (nn.BatchNorm2d, nn.SyncBatchNorm)):
            original_shapes = [t.shape for t in inputs]
            spatial_sizes = [shape[2] * shape[3] for shape in original_shapes]
            # (N, C, sum(H_i*W_i), 1): all maps share one normalization pass.
            merged = torch.cat([t.flatten(2) for t in inputs], dim=2).unsqueeze(3)
            pieces = module(merged).split(spatial_sizes, dim=2)
            inputs = [piece.view(shape) for (shape, piece) in zip(original_shapes, pieces)]
        else:
            inputs = [module(t) for t in inputs]
    return inputs
def masked_whiten(values, mask, shift_mean=True):
    """Whiten `values` using mean/variance computed over the masked entries.

    With shift_mean=False the result is scaled to unit variance but
    re-centred at the original (masked) mean instead of zero.
    """
    mean = masked_mean(values, mask)
    var = masked_var(values, mask)
    # Small epsilon keeps rsqrt finite for near-zero variance.
    whitened = (values - mean) * torch.rsqrt(var + 1e-08)
    if not shift_mean:
        whitened += mean
    return whitened
def _find_library_candidates(library_names, library_file_extensions, library_search_paths):
candidates = set()
for library_name in library_names:
for search_path in library_search_paths:
glob_query = os.path.join(search_path, (('*' + library_name) + '*'))
for filename in glob.iglob(glob_query):
filename = os.path.realpath(filename)
if (filename in candidates):
continue
basename = os.path.basename(filename)
if basename.startswith(('lib' + library_name)):
basename_end = basename[len(('lib' + library_name)):]
elif basename.startswith(library_name):
basename_end = basename[len(library_name):]
else:
continue
for file_extension in library_file_extensions:
if basename_end.startswith(file_extension):
if (basename_end[len(file_extension):][:1] in ('', '.')):
candidates.add(filename)
if basename_end.endswith(file_extension):
basename_middle = basename_end[:(- len(file_extension))]
if all(((c in '.') for c in basename_middle)):
candidates.add(filename)
return candidates |
def _make_fusion_block(features, use_bn):
    """Build a decoder FeatureFusionBlock_custom with `features` channels.

    The activation is a non-inplace ReLU; `use_bn` toggles batch norm
    inside the block; no deconvolution/expansion is used and upsampling
    aligns corners.
    """
    return FeatureFusionBlock_custom(features, nn.ReLU(False), deconv=False, bn=use_bn, expand=False, align_corners=True)
def get_onnx_model():
    """Export a torchvision ResNet-18 to 'resnet18.onnx' in the working
    directory (side effect only; returns None).

    Imports stay inside the function so torch/torchvision are only needed
    when the export is actually requested.
    """
    import torch
    import torchvision
    model = torchvision.models.resnet18()
    # A plain tensor suffices: torch.autograd.Variable has been deprecated
    # (merged into Tensor) since PyTorch 0.4.
    dummy_input = torch.randn(1, 3, 224, 224)
    # torch.onnx.export returns None and writes the file as a side effect;
    # the original assigned that None to an unused variable.
    torch.onnx.export(model, dummy_input, 'resnet18.onnx', export_params=True, verbose=True)
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)):
    """Deprecated alias that forwards to the runner-level allreduce_grads.

    Emits a DeprecationWarning and passes all arguments through unchanged.
    """
    # Bug fix: the warnings module has no `warning` attribute — the correct
    # API is warnings.warn — so the original line raised AttributeError.
    warnings.warn(
        '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be removed in v2.8. Please switch to "mmcv.runner.allreduce_grads',
        DeprecationWarning)
    _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)
def test_interpolation_potential_verticalfreq_outsidegrid():
    """Outside the interpolation grid the vertical frequency must fall
    back to the exact potential value (relative error below 1e-10)."""
    interp_pot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 201), logR=False, interpverticalfreq=True, zsym=False)
    for radius in [0.005, 2.5]:  # one radius below and one above the grid
        exact = potential.verticalfreq(potential.MWPotential, radius)
        rel_err = numpy.fabs((interp_pot.verticalfreq(radius) - exact) / exact)
        assert rel_err < (10.0 ** (- 10.0)), f'RZPot interpolation w/ interpRZPotential fails outside the grid at R = {radius:g} by {rel_err:g}'
    return None
class BatchNorm3d(_SyncBatchNorm):
    """Synchronized batch norm restricted to 5-D (N, C, D, H, W) input."""

    def _check_input_dim(self, input):
        ndim = input.dim()
        if ndim != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(ndim))
        # Delegate the remaining checks to the synchronized base class.
        super()._check_input_dim(input)
def latest_checkpoint(checkpoint_dir, latest_filename=None):
    """Return the most recent checkpoint recorded for `checkpoint_dir`,
    as tracked by a CheckpointStateManager (optionally using a custom
    state-file name)."""
    manager = CheckpointStateManager(checkpoint_dir, latest_filename=latest_filename)
    return manager.latest_checkpoint
def _build_faster_rcnn_feature_extractor(feature_extractor_config, is_training, reuse_weights=None):
    """Instantiate the Faster R-CNN feature extractor named in the config.

    Raises:
        ValueError: if the configured type is not registered in
            FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP.
    """
    extractor_type = feature_extractor_config.type
    features_stride = feature_extractor_config.first_stage_features_stride
    if extractor_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP:
        raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(extractor_type))
    extractor_cls = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[extractor_type]
    return extractor_cls(is_training, features_stride, reuse_weights)
def get_dist_info():
    """Return (rank, world_size); falls back to (0, 1) when the process
    group is not initialized (or distributed support is unavailable)."""
    if TORCH_VERSION < '1.0':
        # Legacy private flag that predates dist.is_initialized().
        initialized = dist._initialized
    else:
        initialized = dist.is_available() and dist.is_initialized()
    if initialized:
        return (dist.get_rank(), dist.get_world_size())
    return (0, 1)
def validation(args, model, device, train_loader, train_scp, train_utt2label, val_loader, val_scp, val_utt2label):
    """Compute average loss and utterance-level EER on the training and
    validation sets, log both, and return (val_loss, val_eer)."""
    logger.info('Starting Validation')
    # Frame-level losses/scores for both splits (train first, then val).
    train_loss, train_scores = compute_loss(model, device, train_loader)
    val_loss, val_scores = compute_loss(model, device, val_loader)
    # Aggregate frame scores to utterance level, then compute EER.
    train_preds, train_labels = utt_scores(train_scores, train_scp, train_utt2label)
    val_preds, val_labels = utt_scores(val_scores, val_scp, val_utt2label)
    train_eer = compute_eer(train_labels, train_preds)
    val_eer = compute_eer(val_labels, val_preds)
    logger.info('===> Training set: Average loss: {:.4f}\tEER: {:.4f}\n'.format(train_loss, train_eer))
    logger.info('===> Validation set: Average loss: {:.4f}\tEER: {:.4f}\n'.format(val_loss, val_eer))
    return (val_loss, val_eer)
class UNet2DModel(metaclass=DummyObject):
    """Placeholder emitted when PyTorch is not installed; any use raises a
    helpful error via `requires_backends`."""
    # Backends that must be available before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

    # Bug fix: these alternate constructors take `cls` and are invoked on
    # the class, so they need the @classmethod decorator (as in the
    # diffusers dummy-object templates).
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])
@pytest.fixture(scope='session')
def saliency_gpt2_model_tiny():
    """Session-scoped tiny random GPT-2 model wrapped for saliency
    attribution.

    Bug fix: the decorator line had been corrupted to the bare expression
    `(scope='session')` (a SyntaxError); restored as a pytest fixture.
    """
    return load_model('hf-internal-testing/tiny-random-GPT2LMHeadModel', 'saliency')
def INTERN_B():
    """Construct an InternImage-B backbone (channels=112, depths 4/4/21/4,
    DCNv3 core op, outputs from all four stages)."""
    # No checkpoint path is wired in: init_cfg below receives checkpoint=None.
    pretrained = None
    print('use InternImage_B as backbone')
    # NOTE(review): `_delete_=True` and `type='InternImage'` look like
    # leftovers from an mmcv/mmdet config dict rather than constructor
    # kwargs — confirm InternImage.__init__ actually accepts them.
    model = InternImage(_delete_=True, type='InternImage', core_op='DCNv3', channels=112, depths=[4, 4, 21, 4], groups=[7, 14, 28, 56], mlp_ratio=4.0, drop_path_rate=0.4, norm_layer='LN', layer_scale=1.0, offset_scale=1.0, post_norm=True, with_cp=False, out_indices=(0, 1, 2, 3), init_cfg=dict(type='Pretrained', checkpoint=pretrained))
    return model
def write_info_file(info, model_base_filepath, cnt):
    """Write `info` as key=value lines to '<base>_<cnt zero-padded to 10>_info.txt'.

    Bug fix: the file handle was opened and closed manually, so it leaked
    if a write raised; a context manager guarantees it is closed.
    """
    info_filename = model_base_filepath + ('_%010d_info.txt' % cnt)
    with open(info_filename, 'w') as info_f:
        for (key, val) in info.items():
            info_f.write('%s=%s\n' % (key, val))
def count_params(layer, **tags):
    """Total number of scalar parameters in `layer`, optionally filtered by
    Lasagne parameter tags (e.g. trainable=True)."""
    all_params = get_all_params(layer, **tags)
    # Product of each parameter's shape gives its element count.
    return sum(np.prod(p.get_value().shape) for p in all_params)
def ant():
    """Hyperparameter config for the MuJoCo Ant-v1 task, returned as a dict
    of the local names (layered on top of the shared default() config)."""
    # Pull the shared defaults into this function's local namespace.
    locals().update(default())
    env = 'Ant-v1'
    max_length = 1000
    # NOTE(review): `.0` is 0.0 training steps, which looks like a
    # truncated constant (configs of this style typically use values such
    # as 1e7) — confirm against the upstream config.
    steps = .0
    return locals()
def get_duplicated_ugly_ts_df():
    """Build a deliberately messy time-series DataFrame for tests: 50 random
    rows with a datetime column, a NaN in column 'a', plus 20 duplicated
    rows appended (70 rows total; index labels are not reset)."""
    data = np.random.random_sample((50, 5))
    df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
    # Bug fix: chained assignment (df['a'][0] = ...) is unreliable and is
    # a no-op under pandas copy-on-write; assign through .loc instead.
    df.loc[0, 'a'] = np.nan
    df['datetime'] = pd.date_range('1/1/2019', periods=50)
    for _ in range(20):
        # Append a copy of a random existing row, creating duplicates.
        df.loc[len(df)] = df.loc[np.random.randint(0, 49)]
    return df
def parse_args():
    """Define and parse command-line options for deblur training/testing."""
    parser = argparse.ArgumentParser(description='deblur arguments')
    # Run mode, data list and model variant.
    parser.add_argument('--phase', type=str, default='test', help='determine whether train or test')
    parser.add_argument('--datalist', type=str, default='./datalist_gopro.txt', help='training datalist')
    parser.add_argument('--model', type=str, default='color', help='model type: [lstm | gray | color]')
    # Optimization hyper-parameters.
    parser.add_argument('--batch_size', type=int, default=16, help='training batch size')
    parser.add_argument('--epoch', type=int, default=4000, help='training epoch number')
    parser.add_argument('--lr', type=float, default=0.0001, dest='learning_rate', help='initial learning rate')
    # Hardware selection and placeholder geometry.
    parser.add_argument('--gpu', dest='gpu_id', type=str, default='0', help='use gpu or cpu')
    parser.add_argument('--height', type=int, default=720, help='height for the tensorflow placeholder, should be multiples of 16')
    parser.add_argument('--width', type=int, default=1280, help='width for the tensorflow placeholder, should be multiple of 16 for 3 scales')
    # Test-time input/output directories.
    parser.add_argument('--input_path', type=str, default='./testing_set', help='input path for testing images')
    parser.add_argument('--output_path', type=str, default='./testing_res', help='output path for testing images')
    return parser.parse_args()
class BasicUnitConverter(units.ConversionInterface):
    """Matplotlib unit-conversion interface for BasicUnit quantities.

    Bug fix: these interface hooks are written without `self` and are
    called by matplotlib on the class or an instance interchangeably, so
    each needs the @staticmethod decorator (as in matplotlib's
    basic_units example).
    """

    @staticmethod
    def axisinfo(unit, axis):
        """Return tick locator/formatter info appropriate for `unit`."""
        if (unit == radians):
            return units.AxisInfo(majloc=ticker.MultipleLocator(base=(np.pi / 2)), majfmt=ticker.FuncFormatter(rad_fn), label=unit.fullname)
        elif (unit == degrees):
            return units.AxisInfo(majloc=ticker.AutoLocator(), majfmt=ticker.FormatStrFormatter('$%i^\\circ$'), label=unit.fullname)
        elif (unit is not None):
            if hasattr(unit, 'fullname'):
                return units.AxisInfo(label=unit.fullname)
            elif hasattr(unit, 'unit'):
                return units.AxisInfo(label=unit.unit.fullname)
        return None

    @staticmethod
    def convert(val, unit, axis):
        """Convert a value (or iterable of values) to plain numbers in `unit`."""
        if units.ConversionInterface.is_numlike(val):
            return val
        if iterable(val):
            return [thisval.convert_to(unit).get_value() for thisval in val]
        else:
            return val.convert_to(unit).get_value()

    @staticmethod
    def default_units(x, axis):
        """Return the default unit for `x` (first element's unit if iterable)."""
        if iterable(x):
            for thisx in x:
                return thisx.unit
        return x.unit
class RewardFn(abc.ABC):
    """Interface for environment reward functions.

    Bug fix: `__call__` had an empty (syntactically invalid) body; it is
    restored here as an abstract method with a docstring body, which
    matches the abc.ABC base class.
    """

    @abc.abstractmethod
    def __call__(self, state: State, action: chex.Array, next_state: State) -> chex.Array:
        """Compute the reward for transitioning from `state` to
        `next_state` via `action`."""
class EnvSampler():
    """Steps a gym-style environment one transition at a time, handling
    episode resets and accumulating per-episode returns."""

    def __init__(self, env, max_path_length=1000):
        self.env = env
        self.path_length = 0        # steps taken in the current episode
        self.current_state = None   # None means "episode not started"
        self.max_path_length = max_path_length
        self.path_rewards = []      # total reward of each finished episode
        self.sum_reward = 0         # running reward of the current episode

    def sample(self, agent, eval_t=False):
        """Advance one environment step using `agent` and return the
        transition (state, action, next_state, reward, terminal, info)."""
        if self.current_state is None:
            self.current_state = self.env.reset()
        state_before = self.current_state
        chosen_action = agent.select_action(self.current_state, eval_t)
        next_state, reward, terminal, info = self.env.step(chosen_action)
        self.path_length += 1
        self.sum_reward += reward
        episode_over = terminal or (self.path_length >= self.max_path_length)
        if episode_over:
            # Close out the episode: record its return and force a reset
            # on the next call.
            self.current_state = None
            self.path_length = 0
            self.path_rewards.append(self.sum_reward)
            self.sum_reward = 0
        else:
            self.current_state = next_state
        return (state_before, chosen_action, next_state, reward, terminal, info)
def load_merge_bracket(fname, path):
    """Load discourse segmentation and bracket/dependency structure for one
    story document; returns (disco_seg, dep, link)."""
    merge_path = os.path.join(path, fname + '.story.doc.conll.merge')
    bracket_path = os.path.join(path, fname + '.story.doc.conll.brackets')
    (disco_seg, edu_pool, edu_nsubj) = read_discourse_merge(merge_path)
    (link, dep) = new_read_bracket(bracket_path, edu_pool, edu_nsubj)
    return (disco_seg, dep, link)
def main(args):
    """Join training triples with passage text from the tilde corpus and
    append the hydrated lines to one output file per input train file.

    Args:
        args: namespace with tilde_corpus_dir, psg_train_dir, output_dir.
    """
    # Load pid -> passage text for the whole corpus.
    corpus_dic = {}
    corpus_files = os.listdir(args.tilde_corpus_dir)
    for file in corpus_files:
        with open(args.tilde_corpus_dir + f'/{file}', 'r') as f:
            lines = f.readlines()
        for line in tqdm(lines, desc='Loading collection'):
            data = json.loads(line)
            corpus_dic[data['pid']] = data['psg']
    train_files = os.listdir(args.psg_train_dir)
    for file in tqdm(train_files, desc='writing files'):
        # Robustness fix: both files are now context-managed so handles are
        # closed even if a lookup/parse error is raised mid-file.
        # NOTE(review): 'a+' appends, so re-running duplicates lines in the
        # output files — confirm that is intended.
        with open(f'{args.output_dir}/{file}', 'a+') as id_fout:
            with open(args.psg_train_dir + f'/{file}', 'r') as f:
                for line in f:
                    data = json.loads(line)
                    pos_passages = []
                    for pos_pass in data['pos']:
                        pos_passages.append({'pid': pos_pass['pid'], 'passage': corpus_dic[pos_pass['pid']]})
                    neg_passages = []
                    for neg_pass in data['neg']:
                        neg_passages.append({'pid': neg_pass['pid'], 'passage': corpus_dic[neg_pass['pid']]})
                    temp = {'qry': data['qry'], 'pos': pos_passages, 'neg': neg_passages}
                    id_fout.write(json.dumps(temp) + '\n')
class _deconv2d(prettytensor.VarStoreMethod):
    """prettytensor layer method implementing a 2-D transposed convolution
    (``tf.nn.conv2d_transpose``) with optional bias, batch-norm and activation.
    """

    def __call__(self, input_layer, kernel, depth, name, stride, activation_fn, l2loss, init, stddev, bias, edges, batch_normalize):
        """Apply a transposed convolution to ``input_layer``.

        Args:
            input_layer: prettytensor layer wrapping a rank-4 NHWC tensor; the
                channel dimension (shape[3]) must be statically known.
            kernel: Kernel spec, normalized by ``_kernel`` (e.g. int or pair).
            depth: Number of output channels.
            name: Unused here; kept for the prettytensor method signature.
            stride: Stride spec, normalized by ``_stride``.
            activation_fn: Optional activation, or a sequence whose first
                element is the activation and the rest are its extra args.
            l2loss: L2 regularization weight registered via ``layers.add_l2loss``.
            init: Explicit weight initializer; mutually exclusive with stddev.
            stddev: If set (non-zero), use a truncated-normal initializer.
            bias: Whether to add a per-channel bias variable.
            edges: Padding mode, forwarded to the output-size computation and
                to ``conv2d_transpose``.
            batch_normalize: Whether to apply batch normalization to the output.

        Returns:
            ``input_layer.with_tensor(y)`` wrapping the deconvolution output.

        Raises:
            ValueError: On non-4D input, unknown input depth, or when both
                ``init`` and ``stddev`` are given.
        """
        if (len(input_layer.shape) != 4):
            # NOTE(review): message says "conv2d" although this is a deconv op.
            raise ValueError(('Cannot perform conv2d on tensor with shape %s' % input_layer.shape))
        if (input_layer.shape[3] is None):
            raise ValueError('Input depth must be known')
        kernel = _kernel(kernel)
        stride = _stride(stride)
        # conv2d_transpose filter layout: [h, w, out_channels, in_channels].
        size = [kernel[0], kernel[1], depth, input_layer.shape[3]]
        books = input_layer.bookkeeper
        if (init is None):
            if (stddev is None):
                # Default: Xavier init scaled by the spatial patch size.
                patch_size = (size[0] * size[1])
                init = layers.xavier_init((size[2] * patch_size), (size[3] * patch_size))
            elif stddev:
                init = tf.truncated_normal_initializer(stddev=stddev)
            else:
                # stddev == 0 explicitly requests zero-initialized weights.
                init = tf.zeros_initializer()
        elif (stddev is not None):
            raise ValueError('Do not set both init and stddev.')
        dtype = input_layer.tensor.dtype
        params = self.variable('weights', size, init, dt=dtype)
        input_height = input_layer.shape[1]
        input_width = input_layer.shape[2]
        filter_height = kernel[0]
        filter_width = kernel[1]
        row_stride = stride[1]
        col_stride = stride[2]
        # Output spatial size must be computed explicitly for conv2d_transpose.
        (out_rows, out_cols) = get2d_deconv_output_size(input_height, input_width, filter_height, filter_width, row_stride, col_stride, edges)
        output_shape = [input_layer.shape[0], out_rows, out_cols, depth]
        y = tf.nn.conv2d_transpose(input_layer, params, output_shape, stride, edges)
        layers.add_l2loss(books, params, l2loss)
        if bias:
            # size[-2] is `depth`, i.e. one bias per output channel.
            y += self.variable('bias', [size[(- 2)]], tf.zeros_initializer(), dt=dtype)
        # Track the fraction of zero activations as a scalar summary.
        books.add_scalar_summary(tf.reduce_mean(layers.spatial_slice_zeros(y)), ('%s/zeros_spatial' % y.op.name))
        if batch_normalize:
            y = input_layer.with_tensor(y).batch_normalize()
        if (activation_fn is not None):
            if (not isinstance(activation_fn, collections.Sequence)):
                # Normalize a bare callable to a 1-tuple (fn, *extra_args).
                activation_fn = (activation_fn,)
            y = layers.apply_activation(books, y, activation_fn[0], activation_args=activation_fn[1:])
        return input_layer.with_tensor(y)
def test_digits_naive():
    """Naive optimizer on the digits data with per-sample costs (budget 100)."""
    selector = FeatureBasedSelection(100, 'sqrt', optimizer='naive')
    selector.fit(X_digits, sample_cost=X_digits_costs)
    # Ranking and gains must match the reference selection exactly.
    assert_array_equal(selector.ranking, digits_ranking)
    assert_array_almost_equal(selector.gains, digits_gains, 4)
    # Total cost of the selected samples must respect the budget.
    assert_less_equal(sum(X_digits_costs[selector.ranking]), 100)
def publish_others():
    """Define fixed object locations (box, squeeze area, trash).

    NOTE(review): all locals below are assigned but never used or returned,
    and nothing is published despite the name — looks like a stub; confirm
    intended behavior with the author.
    """
    # Presumably (x, y, z) coordinates paired with a Pose type — TODO confirm.
    box = ((0., (- 0.), (- 0.)), Pose)
    squeeze_area = (0., 0., (- 0.))
    trash = (0., 0., (- 0.))
class TestWeightTying(unittest.TestCase):
    """Verify that tied-embedding / tied-softmax configs share weight tensors."""

    def setUp(self):
        self.seed = 42
        token_count = 30
        self.vocab = Vocabulary(tokens=[f'tok{i:02d}' for i in range(token_count)])
        # Baseline config: recurrent encoder/decoder, no tying.
        self.cfg = {
            'model': {
                'tied_embeddings': False,
                'tied_softmax': False,
                'encoder': {
                    'type': 'recurrent',
                    'hidden_size': 64,
                    'embeddings': {'embedding_dim': 32},
                    'num_layers': 1,
                },
                'decoder': {
                    'type': 'recurrent',
                    'hidden_size': 64,
                    'embeddings': {'embedding_dim': 32},
                    'num_layers': 1,
                },
            }
        }

    def _build_model(self, cfg):
        """Seed torch for reproducibility and build a model with shared vocab."""
        torch.manual_seed(self.seed)
        return build_model(cfg['model'], src_vocab=self.vocab, trg_vocab=self.vocab)

    def test_tied_embeddings(self):
        cfg = copy.deepcopy(self.cfg)
        cfg['model']['tied_embeddings'] = True
        cfg['model']['tied_softmax'] = False
        src_vocab = trg_vocab = self.vocab
        model = self._build_model(cfg)
        self.assertEqual(src_vocab, trg_vocab)
        # Tied embeddings: src and trg embedding modules are the same object.
        self.assertEqual(model.src_embed, model.trg_embed)
        torch.testing.assert_close(model.src_embed.lut.weight, model.trg_embed.lut.weight)
        self.assertEqual(model.src_embed.lut.weight.shape, model.trg_embed.lut.weight.shape)

    def test_tied_softmax(self):
        cfg = copy.deepcopy(self.cfg)
        cfg['model']['decoder']['type'] = 'transformer'
        cfg['model']['tied_embeddings'] = False
        cfg['model']['tied_softmax'] = True
        # Output-layer tying requires embedding_dim == hidden_size.
        cfg['model']['decoder']['embeddings']['embedding_dim'] = 64
        model = self._build_model(cfg)
        emb_weight = model.trg_embed.lut.weight
        out_weight = model.decoder.output_layer.weight
        self.assertEqual(emb_weight.shape, out_weight.shape)
        torch.testing.assert_close(emb_weight, out_weight)

    def test_tied_src_trg_softmax(self):
        cfg = copy.deepcopy(self.cfg)
        cfg['model']['decoder']['type'] = 'transformer'
        cfg['model']['tied_embeddings'] = True
        cfg['model']['tied_softmax'] = True
        cfg['model']['decoder']['embeddings']['embedding_dim'] = 64
        cfg['model']['encoder']['embeddings']['embedding_dim'] = 64
        model = self._build_model(cfg)
        src_weight = model.src_embed.lut.weight
        trg_weight = model.trg_embed.lut.weight
        output_weight = model.decoder.output_layer.weight
        torch.testing.assert_close(src_weight, trg_weight)
        torch.testing.assert_close(src_weight, output_weight)
        self.assertEqual(src_weight.shape, trg_weight.shape)
        self.assertEqual(trg_weight.shape, output_weight.shape)
        # Writing through one alias must be visible through all three.
        output_weight.data.fill_(3.0)
        self.assertEqual(output_weight.sum().item(), 6528)
        self.assertEqual(output_weight.sum().item(), src_weight.sum().item())
        self.assertEqual(output_weight.sum().item(), trg_weight.sum().item())
        self.assertEqual(src_weight.sum().item(), trg_weight.sum().item())
def is_pretrained_cfg(model: str, tag: str):
    """Return True when *tag* (case-insensitive) is a pretrained config of *model*."""
    return model in _PRETRAINED and tag.lower() in _PRETRAINED[model]
def md5_hash(path):
    """Return the hexadecimal MD5 digest of the file at *path*.

    Streams the file in fixed-size chunks instead of reading it whole, so
    arbitrarily large files do not need to fit in memory. Output is identical
    to hashing the full contents at once.
    """
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        # iter(callable, sentinel) yields chunks until read() returns b''.
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def build_neck(cfg):
    """Instantiate the neck module named by ``cfg.MODEL.NECK.CONV_BODY``."""
    body_name = cfg.MODEL.NECK.CONV_BODY
    # The name must have been registered before construction.
    assert body_name in registry.NECKS, f'cfg.MODEL.NECK.CONV_BODY: {body_name} is not registered in registry'
    return registry.NECKS[body_name](cfg)
def preprocess(args):
    """Load the spaCy English model into the module-global _NLP, then
    preprocess the dev set followed by the train set."""
    import spacy
    global _NLP
    _NLP = spacy.load('en', parser=False)
    # Dev set first, then train set — each (raw, output) pair shares n_history.
    for raw_path, out_path in ((args.raw_devset_file, args.devset_file),
                               (args.raw_trainset_file, args.trainset_file)):
        process_file(raw_path, out_path, args.n_history)
class SLTopicDetectionConfig(FairseqDataclass):
    """Task configuration for sign-language topic detection.

    Fixes over the original: ``num_labels`` was annotated ``str`` while its
    default is the int ``10`` (a type mismatch that breaks dataclass/config
    type checking), and its help text was missing a space ("labelswhen").
    """
    data: str = field(default=MISSING, metadata={'help': 'path to data directory'})
    dict_path: str = field(default=MISSING, metadata={'help': 'Path to dictionary mapping category number to category name'})
    modeling_task: str = field(default='classification', metadata={'help': 'Modeling task.'})
    # Annotation corrected from `str` to `int` to match the integer default.
    num_labels: int = field(default=10, metadata={'help': 'Number of labels when modeling_task is classification'})
    max_source_positions: Optional[int] = field(default=5500, metadata={'help': 'max number of frames in the source sequence'})
    min_source_positions: Optional[int] = field(default=150, metadata={'help': 'min number of frames in the source sequence'})
    max_target_positions: Optional[int] = field(default=1, metadata={'help': 'max number of tokens in the target sequence, for TD it must be one'})
    normalization: ChoiceEnum([x.name for x in NormType]) = field(default=NormType.body.name, metadata={'help': 'select the type of normalization to apply'})
    data_augmentation: bool = field(default=False, metadata={'help': 'set True to apply data_augmentation to every sample'})
    shuffle_dataset: bool = field(default=True, metadata={'help': 'set True to shuffle the dataset between epochs'})
    text_compression_level: ChoiceEnum([x.name for x in TextCompressionLevel]) = field(default='none', metadata={'help': 'compression level for texts (e.g. audio filenames, target texts): none/low/high (default: none). '})
    feats_type: ChoiceEnum([x.name for x in SignFeatsType]) = field(default='i3d', metadata={'help': 'type of features for the sign input data: mediapipe/i3d/CNN2d/openpose (default: i3d). '})
    eval_accuracy: bool = field(default=True, metadata={'help': 'set to True to evaluate validation accuracy'})
    # Interpolated from the top-level dataset/bpe config groups.
    train_subset: str = II('dataset.train_subset')
    valid_subset: str = II('dataset.valid_subset')
    bpe_sentencepiece_model: str = II('bpe.sentencepiece_model')
def test_tinydb_reader_loads_db_and_fs(tmpdir):
    """A reader opened on the experiment root must see the same filesystem
    root and the same first run record as the observer that wrote them."""
    root_dir = tmpdir.strpath
    observer = run_test_experiment(exp_name='exp1', exp_id='1234', root_dir=root_dir)
    reader = TinyDbReader(root_dir)
    assert observer.fs.root == reader.fs.root
    assert str(observer.runs.all()[0]) == str(reader.runs.all()[0])
def grid() -> chex.Array:
    """Return a fixed 10x9 binary occupancy grid (1 = occupied cell)."""
    rows = [
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 0, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0],
        [0, 1, 0, 1, 1, 1, 0, 0, 0],
        [0, 1, 1, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
    ]
    return jnp.array(rows)
def get_config():
    """Build the default DrQ hyperparameter configuration."""
    cfg = ml_collections.ConfigDict()
    cfg.algo = 'drq'
    # Optimizer learning rates (actor / critic / temperature).
    cfg.actor_lr = 0.0003
    cfg.critic_lr = 0.0003
    cfg.temp_lr = 0.0003
    # Network architecture.
    cfg.hidden_dims = (256, 256)
    cfg.cnn_features = (32, 32, 32, 32)
    cfg.cnn_strides = (2, 1, 1, 1)
    cfg.cnn_padding = 'VALID'
    cfg.latent_dim = 50
    # RL objective and target-network update schedule.
    cfg.discount = 0.99
    cfg.tau = 0.005
    cfg.target_update_period = 1
    # Entropy temperature (None target_entropy -> use the algorithm default).
    cfg.init_temperature = 0.1
    cfg.target_entropy = None
    # Replay buffer and observation preprocessing.
    cfg.replay_buffer_size = 100000
    cfg.gray_scale = False
    cfg.image_size = 84
    return cfg
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.