code stringlengths 101 5.91M |
|---|
class DiscreteMITrainHook(TrainerHook):
    """Trainer hook adding a discrete mutual-information (IIC-style) loss on
    one intermediate feature map of *model*.

    Encoder features get a ``ClusterHead`` + ``IIDLoss``; decoder features get
    a ``DenseClusterHead`` + ``IIDSegmentationLoss`` (see ``projector_class``
    and ``criterion_class``).
    """

    def __init__(self, *, name, model: nn.Module, feature_name: str, weight: float = 1.0,
                 num_clusters=20, num_subheads=5, padding=None) -> None:
        super().__init__(hook_name=name)
        # Only features the model actually exposes are valid.
        assert (feature_name in (encoder_names + decoder_names)), feature_name
        self._feature_name = feature_name
        self._weight = weight
        self._extractor = SingleFeatureExtractor(model, feature_name=feature_name)
        input_dim = model.get_channel_dim(feature_name)
        self._projector = self.init_projector(input_dim=input_dim,
                                              num_clusters=num_clusters,
                                              num_subheads=num_subheads)
        self._criterion = self.init_criterion(padding=padding)

    def learnable_modules(self) -> List[nn.Module]:
        # Only the projection head is optimized by this hook.
        return [self._projector]

    def __call__(self):
        return _DiscreteMIEpochHook(name=self._hook_name, weight=self._weight,
                                    extractor=self._extractor, projector=self._projector,
                                    criterion=self._criterion)

    def init_projector(self, *, input_dim, num_clusters, num_subheads=5):
        projector = self.projector_class(input_dim=input_dim, num_clusters=num_clusters,
                                         num_subheads=num_subheads, head_type='linear',
                                         T=1, normalize=False)
        return projector

    def init_criterion(self, padding: int = None):
        if self._feature_name in encoder_names:
            return self._init_criterion()
        return self._init_dense_criterion(padding=(padding or 0))

    def _init_dense_criterion(self, padding: int = 0):
        criterion = self.criterion_class(padding=padding)
        return criterion

    def _init_criterion(self):
        criterion = self.criterion_class()

        def criterion_wrapper(*args, **kwargs):
            # IIDLoss returns a tuple; only the scalar loss itself is needed.
            return criterion(*args, **kwargs)[0]
        return criterion_wrapper

    # Fixed: these two were plain methods, yet they are used as attributes that
    # yield a class (``self.projector_class(input_dim=..., ...)``), which would
    # pass unexpected kwargs to a zero-argument method and raise TypeError.
    # They must be properties returning the class to instantiate.
    @property
    def projector_class(self):
        # Deferred import to avoid a circular dependency at module load time.
        from contrastyou.projectors.heads import DenseClusterHead, ClusterHead
        if self._feature_name in encoder_names:
            return ClusterHead
        return DenseClusterHead

    @property
    def criterion_class(self):
        from contrastyou.losses.iic_loss import IIDLoss, IIDSegmentationLoss
        if self._feature_name in encoder_names:
            return IIDLoss
        return IIDSegmentationLoss
class ParameterStore(type):
    """Metaclass that lets a class act as a view over the module-global
    ``parameters`` mapping: ``Cls[key]`` reads it, ``Cls[key] = v`` writes it."""

    def __getitem__(cls, key: str):
        # Reads go straight to the shared module-level dict.
        return parameters[key]

    def __setitem__(cls, key, value):
        # In-place mutation of the shared module-level dict.
        parameters[key] = value
def distributed_init(args):
    """Initialise the (c10d or no_c10d) process group and return this rank.

    Mutates ``args`` (may downgrade ``ddp_backend``) and silences stdout on
    non-master ranks.
    """
    if args.distributed_world_size == 1:
        raise ValueError('Cannot initialize distributed with distributed_world_size=1')

    # Fall back to the legacy backend when requested or when c10d is missing.
    if args.ddp_backend == 'no_c10d' or not c10d_status.has_c10d:
        args.ddp_backend = 'no_c10d'
        _use_c10d[0] = False

    print('| distributed init (rank {}): {}'.format(args.distributed_rank, args.distributed_init_method), flush=True)

    backend_module = dist_c10d if _use_c10d[0] else dist_no_c10d
    backend_module.init_process_group(
        backend=args.distributed_backend,
        init_method=args.distributed_init_method,
        world_size=args.distributed_world_size,
        rank=args.distributed_rank,
    )

    if not is_master(args):
        suppress_output()
    return args.distributed_rank
def _generate_cplex(df: pd.DataFrame, category: str, k: int=3, target_column: str='CV3', timelimit: int=10) -> pd.DataFrame:
    """Assign sites to ``k`` cross-validation folds via a CPLEX-solved integer
    program that balances the per-label counts across folds.

    Args:
        df: Must contain a 'site' column and the ``category`` label column.
        category: Column whose label distribution should be balanced.
        k: Number of folds.
        target_column: Column receiving the fold id ('1'..'k') as a string.
        timelimit: CPLEX time limit in seconds.

    Raises:
        errors.SolverNotFoundError: when the CPLEX solver is not available.
    """
    if (not sf.util.CPLEX_AVAILABLE):
        raise errors.SolverNotFoundError('CPLEX solver not found.')
    import cvxpy as cp
    unique_sites = df['site'].unique()
    unique_labels = df[category].unique()
    # n_label_by_site[label][site]: number of rows with that (label, site) pair.
    n_label_by_site = [[len(df[((df['site'] == site) & (df[category] == label))]) for site in unique_sites] for label in unique_labels]
    # One boolean assignment vector per fold; each site goes to exactly one fold.
    variables_by_cv = [cp.Variable(len(unique_sites), boolean=True) for _ in range(k)]
    A = np.ones(len(unique_sites))
    constraints = [(sum(variables_by_cv) == A)]
    # Minimise the squared deviation of each fold's k-scaled per-label count
    # from the total count of that label.
    error = 0
    for li in range(len(unique_labels)):
        for cv in range(k):
            error += cp.square((cp.sum((k * cp.multiply(variables_by_cv[cv], n_label_by_site[li]))) - sum(n_label_by_site[li])))
    prob = cp.Problem(cp.Minimize(error), constraints)
    prob.solve(solver='CPLEX', cplex_params={'timelimit': timelimit})
    # Boolean variables come back as floats; threshold at 0.5.
    chosen_sites = [[site for (site_idx, site) in enumerate(unique_sites) if (variables_by_cv[cv].value[site_idx] > 0.5)] for cv in range(k)]
    for i in range(k):
        log.info(f'Crossfold {(i + 1)} Sites: {chosen_sites[i]}')
        df.loc[(df['site'].isin(chosen_sites[i]), target_column)] = str((i + 1))
    return df
class AugMix(object):
    """AugMix-style augmentation: blend several randomly augmented chains of a
    PIL image with Dirichlet weights, then mix with the original image."""

    def __init__(self, prob=0.5, aug_prob_coeff=0.1, mixture_width=3, mixture_depth=1, aug_severity=1):
        self.prob = prob                      # chance of applying AugMix at all
        self.aug_prob_coeff = aug_prob_coeff  # Dirichlet/Beta concentration
        self.mixture_width = mixture_width    # number of augmentation chains
        self.mixture_depth = mixture_depth    # ops per chain (<=0 -> random 1..3)
        self.aug_severity = aug_severity      # severity passed to each op

    def __call__(self, img):
        # Skip augmentation with probability (1 - prob).
        if random.random() > self.prob:
            return np.asarray(img)

        chain_weights = np.float32(np.random.dirichlet([self.aug_prob_coeff] * self.mixture_width))
        blend = np.float32(np.random.beta(self.aug_prob_coeff, self.aug_prob_coeff))

        mix = np.zeros([img.size[1], img.size[0], 3])
        for w in chain_weights:
            image_aug = img.copy()
            if self.mixture_depth > 0:
                depth = self.mixture_depth
            else:
                depth = np.random.randint(1, 4)
            for _ in range(depth):
                op = np.random.choice(augmentations)
                image_aug = op(image_aug, self.aug_severity)
            mix += w * np.asarray(image_aug)

        mixed = (1 - blend) * np.asarray(img) + blend * mix
        return mixed.astype(np.uint8)
def save_results(f):
    """Sort all per-example result arrays by image path and dump them to .npz.

    The destination comes from FLAGS.result_path (relative paths are placed
    under FLAGS.logdir); a timestamped default name is used when unset.
    """
    default_path = f'results_{util.timestamp()}.npz'
    result_path = FLAGS.result_path or default_path
    if not os.path.isabs(result_path):
        result_path = os.path.join(FLAGS.logdir, result_path)

    # A single permutation keeps every saved array aligned.
    order = np.argsort(f.image_path)
    util.ensure_path_exists(result_path)
    logging.info(f'Saving results to {result_path}')
    np.savez(
        result_path,
        image_path=f.image_path[order],
        coords3d_true=f.coords3d_true_orig_cam[order],
        coords3d_pred=f.coords3d_pred_orig_cam[order],
        coords3d_true_world=f.coords3d_true_world[order],
        coords3d_pred_world=f.coords3d_pred_world[order],
        activity_name=f.activity_name[order],
        scene_name=f.scene_name[order],
        joint_validity_mask=f.joint_validity_mask[order],
    )
class lossfun(torch.nn.Module):
    """L1 reconstruction loss wrapped as a module.

    Note the argument order: ``forward(gt, oup)`` computes ``l1_loss(oup, gt)``.
    """

    def __init__(self):
        super().__init__()

    def forward(self, gt, oup):
        return F.l1_loss(oup, gt)
def _get_fake_filepaths():
log_dir = '/fake/directory'
checkpoint_dir_name = 'checkpoints'
checkpoint_dir = os.path.join(log_dir, checkpoint_dir_name)
return (log_dir, checkpoint_dir_name, checkpoint_dir) |
def get_args():
    """Parse command-line options for converting an mmflow checkpoint to SCFlow format."""
    parser = argparse.ArgumentParser(description='Download the model provided by mmflow and convert the model state dict which can be loaded in SCFlow project')
    parser.add_argument('--model_url', type=str)
    return parser.parse_args()
def save_pickle(filename, obj):
    """Pickle ``obj`` to ``filename`` and log the destination."""
    path = str(filename)
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle)
    logging.info('Saved: %s', filename)
def mock_process(client_id, data_train, data_test, target='localhost:8980'):
    """Run one mock federated-learning client.

    Fits FGBoostRegression on ``data_train`` (supervised when a 'SalePrice'
    column is present, unsupervised otherwise) and returns the predictions on
    ``data_test``.
    """
    init_fl_context(client_id, target)
    df_train = pd.read_csv(os.path.join(resource_path, data_train))
    fgboost_regression = FGBoostRegression()
    if 'SalePrice' in df_train:
        # Fixed: `drop('SalePrice', 1)` used the positional `axis` argument,
        # which was removed in pandas 2.0.
        df_x = df_train.drop(columns='SalePrice')
        df_y = df_train.filter(items=['SalePrice'])
        fgboost_regression.fit(df_x, df_y, feature_columns=df_x.columns,
                               label_columns=['SalePrice'], num_round=15)
    else:
        fgboost_regression.fit(df_train, feature_columns=df_train.columns, num_round=15)
    df_test = pd.read_csv(os.path.join(resource_path, data_test))
    # Return the predictions instead of discarding them; callers that ignored
    # the previous implicit None return are unaffected.
    return fgboost_regression.predict(df_test, feature_columns=df_test.columns)
def fairness(l):
    """Inverse gap between the arithmetic mean and the (0.001-offset) harmonic
    mean of ``l``.

    The fallback to 0 is kept for parity with the original guard, although a
    finite reciprocal can never be exactly zero.
    """
    gap = np.mean(l) - (scipy.stats.hmean(l) + 0.001)
    score = 1 / gap
    return score if score else 0
def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):
    """Worker loop for a shared-memory vectorized env: serve commands from ``pipe``.

    Observations are written into the shared-memory buffers ``obs_bufs`` (one
    per observation key) rather than sent through the pipe; the pipe carries
    only small control payloads.
    """
    def _write_obs(maybe_dict_obs):
        # Copy each observation field into its shared-memory array in place.
        flatdict = obs_to_dict(maybe_dict_obs)
        for k in keys:
            dst = obs_bufs[k].get_obj()
            dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k])
            np.copyto(dst_np, flatdict[k])
    env = env_fn_wrapper.x()
    # The child does not need its copy of the parent's pipe end.
    parent_pipe.close()
    try:
        while True:
            (cmd, data) = pipe.recv()
            if (cmd == 'reset'):
                pipe.send(_write_obs(env.reset()))
            elif (cmd == 'step'):
                (obs, reward, done, info) = env.step(data)
                if done:
                    # Auto-reset so the consumer always receives a fresh obs.
                    obs = env.reset()
                pipe.send((_write_obs(obs), reward, done, info))
            elif (cmd == 'render'):
                pipe.send(env.render(mode='rgb_array'))
            elif (cmd == 'close'):
                pipe.send(None)
                break
            else:
                raise RuntimeError(('Got unrecognized cmd %s' % cmd))
    except KeyboardInterrupt:
        print('ShmemVecEnv worker: got KeyboardInterrupt')
    finally:
        # Always release env resources, even on interrupt or error.
        env.close()
class Annotation(ABC):
    """Abstract base class for annotation objects backed by an optional Ontology.

    Subclasses must implement ``load``, ``save``, ``render`` and ``hexdigest``.
    Equality and the repr derive from ``hexdigest``, so concrete subclasses
    must expose it as a string-valued property.

    NOTE(review): the original had empty method bodies and bare attribute
    access on ``hexdigest`` — decorators/bodies appear to have been stripped;
    they are restored here with builtin ``property``/``classmethod``.
    """

    def __init__(self, ontology=None):
        if ontology is not None:
            assert isinstance(ontology, Ontology), 'Invalid ontology!'
        self._ontology = ontology

    @property
    def ontology(self):
        """The Ontology this annotation was created with (or None)."""
        return self._ontology

    @classmethod
    def load(cls, annotation_file, ontology):
        """Construct an annotation from a serialized file. Subclass responsibility."""
        raise NotImplementedError

    def save(self, save_dir):
        """Serialize this annotation under ``save_dir``. Subclass responsibility."""
        raise NotImplementedError

    def render(self):
        """Produce a renderable view of the annotation. Subclass responsibility."""
        raise NotImplementedError

    @property
    def hexdigest(self):
        """Stable content digest (string); drives __eq__ and __repr__."""
        raise NotImplementedError

    def __eq__(self, other):
        # Annotations are equal iff their content digests match.
        return (self.hexdigest == other.hexdigest)

    def __repr__(self):
        return f'{self.__class__.__name__}[{os.path.basename(self.hexdigest)}]'
def convert_excel_to_csv(file_name: str) -> str:
    """Convert an Excel workbook to CSV and return the '<file_name>.csv' path."""
    csv_path = f'{file_name}.csv'
    pd.read_excel(file_name).to_csv(csv_path, index=False)
    return csv_path
def voc_eval_with_return(result_file, dataset, iou_thr=0.5, logger='print', only_ap=True):
    """Run VOC-style mAP evaluation on saved detections and return the results.

    Args:
        result_file: Path to detections loadable by ``mmcv.load``.
        dataset: Dataset providing ``get_ann_info`` (and optionally ``year``).
        iou_thr: IoU threshold for a true positive.
        logger: Passed through to ``eval_map`` for progress output.
        only_ap: When True, strip per-class results down to the 'ap' field.

    Returns:
        Tuple of (mean_ap, eval_results).
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    # VOC2007 selects the 11-point metric inside eval_map; otherwise pass the
    # class names directly.
    if (hasattr(dataset, 'year') and (dataset.year == 2007)):
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    (mean_ap, eval_results) = eval_map(det_results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=dataset_name, logger=logger)
    if only_ap:
        eval_results = [{'ap': eval_results[i]['ap']} for i in range(len(eval_results))]
    return (mean_ap, eval_results)
def override_kwargs(block_kwargs, model_kwargs):
    """Pick per-block kwargs when given, else the model-level ones; never None."""
    if block_kwargs is not None:
        chosen = block_kwargs
    else:
        chosen = model_kwargs
    # Normalise any falsy value (None, empty dict) to an empty dict.
    return chosen if chosen else {}
def get_args():
    """Parse CLI options for IMFNet model inspection; resolves actual CUDA availability."""
    default_ckpt = '/home/qwt/code/IMFNet-main/pretrain/3DMatch/3DMatch.pth'
    parser = argparse.ArgumentParser()
    parser.add_argument('--use-cuda', action='store_true', default=True,
                        help='Use NVIDIA GPU acceleration')
    parser.add_argument('--checkpoint', default=default_ckpt, help='Model checkpoint.')
    parser.add_argument('--target', default=780, help='The target point index.')
    args = parser.parse_args()
    # Honour the flag only when a GPU is actually present.
    args.use_cuda = args.use_cuda and torch.cuda.is_available()
    print('Using GPU for acceleration' if args.use_cuda else 'Using CPU for computation')
    return args
# NOTE(review): the bare `_torch` / `_torch_multi_gpu` lines and the
# parenthesised `(reason=...)` lines below look like remnants of stripped
# decorators (e.g. `@require_torch`, `@require_torch_multi_gpu`,
# `@unittest.skip(reason=...)`) — confirm against the original transformers
# test module; as written, the `(reason=...)` lines are not valid Python.
_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MaskFormerSwin (the Swin variant used as the
    MaskFormer backbone): config round-trips, forward signatures, hidden-state
    shapes (with and without padding), and tuple/dict output equivalence."""
    all_model_classes = ((MaskFormerSwinModel, MaskFormerSwinBackbone) if is_torch_available() else ())
    pipeline_model_mapping = ({'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {})
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    _torch_multi_gpu
    (reason="`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: the common property checks don't apply here.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    ('Swin does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    ('Swin does not support feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), nn.Module)
            x = model.get_output_embeddings()
            self.assertTrue(((x is None) or isinstance(x, nn.Linear)))

    def test_forward_signature(self):
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            # `pixel_values` must always be the first forward argument.
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    (reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass
    (reason='MaskFormerSwin is only used as an internal backbone')
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        # Shared helper: run a forward pass and validate the hidden-state count
        # and the (num_patches, embed_dim) shape of the first hidden state.
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(self.model_tester, 'expected_num_hidden_layers', (len(self.model_tester.depths) + 1))
        self.assertEqual(len(hidden_states), expected_num_layers)
        patch_size = (config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size))
        num_patches = ((image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]))
        self.assertListEqual(list(hidden_states[0].shape[(- 2):]), [num_patches, self.model_tester.embed_dim])

    def test_hidden_states_output(self):
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size))
        for model_class in self.all_model_classes:
            # Check both ways of requesting hidden states: per-call flag ...
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # ... and the config-level default.
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        # A patch size that does not divide the image forces internal padding.
        config.patch_size = 3
        image_size = (self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size))
        patch_size = (config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size))
        padded_height = ((image_size[0] + patch_size[0]) - (image_size[0] % patch_size[0]))
        padded_width = ((image_size[1] + patch_size[1]) - (image_size[1] % patch_size[1]))
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    (reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass
    (reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
    def test_initialization(self):
        pass
    (reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
    def test_gradient_checkpointing_backward_compatibility(self):
        pass

    def test_model_outputs_equivalence(self):
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaNs compare unequal to themselves; zero them before allclose.
            t[(t != t)] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for (tuple_iterable_value, dict_iterable_value) in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for (tuple_iterable_value, dict_iterable_value) in zip(tuple_object.values(), dict_object.values()):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif (tuple_object is None):
                        return
                    else:
                        self.assertTrue(torch.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-05), msg=f'Tuple and dict output are not equal. Difference: {torch.max(torch.abs((tuple_object - dict_object)))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.')
                recursive_check(tuple_output, dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # Plain inputs, labelled inputs, then both again with
            # output_hidden_states=True.
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
class VQADataset(BaseDataset):
    """VQA dataset whose collater batches variable-length answer lists by
    flattening them and tracking per-sample answer counts."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def collater(self, samples):
        images, questions, answers, weights, counts = [], [], [], [], []
        for sample in samples:
            images.append(sample['image'])
            questions.append(sample['text_input'])
            weights.extend(sample['weights'])
            sample_answers = sample['answers']
            answers.extend(sample_answers)
            counts.append(len(sample_answers))
        return {
            'image': torch.stack(images, dim=0),
            'text_input': questions,
            'answer': answers,
            'weight': torch.Tensor(weights),
            'n_answers': torch.LongTensor(counts),
        }
def gen_CNN(channels, conv=tf.keras.layers.Conv1D, use_bias=True, activation=tf.keras.layers.ReLU, batch_norm=False):
    """Build a keras Sequential of 1x1 channels-first conv layers mapping
    channels[i] -> channels[i+1], each optionally followed by batch norm and
    an activation."""
    layers = []
    for in_size, out_size in zip(channels[:-1], channels[1:]):
        # in_size is implied by the previous layer; keras infers input shapes.
        layers.append(conv(out_size, 1, use_bias=use_bias, data_format='channels_first'))
        if batch_norm:
            layers.append(tf.keras.layers.BatchNormalization(axis=1, momentum=0.9, epsilon=1e-05))
        if activation is not None:
            layers.append(activation())
    return tf.keras.Sequential(layers)
class BasicBlock(BaseModule):
    """ResNet BasicBlock: two 3x3 convs with norm/ReLU and a residual add.

    Supports an optional ``downsample`` module for the identity branch and
    gradient checkpointing via ``with_cp``.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'),
                 dcn=None, plugins=None, init_cfg=None):
        super(BasicBlock, self).__init__(init_cfg)
        assert (dcn is None), 'Not implemented yet.'
        assert (plugins is None), 'Not implemented yet.'
        # Norm layers are registered under postfixed names and looked up via
        # the norm1/norm2 properties below.
        (self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride,
                                      padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    # Fixed: norm1/norm2 were plain zero-argument methods, yet `forward` calls
    # `self.norm1(out)` — that would pass `out` into the accessor and raise
    # TypeError. They must be properties returning the registered module.
    @property
    def norm1(self):
        """First normalization layer."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """Second normalization layer."""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            out = self.conv2(out)
            out = self.norm2(out)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out

        if (self.with_cp and x.requires_grad):
            # Trade compute for memory on the residual body.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
def main_worker(gpu, ngpus_per_node, config):
    """Build, load and train a depth model on one worker; always closes wandb."""
    try:
        fix_random_seed(config.seed if ('seed' in config and config.seed) else 43)
        config.gpu = gpu

        model = parallelize(config, load_ckpt(config, build_model(config)))

        total_params = f'{round((count_parameters(model) / 1000000.0), 2)}M'
        config.total_params = total_params
        print(f'Total parameters : {total_params}')

        train_loader = DepthDataLoader(config, 'train').data
        test_loader = DepthDataLoader(config, 'online_eval').data
        trainer = get_trainer(config)(config, model, train_loader, test_loader, device=config.gpu)
        trainer.train()
    finally:
        # Ensure the wandb run is closed even if training raises.
        import wandb
        wandb.finish()
def check_finish(all_model_dict, result_file):
    """Return the first config (in sorted order) not yet present in ``result_file``.

    Each line of ``result_file`` is a JSON object with a 'cfg' key.  When every
    config has been tested, append a 'finished' marker line and return None.

    NOTE(review): the appended 'finished' line is not JSON — a subsequent call
    on the same file would fail in json.loads; callers appear to treat the
    marker as terminal.
    """
    tested_cfgs = []
    # Read-only access is sufficient ('r+' in the original was never written).
    with open(result_file, 'r') as f:
        for line in f:
            tested_cfgs.append(json.loads(line)['cfg'])
    for cfg in sorted(all_model_dict.keys()):
        if cfg not in tested_cfgs:
            return cfg
    # All configs tested: mark completion (dead `is_finish` flag removed —
    # it was always True). Implicitly returns None.
    with open(result_file, 'a+') as f:
        f.write('finished\n')
def download_dataset(dataset, basedir, envfile, force_download):
    """Download/extract one dataset and record its path in ``envfile``.

    Args:
        dataset: Key into the module-level ``datasets`` registry
            (mapping subdir -> list of (url, md5) pairs).
        basedir: Root under which ``<basedir>/<dataset>`` is created.
        envfile: Shell env file whose ``DATADIR_<dataset>`` line is rewritten.
        force_download: When True, wipe any existing data dir first.
    """
    info = datasets[dataset]
    datadir = os.path.join(basedir, dataset)
    if force_download:
        if os.path.exists(datadir):
            print(f'Removing existing dir {datadir}')
            shutil.rmtree(datadir)
    for (subdir, flist) in info.items():
        for (url, md5) in flist:
            (fpath, download) = get_file(url, datadir=datadir, file_hash=md5, force_download=force_download)
            # Only freshly-downloaded archives are extracted.
            if download:
                extract_archive(fpath, path=os.path.join(datadir, subdir))
    datapath = f'DATADIR_{dataset}={datadir}'
    # Rewrite the matching export line in-place.
    # NOTE(review): if envfile has no DATADIR_<dataset> line yet, nothing is
    # added — confirm that is intended.
    with open(envfile) as f:
        lines = f.readlines()
    with open(envfile, 'w') as f:
        for l in lines:
            if (f'DATADIR_{dataset}' in l):
                l = f'''export {datapath}
'''
            f.write(l)
    print(f'Updated dataset path in {envfile} to "{datapath}".')
def load_leaf_data(file_path):
    """Load a LEAF-format JSON file and return only its 'user_data' mapping."""
    with open(file_path) as json_file:
        payload = json.load(json_file)
    # Only the user_data section is kept; the rest of the payload is dropped.
    return payload['user_data']
def check_norm_state(modules, train_state):
    """Return True iff every batch-norm module in ``modules`` has
    ``.training == train_state`` (vacuously True when there are none)."""
    return all(mod.training == train_state
               for mod in modules if isinstance(mod, _BatchNorm))
class OpenBuddyAdapter(BaseAdapter):
    """Chat-model adapter for OpenBuddy (LLaMA-based) checkpoints."""

    def match(self, model_path: str):
        # Any path containing 'openbuddy' selects this adapter.
        return ('openbuddy' in model_path)

    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
        """Load model and tokenizer; '-bf16' variants force torch.bfloat16."""
        if ('-bf16' in model_path):
            from_pretrained_kwargs['torch_dtype'] = torch.bfloat16
            warnings.warn('## This is a bf16(bfloat16) variant of OpenBuddy. Please make sure your GPU supports bf16.')
        model = LlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs)
        tokenizer = LlamaTokenizer.from_pretrained(model_path)
        return (model, tokenizer)

    def get_default_conv_template(self, model_path: str) -> Conversation:
        return get_conv_template('openbuddy')
def extract_features_from_path(components_list, statistics_list, sample_rate, path):
    """Best-effort feature extraction for one audio file.

    Returns the extracted feature dict, or {} on any error (the file is
    skipped rather than aborting a batch job).
    """
    try:
        wave = Waveform(path=path, sample_rate=sample_rate)
        return extract_features_from_waveform(components_list, statistics_list, wave)
    except Exception as extraction_exception:
        # Fixed: the original also had a trailing bare `except:` which swallowed
        # KeyboardInterrupt/SystemExit; those now propagate as they should.
        print(f'Found exception "{extraction_exception}". Skipping {path}')
        return {}
def _translateX(img, magnitude):
    """Translate a PIL image horizontally by magnitude*width pixels in a random direction."""
    direction = random.choice([-1, 1])
    shift = magnitude * img.size[0] * direction
    return img.transform(img.size, Image.AFFINE, (1, 0, shift, 0, 1, 0), fillcolor=fillcolor)
def test_branching_2():
    """Staleness check: x is computed from y, then y is rebound in the taken
    else-branch, so logging x must be flagged as depending on stale y."""
    run_cell('y = 7')
    run_cell('x = y + 3')
    # `if False` forces the else-branch, which reassigns y after x was derived.
    run_cell('\n if False:\n b = 5\n else:\n y = 9\n ')
    run_cell('logging.info(x)')
    assert_detected('x depends on stale y')
def GW_distance(X, Y, p, q, lamda=0.5, iteration=5, OT_iteration=20, **kwargs):
    """Batched Gromov-Wasserstein distance between point sets X and Y.

    Cosine similarity matrices are built per batch, the transport plan T and
    cost Cst come from ``GW_torch_batch``, and the distance is the trace of
    Cst^T @ T per batch element.

    NOTE(review): requires a CUDA device — ``.cuda()`` is unconditional.
    """
    Cs = cos_batch_torch(X, X).float().cuda()
    Ct = cos_batch_torch(Y, Y).float().cuda()
    bs = Cs.size(0)
    m = Ct.size(2)
    n = Cs.size(2)
    (T, Cst) = GW_torch_batch(Cs, Ct, bs, n, m, p, q, beta=lamda, iteration=iteration, OT_iteration=OT_iteration)
    # Per-batch trace of Cst^T @ T yields the GW objective value.
    temp = torch.bmm(torch.transpose(Cst, 1, 2), T)
    distance = batch_trace(temp, m, bs)
    return distance
class InputMetadata():
    """Metadata for one batch of sequences fed to the model: sequence groups,
    per-sequence data, prompt lengths and generation-context lengths."""

    def __init__(self, seq_groups: List[Tuple[(List[int], SamplingParams)]], seq_data: Dict[(int, SequenceData)], prompt_lens: List[int], context_lens: torch.Tensor, max_context_len: int, sliding_window: Optional[int]=None) -> None:
        # NOTE(review): sliding_window is accepted but never stored — confirm
        # that is intentional.
        self.seq_groups = seq_groups
        self.seq_data = seq_data
        self.prompt_lens = prompt_lens
        self.context_lens = context_lens
        self.max_context_len = max_context_len
        self.to_cache = None  # populated later by the cache engine, if at all
        self.num_prompts = len(prompt_lens)
        self.num_prompt_tokens = sum(prompt_lens)
        self.num_generation_tokens = context_lens.shape[0]

    def __repr__(self) -> str:
        # Fixed: the previous repr referenced attributes this class never sets
        # (num_valid_tokens, max_num_blocks_per_seq, block_tables, slot_mapping)
        # and therefore raised AttributeError whenever it was formatted.
        return (f'InputMetadata(num_prompts={self.num_prompts}, '
                f'num_prompt_tokens={self.num_prompt_tokens}, '
                f'num_generation_tokens={self.num_generation_tokens}, '
                f'prompt_lens={self.prompt_lens}, '
                f'context_lens={self.context_lens}, '
                f'max_context_len={self.max_context_len})')
def save_mod(model, mod_path):
    """Export ``model`` as a TensorFlow SavedModel at ``mod_path``."""
    print(f'Save to {mod_path}')
    tf.saved_model.save(model, mod_path)
def predictive_index(pred, true):
    """Weighted concordance index between predictions and ground truth.

    For each pair (i, j): weight w = |true_j - true_i|; concordance c is +1
    when pred and true move in the same direction, 0 on a tie in true, and -1
    otherwise. Returns sum(w*c) / sum(w).
    """
    n = len(pred)
    weights = []
    concord = []
    for i in range(n):
        for j in range(i + 1, n):
            dt = true[j] - true[i]
            dp = pred[j] - pred[i]
            weights.append(abs(dt))
            if dp * dt > 0:
                concord.append(1)
            elif dt == 0:
                concord.append(0)
            else:
                concord.append(-1)
    weights = np.array(weights)
    concord = np.array(concord)
    return np.sum(weights * concord) / np.sum(weights)
def set_quad_double_start_solutions(nvr, sols, vrblvl=0):
    """Pass quad-double start solutions to PHCpack and invoke job 268.

    Args:
        nvr: Number of variables.
        sols: List of solution strings.
        vrblvl: Verbosity level; > 0 prints diagnostics.

    Returns:
        The PHCpack return value of job 268.
    """
    if (vrblvl > 0):
        print('in set_quad_double_start_solutions, with nvr :', nvr)
        print('the solutions :')
        for (idx, sol) in enumerate(sols):
            print('Solution', idx, ':')
            print(sol)
    set_quad_double_solutions(nvr, sols, vrblvl)
    phc = get_phcfun()
    # Placeholder in/out arguments required by the C calling convention.
    aaa = pointer(c_int32(0))
    bbb = pointer(c_int32(0))
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> set_quad_double_start_solutions calls phc', end='')
    retval = phc(268, aaa, bbb, ccc, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
    return retval
class ResNet(nn.Module):
    """ResNet backbone that returns intermediate feature maps.

    ``forward`` returns [input, post-maxpool, layer1..layer4 outputs]; the
    final fc logits are computed but not returned (see NOTE in forward).
    """

    def __init__(self, block, layers, nchannels, nfilters, nclasses=1000):
        # inplanes must be set before _make_layer is first called.
        self.inplanes = nfilters
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(nchannels, nfilters, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(nfilters)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, nfilters, layers[0])
        self.layer2 = self._make_layer(block, (2 * nfilters), layers[1], stride=2)
        self.layer3 = self._make_layer(block, (4 * nfilters), layers[2], stride=2)
        self.layer4 = self._make_layer(block, (8 * nfilters), layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(((8 * nfilters) * block.expansion), nclasses)
        # He-style init for convs; unit-gamma/zero-beta for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # 1x1 conv downsample on the identity path when spatial or channel
        # dimensions change.
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x0):
        x = self.conv1(x0)
        x = self.bn1(x)
        x = self.relu(x)
        x1 = self.maxpool(x)
        x2 = self.layer1(x1)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)
        x = self.avgpool(x5)
        x = x.view(x.size(0), (- 1))
        # NOTE(review): x6 (fc logits) is computed but never returned — wasted
        # compute if callers only need the feature pyramid; confirm intent.
        x6 = self.fc(x)
        return [x0, x1, x2, x3, x4, x5]
def get_global_norm(arrays):
    """Global gradient norm: sqrt of the summed squared L2 norms of all arrays.

    All partial norms are moved to the context of the first array before the
    reduction.
    """
    ctx = arrays[0].context
    flat_views = (arr.reshape((-1,)) for arr in arrays)
    squared_norms = [nd.dot(v, v).as_in_context(ctx) for v in flat_views]
    return nd.sqrt(nd.add_n(*squared_norms)).asscalar()
# NOTE(review): this bare call looks like a stripped `@_REGISTRY.register()`
# decorator — confirm against the original source.
_REGISTRY.register()
def resnet101(pretrained=True, **kwargs):
    """Construct a ResNet-101, optionally loading ImageNet-pretrained weights.

    Extra keyword arguments are forwarded to ``ResNet`` (the original accepted
    **kwargs but silently dropped them).
    """
    model = ResNet(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet101'])
    return model
class TestMeters(unittest.TestCase):
def testAverageValueMeter(self):
m = meter.AverageValueMeter()
for i in range(1, 10):
m.add(i)
(mean, std) = m.value()
self.assertEqual(mean, 5.0)
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_np_2d(self):
m = meter.AverageValueMeter()
for i in range(1, 10):
m.add(np.float32([[i, (i + 1)]]))
(mean, std) = m.value()
self.assertTrue(np.allclose(mean, [[5.0, 6.0]]))
self.assertTrue(np.allclose(std, [[2.738613, 2.738613]]))
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_torch_2d(self):
m = meter.AverageValueMeter()
for i in range(1, 10):
m.add(torch.Tensor([[i, (i + 1)]]))
(mean, std) = m.value()
self.assertTrue(np.allclose(mean, [[5.0, 6.0]]))
self.assertTrue(np.allclose(std, [[2.738613, 2.738613]]))
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_n(self):
m = meter.AverageValueMeter()
for i in range(1, 11):
m.add((i * i), n=i)
(mean, std) = m.value()
self.assertEqual(mean, 7.0)
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_stable(self):
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return (abs((a - b)) <= max((rel_tol * max(abs(a), abs(b))), abs_tol))
m = meter.AverageValueMeter()
samples = ([0.7] * 10)
truth = np.array([])
for sample in samples:
truth = np.append(truth, sample)
m.add(sample)
(mean, std) = m.value()
self.assertTrue(isclose(truth.mean(), mean))
self.assertTrue(((math.isnan(std) and math.isnan(truth.std(ddof=1))) or (math.isinf(std) and math.isnan(truth.std(ddof=1))) or isclose(std, truth.std(ddof=1), abs_tol=1e-07)))
def testClassErrorMeter(self):
mtr = meter.ClassErrorMeter(topk=[1])
output = torch.eye(3)
if hasattr(torch, 'arange'):
target = torch.arange(0, 3)
else:
target = torch.range(0, 2)
mtr.add(output, target)
err = mtr.value()
self.assertEqual(err, [0], 'All should be correct')
target[0] = 1
target[1] = 0
target[2] = 0
mtr.add(output, target)
err = mtr.value()
self.assertEqual(err, [50.0], 'Half should be correct')
def testClassErrorMeteri_batch1(self):
mtr = meter.ClassErrorMeter(topk=[1])
output = torch.tensor([1, 0, 0])
if hasattr(torch, 'arange'):
target = torch.arange(0, 1)
else:
target = torch.range(0, 0)
mtr.add(output, target)
err = mtr.value()
self.assertEqual(err, [0], 'All should be correct')
def testConfusionMeter(self):
mtr = meter.ConfusionMeter(k=3)
output = torch.Tensor([[0.8, 0.1, 0.1], [10, 11, 10], [0.2, 0.2, 0.3]])
if hasattr(torch, 'arange'):
target = torch.arange(0, 3)
else:
target = torch.range(0, 2)
mtr.add(output, target)
conf_mtrx = mtr.value()
self.assertEqual(conf_mtrx.sum(), 3, 'All should be correct')
self.assertEqual(conf_mtrx.diagonal().sum(), 3, 'All should be correct')
target = torch.Tensor([1, 0, 0])
mtr.add(output, target)
self.assertEqual(conf_mtrx.sum(), 6, 'Six tests should give six values')
self.assertEqual(conf_mtrx.diagonal().sum(), 3, "Shouldn't have changed since all new values were false")
self.assertEqual(conf_mtrx[0].sum(), 3, 'All top have gotten one guess')
self.assertEqual(conf_mtrx[1].sum(), 2, 'Two first at the 2nd row have a guess')
self.assertEqual(conf_mtrx[1][2], 0, 'The last one should be empty')
self.assertEqual(conf_mtrx[2].sum(), 1, 'Bottom row has only the first test correct')
self.assertEqual(conf_mtrx[2][2], 1, 'Bottom row has only the first test correct')
mtr = meter.ConfusionMeter(k=4, normalized=True)
output = torch.Tensor([[0.8, 0.1, 0.1, 0], [10, 11, 10, 0], [0.2, 0.2, 0.3, 0], [0, 0, 0, 1]])
target = torch.Tensor([0, 1, 2, 3])
mtr.add(output, target)
conf_mtrx = mtr.value()
self.assertEqual(conf_mtrx.sum(), output.size(1), 'All should be correct')
self.assertEqual(conf_mtrx.diagonal().sum(), output.size(1), 'All should be correct')
target[0] = 1
target[1] = 0
target[2] = 0
mtr.add(output, target)
conf_mtrx = mtr.value()
self.assertEqual(conf_mtrx.sum(), output.size(1), 'The normalization should sum all values to 1')
for (i, row) in enumerate(conf_mtrx):
self.assertEqual(row.sum(), 1, (('Row no ' + str(i)) + ' fails to sum to one in normalized mode'))
def testMSEMeter(self):
    """MSEMeter: the mean squared error of all-ones vs. all-zeros is exactly 1."""
    predictions = torch.ones(7)
    targets = torch.zeros(7)
    mse_meter = meter.MSEMeter()
    mse_meter.add(predictions, targets)
    self.assertEqual(1.0, mse_meter.value())
def testMovingAverageValueMeter(self):
    """MovingAverageValueMeter with window 3: value() returns (mean, spread).

    NOTE(review): the second return value matches the *sample standard
    deviation* of the window (sqrt(2), 2, 1, sqrt(7) below) despite being
    unpacked as ``var`` — confirm against the meter implementation.
    """
    mtr = meter.MovingAverageValueMeter(3)
    mtr.add(1)
    (avg, var) = mtr.value()
    self.assertEqual(avg, 1.0)
    self.assertEqual(var, 0.0)
    mtr.add(3)
    (avg, var) = mtr.value()
    self.assertEqual(avg, 2.0)
    self.assertEqual(var, math.sqrt(2))
    # Window is now full: [1, 3, 5].
    mtr.add(5)
    (avg, var) = mtr.value()
    self.assertEqual(avg, 3.0)
    self.assertEqual(var, 2.0)
    # Oldest value (1) drops out: window [3, 5, 4].
    mtr.add(4)
    (avg, var) = mtr.value()
    self.assertEqual(avg, 4.0)
    self.assertEqual(var, 1.0)
    # Window [5, 4, 0].
    mtr.add(0)
    (avg, var) = mtr.value()
    self.assertEqual(avg, 3.0)
    self.assertEqual(var, math.sqrt(7))
def testAUCMeter(self):
    """AUCMeter: random scores give chance-level AUC; separable scores give 1.0."""
    mtr = meter.AUCMeter()
    test_size = 1000
    # Random scores for both the negative (label 0) and positive (label 1)
    # class -> AUC should land within 0.1 of the chance level 0.5.
    mtr.add(torch.rand(test_size), torch.zeros(test_size))
    mtr.add(torch.rand(test_size), torch.Tensor(test_size).fill_(1))
    (val, tpr, fpr) = mtr.value()
    self.assertTrue((math.fabs((val - 0.5)) < 0.1), msg='AUC Meter fails')
    mtr.reset()
    # All negatives scored <= 0.4 and all positives scored 1 -> perfect
    # ranking, so AUC must be exactly 1.
    mtr.add(torch.Tensor(test_size).fill_(0), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.1), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.2), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.3), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.4), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(1), torch.Tensor(test_size).fill_(1))
    (val, tpr, fpr) = mtr.value()
    self.assertEqual(val, 1.0, msg='AUC Meter fails')
def testAPMeter(self):
    """APMeter: average precision, with and without sample weights.

    Each ``val`` is the hand-computed expected AP: sum over ranked positions
    of (cumulative positive weight at that position / cumulative total
    weight), for positive positions only, divided by total positive count.
    """
    mtr = meter.APMeter()
    # Scenario 1: ascending scores, weighted vs. unweighted.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([0.1, 0.2, 0.3, 4])
    weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = ((((((1 * 0.1) / 0.1) + ((0 * 2.0) / 2.1)) + ((1.1 * 1) / 3.1)) + ((0 * 1) / 4)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test1 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = ((((((1 * 1.0) / 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test2 failed')
    # Scenario 2: strictly descending scores.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([4, 3, 2, 1])
    weight = torch.Tensor([1, 2, 3, 4])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = ((((((0 * 1.0) / 1.0) + ((1.0 * 2.0) / 3.0)) + ((2.0 * 0) / 6.0)) + ((6.0 * 1.0) / 10.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test3 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (((((0 * 1.0) + ((1 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((2 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test4 failed')
    # Scenario 3: positives ranked first.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1, 2, 3, 4])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = ((((((4 * 1.0) / 4.0) + ((6 * 1.0) / 6.0)) + ((0 * 6.0) / 9.0)) + ((0 * 6.0) / 10.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test5 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (((((1 * 1.0) + ((2 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test6 failed')
    # Scenario 4: no positives at all -> AP must be 0.
    target = torch.Tensor([0, 0, 0, 0])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertEqual(ap[0], 0.0)
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertEqual(ap[0], 0.0)
    # Scenario 5: three samples, including a zero-ish weight positive.
    target = torch.Tensor([1, 1, 0])
    output = torch.Tensor([3, 1, 2])
    weight = torch.Tensor([1, 0.1, 3])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = (((((1 * 1.0) / 1.0) + ((1 * 0.0) / 4.0)) + (1.1 / 4.1)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test7 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = ((((1 * 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test8 failed')
    # Scenario 6: multi-class (two columns), AP computed per class.
    target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
    output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)
    weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap.sum() - torch.Tensor([((((((1 * 3.0) / 3.0) + ((0 * 3.0) / 5.0)) + ((3.5 * 1) / 5.5)) + ((0 * 3.5) / 6.5)) / 2.0), ((((((0 * 1.0) / 1.0) + ((1 * 0.5) / 1.5)) + ((0 * 0.5) / 3.5)) + ((1 * 3.5) / 6.5)) / 2.0)]).sum())) < 0.01), msg='ap test9 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap.sum() - torch.Tensor([(((((1 * 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3)) + ((0 * 1.0) / 4.0)) / 2.0), (((((0 * 1.0) + ((1 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((2.0 * 1.0) / 4.0)) / 2.0)]).sum())) < 0.01), msg='ap test10 failed')
    # Scenario 7: batches of different sizes accumulate; output keeps 4 classes.
    mtr.reset()
    output = torch.Tensor(5, 4).fill_(0.25)
    target = torch.ones(5, 4)
    mtr.add(output, target)
    output = torch.Tensor(1, 4).fill_(0.25)
    target = torch.ones(1, 4)
    mtr.add(output, target)
    self.assertEqual(mtr.value().size(0), 4, msg='ap test11 failed')
def testmAPMeter(self):
    """mAPMeter: mean of the per-class APs, with and without sample weights.

    Expected values are hand-computed exactly as in testAPMeter, then
    averaged over classes.
    """
    mtr = meter.mAPMeter()
    # Single class: mAP equals AP.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([0.1, 0.2, 0.3, 4])
    weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
    mtr.add(output, target)
    ap = mtr.value()
    val = ((((((1 * 1.0) / 1.0) + ((0 * 1.0) / 2.0)) + ((2.0 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap - val)) < 0.01), msg='mAP test1 failed')
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = ((((((1 * 0.1) / 0.1) + ((0 * 2.0) / 2.1)) + ((1.1 * 1) / 3.1)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap - val)) < 0.01), msg='mAP test2 failed')
    # Two classes: mAP is the mean of the two per-class APs.
    target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
    output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)
    weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap - torch.Tensor([((((((1 * 3.0) / 3.0) + ((0 * 3.0) / 5.0)) + ((3.5 * 1) / 5.5)) + ((0 * 3.5) / 6.5)) / 2.0), ((((((0 * 1.0) / 1.0) + ((1 * 0.5) / 1.5)) + ((0 * 0.5) / 3.5)) + ((1 * 3.5) / 6.5)) / 2.0)]).mean())) < 0.01), msg='mAP test3 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap - torch.Tensor([(((((1 * 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0), (((((0 * 1.0) + ((1 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((2 * 1.0) / 4.0)) / 2.0)]).mean())) < 0.01), msg='mAP test4 failed')
def simxSetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
    """Set an integer parameter on a scene object via the remote-API C binding.

    Thin pass-through to ``c_SetObjectIntParameter``; returns whatever return
    code the C binding produces.
    """
    return c_SetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
class LLMConnection():
    """Thin wrapper around an LLM backend connection.

    Binds the model name and context size from *config* to a query-executing
    callable, so callers only supply the query text.
    """

    def __init__(self, config, exec_query):
        # config: object exposing llm_connection, model and context_size.
        self.conn = config.llm_connection
        self.model = config.model
        self.context_size = config.context_size
        # BUGFIX: store the callable under a private name. Assigning it to
        # ``self.exec_query`` shadowed the exec_query() method below, making
        # the method unreachable and calling the raw callable with the wrong
        # arguments.
        self._exec_query = exec_query

    def exec_query(self, query):
        """Run *query* against the backend using this connection's model and context size."""
        return self._exec_query(self.model, self.context_size, query)

    def get_model(self) -> str:
        """Return the configured model name."""
        return self.model

    def get_context_size(self) -> int:
        """Return the configured context size.

        BUGFIX: the original defined this method twice; the duplicate
        (annotation-free) definition has been removed.
        """
        return self.context_size
def osnet_x1_0_ms234_a0d1(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """Build OSNet x1.0 with MixStyle applied after stages conv2/conv3/conv4.

    The name encodes the configuration: ``ms234`` = MixStyle on conv2-4,
    ``a0d1`` = mixstyle_alpha 0.1. When *pretrained* is true, weights are
    loaded via init_pretrained_weights under the 'osnet_x1_0' key.
    """
    model = OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[64, 256, 384, 512], loss=loss, mixstyle_layers=['conv2', 'conv3', 'conv4'], mixstyle_alpha=0.1, **kwargs)
    if pretrained:
        init_pretrained_weights(model, key='osnet_x1_0')
    return model
class Using(Node):
    """AST node holding the name list of a ``using`` declaration/directive."""

    def __init__(self, start, end, names):
        # Span bookkeeping is handled by the Node base class.
        super().__init__(start, end)
        self.names = names

    def __str__(self):
        return self._StringHelper(type(self).__name__, str(self.names))
def quad_double_cascade_step(dim, embsys, esols, tasks=0):
    """Run one step of the cascade homotopy in quad double precision.

    Stores the embedded system *embsys* of dimension *dim* with its solutions
    *esols* as the start of a cascade homotopy, tracks all paths (*tasks*
    worker threads; 0 disables multithreading) and returns the solutions at
    the target. The py2c_* calls mutate PHCpack's global containers, so the
    call order below is significant.
    """
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_system
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_solutions
    from phcpy.phcpy2c3 import py2c_quaddobl_cascade_homotopy
    from phcpy.phcpy2c3 import py2c_solve_by_quaddobl_homotopy_continuation
    from phcpy.phcpy2c3 import py2c_solcon_clear_quaddobl_solutions
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_target_solutions_to_container
    from phcpy.interface import store_quaddobl_witness_set
    from phcpy.interface import load_quaddobl_solutions
    # Load the witness set into the global containers, then set it as start data.
    store_quaddobl_witness_set(len(embsys), dim, embsys, esols)
    py2c_copy_quaddobl_container_to_start_system()
    py2c_copy_quaddobl_container_to_start_solutions()
    py2c_quaddobl_cascade_homotopy()
    py2c_solve_by_quaddobl_homotopy_continuation(tasks)
    # Clear stale solutions before copying the freshly computed targets back.
    py2c_solcon_clear_quaddobl_solutions()
    py2c_copy_quaddobl_target_solutions_to_container()
    return load_quaddobl_solutions()
# NOTE(review): restored from the mangled '_task(...)' line; requires
# register_task (fairseq.tasks) to be imported at the top of the file — confirm.
@register_task('multilingual_translation')
class MultilingualTranslationTask(FairseqTask):
    """Translation task for multiple language pairs trained jointly.

    One dictionary is loaded per language; batches are round-robined over the
    language pairs and a FairseqMultiModel holds one component model per pair.

    Several stripped decorators are restored below: ``add_args`` is a
    staticmethod, ``setup_task``/``prepare`` are classmethods (they receive
    ``cls``), and ``source_dictionary``/``target_dictionary`` are properties
    (they are read as attributes, e.g. in build_dataset_for_inference).
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the argument parser."""
        parser.add_argument('data', metavar='DIR', help='path to data directory')
        parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language (only needed for inference)')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language (only needed for inference)')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset')
        parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], metavar='SRCTGT', help='replace beginning-of-sentence in source sentence with source or target language token. (src/tgt)')
        parser.add_argument('--decoder-langtok', action='store_true', help='replace beginning-of-sentence in target sentence with target language token')

    def __init__(self, args, dicts, training):
        super().__init__(args)
        self.dicts = dicts
        self.training = training
        if training:
            self.lang_pairs = args.lang_pairs
        else:
            # Inference: a single pair built from --source-lang/--target-lang.
            self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        self.eval_lang_pairs = self.lang_pairs
        self.model_lang_pairs = self.lang_pairs
        self.langs = list(dicts.keys())

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Task factory used by fairseq (restored @classmethod)."""
        (dicts, training) = cls.prepare(args, **kwargs)
        return cls(args, dicts, training)

    @classmethod
    def prepare(cls, args, **kargs):
        """Validate args and load one dictionary per language (restored @classmethod)."""
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        if (args.lang_pairs is None):
            raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
        if isinstance(args.lang_pairs, str):
            args.lang_pairs = args.lang_pairs.split(',')
        sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
        # Presence of explicit source/target languages signals inference mode.
        if ((args.source_lang is not None) or (args.target_lang is not None)):
            training = False
        else:
            training = True
        dicts = OrderedDict()
        for lang in sorted_langs:
            paths = utils.split_paths(args.data)
            assert (len(paths) > 0)
            dicts[lang] = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
            if (len(dicts) > 0):
                # All dictionaries must share the same special symbols.
                assert (dicts[lang].pad() == dicts[sorted_langs[0]].pad())
                assert (dicts[lang].eos() == dicts[sorted_langs[0]].eos())
                assert (dicts[lang].unk() == dicts[sorted_langs[0]].unk())
            if ((args.encoder_langtok is not None) or args.decoder_langtok):
                for lang_to_add in sorted_langs:
                    dicts[lang].add_symbol(_lang_token(lang_to_add))
            logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
        return (dicts, training)

    def get_encoder_langtok(self, src_lang, tgt_lang):
        """Token that replaces EOS at the start of the source sentence."""
        if (self.args.encoder_langtok is None):
            return self.dicts[src_lang].eos()
        if (self.args.encoder_langtok == 'src'):
            return _lang_token_index(self.dicts[src_lang], src_lang)
        else:
            return _lang_token_index(self.dicts[src_lang], tgt_lang)

    def get_decoder_langtok(self, tgt_lang):
        """Token that replaces BOS on the decoder side."""
        if (not self.args.decoder_langtok):
            return self.dicts[tgt_lang].eos()
        return _lang_token_index(self.dicts[tgt_lang], tgt_lang)

    def alter_dataset_langtok(self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
        """Wrap a dataset so language tokens replace EOS/BOS when configured."""
        if ((self.args.encoder_langtok is None) and (not self.args.decoder_langtok)):
            return lang_pair_dataset
        new_src_eos = None
        if ((self.args.encoder_langtok is not None) and (src_eos is not None) and (src_lang is not None) and (tgt_lang is not None)):
            new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
        else:
            src_eos = None
        new_tgt_bos = None
        if (self.args.decoder_langtok and (tgt_eos is not None) and (tgt_lang is not None)):
            new_tgt_bos = self.get_decoder_langtok(tgt_lang)
        else:
            tgt_eos = None
        return TransformEosLangPairDataset(lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos)

    def load_dataset(self, split, epoch=1, **kwargs):
        """Load a round-robin dataset over all language pairs for *split*."""
        paths = utils.split_paths(self.args.data)
        assert (len(paths) > 0)
        # Rotate over data shards by epoch.
        data_path = paths[((epoch - 1) % len(paths))]

        def language_pair_dataset(lang_pair):
            (src, tgt) = lang_pair.split('-')
            langpair_dataset = load_langpair_dataset(data_path, split, src, self.dicts[src], tgt, self.dicts[tgt], combine=True, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions)
            return self.alter_dataset_langtok(langpair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt)
        self.datasets[split] = RoundRobinZipDatasets(OrderedDict([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in self.lang_pairs]), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang))))

    def build_dataset_for_inference(self, src_tokens, src_lengths):
        lang_pair = ('%s-%s' % (self.args.source_lang, self.args.target_lang))
        return RoundRobinZipDatasets(OrderedDict([(lang_pair, self.alter_dataset_langtok(LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary), src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang))]), eval_key=lang_pair)

    def build_model(self, args):
        """Build the multi-model and verify args match the task configuration."""
        def check_args():
            messages = []
            if (len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0):
                messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
            if (self.args.encoder_langtok != args.encoder_langtok):
                messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
            if (self.args.decoder_langtok != args.decoder_langtok):
                messages.append('--decoder-langtok should {} be set.'.format(('' if args.decoder_langtok else 'not')))
            if (len(messages) > 0):
                raise ValueError(' '.join(messages))
        check_args()
        from fairseq import models
        model = models.build_model(args, self)
        if (not isinstance(model, FairseqMultiModel)):
            raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
        return model

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """One optimization step: accumulate loss over all present language pairs."""
        model.train()
        from collections import defaultdict
        (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, defaultdict(float))
        curr_lang_pairs = [lang_pair for lang_pair in self.model_lang_pairs if ((sample[lang_pair] is not None) and (len(sample[lang_pair]) != 0))]
        for (idx, lang_pair) in enumerate(curr_lang_pairs):

            def maybe_no_sync():
                # Skip gradient all-reduce for all but the last pair so the
                # gradients are synchronized only once per step.
                if ((self.args.distributed_world_size > 1) and hasattr(model, 'no_sync') and (idx < (len(curr_lang_pairs) - 1))):
                    return model.no_sync()
                else:
                    return contextlib.ExitStack()
            with maybe_no_sync():
                (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair])
                if ignore_grad:
                    loss *= 0
                optimizer.backward(loss)
            agg_loss += loss.detach().item()
            agg_sample_size += sample_size
            for k in logging_output:
                agg_logging_output[k] += logging_output[k]
                agg_logging_output[f'{lang_pair}:{k}'] += logging_output[k]
        return (agg_loss, agg_sample_size, agg_logging_output)

    def valid_step(self, sample, model, criterion):
        """Evaluate the criterion over all evaluation language pairs without gradients."""
        model.eval()
        with torch.no_grad():
            from collections import defaultdict
            (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, defaultdict(float))
            for lang_pair in self.eval_lang_pairs:
                if ((lang_pair not in sample) or (sample[lang_pair] is None) or (len(sample[lang_pair]) == 0)):
                    continue
                (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair])
                agg_loss += loss.data.item()
                agg_sample_size += sample_size
                for k in logging_output:
                    agg_logging_output[k] += logging_output[k]
                    agg_logging_output[f'{lang_pair}:{k}'] += logging_output[k]
        return (agg_loss, agg_sample_size, agg_logging_output)

    def inference_step(self, generator, models, sample, prefix_tokens=None):
        with torch.no_grad():
            if self.args.decoder_langtok:
                bos_token = _lang_token_index(self.target_dictionary, self.args.target_lang)
            else:
                bos_token = self.target_dictionary.eos()
            return generator.generate(models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token)

    def reduce_metrics(self, logging_outputs, criterion):
        with metrics.aggregate():
            super().reduce_metrics(logging_outputs, criterion)
            for k in ['sample_size', 'nsentences', 'ntokens']:
                metrics.log_scalar(k, sum((l[k] for l in logging_outputs)))

    @property
    def source_dictionary(self):
        """Restored @property: read as an attribute in build_dataset_for_inference."""
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.source_lang]

    @property
    def target_dictionary(self):
        """Restored @property: read as an attribute in inference_step."""
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.target_lang]

    def max_positions(self):
        """Maximum (source, target) lengths, keyed by language pair."""
        if (len(self.datasets.values()) == 0):
            return {('%s-%s' % (self.args.source_lang, self.args.target_lang)): (self.args.max_source_positions, self.args.max_target_positions)}
        return OrderedDict([(key, (self.args.max_source_positions, self.args.max_target_positions)) for split in self.datasets.keys() for key in self.datasets[split].datasets.keys()])
class AdjustSaturation(object):
    """Joint (image, mask) transform: randomly perturb the image's saturation.

    The saturation factor is drawn uniformly from
    [1 - saturation, 1 + saturation]; the mask passes through untouched.
    """

    def __init__(self, saturation):
        # Maximum relative saturation shift.
        self.saturation = saturation

    def __call__(self, img, mask):
        assert img.size == mask.size
        factor = random.uniform(1 - self.saturation, 1 + self.saturation)
        return tf.adjust_saturation(img, factor), mask
def main(args):
    """Cluster each image folder of a re-ID dataset with DBSCAN over QAConv distances.

    For every subfolder of <data_dir>/<dataset>: build the feature extractor
    and QAConv matcher, restore them from a checkpoint, compute the pairwise
    distance matrix of all images, cluster it with DBSCAN, and copy each image
    into a per-cluster output directory, prefixing file names with the image's
    average intra-cluster distance.
    """
    for file in os.listdir(osp.join(args.data_dir, args.dataset)):  # NOTE: 'file' shadows the builtin
        cudnn.deterministic = False
        cudnn.benchmark = True
        model = resmap.create(args.arch, ibn_type=args.ibn, final_layer=args.final_layer, neck=args.neck).cuda()
        num_features = model.num_features
        # Downsampling factor of the chosen final layer -> feature-map size.
        feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
        hei = (args.height // feamap_factor[args.final_layer])
        wid = (args.width // feamap_factor[args.final_layer])
        matcher = QAConv(num_features, hei, wid).cuda()
        # Echo the full command line for reproducibility.
        for arg in sys.argv:
            print(('%s ' % arg), end='')
        print('\n')
        (dataset, img_path, transformer) = get_data(args.dataset, args.data_dir, file, args)
        criterion = PairwiseMatchingLoss(matcher).cuda()
        print('Loading checkpoint...')
        print('###', osp.join(args.save_path, 'checkpoint.pth.tar'))
        checkpoint = load_checkpoint(osp.join(args.save_path, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        model = nn.DataParallel(model).cuda()
        dist = calc_distance(dataset, img_path, transformer, model, matcher, True, args.gal_batch_size, args.prob_batch_size)
        dist_numpy = dist.cpu().numpy()
        folder = img_path.split('/').pop()
        savepath = ((args.save_data_path + folder) + '/')
        if (not os.path.exists(savepath)):
            os.makedirs(savepath)
        if (dist.size()[0] >= 2):
            # More than one image: cluster on the distance matrix.
            # Upper triangle (excluding diagonal) = unique pairwise distances.
            tri_mat = np.triu(dist.cpu(), 1)
            tri_mat = tri_mat[np.nonzero(tri_mat)]
            tri_mat = np.sort(tri_mat, axis=None)
            rho = args.rho
            # NOTE(review): top_num is computed but unused below; eps comes
            # straight from args — possibly leftover from an adaptive-eps scheme.
            top_num = np.round((rho * tri_mat.size)).astype(int)
            eps = args.eps
            savepath_new = ((savepath + str(eps)) + '/')
            print('savepath:', savepath_new)
            if (not os.path.exists(savepath_new)):
                os.makedirs(savepath_new)
            print('EPS for cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps, min_samples=args.min_samples, metric='precomputed', n_jobs=(- 1))
            print('Clustering and labeling...')
            labels = cluster.fit_predict(dist.cpu())
            # Start at -1 so DBSCAN's noise label is also written out as a group.
            for i in range((- 1), (np.max(labels) + 1)):
                index_list = np.where((labels == i))[0]
                if (len(index_list) > 0):
                    # Sub-matrix of distances within this cluster.
                    reg1 = dist_numpy[np.ix_(index_list, index_list)]
                    pic1 = np.array(dataset)[(index_list, 0)]
                    dist_average = get_dist_average(reg1)
                    current_savepath = ((((savepath_new + folder) + '_') + str(i)) + '/')
                    if (not os.path.exists(current_savepath)):
                        os.makedirs(current_savepath)
                    for j in range(0, len(pic1)):
                        print((((current_savepath + str(dist_average[j])) + '_') + pic1[j]))
                        shutil.copyfile(((img_path + '/') + pic1[j]), (((current_savepath + str(dist_average[j])) + '_') + pic1[j]))
            num_ids = (len(set(labels)) - (1 if ((- 1) in labels) else 0))
            print('DBSCAN: clustered into {} classes.'.format(num_ids))
            labels = np.array(labels)
            noisy_samples = (labels == (- 1)).sum()
            core_samples = (len(dataset) - noisy_samples)
            print(('Core samples: %d. Noisy samples: %d. Average samples per cluster: %d.\n' % (core_samples, noisy_samples, (core_samples // num_ids))))
        else:
            # Single image (or degenerate matrix): just copy with its average distance.
            average_dist = get_dist_average(dist_numpy)
            for i in range(0, len(dataset)):
                shutil.copyfile(((img_path + '/') + dataset[i][0]), (((savepath + str(average_dist[i])) + '_') + dataset[i][0]))
        print(folder)
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
    """Run _dynamic_rnn on inputs with arbitrary leading dims.

    Flattens all but the last two dimensions of *inputs* (and the matching
    dims of *sequence_length*), runs the underlying RNN, and reshapes the
    outputs back to the original leading shape. Only batch-major layout is
    supported.
    """
    assert not time_major
    flat_inputs = flatten(inputs, 2)
    if sequence_length is None:
        flat_len = None
    else:
        flat_len = tf.cast(flatten(sequence_length, 0), 'int64')
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len, initial_state=initial_state, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=scope)
    # Restore the original leading dimensions on the outputs.
    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
class SparseMultiheadAttention(MultiheadAttention):
    """Multi-head attention with a fixed sparse attention mask.

    Each query position attends to a local window of width ``stride`` plus a
    set of periodic "summary" positions (the last ``expressivity`` positions
    of each stride-sized segment). Positions outside the pattern are masked
    to -inf before softmax.
    """

    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True):
        super().__init__(embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention)
        # Bidirectional: summaries come from the whole sequence; otherwise
        # only from positions up to (and including) the query.
        self.is_bidirectional = is_bidirectional
        self.stride = stride
        self.expressivity = expressivity
        # The summary width must fit inside one stride segment.
        assert ((self.stride > 0) and (self.stride >= self.expressivity))

    def compute_checkpoint(self, word_index):
        """Index of the first summary position at/after *word_index*'s segment."""
        if (((word_index % self.stride) == 0) and (word_index != 0)):
            checkpoint_index = (word_index - self.expressivity)
        else:
            # End of the current stride segment, minus the summary width.
            checkpoint_index = (((math.floor((word_index / self.stride)) * self.stride) + self.stride) - self.expressivity)
        return checkpoint_index

    def compute_subset_summaries(self, absolute_max):
        """All summary positions strictly below *absolute_max*."""
        checkpoint_index = self.compute_checkpoint(0)
        subset_two = set()
        while (checkpoint_index <= (absolute_max - 1)):
            summary = set(range(checkpoint_index, min(((checkpoint_index + self.expressivity) + 1), absolute_max)))
            subset_two = subset_two.union(summary)
            checkpoint_index = self.compute_checkpoint((checkpoint_index + self.stride))
        return subset_two

    def compute_fixed_attention_subset(self, word_index, tgt_len):
        """Set of key positions that query *word_index* may attend to."""
        # Causal case: never attend beyond the query position.
        if (not self.is_bidirectional):
            absolute_max = (word_index + 1)
        else:
            absolute_max = tgt_len
        # Local window: one stride-sized band around/behind the query.
        rounded_index = (math.floor(((word_index + self.stride) / self.stride)) * self.stride)
        if (((word_index % self.stride) == 0) and (word_index != 0)):
            subset_one = set(range((word_index - self.stride), min(absolute_max, (word_index + 1))))
        else:
            subset_one = set(range(max(0, (rounded_index - self.stride)), min(absolute_max, (rounded_index + 1))))
        subset_two = set()
        # Causal summaries are per-query; bidirectional summaries are added
        # once for the whole sequence in buffered_sparse_mask.
        if (not self.is_bidirectional):
            subset_two = self.compute_subset_summaries(absolute_max)
        return subset_one.union(subset_two)

    def buffered_sparse_mask(self, tensor, tgt_len, src_len):
        """Build the (tgt_len, src_len) additive mask: 0 where allowed, -inf elsewhere."""
        assert (tgt_len > self.stride)
        sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float('-inf'))
        subset_summaries = set()
        if self.is_bidirectional:
            subset_summaries = self.compute_subset_summaries(tgt_len)
        for i in range(tgt_len):
            fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
            fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
            included_word_indices = torch.LongTensor(list(fixed_attention_subset))
            # Zero out (i.e. allow) the permitted positions in row i.
            sparse_mask[i].index_fill_(0, included_word_indices, 0)
        return sparse_mask.type_as(tensor)

    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        """Add the sparse mask to the attention logits in place."""
        sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
        sparse_mask = sparse_mask.unsqueeze(0).expand((bsz * self.num_heads), tgt_len, src_len)
        attn_weights += sparse_mask
def scatter_plot(viz, title, x):
    """Create or refresh a visdom scatter plot, caching window handles by title.

    The first call for a *title* creates a new window and remembers its
    handle in the module-level VISDOMWINDOWS cache; later calls replace the
    contents of that same window.
    """
    if title not in VISDOMWINDOWS:
        VISDOMWINDOWS[title] = viz.scatter(X=x, opts={'title': title})
    else:
        viz.scatter(X=x, win=VISDOMWINDOWS[title], update='replace', opts={'title': title})
def _Resnet(x, num_units, num_filters, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, strides=(1, 2, 2, 2), dilates=(1, 1, 1, 1), name=None, lr_mult=1, reuse=None):
    """Assemble a ResNet-v1 backbone: stem followed by four residual stages.

    Stage i+1 stacks num_units[i] blocks with num_filters[i+1] channels
    (num_filters[0] is the stem width); strides/dilates are per-stage.
    Returns the final feature map.
    """
    name = ('' if (name is None) else name)
    x = ResStemV1(x, num_filters[0], momentum, eps, use_global_stats, bn_data, name, lr_mult, reuse)
    for i in range(4):
        # Stage names are 1-based: stage1 .. stage4.
        x = ResBlockV1(x, num_units[i], num_filters[(i + 1)], strides[i], dilates[i], bottle_neck, momentum, eps, use_global_stats, (name + ('stage%d' % (i + 1))), lr_mult, reuse)
    return x
def generate_segment_latex(sentence, segment):
    """Render one annotated segment to LaTeX by dispatching on its tag.

    The tag is segment[0]: 'M' match, 'O' overlap, 'G' gold-only,
    'P' prediction-only; any other tag is rendered as a non-entity span.
    """
    dispatch = {
        'M': generate_match,
        'O': generate_overlap,
        'G': generate_gold_left,
        'P': generate_pred_left,
    }
    handler = dispatch.get(segment[0], generate_not_entity)
    return handler(sentence, segment)
# BUGFIX: the decorator line was mangled to a bare '.parametrize(...)'
# (a SyntaxError); restored as @pytest.mark.parametrize. pytest is already
# used elsewhere in this file (pytest.raises), so it is imported at file top.
@pytest.mark.parametrize('inp,out', [([], []), (['O', 'O', 'O'], [None, None, None]), (['O', 'B-ORG', 'O'], [None, 'ORG', None]), (['O', 'B-ORG', 'B-ORG'], [None, 'ORG', 'ORG']), (['O', 'B-PERSON', 'I-PERSON'], [None, 'PERSON', 'PERSON']), (['B-A', 'O', 'B-T'], ['A', None, 'T'])])
def test_get_etypes(inp, out):
    """get_etypes maps a BIO tag sequence to per-token entity types (None for 'O')."""
    assert (get_etypes(inp) == out)
def CWT(lenth, data):
    """Continuous wavelet transform of *data* using the Mexican-hat wavelet.

    Scales run from 1 to lenth-1. Returns only the coefficient matrix; the
    frequencies pywt also computes are discarded.
    """
    widths = np.arange(1, lenth)
    coefficients, _freqs = pywt.cwt(data, widths, 'mexh')
    return coefficients
def output_padding_shape(h_in, conv_out, padding, kernel_size, stride):
    """Per-dimension output padding for a transposed conv to reproduce h_in.

    Applies output_padding independently to each spatial dimension and
    returns the results as a tuple.
    """
    dims = range(len(h_in))
    per_dim = (output_padding(h_in[d], conv_out[d], padding, kernel_size, stride) for d in dims)
    return tuple(per_dim)
def _draw_space(space, batch=None):
    """Draw every shape in *space*, skipping any flagged with ignore_draw."""
    for shape in space.shapes:
        # Shapes without the flag (or with a falsy flag) are drawn.
        if getattr(shape, 'ignore_draw', False):
            continue
        _draw_shape(shape, batch)
# BUGFIX: restored from the mangled '_grad()' line — as written it raised a
# NameError; comparison needs no gradients.
@torch.no_grad()
def compare_planes(pred_planes, gt_planes):
    """Compare predicted and ground-truth plane parameters pairwise.

    Planes are given as 3-vectors n*d (normal scaled by offset). Returns a
    dict with 'norm': pairwise angles (degrees) between unit normals, and
    'offset': pairwise absolute differences of the offsets.
    """
    pred_planes = torch.tensor(np.array(pred_planes), dtype=torch.float32)
    # Epsilon guards against zero-norm (degenerate) planes.
    pred_offsets = (torch.norm(pred_planes, p=2, dim=1) + 1e-05)
    pred_norms = pred_planes.div(pred_offsets.view((- 1), 1).expand_as(pred_planes))
    gt_planes = torch.tensor(np.array(gt_planes), dtype=torch.float32)
    gt_offsets = (torch.norm(gt_planes, p=2, dim=1) + 1e-05)
    gt_norms = gt_planes.div(gt_offsets.view((- 1), 1).expand_as(gt_planes))
    # Chord length between unit normals, clamped to the valid [0, 2] range ...
    norm_distance_matrix = torch.clamp(torch.cdist(pred_norms, gt_norms, p=2), 0, 2)
    # ... then converted to the angle between them, in degrees.
    norm_angle_matrix = (((2 * torch.asin((norm_distance_matrix / 2))) / np.pi) * 180)
    offset_distance_matrix = torch.cdist(pred_offsets.view((- 1), 1), gt_offsets.view((- 1), 1), p=1)
    return {'norm': norm_angle_matrix, 'offset': offset_distance_matrix}
('span_f1_dist')
class SpanF1Measure(Metric):
    """Span-level precision/recall/F1 with per-tag, micro and macro averages.

    NOTE(review): the line above this class in the original file was a
    mangled registration decorator (a bare ``('span_f1_dist')`` string);
    confirm the intended registration against the original module.
    """

    def __init__(self) -> None:
        # Counts keyed by span type (span[0]).
        self._true_positives: Dict[(str, int)] = defaultdict(int)
        self._false_positives: Dict[(str, int)] = defaultdict(int)
        self._false_negatives: Dict[(str, int)] = defaultdict(int)
        self.training_finished = False

    def __call__(self, predictions: List[List[TypedStringSpan]], gold_labels: List[List[TypedStringSpan]]):
        """Accumulate TP/FP/FN counts for one batch of predicted vs. gold spans."""
        for (predicted_spans, gold_spans) in zip(predictions, gold_labels):
            gold_spans = set(gold_spans)
            for span in predicted_spans:
                if (span in gold_spans):
                    self._true_positives[span[0]] += 1
                    # Each gold span may be matched at most once.
                    gold_spans.remove(span)
                else:
                    self._false_positives[span[0]] += 1
            # Unmatched gold spans are misses.
            for span in gold_spans:
                self._false_negatives[span[0]] += 1

    def get_metric(self, reset: bool=False):
        """Return per-tag metrics plus micro and macro overall averages.

        On the final reset of a distributed run, counts are first gathered
        from all workers.
        """
        if (reset and is_distributed() and (not self.training_finished)):

            def _gather(d: Dict) -> DefaultDict:
                dist.all_gather_object(dict_list, d)
                combined = defaultdict(int)
                for d in dict_list:
                    for (k, v) in d.items():
                        combined[k] += v
                return combined
            dict_list = [dict() for _ in range(dist.get_world_size())]
            self._true_positives = _gather(self._true_positives)
            self._false_positives = _gather(self._false_positives)
            self._false_negatives = _gather(self._false_negatives)
        all_tags: Set[str] = set()
        all_tags.update(self._true_positives.keys())
        all_tags.update(self._false_positives.keys())
        all_tags.update(self._false_negatives.keys())
        all_metrics = {}
        for tag in all_tags:
            (precision, recall, f1_measure) = self._compute_metrics(self._true_positives[tag], self._false_positives[tag], self._false_negatives[tag])
            precision_key = (('precision' + '-') + tag)
            recall_key = (('recall' + '-') + tag)
            f1_key = (('f1-measure' + '-') + tag)
            all_metrics[precision_key] = precision
            all_metrics[recall_key] = recall
            all_metrics[f1_key] = f1_measure
        # Macro averages: mean of per-tag precision and recall scores.
        pr = {'precision': list(), 'recall': list()}
        for (name, score) in all_metrics.items():
            for (k, v) in pr.items():
                if (k in name):
                    v.append(score)
                    break
        # BUGFIX: this block was attached as the inner for-loop's ``else``
        # clause, so it mutated all_metrics while iterating it (RuntimeError).
        # It must run exactly once, after collecting the per-tag scores.
        (p, r) = [((sum(pr[k]) / len(pr[k])) if (len(pr[k]) > 0) else 0) for k in ('precision', 'recall')]
        all_metrics['precision-overall-MACRO'] = p
        all_metrics['recall-overall-MACRO'] = r
        all_metrics['f1-measure-overall-MACRO'] = ((2.0 * (p * r)) / ((p + r) + 1e-13))
        # Micro averages over the summed counts.
        (precision, recall, f1_measure) = self._compute_metrics(sum(self._true_positives.values()), sum(self._false_positives.values()), sum(self._false_negatives.values()))
        all_metrics['precision-overall'] = precision
        all_metrics['recall-overall'] = recall
        all_metrics['f1-measure-overall'] = f1_measure
        if reset:
            self.reset()
        return all_metrics

    @staticmethod
    def _compute_metrics(true_positives: int, false_positives: int, false_negatives: int):
        # BUGFIX: restored @staticmethod — this is invoked as
        # ``self._compute_metrics(tp, fp, fn)`` with three arguments; without
        # the decorator the implicit ``self`` made every call a TypeError.
        precision = (true_positives / ((true_positives + false_positives) + 1e-13))
        recall = (true_positives / ((true_positives + false_negatives) + 1e-13))
        f1_measure = ((2.0 * (precision * recall)) / ((precision + recall) + 1e-13))
        return (precision, recall, f1_measure)

    def reset(self):
        """Clear all accumulated counts."""
        self._true_positives = defaultdict(int)
        self._false_positives = defaultdict(int)
        self._false_negatives = defaultdict(int)
class AbstractOptimizer(ABC):
    """Base interface for black-box optimizers over a DesignSpace.

    The ``support_*`` class flags advertise optional capabilities of a
    concrete optimizer. NOTE(review): suggest/observe/best_x/best_y read like
    abstract hooks (and best_x/best_y like properties) whose decorators may
    have been stripped from this copy — confirm against the original module.
    """
    support_parallel_opt = False      # can propose several points per suggest()
    support_constraint = False        # handles constrained objectives
    support_multi_objective = False   # handles multiple objectives
    support_combinatorial = False     # handles combinatorial/discrete spaces
    support_contextual = False        # handles contextual optimization

    def __init__(self, space: DesignSpace) -> None:
        # The search space this optimizer proposes points from.
        self.space = space

    def suggest(self, n_suggestions=1, fix_input: dict=None):
        """Propose *n_suggestions* candidate points (optionally with some inputs fixed)."""
        pass

    def observe(self, x: pd.DataFrame, y: np.ndarray):
        """Feed back evaluated points *x* with objective values *y*."""
        pass

    def best_x(self) -> pd.DataFrame:
        """Best input found so far."""
        pass

    def best_y(self) -> float:
        """Best objective value found so far."""
        pass
def test_fetch_metadata_function_with_querry(tmpdir):
    """TinyDbReader.fetch_metadata filters experiments by TinyDB queries.

    Seeds three experiments, then checks regex name queries, combined
    name+id queries, and that calling fetch_metadata with no query raises.
    """
    root = tmpdir.strpath
    run_test_experiment(exp_name='experiment 1 alpha', exp_id='1234', root_dir=root)
    run_test_experiment(exp_name='experiment 2 beta', exp_id='5678', root_dir=root)
    run_test_experiment(exp_name='experiment 3 alpha beta', exp_id='9990', root_dir=root)
    tinydb_reader = TinyDbReader(root)
    record = Query()
    # matches() anchors at the start, so '.*alpha$' only hits experiment 1.
    exp1_query = record.experiment.name.matches('.*alpha$')
    exp3_query = (record.experiment.name.search('alpha') & (record._id == '9990'))
    res1 = tinydb_reader.fetch_metadata(query=exp1_query)
    assert (len(res1) == 1)
    assert (res1[0]['experiment']['name'] == 'experiment 1 alpha')
    # search() matches anywhere: experiments 2 and 3.
    res2 = tinydb_reader.fetch_metadata(query=record.experiment.name.search('experiment [23]'))
    assert (len(res2) == 2)
    res3 = tinydb_reader.fetch_metadata(query=exp3_query)
    assert (len(res3) == 1)
    assert (res3[0]['experiment']['name'] == 'experiment 3 alpha beta')
    # fetch_metadata without any selection criteria must raise.
    with pytest.raises(ValueError):
        tinydb_reader.fetch_metadata()
class PIP():
    """Thin accessor around a tile's programmable interconnect point (PIP).

    Wraps the raw record returned by ``tile.get_pip_data(index)`` and
    exposes its fields through small reader methods.
    """

    def __init__(self, tile, index):
        self.tile = tile
        self.index = index
        # Fetch the raw record once; every reader below consults it.
        self.data = tile.get_pip_data(index)

    def src_wire(self):
        """Wire this PIP routes from."""
        return Wire(self.tile, self.data.from_wire)

    def dst_wire(self):
        """Wire this PIP routes to."""
        return Wire(self.tile, self.data.to_wire)

    def is_route_thru(self):
        return self.data.is_route_thru

    def is_bidi(self):
        return self.data.is_bidi

    def is_buffered(self):
        return self.data.is_buffered

    def min_delay(self):
        return self.data.min_delay

    def max_delay(self):
        return self.data.max_delay

    def resistance(self):
        return self.data.resistance

    def capacitance(self):
        return self.data.capacitance
def write_csv_timeseries(df, path, float_format=None):
    """Serialize a time-indexed DataFrame to CSV with ISO 8601 timestamps.

    Works on a copy so the caller's frame keeps its original index.
    """
    out = df.copy()
    out.index = out.index.strftime('%Y-%m-%dT%H:%M:%S%z')
    out.index.name = 'time_iso8601'
    log.info('write time series data to CSV file %s -- df:\n%s', path, out)
    # Encode explicitly and write binary so the file is always UTF-8,
    # independent of the platform's default encoding.
    with open(path, 'wb') as fh:
        fh.write(out.to_csv(float_format=float_format).encode('utf-8'))
class Credentials():
    """Simple username/password value holder."""

    def __init__(self, username: str, password: str):
        # Stored verbatim; no validation or hashing happens here.
        self.username = username
        self.password = password
def loss_D_fn(P, D, options, images, gen_images):
    """Discriminator loss for one batch of real and generated images.

    Generated images are detached so no gradient reaches the generator;
    real and fake batches share a single augmented forward pass through D.
    Returns (loss, aux) where aux carries the penalty and the mean logits.
    """
    assert images.size(0) == gen_images.size(0)
    gen_images = gen_images.detach()
    n_real = images.size(0)
    all_images = torch.cat([images, gen_images], dim=0)
    d_all = D(P.augment_fn(all_images))
    d_real, d_gen = d_all[:n_real], d_all[n_real:]

    loss_name = options['loss']
    if loss_name == 'nonsat':
        # Non-saturating GAN loss: softplus on both halves.
        d_loss = F.softplus(d_gen).mean() + F.softplus(-d_real).mean()
    elif loss_name == 'wgan':
        d_loss = d_gen.mean() - d_real.mean()
    elif loss_name == 'hinge':
        d_loss = F.relu(1.0 + d_gen, inplace=True).mean() + F.relu(1.0 - d_real, inplace=True).mean()
    else:
        raise NotImplementedError()

    penalty = compute_penalty(P.penalty, P=P, D=D, all_images=all_images,
                              images=images, gen_images=gen_images,
                              d_real=d_real, d_gen=d_gen,
                              lbd=options['lbd'], lbd2=options['lbd2'])
    aux = {'penalty': penalty, 'd_real': d_real.mean(), 'd_gen': d_gen.mean()}
    return (d_loss, aux)
class ControlledGDEFunc(GDEFunc):
    """GDE function whose input is augmented with an initial state ``h0``.

    Concatenating ``self.h0`` at every call conditions the dynamics on the
    initial condition; ``nfe`` counts function evaluations by the solver.
    """

    def __init__(self, gnn: nn.Module):
        super().__init__(gnn)
        self.nfe = 0

    def forward(self, t, x):
        self.nfe += 1
        # NOTE(review): self.h0 is never assigned in this class — presumably
        # a caller sets it before integration; confirm upstream.
        conditioned = torch.cat([x, self.h0], 1)
        return self.gnn(conditioned)
class _DiverseCFV2SchemaConstants():
DATA_INTERFACE = 'data_interface'
MODEL_TYPE = 'model_type'
DESIRED_CLASS = 'desired_class'
DESIRED_RANGE = 'desired_range'
FEATURE_NAMES_INCLUDING_TARGET = 'feature_names_including_target'
FEATURE_NAMES = 'feature_names'
TEST_INSTANCE_LIST = 'test_instance_list'
FINAL_CFS_LIST = 'final_cfs_list' |
class GraspSamplerVAE(GraspSampler):
    """VAE-based grasp sampler.

    Encodes a point cloud plus grasp into a latent distribution, then
    decodes grasps from sampled (or given) latents. ``decode`` and
    ``create_decoder`` come from the ``GraspSampler`` base class.
    """

    def __init__(self, model_scale, pointnet_radius=0.02, pointnet_nclusters=128, latent_size=2, device='cpu'):
        super(GraspSamplerVAE, self).__init__(latent_size, device)
        self.create_encoder(model_scale, pointnet_radius, pointnet_nclusters)
        # Decoder input is the latent concatenated with 3-D point coords.
        self.create_decoder(model_scale, pointnet_radius, pointnet_nclusters, (latent_size + 3))
        self.create_bottleneck((model_scale * 1024), latent_size)

    def create_encoder(self, model_scale, pointnet_radius, pointnet_nclusters):
        # 19 input feature channels — assumes xyz (3) + grasp features (16);
        # TODO(review): confirm against base_network's contract.
        self.encoder = base_network(pointnet_radius, pointnet_nclusters, model_scale, 19)

    def create_bottleneck(self, input_size, latent_size):
        # Two parallel linear maps produce the Gaussian parameters (mu, logvar).
        mu = nn.Linear(input_size, latent_size)
        logvar = nn.Linear(input_size, latent_size)
        self.latent_space = nn.ModuleList([mu, logvar])

    def encode(self, xyz, xyz_features):
        # encoder[0]: stacked point-set abstraction modules; encoder[1]: MLP head.
        for module in self.encoder[0]:
            (xyz, xyz_features) = module(xyz, xyz_features)
        return self.encoder[1](xyz_features.squeeze((- 1)))

    def bottleneck(self, z):
        # Returns (mu, logvar) of the approximate posterior.
        return (self.latent_space[0](z), self.latent_space[1](z))

    def reparameterize(self, mu, logvar):
        # Standard VAE reparameterization trick: mu + eps * sigma.
        std = torch.exp((0.5 * logvar))
        eps = torch.randn_like(std)
        return (mu + (eps * std))

    def forward(self, pc, grasp=None, train=True):
        if train:
            return self.forward_train(pc, grasp)
        else:
            return self.forward_test(pc, grasp)

    def forward_train(self, pc, grasp):
        # Broadcast the grasp over every point, then channel-first layout.
        input_features = torch.cat((pc, grasp.unsqueeze(1).expand((- 1), pc.shape[1], (- 1))), (- 1)).transpose((- 1), 1).contiguous()
        z = self.encode(pc, input_features)
        (mu, logvar) = self.bottleneck(z)
        z = self.reparameterize(mu, logvar)
        (qt, confidence) = self.decode(pc, z)
        # mu/logvar are returned so the caller can add the KL term.
        return (qt, confidence, mu, logvar)

    def forward_test(self, pc, grasp):
        input_features = torch.cat((pc, grasp.unsqueeze(1).expand((- 1), pc.shape[1], (- 1))), (- 1)).transpose((- 1), 1).contiguous()
        z = self.encode(pc, input_features)
        # At test time decode from the posterior mean (no sampling noise).
        (mu, _) = self.bottleneck(z)
        (qt, confidence) = self.decode(pc, mu)
        return (qt, confidence)

    def sample_latent(self, batch_size):
        # Draw from the standard-normal prior.
        return torch.randn(batch_size, self.latent_size).to(self.device)

    def generate_grasps(self, pc, z=None):
        # If no latent is supplied, sample one per point cloud in the batch.
        if (z is None):
            z = self.sample_latent(pc.shape[0])
        (qt, confidence) = self.decode(pc, z)
        return (qt, confidence, z.squeeze())

    def generate_dense_latents(self, resolution):
        # Regular grid over [-2, 2]^latent_size, flattened to (res^dims, dims).
        latents = torch.meshgrid(*[torch.linspace((- 2), 2, resolution) for i in range(self.latent_size)])
        return torch.stack([latents[i].flatten() for i in range(len(latents))], dim=(- 1)).to(self.device)
def _cache_spectrogram(labeled_spectrogram: CachedLabeledSpectrogram) -> None:
    """Force computation of the spectrogram purely for its caching side effect."""
    labeled_spectrogram.z_normalized_transposed_spectrogram()
# NOTE(review): the two lines that preceded this function were garbled
# decorator residue ("(derivate=True, coderize=True)" and "_loss"), which is
# a syntax error as written. They look like the mmdet decorators
# "@mmcv.jit(derivate=True, coderize=True)" and "@weighted_loss" — restore
# them once the owning module's imports are confirmed.
def knowledge_distillation_kl_div_loss(pred, soft_label, T, detach_target=True):
    """KL-divergence knowledge-distillation loss.

    Args:
        pred: Student logits, shape (N, C).
        soft_label: Teacher logits, shape (N, C).
        T: Softmax temperature; the loss is rescaled by T*T so gradient
           magnitudes stay comparable across temperatures.
        detach_target: If True, stop gradients flowing into the teacher target.

    Returns:
        Per-sample loss tensor of shape (N,).
    """
    assert pred.size() == soft_label.size()
    target = F.softmax(soft_label / T, dim=1)
    if detach_target:
        target = target.detach()
    kd_loss = F.kl_div(
        F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (T * T)
    return kd_loss
def main():
    """Train a GLUE sequence-classification model with optional distillation
    and/or quantization-aware training via neural_compressor, then evaluate.

    Side effects: configures root logging, moves models to the detected
    device, runs training, and logs evaluation metrics; returns nothing.
    """
    args = parse_args()
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    device = (torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu'))
    if (args.seed is not None):
        set_seed(args.seed)
    # --- Load raw data: a GLUE task or user-provided train/validation files. ---
    if (args.task_name is not None):
        raw_datasets = load_dataset('glue', args.task_name)
    else:
        data_files = {}
        if (args.train_file is not None):
            data_files['train'] = args.train_file
        if (args.validation_file is not None):
            data_files['validation'] = args.validation_file
        # FIX: was `args.valid_file`, an attribute that is never defined.
        extension = (args.train_file if (args.train_file is not None) else args.validation_file).split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files)
    # --- Determine the label space / regression mode of the task. ---
    if (args.task_name is not None):
        is_regression = (args.task_name == 'stsb')
        if (not is_regression):
            label_list = raw_datasets['train'].features['label'].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # FIX: the following lookups referenced the undefined name `datasets`
        # instead of `raw_datasets`.
        is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
        if is_regression:
            num_labels = 1
        else:
            label_list = raw_datasets['train'].unique('label')
            label_list.sort()
            num_labels = len(label_list)
    config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, use_auth_token=args.use_auth_token)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer), use_auth_token=args.use_auth_token)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, use_auth_token=args.use_auth_token)
    if args.resume:
        try:
            model.load_state_dict(torch.load(args.resume))
            logger.info('Resumed model from {}'.format(args.resume))
        except Exception as e:
            # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
            # chain the original error for easier debugging.
            raise TypeError('Provided {} is not a valid checkpoint file, please provide .pt file'.format(args.resume)) from e
    model.to(device)
    # --- Work out which dataset columns hold the input sentence(s). ---
    if (args.task_name is not None):
        (sentence1_key, sentence2_key) = task_to_keys[args.task_name]
    else:
        # FIX: `datasets` -> `raw_datasets` here as well.
        non_label_column_names = [name for name in raw_datasets['train'].column_names if (name != 'label')]
        if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
            (sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
        elif (len(non_label_column_names) >= 2):
            (sentence1_key, sentence2_key) = non_label_column_names[:2]
        else:
            (sentence1_key, sentence2_key) = (non_label_column_names[0], None)
    # Reuse the model's own label mapping when it matches the dataset labels.
    label_to_id = None
    if ((model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (args.task_name is not None) and (not is_regression)):
        label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
        if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
            logger.info(f'The configuration of the model provided the following label correspondence: {label_name_to_id}. Using it!')
            label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
        else:
            # FIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
    elif (args.task_name is None):
        label_to_id = {v: i for (i, v) in enumerate(label_list)}
    padding = ('max_length' if args.pad_to_max_length else False)

    def preprocess_function(examples):
        # Tokenize one or two text columns; map labels to ids when required.
        texts = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
        result = tokenizer(*texts, padding=padding, max_length=args.max_seq_length, truncation=True)
        if ('label' in examples):
            if (label_to_id is not None):
                result['labels'] = [label_to_id[l] for l in examples['label']]
            else:
                result['labels'] = examples['label']
        return result
    processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets['train'].column_names)
    train_dataset = processed_datasets['train']
    eval_dataset = processed_datasets[('validation_matched' if (args.task_name == 'mnli') else 'validation')]
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    if args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=None)
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.batch_size, drop_last=True)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.batch_size, drop_last=True)
    # Exclude bias/LayerNorm weights from weight decay, per BERT convention.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    else:
        args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
    if (args.task_name is not None):
        metric = load_metric('glue', args.task_name)
    else:
        # FIX: `metric` was undefined for non-GLUE runs but is used during
        # training and evaluation below; fall back to plain accuracy.
        metric = load_metric('accuracy')
    combs = []
    if args.do_distillation:
        from neural_compressor.config import DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig
        # Distill every encoder layer's output and attention map, plus the classifier.
        layer_mappings = [[['bert.encoder.layer.0.output']], [['bert.encoder.layer.0.attention', '1']], [['bert.encoder.layer.1.output']], [['bert.encoder.layer.1.attention', '1']], [['bert.encoder.layer.2.output']], [['bert.encoder.layer.2.attention', '1']], [['bert.encoder.layer.3.output']], [['bert.encoder.layer.3.attention', '1']], [['bert.encoder.layer.4.output']], [['bert.encoder.layer.4.attention', '1']], [['bert.encoder.layer.5.output']], [['bert.encoder.layer.5.attention', '1']], [['bert.encoder.layer.6.output']], [['bert.encoder.layer.6.attention', '1']], [['bert.encoder.layer.7.output']], [['bert.encoder.layer.7.attention', '1']], [['bert.encoder.layer.8.output']], [['bert.encoder.layer.8.attention', '1']], [['bert.encoder.layer.9.output']], [['bert.encoder.layer.9.attention', '1']], [['bert.encoder.layer.10.output']], [['bert.encoder.layer.10.attention', '1']], [['bert.encoder.layer.11.output']], [['bert.encoder.layer.11.attention', '1']], [['classifier']]]
        loss_weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        distillation_criterion = IntermediateLayersKnowledgeDistillationLossConfig(layer_mappings=layer_mappings, loss_weights=loss_weights)
        teacher_config = AutoConfig.from_pretrained(args.teacher_model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
        teacher_tokenizer = AutoTokenizer.from_pretrained(args.teacher_model_name_or_path, use_fast=(not args.use_slow_tokenizer))
        assert (teacher_tokenizer.vocab == tokenizer.vocab), 'teacher model and student model should have same tokenizer.'
        teacher_model = AutoModelForSequenceClassification.from_pretrained(args.teacher_model_name_or_path, from_tf=bool(('.ckpt' in args.teacher_model_name_or_path)), config=teacher_config)
        teacher_model.to(device)
        para_counter = (lambda model: sum((p.numel() for p in model.parameters())))
        logger.info('***** Number of teacher model parameters: {:.2f}M *****'.format((para_counter(teacher_model) / (10 ** 6))))
        logger.info('***** Number of student model parameters: {:.2f}M *****'.format((para_counter(model) / (10 ** 6))))
        # Attention maps are needed as distillation targets.
        model.config.output_attentions = True
        teacher_model.config.output_attentions = True
        d_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)
        combs.append(d_conf)
    if args.do_quantization:
        from neural_compressor import QuantizationAwareTrainingConfig
        q_conf = QuantizationAwareTrainingConfig()
        combs.append(q_conf)
    if (len(combs) == 0):
        assert False, 'Please set at least one of do_distillation and do_quantization.'
    from neural_compressor.training import prepare_compression
    compression_manager = prepare_compression(model, combs)
    compression_manager.callbacks.on_train_begin()
    model = compression_manager.model
    train(args, model, train_dataloader, lr_scheduler, compression_manager, optimizer, eval_dataloader, metric)
    compression_manager.callbacks.on_train_end()
    # Unwrap the compressed model for plain evaluation.
    model = model.model
    if args.do_eval:
        eval_dataloader = tqdm(eval_dataloader, desc='Evaluating')
        model.eval()
        model_device = next(model.parameters()).device
        for (step, batch) in enumerate(eval_dataloader):
            batch = move_input_to_device(batch, model_device)
            outputs = model(**batch)
            predictions = outputs['logits'].argmax(dim=(- 1))
            metric.add_batch(predictions=predictions, references=batch['labels'])
        eval_metric = metric.compute()
        logger.info(f'eval_metric: {eval_metric}')
        if (args.task_name == 'mnli'):
            # MNLI additionally reports on the mismatched validation split.
            eval_dataset = processed_datasets['validation_mismatched']
            eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.batch_size, drop_last=True)
            model.eval()
            model_device = next(model.parameters()).device
            for (step, batch) in enumerate(eval_dataloader):
                batch = move_input_to_device(batch, model_device)
                outputs = model(**batch)
                predictions = outputs['logits'].argmax(dim=(- 1))
                (pred, gt) = gather_results(predictions, batch['labels'])
                metric.add_batch(predictions=pred, references=gt)
            eval_metric = metric.compute()
            logger.info(f'mnli-mm: {eval_metric}')
def get_imdb(file_path):
    """Build a GQA imdb (numpy array of question entries) from a question JSON.

    The first element is a header dict naming the dataset; every following
    element describes one question.
    """
    imdb = [{'dataset_name': 'gqa'}]
    # FIX: file handle was leaked; use a context manager.
    with open(file_path, 'r') as f:
        questions = json.load(f)
    print('Processing file {}'.format(file_path))
    for (qid, item) in tqdm.tqdm(questions.items()):
        entry = {
            # FIX: was item['imageId'] + 'jpg' — the extension dot was missing.
            'image_name': (item['imageId'] + '.jpg'),
            'image_id': item['imageId'],
            'question_id': qid,
            'question_str': item['question'],
            'question_tokens': tokenize(item['question']),
        }
        if ('answer' in item):
            # GQA has a single ground-truth answer; replicate it 10x to match
            # the VQA-style 10-annotator answer format.
            entry['all_answers'] = [item['answer'] for _ in range(10)]
            entry['valid_answers'] = [item['answer'] for _ in range(10)]
        # NOTE(review): these two are stored as 1-tuples — presumably
        # unintentional trailing commas; confirm consumers before changing.
        entry['semantic_string'] = (item['semanticStr'],)
        entry['gt_object_ids'] = (get_objects(item['semanticStr']),)
        entry['meta_data'] = item['types']
        imdb.append(entry)
    return np.array(imdb)
class ClusterNet6cTwoHead(VGGNet):
    """VGG-style clustering network with two independent output heads (A, B)."""

    cfg = [(64, 1), ('M', None), (128, 1), ('M', None), (256, 1), ('M', None), (512, 1)]

    def __init__(self, num_channel: int = 3, input_size: int = 64, output_k_A: int = 10, output_k_B: int = 10, num_sub_heads: int = 5, semisup: bool = False, batchnorm_track: bool = True):
        super(ClusterNet6cTwoHead, self).__init__()
        self.batchnorm_track = batchnorm_track
        self.trunk = ClusterNet6cTrunk(num_channel=num_channel, batchnorm_track=self.batchnorm_track)
        # Both heads share the trunk and differ only in output cardinality.
        shared_kwargs = dict(input_size=input_size, num_sub_heads=num_sub_heads,
                             semisup=semisup, batchnorm_track=self.batchnorm_track)
        self.head_A = ClusterNet6cTwoHeadHead(output_k=output_k_A, **shared_kwargs)
        self.head_B = ClusterNet6cTwoHeadHead(output_k=output_k_B, **shared_kwargs)
        self._initialize_weights()

    def forward(self, x, head='B', kmeans_use_features=False, trunk_features=False, penultimate_features=False):
        """Run the trunk, then the selected head (or return trunk features)."""
        if penultimate_features:
            print('Not needed/implemented for this arch')
            exit(1)
        x = self.trunk(x)
        if trunk_features:
            return x
        if head == 'A':
            x = self.head_A(x, kmeans_use_features=kmeans_use_features)
        elif head == 'B':
            x = self.head_B(x, kmeans_use_features=kmeans_use_features)
        else:
            assert False
        return x
def sample_1hot(batch_size, num_classes, device='cuda'):
    """Draw ``batch_size`` class indices uniformly from [0, num_classes)."""
    return torch.randint(
        low=0, high=num_classes, size=(batch_size,),
        device=device, dtype=torch.int64, requires_grad=False)
class Demo(data.Dataset):
    """Dataset over loose demo images (.png / .jp*) in ``args.dir_demo``.

    Inference-only: there is no HR ground truth, so ``__getitem__`` returns
    -1 in its place.
    """

    def __init__(self, args, train=False):
        self.args = args
        self.name = 'Demo'
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.benchmark = False
        # Collect every matching image file, sorted for a stable order.
        self.filelist = sorted(
            os.path.join(args.dir_demo, f)
            for f in os.listdir(args.dir_demo)
            if f.find('.png') >= 0 or f.find('.jp') >= 0
        )

    def __getitem__(self, idx):
        path = self.filelist[idx]
        filename = os.path.splitext(os.path.split(path)[-1])[0]
        lr = misc.imread(path)
        lr = common.set_channel([lr], self.args.n_colors)[0]
        return (common.np2Tensor([lr], self.args.rgb_range)[0], -1, filename)

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
def get_geo_loss(gt_geo, pred_geo):
    """Per-pixel IoU and angle losses for EAST-style geometry maps.

    Both inputs are (N, 5, ...) tensors whose channels are four edge
    distances plus a rotation angle. Returns (iou_loss_map, angle_loss_map),
    each of shape (N, 1, ...).
    """
    d1_gt, d2_gt, d3_gt, d4_gt, angle_gt = torch.split(gt_geo, 1, 1)
    d1_pred, d2_pred, d3_pred, d4_pred, angle_pred = torch.split(pred_geo, 1, 1)
    # Axis-aligned box areas reconstructed from the edge distances.
    area_gt = (d1_gt + d2_gt) * (d3_gt + d4_gt)
    area_pred = (d1_pred + d2_pred) * (d3_pred + d4_pred)
    # Intersection box takes the smaller extent on each side.
    w_union = torch.min(d3_gt, d3_pred) + torch.min(d4_gt, d4_pred)
    h_union = torch.min(d1_gt, d1_pred) + torch.min(d2_gt, d2_pred)
    area_intersect = w_union * h_union
    area_union = area_gt + area_pred - area_intersect
    # +1 smoothing keeps the log finite for degenerate boxes.
    iou_loss_map = -torch.log((area_intersect + 1.0) / (area_union + 1.0))
    angle_loss_map = 1 - torch.cos(angle_pred - angle_gt)
    return (iou_loss_map, angle_loss_map)
class SpatialAttentionBlock(nn.Module):
    """Self-attention over spatial positions with a learnable residual gate.

    Output is ``gamma * attention(x) + x``; ``gamma`` is initialized to
    zero, so the block starts out as an identity mapping.
    """

    def __init__(self, in_channels):
        super(SpatialAttentionBlock, self).__init__()
        reduced = in_channels // 8
        # Asymmetric 1x3 / 3x1 convs produce the query/key projections.
        self.query = nn.Sequential(
            nn.Conv2d(in_channels, reduced, kernel_size=(1, 3), padding=(0, 1)),
            nn.BatchNorm2d(reduced),
            nn.ReLU(inplace=True),
        )
        self.key = nn.Sequential(
            nn.Conv2d(in_channels, reduced, kernel_size=(3, 1), padding=(1, 0)),
            nn.BatchNorm2d(reduced),
            nn.ReLU(inplace=True),
        )
        self.value = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, channels, height, width = x.size()
        flat = width * height
        q = self.query(x).view(batch, -1, flat).permute(0, 2, 1)
        k = self.key(x).view(batch, -1, flat)
        # (flat x flat) affinity between every pair of spatial positions.
        attn = self.softmax(torch.matmul(q, k))
        v = self.value(x).view(batch, -1, flat)
        weighted = torch.matmul(v, attn.permute(0, 2, 1)).view(batch, channels, height, width)
        return self.gamma * weighted + x
def get_raw2scannet_label_map():
    """Parse 'scannet-labels.combined.tsv' into a raw-name -> NYU40-name map.

    Raw labels whose NYU40 name is not in ``g_label_names`` map to
    'unannotated'.
    """
    # FIX: this set was rebuilt on every loop iteration; it is loop-invariant.
    label_classes_set = set(g_label_names)
    # FIX: the file handle was never closed; use a context manager.
    with open('scannet-labels.combined.tsv') as f:
        lines = [line.rstrip() for line in f]
    raw2scannet = {}
    for line in lines[1:]:  # skip the header row
        elements = line.split('\t')
        raw_name = elements[0]
        nyu40_name = elements[6]
        if nyu40_name not in label_classes_set:
            raw2scannet[raw_name] = 'unannotated'
        else:
            raw2scannet[raw_name] = nyu40_name
    return raw2scannet
def make_mask(folders_to_convert, split_to_convert, data_dir, save_dir, n_dataloader_workers=4, batch_size=64):
    """Shrink every mask file of one source-task folder into ``save_dir``.

    Either ``folders_to_convert`` is given directly, or it is derived from
    ``split_to_convert`` (the union of a split's train/val/test folders).
    Files are processed concurrently via a worker pool.

    NOTE(review): ``batch_size`` is unused in this function, and
    ``split_to_convert`` is passed through ``eval`` — never call this with
    untrusted input.
    """
    if ((folders_to_convert is None) and (split_to_convert is not None)):
        # SECURITY: eval of a caller-supplied string; assumes a trusted dict
        # literal like "{'train': [...], 'val': [...], 'test': [...]}".
        split_to_convert = eval(split_to_convert)
        logger.info(f'Converting from split {split_to_convert}')
        folders_to_convert = sorted(list(set(((split_to_convert['train'] + split_to_convert['val']) + split_to_convert['test']))))
    assert (folders_to_convert is not None), 'No folders to convert. Aborting'
    logger.info(f'Converting folders {str(folders_to_convert)}')
    assert (len(folders_to_convert) == 1)
    pool = Pool(n_dataloader_workers)
    for fpath in tqdm(os.listdir(os.path.join(data_dir, SOURCE_TASK, folders_to_convert[0], SOURCE_TASK))):
        dirname = get_parent_dirname(fpath)  # NOTE(review): result unused — confirm intent
        source_path = os.path.join(data_dir, SOURCE_TASK, folders_to_convert[0], SOURCE_TASK, fpath)
        target_path = os.path.join(save_dir, SOURCE_TASK, folders_to_convert[0], fpath)
        pool.apply_async(shrink_file, args=(source_path, target_path))
    pool.close()
    pool.join()
    # NOTE(review): prints only the last iteration's target path, and raises
    # NameError when the directory is empty — confirm intent.
    print(target_path)
def test_inheritance_init(msg):
    """Overriding __init__ without calling every base __init__ must raise."""

    class Python(m.Pet):
        def __init__(self):
            # Deliberately skip m.Pet.__init__.
            pass

    with pytest.raises(TypeError) as exc_info:
        Python()
    assert msg(exc_info.value) == 'm.class_.Pet.__init__() must be called when overriding __init__'

    class RabbitHamster(m.Rabbit, m.Hamster):
        def __init__(self):
            # Initialize only one base; Hamster's __init__ is skipped.
            m.Rabbit.__init__(self, 'RabbitHamster')

    with pytest.raises(TypeError) as exc_info:
        RabbitHamster()
    assert msg(exc_info.value) == 'm.class_.Hamster.__init__() must be called when overriding __init__'
class CodeGenMatlab(CodeGen):
def __init__(self):
    # MATLAB backend: route all generation through the MATLAB parser type.
    super().__init__(ParserTypeEnum.MATLAB)
def init_type(self, type_walker, func_name):
    """Reset per-function code-gen state after the base initialization."""
    super().init_type(type_walker, func_name)
    # MATLAB output needs no identifier prefix and no trailing code section.
    self.new_id_prefix = ''
    self.post_str = ''
def get_dim_check_str(self):
    """Wrap the base class's dimension checks in MATLAB ``assert`` lines."""
    if len(self.get_cur_param_data().same_dim_list) == 0:
        return []
    return ['    assert( {} );'.format(stat) for stat in super().get_dim_check_str()]
def get_arith_dim_check_str(self):
    """Assert that every arithmetic dimension expression is an integer."""
    arith_dims = self.get_cur_param_data().arith_dim_list
    if not arith_dims:
        return []
    return ['    assert( mod({}, 1) == 0.0 );'.format(dims) for dims in arith_dims]
def randn_str(self, sizes=[]):
    """MATLAB ``randn`` call text for a scalar, vector, or matrix."""
    if not sizes:
        return 'randn()'
    if len(sizes) == 1:
        # Column vector: force an explicit second dimension of 1.
        return 'randn({},1)'.format(sizes[0])
    return 'randn({})'.format(','.join(str(s) for s in sizes))
def randi_str(self, rand_int_max, sizes=[]):
    """MATLAB ``randi`` call text with values bounded by ``rand_int_max``."""
    if not sizes:
        return 'randi({})'.format(rand_int_max)
    if len(sizes) == 1:
        # Column vector: force an explicit second dimension of 1.
        return 'randi({},{},1)'.format(rand_int_max, sizes[0])
    return 'randi({},{})'.format(rand_int_max, ','.join(str(s) for s in sizes))
def get_rand_test_str(self, la_type, rand_int_max):
    """Random-value expression for a matrix, vector, or scalar ``la_type``.

    Integer element types use ``randi`` (bounded); everything else ``randn``.
    Unknown types yield an empty string.
    """
    def _int_scalar(element_type):
        return isinstance(element_type, LaVarType) and element_type.is_scalar() and element_type.is_int

    if la_type.is_matrix():
        dims = [la_type.rows, la_type.cols]
        if _int_scalar(la_type.element_type):
            return self.randi_str(rand_int_max, dims)
        return self.randn_str(dims)
    if la_type.is_vector():
        dims = [la_type.rows]
        if _int_scalar(la_type.element_type):
            return self.randi_str(rand_int_max, dims)
        return self.randn_str(dims)
    if la_type.is_scalar():
        return self.randn_str()
    return ''
def get_func_test_str(self, var_name, func_type, rand_int_max):
    """Emit MATLAB test lines building a random function value ``var_name``.

    The generated inner function captures a random seed and re-seeds the
    RNG on every call, so repeated invocations return identical values.
    """
    test_content = []
    param_list = []
    # NOTE(review): never filled below — confirm whether dimension setup is
    # expected to happen elsewhere.
    dim_definition = []
    for index in range(len(func_type.params)):
        param_list.append('{}{}'.format(self.param_name_test, index))
    anon_str = ' {} = ({}) '.format(var_name, ', '.join(param_list))  # NOTE(review): unused
    test_indent = ''
    test_content.append((test_indent + ' {} = {}Func;'.format(var_name, (var_name + ''))))
    test_content.append((test_indent + ' rseed = randi(2^32);'))
    ret_list = []
    content_list = []
    for cur_index in range(len(func_type.ret)):
        cur_name = self.generate_var_name('ret')
        ret_list.append(cur_name)
        if func_type.ret[cur_index].is_set():
            # Set-typed returns need the element-by-element generator.
            content_list += self.get_set_test_list(cur_name, self.generate_var_name('dim'), 'i', func_type.ret[cur_index], rand_int_max, ' ')
        else:
            content_list.append((test_indent + ' {} = {};'.format(cur_name, self.get_rand_test_str(func_type.ret[cur_index], rand_int_max))))
    test_content.append((test_indent + ' function [{}] = {}({})'.format(', '.join(ret_list), (var_name + 'Func'), ', '.join(param_list))))
    test_content.append((test_indent + ' rng(rseed);'))
    test_content += dim_definition
    test_content += content_list
    test_content.append((test_indent + ' end'))
    return test_content
def get_set_test_list(self, parameter, dim_name, ind_name, la_type, rand_int_max, test_indent=' '):
    """MATLAB lines that fill ``parameter`` with random set elements.

    A loop of random length ``dim_name`` appends one row per iteration;
    integer components (per ``la_type.int_list``) use ``randi``, the rest
    ``randn``. Every emitted line is prefixed with ``test_indent``.
    """
    test_content = []
    test_content.append('{} = [];'.format(parameter))
    test_content.append('{} = randi({});'.format(dim_name, rand_int_max))
    test_content.append('for {} = 1:{} '.format(ind_name, dim_name))
    gen_list = []
    # One random entry per set component.
    for i in range(la_type.size):
        if la_type.int_list[i]:
            gen_list.append('randi({})'.format(rand_int_max))
        else:
            gen_list.append('randn()')
    test_content.append(((' {} = [{};'.format(parameter, parameter) + ', '.join(gen_list)) + '];'))
    test_content.append('end')
    test_content = ['{}{}'.format(test_indent, line) for line in test_content]
    return test_content
def visit_id(self, node, **kwargs):
    """Emit the MATLAB expression for an identifier node.

    Plain names are filtered and mapped through the naming-convention
    table; doubly-subscripted matrix accesses become ``name(i, j)``.
    """
    content = self.filter_symbol(node.get_name())
    if content in self.name_convention_dict:
        content = self.name_convention_dict[content]
    if self.convert_matrix and node.contain_subscript() and len(node.subs) == 2:
        # Dense and sparse matrices use the same (row, col) indexing in
        # MATLAB, so the original's two identical branches are collapsed.
        if self.get_sym_type(node.main_id).is_matrix():
            content = '{}({}, {})'.format(node.main_id, node.subs[0], node.subs[1])
    return CodeNodeInfo(content)
def get_result_name(self):
    """Name of the MATLAB struct that holds all outputs."""
    return 'output'
def get_module_str(self):
    """Concatenate, for every imported module: its struct definition, its
    initialization call, and the unpacking of its exported symbols."""
    def_struct = ''
    init_struct = ''
    init_var = ''
    if (len(self.module_list) > 0):
        for module in self.module_list:
            def_struct += self.update_prelist_str([module.frame.struct], ' ')
            # Call the module constructor with its parameters (if any).
            if (len(module.params) > 0):
                init_struct += ' {}_ = {}({});\n'.format(module.name, module.name, ', '.join(module.params))
            else:
                init_struct += ' {}_ = {}();\n'.format(module.name, module.name)
            # Pull each exported symbol out of the module struct.
            for sym in module.syms:
                init_var += ' {} = {}_.{};\n'.format(sym, module.name, sym)
    return ((def_struct + init_struct) + init_var)
def get_struct_definition(self, init_content):
    """Assignment lines copying every output symbol into the result struct.

    NOTE(review): ``init_content`` is unused here — confirm whether the
    base-class contract requires the parameter.
    """
    ret_name = self.get_result_name()
    assign_list = []
    for parameter in self.lhs_list:
        # Only symbols with a known type make it into the struct.
        if ((parameter in self.symtable) and (self.get_sym_type(parameter) is not None)):
            assign_list.append(' {}.{} = {};'.format(ret_name, parameter, parameter))
    for parameter in self.local_func_syms:
        assign_list.append(' {}.{} = {};'.format(ret_name, parameter, parameter))
    return (('\n'.join(assign_list) + self.get_used_params_content()) + '\n')
def get_ret_struct(self):
    """MATLAB constructor call packing all left-hand-side symbols."""
    return '{}({})'.format(self.get_result_name(), ', '.join(self.lhs_list))
def gen_same_seq_test(self):
    """Generate MATLAB test lines for sequence parameters that must share
    random dimensions.

    Symbols are grouped by ``get_intersect_list``; each group shares one
    random variable per dimension key. Returns (visited_sym_set,
    test_content) so callers can skip these symbols later.
    """
    test_content = []
    visited_sym_set = set()
    rand_int_max = 10
    subs_list = self.get_intersect_list()
    if (len(subs_list) > 0):
        rand_name_dict = {}
        rand_def_dict = {}
        # One fresh random dimension variable per shared dimension key.
        for keys in self.seq_dim_dict:
            new_name = self.generate_var_name(keys)
            rand_name_dict[keys] = new_name
            rand_def_dict[keys] = ' {} = randi({});'.format(new_name, rand_int_max)
        new_seq_dim_dict = self.convert_seq_dim_dict()
        def get_keys_in_set(cur_set):
            # All dimension keys used by any symbol in this group.
            keys_list = []
            for sym in cur_set:
                keys_list += new_seq_dim_dict[sym].values()
            return set(keys_list)
        for sym_set in subs_list:
            visited_sym_set = visited_sym_set.union(sym_set)
            cur_test_content = []
            defined_content = []
            cur_block_content = []
            first = True
            keys_set = get_keys_in_set(sym_set)
            # Define each shared dimension before the fill loop uses it.
            for key in keys_set:
                cur_block_content.append(rand_def_dict[key])
            for cur_sym in sym_set:
                if first:
                    first = False
                    # Single loop header shared by all symbols in the group.
                    cur_test_content.append(' for i = 1:{}'.format(self.get_sym_type(cur_sym).size))
                dim_dict = new_seq_dim_dict[cur_sym]
                defined_content.append(' {} = {{}};'.format(cur_sym, self.get_sym_type(cur_sym).size))
                if self.get_sym_type(cur_sym).element_type.is_vector():
                    # Vector elements: one random dimension, randi for ints.
                    if self.get_sym_type(cur_sym).element_type.is_integer_element():
                        cur_block_content.append(' {} = [{}; randi({}, {})];'.format(cur_sym, cur_sym, rand_int_max, rand_name_dict[dim_dict[1]]))
                    else:
                        cur_block_content.append(' {} = [{}; randn({})];'.format(cur_sym, cur_sym, rand_name_dict[dim_dict[1]]))
                else:
                    # Matrix elements: dynamic rows/cols fall back to the
                    # shared random dimension variables.
                    row_str = (self.get_sym_type(cur_sym).element_type.rows if (not self.get_sym_type(cur_sym).element_type.is_dynamic_row()) else rand_name_dict[dim_dict[1]])
                    col_str = (self.get_sym_type(cur_sym).element_type.cols if (not self.get_sym_type(cur_sym).element_type.is_dynamic_col()) else rand_name_dict[dim_dict[2]])
                    if self.get_sym_type(cur_sym).element_type.is_integer_element():
                        cur_block_content.append(' {} = [{}; randi({}, {}, {})];'.format(cur_sym, cur_sym, rand_int_max, row_str, col_str))
                    else:
                        cur_block_content.append(' {} = [{}; randn({}, {})];'.format(cur_sym, cur_sym, row_str, col_str))
            cur_test_content = ((defined_content + cur_test_content) + cur_block_content)
            cur_test_content.append(' end')
            test_content += cur_test_content
    return (visited_sym_set, test_content)
def gen_dim_content(self, rand_int_max=10):
    """Produce dimension bookkeeping for the generated MATLAB code.

    Returns (dim_defined_dict, test_content, dim_content):
    ``test_content`` assigns random values to free dimension variables in
    the test harness; ``dim_content`` derives each dimension from the
    size of its defining parameter in the generated function itself.
    """
    test_indent = ' '
    test_content = []
    dim_content = ''
    dim_defined_dict = {}
    dim_defined_list = []
    if self.get_cur_param_data().dim_dict:
        for (key, target_dict) in self.get_cur_param_data().dim_dict.items():
            # Skip dimensions that are themselves parameters or belong to
            # sequence handling elsewhere.
            if ((key in self.parameters) or (key in self.get_cur_param_data().dim_seq_set)):
                continue
            target = list(target_dict.keys())[0]
            dim_defined_dict[target] = target_dict[target]
            has_defined = False
            if (len(self.get_cur_param_data().same_dim_list) > 0):
                if (key not in dim_defined_list):
                    # First member of a same-dimension group defines the
                    # value; the rest alias it (or the same integer).
                    for cur_set in self.get_cur_param_data().same_dim_list:
                        if (key in cur_set):
                            int_dim = self.get_int_dim(cur_set)
                            has_defined = True
                            if (int_dim == (- 1)):
                                test_content.append((test_indent + ' {} = {};'.format(key, self.randi_str(rand_int_max))))
                            else:
                                test_content.append((test_indent + ' {} = {};'.format(key, int_dim)))
                            for same_key in cur_set:
                                if (same_key != key):
                                    dim_defined_list.append(same_key)
                                    if (not isinstance(same_key, int)):
                                        if (int_dim == (- 1)):
                                            test_content.append((test_indent + ' {} = {};'.format(same_key, key)))
                                        else:
                                            test_content.append((test_indent + ' {} = {};'.format(same_key, int_dim)))
                            break
                else:
                    # Already aliased by an earlier group member.
                    has_defined = True
            if (not has_defined):
                test_content.append((test_indent + ' {} = {};'.format(key, self.randi_str(rand_int_max))))
            # Inside the generated function, read the dimension back from
            # the defining parameter's size.
            if (self.get_cur_param_data().symtable[target].is_sequence() and self.get_cur_param_data().symtable[target].element_type.is_dynamic()):
                if (target_dict[target] == 0):
                    dim_content += ' {} = size({}, 1);\n'.format(key, target)
                else:
                    # Dynamic element dims come from the first cell entry.
                    dim_content += ' {} = size({}{{1}}, {});\n'.format(key, target, target_dict[target])
            else:
                dim_content += ' {} = size({}, {});\n'.format(key, target, (target_dict[target] + 1))
    return (dim_defined_dict, test_content, dim_content)
def get_used_params_content(self):
    """Assignments that store every used parameter in the result struct."""
    assign_list = []
    for param in self.used_params:
        assign_list.append(' {}.{} = {};'.format(self.get_result_name(), param, param))
    if (len(assign_list) > 0):
        # Leading newline separates these from the preceding assignments.
        return ('\n' + ' \n'.join(assign_list))
    else:
        return ''
def get_param_content(self, test_indent, type_declare, test_generated_sym_set, rand_func_name):
    """Generate per-parameter validation and random-data code.

    For every declared parameter this emits (a) MATLAB type/shape assertions,
    (b) random-data generation lines for the test driver, and (c) reshape
    declarations appended to `type_declare` (mutated in place).

    :param test_indent: indentation prefix for generated test lines.
    :param type_declare: list mutated in place with reshape(...) declarations.
    :param test_generated_sym_set: symbols whose test data is generated elsewhere.
    :param rand_func_name: name of the generated random-data function.
    :return: tuple (type_checks, doc, test_content, test_function).
    """
    test_content = []
    doc = []
    test_function = [(test_indent + 'function [{}] = {}()'.format(', '.join(self.parameters), rand_func_name))]
    type_checks = []
    rand_int_max = 10
    for parameter in self.parameters:
        if self.get_sym_type(parameter).desc:
            # NOTE(review): show_doc is assigned here but never read in this
            # method (visit_block keeps its own show_doc) — confirm intent.
            show_doc = True
            doc.append(' :param :{} :{}'.format(parameter, self.get_sym_type(parameter).desc))
        if self.get_sym_type(parameter).is_sequence():
            ele_type = self.get_sym_type(parameter).element_type
            data_type = ele_type.element_type
            if (ele_type.is_matrix() and ele_type.sparse):
                # NOTE(review): this branch emits Python/scipy-style code
                # (.append / sparse.random) — presumably leftover from the
                # numpy backend; confirm whether it is reachable for MATLAB.
                type_checks.append(' assert {}.shape == ({},)'.format(parameter, self.get_sym_type(parameter).size))
                test_content.append((test_indent + ' {} = [];'.format(parameter)))
                test_content.append((test_indent + ' for i = 1:{}'.format(self.get_sym_type(parameter).size)))
                if (isinstance(data_type, LaVarType) and data_type.is_scalar() and data_type.is_int):
                    test_content.append((test_indent + ' {}.append(sparse.random({}, {}, dtype=np.integer, density=0.25))'.format(parameter, ele_type.rows, ele_type.cols)))
                else:
                    test_content.append((test_indent + ' {}.append(sparse.random({}, {}, dtype=np.float64, density=0.25))'.format(parameter, ele_type.rows, ele_type.cols)))
            else:
                sizes = []
                if ele_type.is_matrix():
                    if (not ele_type.is_dynamic()):
                        type_checks.append(' assert( isequal(size({}), [{}, {}, {}]) );'.format(parameter, self.get_sym_type(parameter).size, ele_type.rows, ele_type.cols))
                        sizes = [self.get_sym_type(parameter).size, ele_type.rows, ele_type.cols]
                    elif (parameter not in test_generated_sym_set):
                        # dynamic rows/cols: pick random sizes per element
                        row_str = ('randi({})'.format(rand_int_max) if ele_type.is_dynamic_row() else ele_type.rows)
                        col_str = ('randi({})'.format(rand_int_max) if ele_type.is_dynamic_col() else ele_type.cols)
                        test_content.append(' {} = {{}};'.format(parameter))
                        test_content.append(' for i = 1:{}'.format(self.get_sym_type(parameter).size))
                        if ele_type.is_integer_element():
                            test_content.append(' {} = [{}; randi({}, {}, {})];'.format(parameter, parameter, rand_int_max, row_str, col_str))
                        else:
                            test_content.append(' {} = [{}; randn({}, {})];'.format(parameter, parameter, row_str, col_str))
                        test_content.append(' end')
                elif ele_type.is_vector():
                    if (not ele_type.is_dynamic()):
                        type_checks.append(' assert( isequal(size({}), [{}, {}]) );'.format(parameter, self.get_sym_type(parameter).size, ele_type.rows))
                        sizes = [self.get_sym_type(parameter).size, ele_type.rows]
                    elif (parameter not in test_generated_sym_set):
                        test_content.append(' {} = {{}};'.format(parameter))
                        test_content.append(' for i = 1:{}'.format(self.get_sym_type(parameter).size))
                        if ele_type.is_integer_element():
                            test_content.append(' {} = [{}; randi({}, randi({}))];'.format(parameter, parameter, rand_int_max, rand_int_max))
                        else:
                            test_content.append(' {} = [{}; randn(randi({}))];'.format(parameter, parameter, rand_int_max))
                        test_content.append(' end')
                elif ele_type.is_scalar():
                    # sequence of scalars is stored as a column vector
                    type_declare.append(' {} = reshape({},[],1);'.format(parameter, parameter))
                    type_checks.append(' assert( size({},1) == {} );'.format(parameter, self.get_sym_type(parameter).size))
                    sizes = [self.get_sym_type(parameter).size]
                # random data for the element payload of the sequence
                if isinstance(data_type, LaVarType):
                    if (data_type.is_scalar() and data_type.is_int):
                        if ((parameter not in test_generated_sym_set) and (not ele_type.is_dynamic())):
                            test_content.append(' {} = {};'.format(parameter, self.randi_str(rand_int_max, sizes)))
                    elif ele_type.is_set():
                        test_content.append(' {} = {{}};'.format(parameter))
                        test_content.append(' for i = 1:{}'.format(self.get_sym_type(parameter).size))
                        set_content = self.get_set_test_list('{}_tmp'.format(parameter), self.generate_var_name('dim'), 'j', ele_type, rand_int_max, ' ')
                        set_content = [' {}'.format(line) for line in set_content]
                        test_content += set_content
                        test_content.append(' {} = [{}, {}];'.format(parameter, parameter, '{}_tmp'.format(parameter)))
                        test_content.append(' end')
                    elif ((parameter not in test_generated_sym_set) and (not ele_type.is_dynamic())):
                        test_content.append(' {} = {};'.format(parameter, self.randn_str(sizes)))
                elif ele_type.is_function():
                    # sequence of function handles: define one handle, reuse it
                    test_content.append(' {} = {{}};'.format(parameter))
                    func_content = self.get_func_test_str('{}_f'.format(parameter), ele_type, rand_int_max)
                    func_content = [' {}'.format(line) for line in func_content]
                    test_content += func_content
                    test_content.append(' for i = 1:{}'.format(self.get_sym_type(parameter).size))
                    test_content.append(' {}{{end+1,1}} = {};'.format(parameter, '{}_f'.format(parameter)))
                    test_content.append(' end')
                else:
                    test_content.append(' {} = {};'.format(parameter, self.randn_str(sizes)))
        elif self.get_sym_type(parameter).is_matrix():
            element_type = self.get_sym_type(parameter).element_type
            if isinstance(element_type, LaVarType):
                if self.get_sym_type(parameter).sparse:
                    # NOTE(review): sparse.random/np dtypes are Python syntax,
                    # not MATLAB — presumably leftover; confirm reachability.
                    if (element_type.is_scalar() and element_type.is_int):
                        test_content.append((test_indent + ' {} = sparse.random({}, {}, dtype=np.integer, density=0.25)'.format(parameter, self.get_sym_type(parameter).rows, self.get_sym_type(parameter).cols)))
                    else:
                        test_content.append((test_indent + ' {} = sparse.random({}, {}, dtype=np.float64, density=0.25)'.format(parameter, self.get_sym_type(parameter).rows, self.get_sym_type(parameter).cols)))
                elif (element_type.is_scalar() and element_type.is_int):
                    test_content.append((test_indent + ' {} = randi({}, {}, {});'.format(parameter, rand_int_max, self.get_sym_type(parameter).rows, self.get_sym_type(parameter).cols)))
                else:
                    test_content.append((test_indent + ' {} = randn({}, {});'.format(parameter, self.get_sym_type(parameter).rows, self.get_sym_type(parameter).cols)))
            elif self.get_sym_type(parameter).sparse:
                test_content.append((test_indent + ' {} = sparse.random({}, {}, dtype=np.float64, density=0.25)'.format(parameter, self.get_sym_type(parameter).rows, self.get_sym_type(parameter).cols)))
            else:
                # NOTE(review): np.asarray in a type check is Python syntax in
                # otherwise-MATLAB output — confirm.
                type_checks.append(' {} = np.asarray({})'.format(parameter, parameter))
                test_content.append((test_indent + ' {} = randn({}, {});'.format(parameter, self.get_sym_type(parameter).rows, self.get_sym_type(parameter).cols)))
            type_checks.append(' assert( isequal(size({}), [{}, {}]) );'.format(parameter, self.get_sym_type(parameter).rows, self.get_sym_type(parameter).cols))
        elif self.get_sym_type(parameter).is_vector():
            element_type = self.get_sym_type(parameter).element_type
            # normalize any row vector input into a column vector
            type_declare.append(' {} = reshape({},[],1);'.format(parameter, parameter))
            if isinstance(element_type, LaVarType):
                if (element_type.is_scalar() and element_type.is_int):
                    test_content.append((test_indent + ' {} = randi({}, {});'.format(parameter, rand_int_max, self.get_sym_type(parameter).rows)))
                else:
                    test_content.append((test_indent + ' {} = randn({},1);'.format(parameter, self.get_sym_type(parameter).rows)))
            else:
                test_content.append((test_indent + ' {} = randn({},1)'.format(parameter, self.get_sym_type(parameter).rows)))
            type_checks.append(' assert( numel({}) == {} );'.format(parameter, self.get_sym_type(parameter).rows))
        elif self.get_sym_type(parameter).is_scalar():
            type_checks.append(' assert(numel({}) == 1);'.format(parameter))
            # scalars go straight into the random-data function body
            if self.get_sym_type(parameter).is_int:
                test_function.append((test_indent + ' {} = randi({});'.format(parameter, rand_int_max)))
            else:
                test_function.append((test_indent + ' {} = randn();'.format(parameter)))
        elif self.get_sym_type(parameter).is_set():
            if (self.get_sym_type(parameter).size > 1):
                type_checks.append(' assert(size({},2) == {})'.format(parameter, self.get_sym_type(parameter).size))
            test_content += self.get_set_test_list(parameter, self.generate_var_name('dim'), 'i', self.get_sym_type(parameter), rand_int_max, ' ')
        elif self.get_sym_type(parameter).is_function():
            # function-typed parameter: synthesize a nested MATLAB function
            # whose return dims may be templated on its argument sizes
            param_list = []
            dim_definition = []
            if self.get_sym_type(parameter).ret_template():
                for ret_dim in self.get_sym_type(parameter).ret_symbols:
                    param_i = self.get_sym_type(parameter).template_symbols[ret_dim]
                    if self.get_sym_type(parameter).params[param_i].is_vector():
                        dim_definition.append(' {} = size({}{}, 1);'.format(ret_dim, self.param_name_test, param_i))
                    elif self.get_sym_type(parameter).params[param_i].is_matrix():
                        if (ret_dim == self.get_sym_type(parameter).params[param_i].rows):
                            dim_definition.append(' {} = size({}{}, 1);'.format(ret_dim, self.param_name_test, param_i))
                        else:
                            dim_definition.append(' {} = size({}{}, 2);'.format(ret_dim, self.param_name_test, param_i))
            for index in range(len(self.get_sym_type(parameter).params)):
                param_list.append('{}{}'.format(self.param_name_test, index))
            test_content.append((test_indent + ' {} = {};'.format(parameter, (parameter + 'Func'))))
            # fixed seed so the synthesized function is deterministic per call
            test_content.append((test_indent + ' rseed = randi(2^32);'))
            ret_list = []
            content_list = []
            for cur_index in range(len(self.get_sym_type(parameter).ret)):
                cur_name = self.generate_var_name('ret')
                ret_list.append(cur_name)
                if self.get_sym_type(parameter).ret[cur_index].is_set():
                    content_list += self.get_set_test_list(cur_name, self.generate_var_name('dim'), 'i', self.get_sym_type(parameter).ret[cur_index], rand_int_max, ' ')
                else:
                    content_list.append((test_indent + ' {} = {};'.format(cur_name, self.get_rand_test_str(self.get_sym_type(parameter).ret[cur_index], rand_int_max))))
            test_content.append((test_indent + ' function [{}] = {}({})'.format(', '.join(ret_list), (parameter + 'Func'), ', '.join(param_list))))
            test_content.append((test_indent + ' rng(rseed);'))
            test_content += dim_definition
            test_content += content_list
            test_content.append((test_indent + ' end\n'))
    return (type_checks, doc, test_content, test_function)
def visit_block(self, node, **kwargs):
    """Emit the full MATLAB function for the program's top-level block.

    Assembles, in order: the function declaration and comment header, the
    nargin==0 random-data fallback plus its generator function, parameter
    reshapes, dimension bindings, type/shape assertions, the translated
    statements, any local function definitions, and the closing `end`.
    Stores the result in self.code_frame.struct and returns it.
    """
    type_declare = []
    show_doc = False
    rand_func_name = 'generateRandomData'
    test_indent = ' '
    rand_int_max = 10
    (dim_defined_dict, test_content, dim_content) = self.gen_dim_content()
    (test_generated_sym_set, seq_test_list) = self.gen_same_seq_test()
    test_content += seq_test_list
    # type_declare is filled in place by get_param_content
    (type_checks, doc, param_test_content, test_function) = self.get_param_content(test_indent, type_declare, test_generated_sym_set, rand_func_name)
    test_content += param_test_content
    declaration_content = 'function {} = {}({})\n'.format(self.get_result_name(), self.func_name, ', '.join(self.parameters))
    # echo the original source as a MATLAB comment header
    comment_content = '% {} = {}({})\n%\n'.format(self.get_result_name(), self.func_name, ', '.join(self.parameters))
    comment_content += '% {}'.format('\n% '.join(self.la_content.split('\n')))
    comment_content += '\n'
    content = ''
    test_function += test_content
    test_function.append((test_indent + 'end'))
    if (len(self.parameters) > 0):
        # generate random inputs when the function is called with no args
        content += ' if nargin==0\n'
        content += " warning('generating random input data');\n"
        content += ' [{}] = {}();\n'.format(', '.join(self.parameters), rand_func_name)
        content += ' end\n'
        content += '\n'.join(test_function)
        content += '\n\n'
    if (len(type_declare) > 0):
        content += ('\n'.join(type_declare) + '\n\n')
    content += dim_content
    type_checks += self.get_dim_check_str()
    type_checks += self.get_arith_dim_check_str()
    if (len(type_checks) > 0):
        content += ('\n'.join(type_checks) + '\n\n')
    stats_content = self.get_module_str()
    for index in range(len(node.stmts)):
        ret_str = ''
        if (index == (len(node.stmts) - 1)):
            # last statement: its value becomes the return symbol unless it
            # is already an assignment; local functions are emitted elsewhere
            if (type(node.stmts[index]).__name__ != 'AssignNode'):
                if (type(node.stmts[index]).__name__ == 'LocalFuncNode'):
                    self.visit(node.stmts[index], **kwargs)
                    continue
                kwargs[LHS] = self.ret_symbol
                ret_str = ((' ' + self.ret_symbol) + ' = ')
        elif (type(node.stmts[index]).__name__ != 'AssignNode'):
            if (type(node.stmts[index]).__name__ == 'LocalFuncNode'):
                self.visit(node.stmts[index], **kwargs)
                continue
        stat_info = self.visit(node.stmts[index], **kwargs)
        if stat_info.pre_list:
            stats_content += ''.join(stat_info.pre_list)
        stats_content += (ret_str + stat_info.content)
        if (index == (len(node.stmts) - 1)):
            if (type(node.stmts[index]).__name__ != 'AssignNode'):
                stats_content += ';\n'
    # append nested local function definitions and the output struct builder
    stats_content += (self.local_func_def + self.get_struct_definition(''))
    content += stats_content
    content += 'end\n'
    declaration_content = self.trim_content(declaration_content)
    content = self.trim_content(content)
    self.code_frame.struct = ((declaration_content + comment_content) + content)
    return ((declaration_content + comment_content) + content)
def visit_summation(self, node, **kwargs):
    """Emit a MATLAB accumulation loop for a summation node.

    Registers name conventions so subscripted symbols render as MATLAB
    indexing, zero-initializes the accumulator according to its type, then
    wraps the summand expression in the appropriate for-loop (either over an
    enumerated range or over the size of the first subscripted operand).
    Returns a CodeNodeInfo whose pre_list holds the generated loop.
    """
    target_var = []
    def set_name_conventions(sub):
        # map each subscripted symbol, e.g. a_i -> a(i), a_ij -> a(i, j),
        # and record which symbols are driven by this summation index
        name_convention = {}
        for var in node.symbols:
            if self.contain_subscript(var):
                var_ids = self.get_all_ids(var)
                var_subs = var_ids[1]
                for var_sub in var_subs:
                    if (sub == var_sub):
                        target_var.append(var_ids[0])
                if (len(var_ids[1]) > 1):
                    name_convention[var] = '{}({}, {})'.format(var_ids[0], var_ids[1][0], var_ids[1][1])
                else:
                    name_convention[var] = '{}({})'.format(var_ids[0], var_ids[1][0])
        self.add_name_conventions(name_convention)
        return name_convention
    if node.enum_list:
        name_convention = {}
        for sub in node.enum_list:
            name_convention.update(set_name_conventions(sub))
    else:
        sub = self.visit(node.id).content
        name_convention = set_name_conventions(sub)
    for (sym, subs) in node.sym_dict.items():
        target_var.append(sym)
    assign_id = node.symbol
    cond_content = ''
    if node.cond:
        cond_info = self.visit(node.cond, **kwargs)
        cond_content = (('if ' + cond_info.content) + '\n')
    kwargs[WALK_TYPE] = WalkTypeEnum.RETRIEVE_EXPRESSION
    content = []
    exp_info = self.visit(node.exp)
    exp_str = exp_info.content
    assign_id_type = self.get_sym_type(assign_id)
    # zero-initialize the accumulator to match its declared shape
    if assign_id_type.is_matrix():
        content.append('{} = zeros({}, {});\n'.format(assign_id, assign_id_type.rows, assign_id_type.cols))
    elif assign_id_type.is_vector():
        content.append('{} = zeros({},1);\n'.format(assign_id, assign_id_type.rows))
    elif assign_id_type.is_sequence():
        ele_type = assign_id_type.element_type
        content.append('{} = zeros({}, {}, {});\n'.format(assign_id, assign_id_type.size, ele_type.rows, ele_type.cols))
    else:
        content.append('{} = 0;\n'.format(assign_id))
    if node.enum_list:
        # enumerated summation: iterate the rows of the range expression and
        # unpack each enum symbol from the corresponding column
        range_info = self.visit(node.range, **kwargs)
        index_name = self.generate_var_name('index')
        content.append('for {} = 1:size({}, 1)\n'.format(index_name, range_info.content))
        extra_content = ''
        for i in range(len(node.enum_list)):
            content.append(' {} = {}({}, {});\n'.format(node.enum_list[i], range_info.content, index_name, (i + 1)))
        exp_pre_list = []
        if exp_info.pre_list:
            # re-split pre_list so each line can carry loop indentation
            list_content = ''.join(exp_info.pre_list)
            list_content = list_content.split('\n')
            for index in range(len(list_content)):
                if (index != (len(list_content) - 1)):
                    exp_pre_list.append((list_content[index] + '\n'))
        content += exp_pre_list
        # NOTE(review): emits '+=' here, which MATLAB does not support —
        # presumably rewritten downstream; confirm.
        content.append(str(((((' ' + assign_id) + ' += ') + exp_str) + ';\n')))
        content[0] = (' ' + content[0])
        self.del_name_conventions(name_convention)
        return CodeNodeInfo(assign_id, pre_list=[' '.join(content)])
    # plain summation: loop bound comes from the first subscripted operand
    sym_info = node.sym_dict[target_var[0]]
    if self.get_sym_type(target_var[0]).is_matrix():
        if (sub == sym_info[0]):
            content.append('for {} = 1:size({},1)\n'.format(sub, target_var[0]))
        else:
            content.append('for {} = 1:size({},2)\n'.format(sub, target_var[0]))
    elif self.get_sym_type(target_var[0]).is_sequence():
        sym_list = node.sym_dict[target_var[0]]
        sub_index = sym_list.index(sub)
        if (sub_index == 0):
            size_str = '{}, 1'.format(target_var[0])
        elif (sub_index == 1):
            if self.get_sym_type(target_var[0]).element_type.is_dynamic_row():
                size_str = '{}{{{}}}, 1'.format(self.convert_bound_symbol(target_var[0]), sym_list[0])
            else:
                size_str = '{}'.format(self.get_sym_type(target_var[0]).element_type.rows)
        elif self.get_sym_type(target_var[0]).element_type.is_dynamic_col():
            size_str = '{}{{{}}}, 2'.format(self.convert_bound_symbol(target_var[0]), sym_list[0])
        else:
            size_str = '{}'.format(self.get_sym_type(target_var[0]).element_type.cols)
        content.append('for {} = 1:size({})\n'.format(sub, size_str))
    else:
        content.append('for {} = 1:size({},1)\n'.format(sub, self.convert_bound_symbol(target_var[0])))
    if exp_info.pre_list:
        list_content = ''.join(exp_info.pre_list)
        list_content = list_content.split('\n')
        for index in range(len(list_content)):
            if (index != (len(list_content) - 1)):
                content.append((list_content[index] + '\n'))
    indent = str(' ')
    if node.cond:
        content.append((' ' + cond_content))
        indent += ' '
    content.append(str(((((((indent + assign_id) + ' = ') + assign_id) + ' + ') + exp_str) + ';\n')))
    content[0] = (' ' + content[0])
    if node.cond:
        content.append(' end\n')
    content.append('end\n')
    return CodeNodeInfo(assign_id, pre_list=[' '.join(content)])
def visit_local_func(self, node, **kwargs):
    """Translate a local function definition into a nested MATLAB function.

    Builds the function body (parameter reshapes, dimension bindings, type
    checks, and one return variable per expression), then appends the whole
    nested function to self.local_func_def. Toggles self.local_func_parsing
    around the traversal so sub-visitors know they are inside a local
    function. Returns an empty CodeNodeInfo — output goes to local_func_def.
    """
    self.local_func_parsing = True
    name_info = self.visit(node.name, **kwargs)
    self.local_func_name = name_info.content
    param_list = []
    for parameter in node.params:
        param_info = self.visit(parameter, **kwargs)
        param_list.append(param_info.content)
    content = ''
    type_declare = []
    rand_func_name = 'generateRandomData'
    test_indent = ' '
    (dim_defined_dict, test_content, dim_content) = self.gen_dim_content()
    (test_generated_sym_set, seq_test_list) = self.gen_same_seq_test()
    test_content += seq_test_list
    (type_checks, doc, param_test_content, test_function) = self.get_param_content(test_indent, type_declare, test_generated_sym_set, rand_func_name)
    test_content += param_test_content
    if (len(type_declare) > 0):
        content += self.update_prelist_str(type_declare, ' ')
    if (dim_content != ''):
        content += self.update_prelist_str([dim_content], ' ')
    type_checks += self.get_dim_check_str()
    type_checks += self.get_arith_dim_check_str()
    if (len(type_checks) > 0):
        type_checks = self.update_prelist_str(type_checks, ' ')
        content += (type_checks + '\n')
    name_list = []
    # one generated return variable per expression in the local function
    for cur_index in range(len(node.expr)):
        cur_ret_name = self.generate_var_name('ret')
        name_list.append(cur_ret_name)
        expr_info = self.visit(node.expr[cur_index], **kwargs)
        if (len(expr_info.pre_list) > 0):
            content += self.update_prelist_str(expr_info.pre_list, ' ')
        # MultiConds emit their own assignments via pre_list, so skip the
        # plain `ret = expr;` line in that case
        if (not node.expr[0].is_node(IRNodeType.MultiConds)):
            content += ' {} = {};\n'.format(cur_ret_name, expr_info.content)
    content += ' end\n\n'
    self.local_func_def += (' function [{}] = {}({})\n'.format(', '.join(name_list), name_info.content, ', '.join(param_list)) + content)
    self.local_func_parsing = False
    return CodeNodeInfo()
def visit_norm(self, node, **kwargs):
    """Emit the MATLAB expression for a norm/determinant node.

    Dispatches on the operand's type (scalar/vector/matrix) and the
    requested norm kind; returns a CodeNodeInfo carrying the expression
    and any accumulated pre_list lines.
    """
    operand_info = self.visit(node.value, **kwargs)
    operand = operand_info.content
    pre_list = operand_info.pre_list
    operand_type = node.value.la_type
    content = ''
    if operand_type.is_scalar():
        # |x| on a scalar is plain absolute value
        content = 'abs({})'.format(operand)
    elif operand_type.is_vector():
        if (node.norm_type == NormType.NormDet):
            content = 'det({})'.format(operand)
        elif (node.norm_type == NormType.NormInteger):
            # bare ||v|| defaults to the 2-norm
            order = 2 if node.sub is None else node.sub
            content = 'norm({}, {})'.format(operand, order)
        elif (node.norm_type == NormType.NormMax):
            content = 'norm({}, inf)'.format(operand)
        elif (node.norm_type == NormType.NormIdentifier):
            sub_info = self.visit(node.sub, **kwargs)
            pre_list += sub_info.pre_list
            if node.sub.la_type.is_scalar():
                content = 'norm({}, {})'.format(operand, sub_info.content)
            else:
                # metric-weighted norm: sqrt(v' * M * v)
                content = "sqrt(({})' * {} * ({}))".format(operand, sub_info.content, operand)
    elif operand_type.is_matrix():
        if (node.norm_type == NormType.NormDet):
            content = 'det({})'.format(operand)
        elif (node.norm_type == NormType.NormFrobenius):
            content = "norm({}, 'fro')".format(operand)
        elif (node.norm_type == NormType.NormNuclear):
            # nuclear norm = sum of singular values
            content = 'norm(svd({}),1)'.format(operand)
    return CodeNodeInfo(content, pre_list)
def visit_transpose(self, node, **kwargs):
    """Wrap the visited operand in MATLAB's transpose operator (')."""
    info = self.visit(node.f, **kwargs)
    info.content = info.content + "'"
    return info
def visit_pseudoinverse(self, node, **kwargs):
    """Wrap the visited operand in MATLAB's pinv(...) pseudo-inverse."""
    info = self.visit(node.f, **kwargs)
    info.content = ''.join(['pinv(', info.content, ')'])
    return info
def visit_squareroot(self, node, **kwargs):
    """Wrap the visited operand in MATLAB's sqrt(...)."""
    info = self.visit(node.value, **kwargs)
    info.content = 'sqrt(' + info.content + ')'
    return info
def visit_power(self, node, **kwargs):
    """Emit MATLAB code for power-like postfix operators.

    node.t marks a transpose, node.r marks an inverse (reciprocal for
    scalars, inv(...) otherwise); otherwise it is a true exponentiation
    using node.power (elementwise .^ for scalar bases, ^ for matrices).
    """
    info = self.visit(node.base, **kwargs)
    if node.t:
        info.content = info.content + "'"
        return info
    if node.r:
        template = '1 / ({})' if node.la_type.is_scalar() else 'inv({})'
        info.content = template.format(info.content)
        return info
    exponent_info = self.visit(node.power, **kwargs)
    op = '.^' if node.base.la_type.is_scalar() else '^'
    info.content = '{}{}{}'.format(info.content, op, exponent_info.content)
    return info
def visit_solver(self, node, **kwargs):
    """Emit MATLAB left division (A\\b) for a linear-solve node."""
    lhs = self.visit(node.left, **kwargs)
    rhs = self.visit(node.right, **kwargs)
    lhs.pre_list.extend(rhs.pre_list)
    lhs.content = '({}\\{})'.format(lhs.content, rhs.content)
    return lhs
def visit_multi_conditionals(self, node, **kwargs):
    """Emit an if/elseif/else ladder for a multi-conditional expression.

    The branches come from visiting node.ifs; an optional `else` arm is
    appended when node.other exists. All generated lines are returned via
    pre_list; the returned content is an empty id.
    """
    assign_node = node.get_ancestor(IRNodeType.Assignment)
    if (assign_node is not None):
        name = self.visit(assign_node.left, **kwargs).content
    else:
        func_node = node.get_ancestor(IRNodeType.LocalFunc)
        name = self.visit(func_node.name, **kwargs).content
    # NOTE(review): `name` is computed above but never used — the else arm
    # below assigns to the hard-coded 'ret' instead; confirm whether `name`
    # was intended there.
    type_info = node
    cur_m_id = ''
    pre_list = []
    if_info = self.visit(node.ifs, **kwargs)
    pre_list += if_info.content
    if node.other:
        other_info = self.visit(node.other, **kwargs)
        pre_list.append(' else\n')
        pre_list.append(' {} = {};\n'.format('ret', other_info.content))
        pre_list.append(' end\n')
    return CodeNodeInfo(cur_m_id, pre_list)
def visit_sparse_matrix(self, node, **kwargs):
    """Emit construction code for a sparse matrix literal.

    Initializes empty index/value accumulators, lets the conditional arms
    (node.ifs) append triplets, then materializes the matrix via MATLAB's
    sparse(i, j, v, m, n). All lines are returned through pre_list; the
    expression content is the matrix symbol.
    """
    op_type = kwargs[ASSIGN_TYPE]
    lhs = kwargs[LHS]
    type_info = node
    cur_m_id = type_info.symbol
    pre_list = []
    # index_var is a 2xN matrix of (row; col) pairs, value_var a 1xN vector
    index_var = type_info.la_type.index_var
    value_var = type_info.la_type.value_var
    pre_list.append(' {} = zeros(2,0);\n'.format(index_var))
    pre_list.append(' {} = zeros(1,0);\n'.format(value_var))
    if_info = self.visit(node.ifs, **kwargs)
    pre_list += if_info.content
    if (op_type == '='):
        pre_list.append(' {} = sparse({}(1,:),{}(2,:),{},{},{});\n'.format(cur_m_id, index_var, index_var, value_var, self.get_sym_type(cur_m_id).rows, self.get_sym_type(cur_m_id).cols))
    elif (op_type == '+='):
        # NOTE(review): this branch emits Python/scipy code, not MATLAB —
        # presumably copied from the numpy backend; confirm whether '+=' on
        # sparse matrices is reachable for the MATLAB target.
        pre_list.append(' {} = scipy.sparse.coo_matrix(({}+{}.data.tolist(), np.hstack((np.asarray({}).T, np.asarray(({}.row, {}.col))))), shape=({}, {}))\n'.format(cur_m_id, value_var, cur_m_id, index_var, cur_m_id, cur_m_id, self.get_sym_type(cur_m_id).rows, self.get_sym_type(cur_m_id).cols))
    return CodeNodeInfo(cur_m_id, pre_list)
def visit_sparse_ifs(self, node, **kwargs):
    """Emit the conditional arms of a sparse-matrix or general ifs node.

    When the enclosing right-hand side is a sparse matrix literal, the arms
    are wrapped in a double for-loop over the matrix subscripts; otherwise
    the arms are emitted as a plain if/elseif chain. Returns the generated
    lines as the CodeNodeInfo content (a list) plus any pre_list.
    """
    assign_node = node.get_ancestor(IRNodeType.Assignment)
    if (assign_node is None):
        # inside a local function: the RHS is the function's first expression
        right_node = node.get_ancestor(IRNodeType.LocalFunc).expr[0]
    else:
        right_node = assign_node.right[0]
    if right_node.is_node(IRNodeType.SparseMatrix):
        assign_node = node.get_ancestor(IRNodeType.Assignment)
        sparse_node = node.get_ancestor(IRNodeType.SparseMatrix)
        subs = assign_node.left[0].subs
        # open a row/col loop pair over the sparse matrix's dimensions
        ret = [' for {} = 1:{}\n'.format(subs[0], sparse_node.la_type.rows), ' for {} = 1:{}\n'.format(subs[1], sparse_node.la_type.cols)]
        pre_list = []
        if node.in_cond_only:
            # conditions themselves drive the iteration; no outer loops
            ret = []
            for cond in node.cond_list:
                cond_info = self.visit(cond, **kwargs)
                for index in range(len(cond_info.content)):
                    cond_info.content[index] = self.update_prelist_str([cond_info.content[index]], ' ')
                ret += cond_info.pre_list
                ret += cond_info.content
        else:
            for cond in node.cond_list:
                cond_info = self.visit(cond, **kwargs)
                for index in range(len(cond_info.content)):
                    cond_info.content[index] = (' ' + cond_info.content[index])
                ret += cond_info.content
                pre_list += cond_info.pre_list
        # NOTE(review): `ret += ' end\n'` extends the list character-by-
        # character (list += str); harmless because callers re-join with
        # ''.join, but a .append was presumably intended.
        ret += ' end\n'
        ret += ' end\n'
        ret += ' end\n'
    else:
        pre_list = []
        ret = []
        for cond in node.cond_list:
            cond_info = self.visit(cond, **kwargs)
            for index in range(len(cond_info.content)):
                cond_info.content[index] = (' ' + cond_info.content[index])
            ret += cond_info.content
            pre_list += cond_info.pre_list
    return CodeNodeInfo(ret, pre_list)
def visit_sparse_if(self, node, **kwargs):
    """Emit one conditional arm of an ifs node.

    For a sparse-matrix RHS the arm appends an (index, value) triplet to the
    sparse accumulators; otherwise it assigns the arm's expression to the
    assignment target (or the hard-coded 'ret' inside a local function).
    Returns the generated lines as a list in CodeNodeInfo content.
    """
    assign_node = node.get_ancestor(IRNodeType.Assignment)
    if (assign_node is None):
        func_node = node.get_ancestor(IRNodeType.LocalFunc)
        right_node = func_node.expr[0]
        left_node = func_node.name
    else:
        right_node = assign_node.right[0]
        left_node = assign_node.left[0]
    if right_node.is_node(IRNodeType.SparseMatrix):
        # flag consumed by sub-visitors while translating the condition/stat
        self.convert_matrix = True
        sparse_node = node.get_ancestor(IRNodeType.SparseMatrix)
        subs = assign_node.left[0].subs
        cond_info = self.visit(node.cond, **kwargs)
        stat_info = self.visit(node.stat, **kwargs)
        content = []
        stat_content = stat_info.content
        # rewrite subscript suffixes (e.g. A_ij) into MATLAB indexing A(i,j)
        stat_content = stat_content.replace('_{}{}'.format(subs[0], subs[1]), '({},{})'.format(subs[0], subs[1]))
        if node.loop:
            content += stat_info.pre_list
            content.append(cond_info.content)
            content.append(' {}(1:2,end+1) = [{};{}];\n'.format(sparse_node.la_type.index_var, subs[0], subs[1]))
            content.append(' {}(end+1) = {};\n'.format(sparse_node.la_type.value_var, stat_content))
            content.append('end\n')
        else:
            content.append('{} {}\n'.format(('if' if node.first_in_list else 'elseif'), cond_info.content))
            content.append(' {}(1:2,end+1) = [{};{}];\n'.format(sparse_node.la_type.index_var, subs[0], subs[1]))
            content.append(' {}(end+1) = {};\n'.format(sparse_node.la_type.value_var, stat_content))
        self.convert_matrix = False
    else:
        cond_info = self.visit(node.cond, **kwargs)
        stat_info = self.visit(node.stat, **kwargs)
        content = cond_info.pre_list
        stat_content = stat_info.content
        content.append('{} {}\n'.format(('if' if node.first_in_list else 'elseif'), cond_info.content))
        content += stat_info.pre_list
        if (assign_node is None):
            # inside a local function the result variable is fixed as 'ret'
            content.append(' ret = {};\n'.format(stat_content))
        else:
            content.append(' {} = {};\n'.format(self.visit(assign_node.left[0], **kwargs).content, stat_content))
    return CodeNodeInfo(content)
def visit_sparse_other(self, node, **kwargs):
    """'Otherwise' arm of a sparse matrix: contributes no code (zeros are implicit)."""
    empty = ''
    return CodeNodeInfo(empty)
def visit_vector(self, node, **kwargs):
    """Emit a MATLAB column-vector literal [a; b; ...] from the node's items."""
    cur_m_id = node.symbol  # kept for parity with sibling visitors; not in the output
    pieces = []
    collected_pre = []
    for element in node.items:
        info = self.visit(element, **kwargs)
        pieces.append(info.content)
        collected_pre += info.pre_list
    return CodeNodeInfo('[{}]'.format('; '.join(pieces)), pre_list=collected_pre)
def visit_to_matrix(self, node, **kwargs):
    """Reshape the visited item into an explicit rows-by-1 column matrix."""
    info = self.visit(node.item, **kwargs)
    rows = node.item.la_type.rows
    info.content = 'reshape({}, [{}, 1])'.format(info.content, rows)
    return info
def visit_matrix(self, node, **kwargs):
    """Emit construction code for a (possibly block) matrix literal.

    Block matrices are assembled as nested MATLAB bracket expressions; scalar
    shorthands inside blocks ('0', '1', 'I', or a scalar factor) are expanded
    to zeros/ones/speye/scaled-ones of the recorded block dimensions.
    Non-block matrices are filled row by row into a zeros(...) buffer.
    The generated lines go into pre_list; content is the matrix symbol.
    """
    content = ' '
    type_info = node
    cur_m_id = type_info.symbol
    kwargs['cur_id'] = cur_m_id
    ret_info = self.visit(node.value, **kwargs)
    # ret is a nested list of per-cell expression strings, mutated in place
    ret = ret_info.content
    if type_info.la_type.block:
        all_rows = []
        m_content = ''
        for i in range(len(ret)):
            if type_info.la_type.list_dim:
                for j in range(len(ret[i])):
                    if ((i, j) in type_info.la_type.list_dim):
                        dims = type_info.la_type.list_dim[(i, j)]
                        # 1x1 cells need no expansion
                        if ((dims[0] == 1) and (dims[1] == 1)):
                            continue
                        if (ret[i][j] == '0'):
                            func_name = 'zeros'
                        elif (ret[i][j] == '1'):
                            func_name = 'ones'
                        elif (('I' in ret[i][j]) and ('I' not in self.symtable)):
                            # bare 'I' (not a user symbol) means identity
                            assert (dims[0] == dims[1]), 'I must be square matrix'
                            ret[i][j] = ret[i][j].replace('I', 'speye({})'.format(dims[0]))
                            continue
                        else:
                            # scalar factor k expands to k * ones(...)
                            func_name = (ret[i][j] + ' * ones')
                        if (dims[1] == 1):
                            ret[i][j] = '{}({}, 1)'.format(func_name, dims[0])
                        else:
                            ret[i][j] = '{}({}, {})'.format(func_name, dims[0], dims[1])
            # vector-typed cells must be explicit columns inside the block
            for j in range(len(ret[i])):
                if (node.la_type.item_types and node.la_type.item_types[i][j].la_type.is_vector()):
                    ret[i][j] = 'reshape({}, [{}, 1])'.format(ret[i][j], node.la_type.item_types[i][j].la_type.rows)
            all_rows.append((('[' + ', '.join(ret[i])) + ']'))
        m_content += '[{}]'.format('; '.join(all_rows))
        content += '{} = {};\n'.format(cur_m_id, m_content)
    else:
        # plain matrix: preallocate and assign one row at a time
        content += '{} = zeros({}, {});\n'.format(cur_m_id, self.get_sym_type(cur_m_id).rows, self.get_sym_type(cur_m_id).cols)
        for i in range(len(ret)):
            content += ' {}({},:) = [{}];\n'.format(cur_m_id, (i + 1), ', '.join(ret[i]))
    pre_list = [content]
    if ret_info.pre_list:
        pre_list = (ret_info.pre_list + pre_list)
    return CodeNodeInfo(cur_m_id, pre_list)
def visit_num_matrix(self, node, **kwargs):
    """Emit a MATLAB builder call for a numeric matrix literal.

    Identity matrices map to speye, zero matrices to zeros, and one
    matrices to ones; one or two dimension operands select the square or
    rectangular form.

    :param node: IR node — node.id marks an identity matrix, node.left is
        the literal digit ('0' or '1'), node.id1/node.id2 are dimensions.
    :return: CodeNodeInfo with the generated call expression.
    """
    post_s = ''
    if node.id:
        # identity: sparse identity keeps large I cheap in MATLAB
        func_name = 'speye'
    elif (node.left == '0'):
        func_name = 'zeros'
    elif (node.left == '1'):
        # NOTE(review): the original tested (node.left == '1') twice,
        # OR-ed together; the second comparison was redundant (possibly a
        # typo for a unicode blackboard-bold one) — collapsed to one check.
        func_name = 'ones'
    id1_info = self.visit(node.id1, **kwargs)
    if node.id2:
        id2_info = self.visit(node.id2, **kwargs)
        content = '{}({}, {})'.format(func_name, id1_info.content, id2_info.content)
    else:
        # single dimension: MATLAB builders produce a square matrix
        content = '{}({})'.format(func_name, id1_info.content)
    node_info = CodeNodeInfo((content + post_s))
    return node_info
def visit_matrix_index(self, node, **kwargs):
    """Emit MATLAB indexing for a matrix access node.

    Handles full element access M(i, j), row slices (returned transposed as
    a column via M(i, :)'), and column slices M(:, j).
    """
    main_info = self.visit(node.main, **kwargs)
    if (node.row_index is not None):
        row_info = self.visit(node.row_index, **kwargs)
        # NOTE(review): both index_type branches produce the same string;
        # presumably a 0/1-based adjustment was planned here — confirm.
        if node.row_index.la_type.index_type:
            row_content = row_info.content
        else:
            row_content = '{}'.format(row_info.content)
        if (node.col_index is not None):
            col_info = self.visit(node.col_index, **kwargs)
            if node.col_index.la_type.index_type:
                col_content = col_info.content
            else:
                col_content = '{}'.format(col_info.content)
            # NOTE(review): sparse and dense emit identical indexing in
            # MATLAB, hence the duplicated branches below.
            if self.get_sym_type(main_info.content).sparse:
                content = '{}({}, {})'.format(main_info.content, row_content, col_content)
            else:
                content = '{}({}, {})'.format(main_info.content, row_content, col_content)
        else:
            # row slice, transposed so the result is a column vector
            content = "{}({}, :)'".format(main_info.content, row_content)
    else:
        col_info = self.visit(node.col_index, **kwargs)
        if node.col_index.la_type.index_type:
            content = '{}(:, {})'.format(main_info.content, col_info.content)
        else:
            content = '{}(:, {})'.format(main_info.content, col_info.content)
    return CodeNodeInfo(content)
def visit_vector_index(self, node, **kwargs):
    """Emit v(i) indexing into a vector.

    Both index kinds render identically in MATLAB (the original's two
    branches produced the same string), so a single format suffices.
    """
    vec_info = self.visit(node.main, **kwargs)
    idx_info = self.visit(node.row_index, **kwargs)
    content = '{}({})'.format(vec_info.content, idx_info.content)
    return CodeNodeInfo(content)
def visit_sequence_index(self, node, **kwargs):
    """Emit MATLAB indexing into a sequence element.

    Dynamic sequences are stored as cell arrays (indexed with {...}) while
    fixed-size ones are plain numeric arrays (indexed with (...)). Covers
    matrix slices of an element, full element access with row/col indices,
    and the bare element access whose form depends on the element type.
    """
    main_info = self.visit(node.main, **kwargs)
    main_index_content = self.visit(node.main_index, **kwargs).content
    if node.slice_matrix:
        # slicing a row or column out of the selected matrix element
        if (node.row_index is not None):
            row_content = self.visit(node.row_index, **kwargs).content
            if self.get_sym_type(main_info.content).is_dynamic():
                content = '{}{{{}}}({}, :)'.format(main_info.content, main_index_content, row_content)
            else:
                content = '{}({})({}, :)'.format(main_info.content, main_index_content, row_content)
        else:
            col_content = self.visit(node.col_index, **kwargs).content
            if self.get_sym_type(main_info.content).is_dynamic():
                content = '{}{{{}}}(:, {})'.format(main_info.content, main_index_content, col_content)
            else:
                content = '{}({})(:, {})'.format(main_info.content, main_index_content, col_content)
    elif (node.row_index is not None):
        row_content = self.visit(node.row_index, **kwargs).content
        if (node.col_index is not None):
            col_content = self.visit(node.col_index, **kwargs).content
            if self.get_sym_type(main_info.content).is_dynamic():
                content = '{}{{{}}}({},{})'.format(main_info.content, main_index_content, row_content, col_content)
            else:
                # fixed-size: sequence is a 3-D array, one subscript per axis
                content = '{}({},{},{})'.format(main_info.content, main_index_content, row_content, col_content)
        elif self.get_sym_type(main_info.content).is_dynamic():
            content = '{}{{{}}}({})'.format(main_info.content, main_index_content, row_content)
        else:
            content = '{}({},{})'.format(main_info.content, main_index_content, row_content)
    elif self.get_sym_type(main_info.content).is_dynamic():
        content = '{}{{{}}}'.format(main_info.content, main_index_content)
    elif node.la_type.is_vector():
        # vector element stored as a row of a 2-D array; transpose back
        content = "{}({},:)'".format(main_info.content, main_index_content)
    elif node.la_type.is_matrix():
        # matrix element stored as a page of a 3-D array
        content = 'squeeze({}({},:,:))'.format(main_info.content, main_index_content)
    elif node.la_type.is_scalar():
        content = '{}({})'.format(main_info.content, main_index_content)
    else:
        content = '{}{{{}}}'.format(main_info.content, main_index_content)
    return CodeNodeInfo(content)
def visit_seq_dim_index(self, node, **kwargs):
    """Emit size(seq(i), d) for a dimension taken from a sequence element."""
    idx_info = self.visit(node.main_index, **kwargs)
    dim = 1 if node.is_row_index() else 2
    content = 'size({}({}), {})'.format(node.real_symbol, idx_info.content, dim)
    return CodeNodeInfo(content)
def visit_add_sub(self, node, **kwargs):
    """Emit the plus-minus combination of both operands (rendered as '+-')."""
    out = self.visit(node.left, **kwargs)
    rhs = self.visit(node.right, **kwargs)
    out.content = '{} +- {}'.format(out.content, rhs.content)
    out.pre_list.extend(rhs.pre_list)
    return out
def visit_mul(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
l_info = node.left
r_info = node.right
mul = ' * '
if (l_info.la_type.is_matrix() or l_info.la_type.is_vector()):
if (r_info.la_type.is_matrix() or r_info.la_type.is_vector()):
mul = ' * '
left_info.content = ((left_info.content + mul) + right_info.content)
left_info.pre_list += right_info.pre_list
return left_info
def visit_div(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
left_info.content = ((left_info.content + ' / ') + right_info.content)
left_info.pre_list += right_info.pre_list
return left_info
def visit_cast(self, node, **kwargs):
value_info = self.visit(node.value, **kwargs)
if node.la_type.is_scalar():
value_info.content = '{}'.format(value_info.content)
return value_info
    def visit_assignment(self, node, **kwargs):
        """Emit MATLAB code for an assignment statement.

        Handles three main shapes:
        - optimize-parameter assignments (possibly multi-LHS, emitted as
          ``[a, b] = rhs;``),
        - subscripted LHS with one or two subscripts (expanded into MATLAB
          for-loops, with special cases for sparse and diagonal matrices),
        - plain assignments.

        A comment placeholder keyed on the statement's source line is
        registered in ``self.comment_dict`` so the raw input text can be
        re-inserted as a MATLAB ``%`` comment later.
        """
        type_info = node
        # Placeholder that will later be replaced by the original raw text,
        # formatted as a MATLAB comment.
        placeholder = '{}_{}\n'.format(self.comment_placeholder, node.parse_info.line)
        self.comment_dict[placeholder] = self.update_prelist_str([node.raw_text], ' % ')
        content = placeholder
        if node.optimize_param:
            # Optimization-parameter assignment: all LHS ids bind the result
            # of a single RHS expression.
            content = ''
            lhs_list = []
            for cur_index in range(len(node.left)):
                left_info = self.visit(node.left[cur_index], **kwargs)
                lhs_list.append(left_info.content)
            right_info = self.visit(node.right[0], **kwargs)
            if right_info.pre_list:
                content += ''.join(right_info.pre_list)
            lhs_content = ', '.join(lhs_list)
            if (len(lhs_list) > 1):
                # Multi-output form: [a, b] = rhs;
                lhs_content = '[{}]'.format(lhs_content)
            content += ' {} = {};\n'.format(lhs_content, right_info.content)
        else:
            for cur_index in range(len(node.left)):
                left_info = self.visit(node.left[cur_index], **kwargs)
                left_id = left_info.content
                # Expose the current LHS and assignment operator to nested
                # visits through kwargs.
                kwargs[LHS] = left_id
                kwargs[ASSIGN_TYPE] = node.op
                right_info = self.visit(node.right[cur_index], **kwargs)
                right_exp = ''
                if node.left[cur_index].contain_subscript():
                    left_ids = node.left[cur_index].get_all_ids()
                    left_subs = left_ids[1]
                    if (len(left_subs) == 2):
                        # Two subscripts: matrix element assignment.
                        sequence = left_ids[0]
                        sub_strs = (left_subs[0] + left_subs[1])
                        if (self.get_sym_type(sequence).is_matrix() and self.get_sym_type(sequence).sparse):
                            if (left_subs[0] == left_subs[1]):
                                # Diagonal fill of a sparse matrix: collect
                                # (index, value) pairs, then build with sparse().
                                content = ''
                                if self.get_sym_type(sequence).diagonal:
                                    content += ' {} = zeros(2,0);\n'.format(self.get_sym_type(sequence).index_var)
                                    content += ' {} = zeros(1,0);\n'.format(self.get_sym_type(sequence).value_var)
                                content += ' for {} = 1:{}\n'.format(left_subs[0], self.get_sym_type(sequence).rows)
                                if right_info.pre_list:
                                    content += self.update_prelist_str(right_info.pre_list, ' ')
                                content += ' {}(1:2,end+1) = [{};{}];\n'.format(self.get_sym_type(sequence).index_var, left_subs[0], left_subs[0])
                                content += ' {}(end+1) = {};\n'.format(self.get_sym_type(sequence).value_var, right_info.content)
                                content += ' end\n'
                                content += ' {} = sparse({}(1,:),{}(2,:),{},{},{});\n'.format(sequence, self.get_sym_type(sequence).index_var, self.get_sym_type(sequence).index_var, self.get_sym_type(sequence).value_var, self.get_sym_type(sequence).rows, self.get_sym_type(sequence).cols)
                            else:
                                # Sparse matrix assigned as a whole expression.
                                if right_info.pre_list:
                                    content += ''.join(right_info.pre_list)
                                right_exp += ((((' ' + sequence) + ' = ') + right_info.content) + ';\n')
                                content += right_exp
                        elif (left_subs[0] == left_subs[1]):
                            # Dense diagonal assignment: single loop over rows.
                            content = ''
                            content += ' for {} = 1:{}\n'.format(left_subs[0], self.get_sym_type(sequence).rows)
                            if right_info.pre_list:
                                content += self.update_prelist_str(right_info.pre_list, ' ')
                            content += ' {}({}, {}) = {};\n'.format(sequence, left_subs[0], left_subs[0], right_info.content)
                            content += ' end\n'
                        else:
                            # General element-wise assignment: rewrite matching
                            # subscripted symbols on the RHS, then emit a
                            # double loop over rows and columns.
                            for right_var in type_info.symbols:
                                if (sub_strs in right_var):
                                    var_ids = self.get_all_ids(right_var)
                                    right_info.content = right_info.content.replace(right_var, '{}({}, {})'.format(var_ids[0], var_ids[1][0], var_ids[1][1]))
                            right_exp += ' {}({}, {}) = {}'.format(self.get_main_id(left_id), left_subs[0], left_subs[1], right_info.content)
                            if self.get_sym_type(sequence).is_matrix():
                                if (node.op == '='):
                                    # Plain '=' requires zero-initialization first.
                                    content += ' {} = zeros({}, {});\n'.format(sequence, self.get_sym_type(sequence).rows, self.get_sym_type(sequence).cols)
                            content += ' for {} = 1:{}\n'.format(left_subs[0], self.get_sym_type(sequence).rows)
                            content += ' for {} = 1:{}\n'.format(left_subs[1], self.get_sym_type(sequence).cols)
                            if right_info.pre_list:
                                content += self.update_prelist_str(right_info.pre_list, ' ')
                            content += ((' ' + right_exp) + ';\n')
                            content += ' end\n'
                            content += ' end\n'
                    elif (len(left_subs) == 1):
                        # One subscript: sequence/vector element assignment,
                        # expanded into a single MATLAB for-loop.
                        sequence = left_ids[0]
                        for right_var in type_info.symbols:
                            if self.contain_subscript(right_var):
                                var_ids = self.get_all_ids(right_var)
                                right_info.content = right_info.content.replace(right_var, '{}[{}]'.format(var_ids[0], var_ids[1][0]))
                        left_content = left_info.content
                        right_content = right_info.content
                        if (left_content[(- 1)] == "'"):
                            # LHS carries a transpose: move it onto the RHS.
                            left_content = left_content[:(- 1)]
                            right_content = "({})'".format(right_content)
                        right_exp += ' {} = {}'.format(left_content, right_content)
                        ele_type = self.get_sym_type(sequence).element_type
                        if self.get_sym_type(sequence).is_sequence():
                            # Pre-allocate storage shaped after the element type.
                            if ele_type.is_matrix():
                                content += ' {} = zeros({}, {}, {});\n'.format(sequence, self.get_sym_type(sequence).size, ele_type.rows, ele_type.cols)
                            elif ele_type.is_vector():
                                content += ' {} = zeros({}, {});\n'.format(sequence, self.get_sym_type(sequence).size, ele_type.rows)
                            else:
                                content += ' {} = zeros({},1);\n'.format(sequence, self.get_sym_type(sequence).size)
                            content += ' for {} = 1:{}\n'.format(left_subs[0], self.get_sym_type(sequence).size)
                        else:
                            content += ' {} = zeros({},1);\n'.format(sequence, self.get_sym_type(sequence).rows)
                            content += ' for {} = 1:{}\n'.format(left_subs[0], self.get_sym_type(sequence).rows)
                        if right_info.pre_list:
                            content += self.update_prelist_str(right_info.pre_list, ' ')
                        content += ((' ' + right_exp) + ';\n')
                        content += ' end\n'
                else:
                    # Plain, non-subscripted assignment.
                    if right_info.pre_list:
                        content += ''.join(right_info.pre_list)
                    op = ' = '
                    if (node.op == '+='):
                        op = ' += '
                    if (not node.right[cur_index].is_node(IRNodeType.MultiConds)):
                        right_exp += (((' ' + self.get_main_id(left_id)) + op) + right_info.content)
                    content += (right_exp + ';\n')
            la_remove_key(LHS, **kwargs)
        return CodeNodeInfo(content)
def visit_if(self, node, **kwargs):
ret_info = self.visit(node.cond)
return ret_info
def visit_condition(self, node, **kwargs):
if (len(node.cond_list) > 1):
pre_list = []
content_list = []
for condition in node.cond_list:
info = self.visit(condition)
pre_list += info.pre_list
content_list.append((('(' + info.content) + ')'))
if (node.cond_type == ConditionType.ConditionAnd):
content = ' && '.join(content_list)
else:
content = ' || '.join(content_list)
return CodeNodeInfo(content=content, pre_list=pre_list)
return self.visit(node.cond_list[0])
def visit_in(self, node, **kwargs):
item_list = []
pre_list = []
right_info = self.visit(node.set, **kwargs)
if node.set.la_type.index_type:
for item in node.items:
item_info = self.visit(item, **kwargs)
item_content = item_info.content
if (not item.la_type.index_type):
item_content = '{}'.format(item_info.content)
item_list.append(item_content)
else:
for item in node.items:
item_info = self.visit(item, **kwargs)
if (not item.la_type.index_type):
item_content = '{}'.format(item_info.content)
else:
item_content = '{}+1'.format(item_info.content)
item_list.append(item_content)
if node.loop:
index_name = self.generate_var_name('index')
content = 'for {} = 1:size({}, 1)\n'.format(index_name, right_info.content)
content += ' {} = {}({}, 1);\n'.format(item_list[0], right_info.content, index_name)
content += ' {} = {}({}, 2);\n'.format(item_list[1], right_info.content, index_name)
else:
content = (((('ismember([' + ', '.join(item_list)) + '],') + right_info.content) + ",'rows')")
return CodeNodeInfo(content=content, pre_list=pre_list)
def visit_not_in(self, node, **kwargs):
item_list = []
pre_list = []
right_info = self.visit(node.set, **kwargs)
if node.set.la_type.index_type:
for item in node.items:
item_info = self.visit(item, **kwargs)
item_content = item_info.content
if (not item.la_type.index_type):
item_content = '{}'.format(item_info.content)
item_list.append(item_content)
else:
for item in node.items:
item_info = self.visit(item, **kwargs)
if (not item.la_type.index_type):
item_content = '{}'.format(item_info.content)
else:
item_content = '{}+1'.format(item_info.content)
item_list.append(item_content)
content = (((('~ismember([' + ', '.join(item_list)) + '],') + right_info.content) + ",'rows')")
return CodeNodeInfo(content=content, pre_list=pre_list)
def get_bin_comp_str(self, comp_type):
op = ''
if (comp_type == IRNodeType.Eq):
op = '=='
elif (comp_type == IRNodeType.Ne):
op = '~='
elif (comp_type == IRNodeType.Lt):
op = '<'
elif (comp_type == IRNodeType.Le):
op = '<='
elif (comp_type == IRNodeType.Gt):
op = '>'
elif (comp_type == IRNodeType.Ge):
op = '>='
return op
def visit_bin_comp(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
left_content = left_info.content
right_content = right_info.content
if (node.left.la_type.index_type and (not node.right.la_type.index_type)):
left_content = '{}+1'.format(left_info.content)
if ((not node.left.la_type.index_type) and node.right.la_type.index_type):
right_content = '{}+1'.format(right_info.content)
left_info.content = ((left_content + ' {} '.format(self.get_bin_comp_str(node.comp_type))) + right_content)
left_info.pre_list += right_info.pre_list
return left_info
    def visit_derivative(self, node, **kwargs):
        """Derivative nodes are not emitted for MATLAB; return empty code."""
        return CodeNodeInfo('')
    def visit_optimize(self, node, **kwargs):
        """Emit MATLAB code for a min/max/argmin/argmax optimization node.

        Two code paths exist:
        - with constraints: builds an Optimization Toolbox problem
          (``optimvar``/``optimproblem``/``solve``),
        - without constraints: emits an ``fminunc`` call on a generated
          target function.

        Max/argmax are handled by negating the objective (and, for max,
        the result).
        """
        self.opt_key = node.key
        id_list = []
        pack_list = []
        unpack_list = []
        init_str_list = []
        base_str_list = []
        # Declare one optimvar per optimization variable; matrix variables
        # additionally register a reshape naming convention so later code
        # refers to the matrix view of the flat vector.
        for cur_index in range(len(node.base_list)):
            cur_la_type = node.base_type_list[cur_index].la_type
            id_info = self.visit(node.base_list[cur_index], **kwargs)
            id_list.append(id_info.content)
            if cur_la_type.is_scalar():
                init_value = 0
                base_str = " {} = optimvar('{}');\n".format(id_info.content, id_info.content)
            elif cur_la_type.is_vector():
                init_value = 'zeros({},1)'.format(cur_la_type.rows)
                base_str = " {} = optimvar('{}', {});\n".format(id_info.content, id_info.content, cur_la_type.rows)
            elif cur_la_type.is_matrix():
                init_value = 'zeros({}*{},1)'.format(cur_la_type.rows, cur_la_type.cols)
                base_str = " {} = optimvar('{}', {}, {});\n".format(id_info.content, id_info.content, cur_la_type.rows, cur_la_type.cols)
                name_convention = {id_info.content: 'reshape({}, [{}, {}])'.format(id_info.content, cur_la_type.rows, cur_la_type.cols)}
                self.add_name_conventions(name_convention)
            base_str_list.append(base_str)
        exp_info = self.visit(node.exp, **kwargs)
        category = ''
        if (node.opt_type == OptimizeType.OptimizeMin):
            category = 'min'
        elif (node.opt_type == OptimizeType.OptimizeMax):
            category = 'max'
        elif (node.opt_type == OptimizeType.OptimizeArgmin):
            category = 'argmin'
        elif (node.opt_type == OptimizeType.OptimizeArgmax):
            category = 'argmax'
        opt_param = self.generate_var_name('x')
        opt_ret = self.generate_var_name('ret')
        pre_list = []
        constraint_list = []
        prob_name = self.generate_var_name('prob')
        pre_list += base_str_list
        constraint_list.append('{} = optimproblem;'.format(prob_name))
        # Translate each constraint; comparisons become toolbox constraints,
        # set-membership becomes a generated equality function.
        for cond_index in range(len(node.cond_list)):
            cond_node = node.cond_list[cond_index]
            if (cond_node.cond.node_type == IRNodeType.BinComp):
                if ((cond_node.cond.comp_type == IRNodeType.Gt) or (cond_node.cond.comp_type == IRNodeType.Ge)):
                    constraint_list.append('{}.Constraints.cons{} = {} >= {};'.format(prob_name, (cond_index + 1), self.visit(cond_node.cond.left, **kwargs).content, self.visit(cond_node.cond.right, **kwargs).content))
                elif ((cond_node.cond.comp_type == IRNodeType.Lt) or (cond_node.cond.comp_type == IRNodeType.Le)):
                    constraint_list.append('{}.Constraints.cons{} = {} <= {};'.format(prob_name, (cond_index + 1), self.visit(cond_node.cond.left, **kwargs).content, self.visit(cond_node.cond.right, **kwargs).content))
            elif (cond_node.cond.node_type == IRNodeType.In):
                # NOTE(review): this generated function uses Python-flavored
                # syntax ('*=', '[i]' indexing, trailing ':') rather than
                # MATLAB — looks like an unported fragment; confirm.
                v_set = self.visit(cond_node.cond.set, **kwargs).content
                opt_func = self.generate_var_name(category)
                pre_list.append(' function ret = {}({})\n'.format(opt_func, opt_param))
                pre_list.append(' {} = 1\n'.format(opt_ret))
                pre_list.append(' for i = 1:numel({}):\n'.format(v_set))
                pre_list.append(' {} *= ({}[0] - {}[i])\n'.format(opt_ret, opt_param, v_set))
                pre_list.append(' ret = {}\n'.format(opt_ret))
                constraint_list.append("{{'type': 'eq', 'fun': {}}}".format(opt_func))
        constraints_param = ''
        if (len(node.cond_list) > 0):
            cons = self.generate_var_name('cons')
            pre_list += [' {}\n'.format(cons) for cons in constraint_list]
        target_func = self.generate_var_name('target')
        exp = exp_info.content
        if ((node.opt_type == OptimizeType.OptimizeMax) or (node.opt_type == OptimizeType.OptimizeArgmax)):
            # Maximize by minimizing the negated objective.
            exp = '-({})'.format(exp)
        if (len(exp_info.pre_list) > 0):
            # The objective needs setup code: wrap it in a named function.
            pre_list.append(' function ret = {}({})\n'.format(target_func, id_info.content))
            for pre in exp_info.pre_list:
                lines = pre.split('\n')
                for line in lines:
                    if (line != ''):
                        pre_list.append(' {}\n'.format(line))
            pre_list.append(' ret = {};\n'.format(exp))
            pre_list.append(' end\n')
            target_func = '{}'.format(target_func)
        elif (len(node.cond_list) == 0):
            # Unconstrained and simple: emit an anonymous-style target.
            pre_list.append(' {} = ({}) {};\n'.format(target_func, id_info.content, exp))
        opt_name = self.generate_var_name('optimize')
        if (len(node.cond_list) > 0):
            # Constrained path: solve the optimproblem.
            pre_list.append(' {}.Objective = {};\n'.format(prob_name, exp))
            opt_exp = 'solve({})'.format(prob_name)
            if ((node.opt_type == OptimizeType.OptimizeArgmin) or (node.opt_type == OptimizeType.OptimizeArgmax)):
                content = '{}.{}'.format(opt_name, id_info.content)
            else:
                content = opt_name
                if (node.opt_type == OptimizeType.OptimizeMax):
                    content = ('-' + content)
        else:
            # Unconstrained path: fminunc on the generated target.
            opt_exp = 'fminunc({},{})'.format(target_func, init_value)
            if ((node.opt_type == OptimizeType.OptimizeMax) or (node.opt_type == OptimizeType.OptimizeArgmax)):
                opt_exp = ('-' + opt_exp)
            content = opt_name
            if ((node.opt_type == OptimizeType.OptimizeArgmin) or (node.opt_type == OptimizeType.OptimizeArgmax)):
                # argmin/argmax keep the minimizer, discard the value.
                pre_list.append(' [{}, ~] = {};\n'.format(opt_name, opt_exp))
            else:
                pre_list.append(' [~, {}] = {};\n'.format(opt_name, opt_exp))
        if cur_la_type.is_matrix():
            # Undo the reshape naming convention registered above.
            self.del_name_conventions(name_convention)
        return CodeNodeInfo(content, pre_list=pre_list)
    def visit_domain(self, node, **kwargs):
        """Domain nodes emit no standalone code (handled by their parent)."""
        return CodeNodeInfo('')
def visit_integral(self, node, **kwargs):
pre_list = []
lower_info = self.visit(node.domain.lower, **kwargs)
pre_list += lower_info.pre_list
upper_info = self.visit(node.domain.upper, **kwargs)
pre_list += upper_info.pre_list
exp_info = self.visit(node.exp, **kwargs)
pre_list += exp_info.pre_list
base_info = self.visit(node.base, **kwargs)
pre_list += exp_info.pre_list
func_content = '({}) {}'.format(base_info.content, exp_info.content)
content = "integral({}, {}, {},'ArrayValued',true)".format(func_content, lower_info.content, upper_info.content)
return CodeNodeInfo(content, pre_list=pre_list)
def visit_inner_product(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
if node.sub:
sub_info = self.visit(node.sub, **kwargs)
content = "({})' * ({}) * ({})".format(right_info.content, sub_info.content, left_info.content)
else:
content = "({})' * ({})".format(right_info.content, left_info.content)
return CodeNodeInfo(content, pre_list=(left_info.pre_list + right_info.pre_list))
def visit_fro_product(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
return CodeNodeInfo('sum({}(:).*{}(:))'.format(left_info.content, right_info.content), pre_list=(left_info.pre_list + right_info.pre_list))
def visit_hadamard_product(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
return CodeNodeInfo('{}.*{}'.format(left_info.content, right_info.content), pre_list=(left_info.pre_list + right_info.pre_list))
def visit_cross_product(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
return CodeNodeInfo('cross({}, {})'.format(left_info.content, right_info.content), pre_list=(left_info.pre_list + right_info.pre_list))
def visit_kronecker_product(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
return CodeNodeInfo('kron({}, {})'.format(left_info.content, right_info.content), pre_list=(left_info.pre_list + right_info.pre_list))
def visit_dot_product(self, node, **kwargs):
left_info = self.visit(node.left, **kwargs)
right_info = self.visit(node.right, **kwargs)
return CodeNodeInfo('dot({},{})'.format(left_info.content, right_info.content), pre_list=(left_info.pre_list + right_info.pre_list))
def visit_math_func(self, node, **kwargs):
content = ''
param_info = self.visit(node.param, **kwargs)
params_content = param_info.content
pre_list = param_info.pre_list
if (node.func_type == MathFuncType.MathFuncSin):
content = 'sin'
elif (node.func_type == MathFuncType.MathFuncAsin):
content = 'asin'
elif (node.func_type == MathFuncType.MathFuncCos):
content = 'cos'
elif (node.func_type == MathFuncType.MathFuncAcos):
content = 'acos'
elif (node.func_type == MathFuncType.MathFuncTan):
content = 'tan'
elif (node.func_type == MathFuncType.MathFuncAtan):
content = 'atan'
elif (node.func_type == MathFuncType.MathFuncSinh):
content = 'sinh'
elif (node.func_type == MathFuncType.MathFuncAsinh):
content = 'asinh'
elif (node.func_type == MathFuncType.MathFuncCosh):
content = 'cosh'
elif (node.func_type == MathFuncType.MathFuncAcosh):
content = 'acosh'
elif (node.func_type == MathFuncType.MathFuncTanh):
content = 'tanh'
elif (node.func_type == MathFuncType.MathFuncAtanh):
content = 'atanh'
elif (node.func_type == MathFuncType.MathFuncCot):
content = '1./tan'
elif (node.func_type == MathFuncType.MathFuncSec):
content = '1./cos'
elif (node.func_type == MathFuncType.MathFuncCsc):
content = '1./sin'
elif (node.func_type == MathFuncType.MathFuncAtan2):
content = 'atan2'
remain_info = self.visit(node.remain_params[0], **kwargs)
params_content += (', ' + remain_info.content)
pre_list += remain_info.pre_list
elif (node.func_type == MathFuncType.MathFuncExp):
content = 'exp'
elif (node.func_type == MathFuncType.MathFuncLog):
content = 'log'
elif (node.func_type == MathFuncType.MathFuncLog2):
content = 'log2'
elif (node.func_type == MathFuncType.MathFuncLog10):
content = 'log10'
elif (node.func_type == MathFuncType.MathFuncLn):
content = 'log'
elif (node.func_type == MathFuncType.MathFuncSqrt):
content = 'sqrt'
elif (node.func_type == MathFuncType.MathFuncTrace):
content = 'trace'
elif (node.func_type == MathFuncType.MathFuncDiag):
content = 'diag'
elif (node.func_type == MathFuncType.MathFuncVec):
return CodeNodeInfo('reshape({},[],1)'.format(params_content))
elif (node.func_type == MathFuncType.MathFuncDet):
content = 'det'
elif (node.func_type == MathFuncType.MathFuncRank):
content = 'rank'
elif (node.func_type == MathFuncType.MathFuncNull):
content = 'null'
elif (node.func_type == MathFuncType.MathFuncOrth):
content = 'orth'
elif (node.func_type == MathFuncType.MathFuncInv):
content = 'inv'
return CodeNodeInfo('{}({})'.format(content, params_content), pre_list=pre_list)
def visit_constant(self, node, **kwargs):
content = ''
if (node.c_type == ConstantType.ConstantPi):
content = 'pi'
elif (node.c_type == ConstantType.ConstantE):
content = 'exp(1)'
return CodeNodeInfo(content) |
def main():
    """Preprocess the train/valid/test splits for the configured KG task and
    dump all entity descriptions next to the training file."""
    task = args.task.lower()
    all_examples = []
    for path in [args.train_path, args.valid_path, args.test_path]:
        assert os.path.exists(path)
        print('Process {}...'.format(path))
        if task == 'wn18rr':
            all_examples += preprocess_wn18rr(path)
        elif task == 'fb15k237':
            all_examples += preprocess_fb15k237(path)
        elif task in ['wiki5m_trans', 'wiki5m_ind']:
            # wiki5m only builds text mappings from the training split.
            all_examples += preprocess_wiki5m(path, is_train=(path == args.train_path))
        else:
            assert False, 'Unknown task: {}'.format(args.task)
    # Pick the id -> description mapping matching the task.
    if task == 'wn18rr':
        id2text = {k: v[2] for k, v in wn18rr_id2ent.items()}
    elif task == 'fb15k237':
        id2text = {k: v[2] for k, v in fb15k_id2ent.items()}
    elif task in ['wiki5m_trans', 'wiki5m_ind']:
        id2text = wiki5m_id2text
    else:
        assert False, 'Unknown task: {}'.format(args.task)
    dump_all_entities(all_examples, out_path='{}/entities.json'.format(os.path.dirname(args.train_path)), id2text=id2text)
    print('Done')
def convert_id_to_task_name(task_id: int):
    """Resolve an integer task id (e.g. 5) to its full nnU-Net task name
    (e.g. 'Task005_...').

    Scans the preprocessed, raw, and cropped data folders plus the trained
    model folders for directories starting with 'TaskXXX'.

    Raises:
        RuntimeError: if the id maps to zero or to more than one task name.
    """
    startswith = ('Task%03.0d' % task_id)
    if (preprocessing_output_dir is not None):
        candidates_preprocessed = subdirs(preprocessing_output_dir, prefix=startswith, join=False)
    else:
        candidates_preprocessed = []
    if (nnUNet_raw_data is not None):
        candidates_raw = subdirs(nnUNet_raw_data, prefix=startswith, join=False)
    else:
        candidates_raw = []
    if (nnUNet_cropped_data is not None):
        candidates_cropped = subdirs(nnUNet_cropped_data, prefix=startswith, join=False)
    else:
        candidates_cropped = []
    candidates_trained_models = []
    if (network_training_output_dir is not None):
        for m in ['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres']:
            if isdir(join(network_training_output_dir, m)):
                candidates_trained_models += subdirs(join(network_training_output_dir, m), prefix=startswith, join=False)
    all_candidates = (((candidates_cropped + candidates_preprocessed) + candidates_raw) + candidates_trained_models)
    # A task id must map to exactly one folder name across all locations.
    unique_candidates = np.unique(all_candidates)
    if (len(unique_candidates) > 1):
        # Message fix: closing parenthesis was missing.
        raise RuntimeError(('More than one task name found for task id %d. Please correct that. (I looked in the following folders:\n%s\n%s\n%s)' % (task_id, nnUNet_raw_data, preprocessing_output_dir, nnUNet_cropped_data)))
    if (len(unique_candidates) == 0):
        # Message fix: 'environemnt' -> 'environment'; env-var fallbacks
        # simplified to os.environ.get(key, 'None').
        raise RuntimeError(('Could not find a task with the ID %d. Make sure the requested task ID exists and that nnU-Net knows where raw and preprocessed data are located (see Documentation - Installation). Here are your currently defined folders:\nnnUNet_preprocessed=%s\nRESULTS_FOLDER=%s\nnnUNet_raw_data_base=%s\nIf something is not right, adapt your environment variables.' % (task_id, os.environ.get('nnUNet_preprocessed', 'None'), os.environ.get('RESULTS_FOLDER', 'None'), os.environ.get('nnUNet_raw_data_base', 'None'))))
    return unique_candidates[0]
class DependencyInjection():
    """Small constructor-injection container.

    Known class names map to class references; accessing the matching
    attribute lazily builds the instance (and, recursively, its
    constructor dependencies) on first use. Per-class configuration comes
    from `defaultConfig` overlaid with the `config` dict passed to
    `__init__`.
    """
    alignSentencesIntoTextCalculator: AlignSentencesIntoTextCalculator
    textNormalizer: TextNormalizer
    audioStatisticComponent: AudioStatisticComponent
    textStatisticComponent: TextStatisticComponent
    phoneticSentenceToSymbolSentenceConverter: PhoneticSentenceToSymbolSentenceConverter
    sentenceToPhoneticSentenceConverter: SentenceToPhoneticSentenceConverter
    transcriptsToSentencesConverter: TranscriptsToSentencesConverter
    listToStatisticConverter: ListToStatisticConverter
    listToHistogramConverter: ListToHistogramConverter
    stringToSentencesConverter: StringToSentencesConverter
    audioToSentenceConverter: AudioToSentenceConverter
    audioFilter: AudioFilter
    audioPersistenz: AudioPersistenz
    audioTranscriptPairPersistenz: AudioTranscriptPairPersistenz
    transcriptsPersistenz: TranscriptsPersistenz
    audiosFromLibrivoxPersistenz: AudiosFromLibrivoxPersistenz
    GutenbergBookPersistenz: GutenbergBookPersistenz
    audioAddSilenceTransformer: AudioAddSilenceTransformer
    audioSamplingRateTransformer: AudioSamplingRateTransformer
    transcriptsSelectionTransformer: TranscriptsSelectionTransformer
    audioSplitTransformer: AudioSplitTransformer
    sentenceDistanceTransformer: SentenceDistanceTransformer
    audioLoudnessTransformer: AudioLoudnessTransformer
    audioFadeTransformer: AudioFadeTransformer
    pathUtil: PathUtil
    fileListUtil: FileListUtil
    step0_Overview: Step0_Overview
    step1_DownloadAudio: Step1_DownloadAudio
    step2_SplitAudio: Step2_SplitAudio
    step2_1_AudioStatistic: Step2_1_AudioStatistic
    step3_DowloadText: Step3_DownloadText
    step3_1_PrepareText: Step3_1_PrepareText
    step4_TranscriptAudio: Step4_TranscriptAudio
    step5_AlignText: Step5_AlignText
    step6_FinalizeDataset: Step6_FinalizeDataset
    step7_AudioRawStatistic: Step7_AudioRawStatistic
    step8_DatasetStatistic: Step8_DatasetStatistic
    step9_GenerateCleanDataset: Step9_GenerateCleanDataset
    plot: Plot
    def __init__(self, config={}):
        """Register a lazy property on the class for every known class name.

        NOTE(review): properties are set on the DependencyInjection class
        itself, so creating a second container rebinds the first one's
        closures — presumably single-instance usage; confirm.
        """
        configWithDefault = defaultConfig.copy()
        configWithDefault.update(config)
        self.allClassReferences = self.getAllClassReferences(configWithDefault)
        initialedClasses = {}
        for (name, classInstance) in self.allClassReferences.items():
            # getLambda freezes name/classInstance per iteration, avoiding
            # the late-binding-closure pitfall.
            def getLambda(name, classInstance):
                return property((lambda _: self.initClass(name, classInstance, self.classConstructor, initialedClasses, configWithDefault, name)))
            setattr(DependencyInjection, name, getLambda(name, classInstance))
    def initClass(self, className, classReference, classConstructorMethod, initialedClasses, config, requestedClass=''):
        """Build (or fetch from cache) the instance for className,
        recursively building its constructor dependencies first."""
        if (className in initialedClasses):
            return initialedClasses[className]
        arguments = self.getConstructorReferenceClasses(classReference)
        for argument in arguments:
            # NOTE(review): the membership test compares a name string
            # against instance values, so it is effectively always true;
            # the cache check at the top of initClass prevents rebuilds.
            if ((argument not in initialedClasses.values()) and (arguments[argument] is not None)):
                self.initClass(argument, arguments[argument], classConstructorMethod, initialedClasses, config, requestedClass)
        classConfig = (config[className].copy() if (className in config) else {})
        if ('#' in classConfig):
            # '#' carries the class-reference override, not a constructor arg.
            classConfig.pop('#')
        # (Removed a dead, no-op bare `classConfig` expression statement here.)
        try:
            newClassInstance = classConstructorMethod(classReference, initialedClasses, classConfig)
        except Exception as e:
            raise DependencyInjectionError(e, classConfig, classReference.__name__, requestedClass)
        initialedClasses[className] = newClassInstance
        return newClassInstance
    def classConstructor(self, classReference, initialedClasses, classConfig):
        """Instantiate classReference with config values plus already-built
        dependency instances as keyword arguments."""
        classConstructor = classConfig.copy()
        references = self.getConstructorReferenceClasses(classReference)
        for ref in references:
            if (references[ref] is not None):
                classConstructor[ref] = initialedClasses[ref]
        classInstance = classReference(**classConstructor)
        return classInstance
    def getConstructorReferenceClasses(self, classReference):
        """Map each constructor argument name to a known class reference,
        or None for plain (non-injected) arguments."""
        arguments = self.getAllConstructorArguments(classReference)
        references = {}
        for argument in arguments:
            if (argument in ['self', 'args', 'kwargs']):
                continue
            references[argument] = (self.allClassReferences[argument] if (argument in self.allClassReferences.keys()) else None)
        return references
    def getAllConstructorArguments(self, classInstance):
        """Return the parameter names of the class's __init__."""
        return list(inspect.signature(classInstance.__init__).parameters.keys())
    def getAllClassReferences(self, configWithDefault):
        """Return the global class registry, with per-class '#' overrides
        from the configuration applied."""
        classes = globalClassesAtImportTime.copy()
        for className in configWithDefault:
            if ('#' in configWithDefault[className]):
                classes[className] = configWithDefault[className]['#']
        return classes
class PairAggregator(SequenceAttributionAggregator):
    """Aggregator that contrasts two attribution outputs of equal shape.

    Each aggregate_* hook combines a field of `attr` with the matching
    field of `paired_attr`; the default combination is the elementwise
    difference (y - x).

    NOTE(review): the hook methods take `cls`/no `self` — they look like
    classmethods/staticmethods whose decorators are not visible here;
    confirm against the upstream definition before calling directly.
    """
    aggregator_name = 'pair'
    aggregator_family = 'pair'
    # Default combination: paired minus original.
    default_fn = (lambda x, y: (y - x))
    def pre_aggregate_hook(cls, attr: 'FeatureAttributionSequenceOutput', paired_attr: 'FeatureAttributionSequenceOutput', **kwargs):
        """Run the parent hook, then check the two outputs are compatible."""
        super().pre_aggregate_hook(attr, **kwargs)
        cls.validate_pair(attr, paired_attr)
    def validate_pair(cls, attr, paired_attr):
        """Assert that both outputs have matching lengths, shapes and score keys."""
        assert (len(attr.source) == len(paired_attr.source)), 'Source sequences must be the same length.'
        assert (len(attr.target) == len(paired_attr.target)), 'Target sequences must be the same length.'
        if (attr.source_attributions is not None):
            assert (attr.source_attributions.shape == paired_attr.source_attributions.shape), 'Source attributions must be the same shape.'
        if (attr.target_attributions is not None):
            assert (attr.target_attributions.shape == paired_attr.target_attributions.shape), 'Target attributions must be the same shape.'
        if (attr.step_scores is not None):
            assert (paired_attr.step_scores is not None), 'Paired attribution must have step scores.'
            for (key, value) in attr.step_scores.items():
                assert (key in paired_attr.step_scores), f'Step score {key} must be in paired attribution.'
                assert (value.shape == paired_attr.step_scores[key].shape), f'Step score {key} must be the same shape.'
        if (attr.sequence_scores is not None):
            assert (paired_attr.sequence_scores is not None), 'Paired attribution must have sequence scores.'
            for (key, value) in attr.sequence_scores.items():
                assert (key in paired_attr.sequence_scores), f'Sequence score {key} must be in paired attribution.'
                assert (value.shape == paired_attr.sequence_scores[key].shape), f'Sequence score {key} must be the same shape.'
    def aggregate_source(attr, paired_attr, **kwargs):
        """Merge the two source token sequences into pair tokens."""
        return aggregate_token_pair(attr.source, paired_attr.source)
    def aggregate_target(attr, paired_attr, **kwargs):
        """Merge the two target token sequences into pair tokens."""
        return aggregate_token_pair(attr.target, paired_attr.target)
    def aggregate_source_attributions(attr, paired_attr, aggregate_fn, **kwargs):
        """Combine source attribution tensors with aggregate_fn (None passthrough)."""
        if (attr.source_attributions is None):
            return attr.source_attributions
        return aggregate_fn(attr.source_attributions, paired_attr.source_attributions)
    def aggregate_target_attributions(attr, paired_attr, aggregate_fn, **kwargs):
        """Combine target attribution tensors with aggregate_fn (None passthrough)."""
        if (attr.target_attributions is None):
            return attr.target_attributions
        return aggregate_fn(attr.target_attributions, paired_attr.target_attributions)
    def aggregate_step_scores(attr, paired_attr, aggregate_fn, **kwargs):
        """Combine step scores key-by-key; aggregate_fn may be a per-key dict."""
        if (not attr.step_scores):
            return attr.step_scores
        out_dict = {}
        for (name, step_scores) in attr.step_scores.items():
            agg_fn = (aggregate_fn[name] if isinstance(aggregate_fn, dict) else aggregate_fn)
            out_dict[name] = agg_fn(step_scores, paired_attr.step_scores[name])
        return out_dict
    def aggregate_sequence_scores(attr, paired_attr, aggregate_fn, **kwargs):
        """Combine sequence scores key-by-key; aggregate_fn may be a per-key dict."""
        if (not attr.sequence_scores):
            return attr.sequence_scores
        out_dict = {}
        for (name, sequence_scores) in attr.sequence_scores.items():
            agg_fn = (aggregate_fn[name] if isinstance(aggregate_fn, dict) else aggregate_fn)
            out_dict[name] = agg_fn(sequence_scores, paired_attr.sequence_scores[name])
        return out_dict
class MaskedBertConfig(PretrainedConfig):
    """Configuration for a Masked BERT model.

    Mirrors the standard BERT hyper-parameters and adds pruning controls
    (pruning_method, mask_init, mask_scale) used for movement pruning.
    """
    model_type = 'masked_bert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method='topK', mask_init='constant', mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Attribute names match the constructor arguments one-to-one.
        hyperparams = dict(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            hidden_act=hidden_act,
            intermediate_size=intermediate_size,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pruning_method=pruning_method,
            mask_init=mask_init,
            mask_scale=mask_scale,
        )
        for attr_name, attr_value in hyperparams.items():
            setattr(self, attr_name, attr_value)
def shift(carry: MoveCarry) -> MoveUpdate:
    """Advance the move cursor: the previous origin becomes the new target,
    the origin resets to slot 0 with no extra reward, and the origin index
    steps forward by one."""
    next_origin_idx = carry.origin_idx + 1
    return MoveUpdate(
        target=carry.origin,
        origin=0,
        additional_reward=0.0,
        target_idx=carry.target_idx,
        origin_idx=next_origin_idx,
    )
def drn_d_40(BatchNorm, pretrained=True):
    """Build a DRN-D-40 network, optionally initialized from the model zoo.

    The classifier weights ('fc.*') are dropped from the downloaded state
    dict before loading (backbone-only use).
    """
    model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        # Keep the downloaded weights in their own name instead of
        # shadowing the boolean parameter.
        state_dict = model_zoo.load_url(model_urls['drn-d-40'])
        for fc_key in ('fc.weight', 'fc.bias'):
            del state_dict[fc_key]
        model.load_state_dict(state_dict)
    return model
class ResNet_locate(nn.Module):
    """ResNet backbone with a pyramid-pooling (PPM) head that produces
    multi-scale 'info' feature maps for saliency decoding."""
    def __init__(self, block, layers):
        super(ResNet_locate, self).__init__()
        self.resnet = ResNet(block, layers)
        self.in_planes = 512
        self.out_planes = [512, 256, 256, 128]
        # 1x1 reduction from the 2048-channel final ResNet stage.
        self.ppms_pre = nn.Conv2d(2048, self.in_planes, 1, 1, bias=False)
        (ppms, infos) = ([], [])
        # Pyramid pooling branches at 1x1, 3x3 and 5x5 output resolutions.
        for ii in [1, 3, 5]:
            ppms.append(nn.Sequential(nn.AdaptiveAvgPool2d(ii), nn.Conv2d(self.in_planes, self.in_planes, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.ppms = nn.ModuleList(ppms)
        # Fuses the pre-reduced feature with the 3 upsampled PPM branches.
        self.ppm_cat = nn.Sequential(nn.Conv2d((self.in_planes * 4), self.in_planes, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
        # One projection head per output channel count.
        for ii in self.out_planes:
            infos.append(nn.Sequential(nn.Conv2d(self.in_planes, ii, 3, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.infos = nn.ModuleList(infos)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fixed-std normal init. (Removed an unused fan-out
                # computation left over from the torchvision init recipe.)
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def load_pretrained_model(self, model):
        """Load backbone weights from a checkpoint path (non-strict)."""
        self.resnet.load_state_dict(torch.load(model), strict=False)
    def forward(self, x):
        """Return (backbone feature list, multi-scale info maps).

        Each info map is resized to match the corresponding backbone stage
        (deepest stage first).
        """
        xs = self.resnet(x)
        xs_1 = self.ppms_pre(xs[(- 1)])
        xls = [xs_1]
        for k in range(len(self.ppms)):
            xls.append(F.interpolate(self.ppms[k](xs_1), xs_1.size()[2:], mode='bilinear', align_corners=True))
        xls = self.ppm_cat(torch.cat(xls, dim=1))
        infos = []
        for k in range(len(self.infos)):
            infos.append(self.infos[k](F.interpolate(xls, xs[((len(self.infos) - 1) - k)].size()[2:], mode='bilinear', align_corners=True)))
        return (xs, infos)
def preprocess(method, args):
    """Run the random-walk preprocessing pipeline described by *args*.

    *args* is a dict of folder paths and walk hyper-parameters; *method*
    is only used to label the timing log line.
    """
    start = time.time()
    processing = Processing(
        base_path=args['base_path'],
        origin_folder=args['origin_folder'],
        core_folder=args.get('core_folder', None),
        walk_pair_folder=args['walk_pair_folder'],
        node_freq_folder=args['node_freq_folder'],
        node_file=args['node_file'],
        walk_time=args['walk_time'],
        walk_length=args['walk_length'],
    )
    processing.run(
        worker=args['worker'],
        generate_core=args.get('generate_core', False),
        run_walk=args.get('run_walk', True),
        sep=args['file_sep'],
        weighted=args['weighted'],
    )
    elapsed = time.time() - start
    print('finish ' + method + ' preprocessing! total cost time:', elapsed, ' seconds!')
def init_seed(seed=1, use_cuda=False):
    """Make runs reproducible by seeding the numpy and torch RNGs.

    Args:
        seed: seed value shared by all generators.
        use_cuda: when True, also seed the current CUDA device's generator.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    if use_cuda:
        torch.cuda.manual_seed(seed)
def shuffle_pc(file, output_path):
    """Randomly permute a mesh's vertices (with their normals) and save it.

    Loads *file*, applies one random permutation to the vertex array and to
    each per-vertex normal component, and writes the shuffled mesh to
    *output_path* as ASCII.
    """
    mesh = pymesh.load_mesh(file)
    vertices = copy.deepcopy(mesh.vertices)
    permutation = np.random.permutation(len(vertices))
    vertices = vertices[permutation]
    new_mesh = pymesh.meshio.form_mesh(vertices, mesh.faces)
    # Fix: collapse the triplicated copy-paste for nx/ny/nz into one loop;
    # every normal component must follow the same vertex permutation.
    for attr in ('vertex_nx', 'vertex_ny', 'vertex_nz'):
        new_mesh.add_attribute(attr)
        new_mesh.set_attribute(attr, mesh.get_vertex_attribute(attr)[permutation])
    pymesh.save_mesh(output_path, new_mesh, *new_mesh.get_attribute_names(), ascii=True, anonymous=True, use_float=True)
def test_write(policy, X):
    """Writing rewards for chosen actions records them in the policy history
    and removes those actions from the remaining action pool."""
    constant_reward = (lambda x: 1.0)
    chosen = np.array([0, 1], np.int32)
    rewards = np.apply_along_axis(constant_reward, 1, X[chosen])
    policy.write(chosen, rewards)
    numpy.testing.assert_array_equal(chosen, policy.history.chosen_actions[:len(chosen)])
    assert (len(policy.actions) == (len(X) - len(chosen)))
def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Create an EfficientNet-family model from the canonical block recipe.

    The channel and depth multipliers scale the base architecture; any
    remaining keyword arguments are forwarded to the model constructor.
    """
    # Base stage definitions (one entry per stage): block type, repeats,
    # kernel, stride, expansion, channels, squeeze-excite ratio.
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'],
        ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'],
        ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25'],
        ['ir_r4_k5_s2_e6_c192_se0.25'],
        ['ir_r1_k3_s1_e6_c320_se0.25'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier),
        num_features=round_channels(1280, channel_multiplier, 8, None),
        stem_size=32,
        channel_multiplier=channel_multiplier,
        act_layer=resolve_act_layer(kwargs, 'swish'),
        norm_kwargs=resolve_bn_args(kwargs),
        **kwargs,
    )
    return _create_effnet(variant, pretrained, **model_kwargs)
def __scale_width_then_half(img, target_width):
    """Resize *img* to *target_width* (preserving aspect ratio), then take a
    random crop of half the resized width and height.

    NOTE(review): when the image already has the target width it is returned
    unchanged, so the half-size crop is skipped in that case -- confirm this
    asymmetry is intended.
    """
    ow, oh = img.size
    if ow == target_width:
        return img
    new_w = target_width
    new_h = int(target_width * oh / ow)
    resized = img.resize((new_w, new_h), Image.BICUBIC)
    # Random top-left corner so the (w/2, h/2) crop always fits inside.
    top = np.random.randint(0, int(new_h / 2))
    left = np.random.randint(0, int(new_w / 2))
    box = (left, top, int(left + new_w / 2), int(top + new_h / 2))
    return resized.crop(box)
def process_text(text, dic, r, grams):
    """Vectorize documents into a sparse matrix of per-token weights.

    For each line in *text*, tokens are looked up in the vocabulary *dic*;
    each distinct in-vocabulary token index j receives weight r[j].

    Returns:
        A CSR matrix of shape (len(text), len(dic)).
    """
    X = lil_matrix((len(text), len(dic)))
    for row, line in enumerate(text):
        # Keep only in-vocabulary tokens; dedupe and sort the column
        # indices before assignment.
        cols = sorted({dic[t] for t in tokenize(line, grams) if t in dic})
        for col in cols:
            X[row, col] = r[col]
    return csr_matrix(X)
def get_checkpoint_url(config_path):
    """Map a model-zoo config path to its pretrained-checkpoint URL.

    Raises:
        RuntimeError: if no checkpoint is registered for *config_path*.
    """
    name = config_path.replace('.yaml', '')
    if config_path not in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX:
        raise RuntimeError('{} not available in Model Zoo!'.format(name))
    suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[config_path]
    return _ModelZooUrls.S3_PREFIX + name + '/' + suffix
class State():
    """Environment state container (annotation-only fields, chex types).

    NOTE(review): field names suggest a minesweeper-like environment;
    confirm against the surrounding module (likely a chex dataclass --
    a decorator may precede this chunk).
    """
    board: Board  # current board contents
    step_count: chex.Numeric  # steps taken so far in the episode
    flat_mine_locations: chex.Array  # presumably mine positions as flat indices -- confirm
    key: chex.PRNGKey  # JAX PRNG key for stochastic transitions
class BNN(object):
    """Bayesian neural network utilities (TF1-style, explicit weight dicts).

    Holds network dimensions and provides helpers to create weight
    variables, run the forward pass, evaluate the data log-likelihood and
    weight log-prior, and convert weights between dict and flat-vector
    forms. When ``is_bnn`` is True, two extra scalars are appended to the
    parameter vector: ``log_lambda`` (weight precision) and ``log_gamma``
    (observation-noise precision).
    """

    def __init__(self, dim_input, dim_output, dim_hidden, num_layers, is_bnn=True):
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers
        self.is_bnn = is_bnn

    def construct_network_weights(self, scope='network'):
        """Create TF variables for all layers (plus log_lambda/log_gamma for BNNs)."""
        params = OrderedDict()
        fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32)
        params['w1'] = tf.get_variable(name=(scope + '_w1'), shape=[self.dim_input, self.dim_hidden], initializer=fc_initializer)
        params['b1'] = tf.Variable(name=(scope + '_b1'), initial_value=tf.random_normal([self.dim_hidden], 0.0, 0.01))
        for l in range(self.num_layers):
            # Hidden layers keep dim_hidden width; the last layer maps to dim_output.
            if (l < (self.num_layers - 1)):
                dim_output = self.dim_hidden
            else:
                dim_output = self.dim_output
            params['w{}'.format((l + 2))] = tf.get_variable(name=(scope + '_w{}'.format((l + 2))), shape=[self.dim_hidden, dim_output], initializer=fc_initializer)
            params['b{}'.format((l + 2))] = tf.Variable(name=(scope + '_b{}'.format((l + 2))), initial_value=tf.random_normal([dim_output], 0.0, 0.01))
        if self.is_bnn:
            # Precision hyper-parameters initialized near -log of the FLAGS means.
            init_val = np.random.normal((- np.log(FLAGS.m_l)), 0.001, [1])
            params['log_lambda'] = tf.Variable(name=(scope + '_log_lambda'), initial_value=init_val, dtype=tf.float32)
            print('log_lambda: ', init_val)
            init_val = np.random.normal((- np.log(FLAGS.m_g)), 0.001, [1])
            params['log_gamma'] = tf.Variable(name=(scope + '_log_gamma'), initial_value=init_val, dtype=tf.float32)
            print('log_gamma: ', init_val)
        return params

    def log_likelihood_data(self, predict_y, target_y, log_gamma):
        """Gaussian log-likelihood of the data under noise precision exp(log_gamma)."""
        if (not self.is_bnn):
            # BUG FIX: the exception was instantiated but never raised, so the
            # non-BNN case silently fell through to the computation below.
            raise NotImplementedError()
        error_y = (predict_y - target_y)
        log_lik_data = ((0.5 * log_gamma) - ((0.5 * tf.exp(log_gamma)) * tf.square(error_y)))
        return log_lik_data

    def log_prior_weight(self, W_dict):
        """Log-priors for the weights (Gaussian) and both precision hyper-parameters.

        Returns a (log_prior_w, log_prior_gamma, log_prior_lambda) tuple.
        """
        if (not self.is_bnn):
            # BUG FIX: the exception was instantiated but never raised.
            raise NotImplementedError()
        W_vec = self.dicval2vec(W_dict)
        # By construction the last two entries of the flattened vector are
        # log_lambda and log_gamma.
        log_lambda = tf.reshape(W_vec[(- 2)], (1,))
        log_gamma = tf.reshape(W_vec[(- 1)], (1,))
        W_vec = W_vec[:(- 2)]
        num_params = tf.cast(W_vec.shape[0], tf.float32)
        # Gamma prior on the precisions; the trailing +log term is the
        # log-Jacobian of the log-parameterization.
        log_prior_gamma = ((((FLAGS.a_g - 1) * log_gamma) - (FLAGS.b_g * tf.exp(log_gamma))) + log_gamma)
        W_diff = W_vec
        log_prior_w = (((0.5 * num_params) * log_lambda) - ((0.5 * tf.exp(log_lambda)) * tf.reduce_sum((W_diff ** 2))))
        log_prior_lambda = ((((FLAGS.a_l - 1) * log_lambda) - (FLAGS.b_l * tf.exp(log_lambda))) + log_lambda)
        return (log_prior_w, log_prior_gamma, log_prior_lambda)

    def mse_data(self, predict_y, target_y):
        """Per-example sum of squared errors."""
        return tf.reduce_sum(tf.square((predict_y - target_y)), axis=1)

    def forward_network(self, x, W_dict):
        """Forward pass: ReLU on all layers except the final linear output."""
        hid = tf.nn.relu((tf.matmul(x, W_dict['w1']) + W_dict['b1']))
        for l in range(self.num_layers):
            hid = (tf.matmul(hid, W_dict['w{}'.format((l + 2))]) + W_dict['b{}'.format((l + 2))])
            if (l < (self.num_layers - 1)):
                hid = tf.nn.relu(hid)
        return hid

    def list2vec(self, list_in):
        """Flatten a list of tensors into one 1-D tensor."""
        return tf.concat([tf.reshape(ww, [(- 1)]) for ww in list_in], axis=0)

    def vec2dic(self, W_vec):
        """Inverse of dicval2vec: rebuild the weight dict from a flat vector."""
        if self.is_bnn:
            log_lambda = tf.reshape(W_vec[(- 2)], (1,))
            log_gamma = tf.reshape(W_vec[(- 1)], (1,))
            W_vec = W_vec[:(- 2)]
            W_dic = self.network_weight_vec2dict(W_vec)
            W_dic['log_lambda'] = log_lambda
            W_dic['log_gamma'] = log_gamma
        else:
            W_dic = self.network_weight_vec2dict(W_vec)
        return W_dic

    def network_weight_vec2dict(self, W_vec):
        """Slice a flat weight vector back into per-layer w/b tensors."""
        W_dic = OrderedDict()
        dim_list = (([self.dim_input] + ([self.dim_hidden] * self.num_layers)) + [self.dim_output])
        for l in range((len(dim_list) - 1)):
            (dim_input, dim_output) = (dim_list[l], dim_list[(l + 1)])
            W_dic['w{}'.format((l + 1))] = tf.reshape(W_vec[:(dim_input * dim_output)], [dim_input, dim_output])
            W_dic['b{}'.format((l + 1))] = W_vec[(dim_input * dim_output):((dim_input * dim_output) + dim_output)]
            if (l < (len(dim_list) - 2)):
                # Drop the consumed slice before processing the next layer.
                W_vec = W_vec[((dim_input * dim_output) + dim_output):]
        return W_dic

    def dicval2vec(self, dic):
        """Flatten all dict values (in insertion order) into one 1-D tensor."""
        return tf.concat([tf.reshape(val, [(- 1)]) for val in dic.values()], axis=0)
def test_beit_layer_decay_optimizer_constructor():
    """LayerDecayOptimizerConstructor builds per-layer lr/wd groups for a BEiT backbone."""
    model = PseudoDataParallel(ToySegmentor(ToyBEiT()))
    optim_cfg = dict(type='AdamW', lr=1, betas=(0.9, 0.999), weight_decay=0.05)
    layer_cfg = dict(layer_decay_rate=2, num_layers=3)
    constructor = LayerDecayOptimizerConstructor(optim_cfg, layer_cfg)
    check_optimizer_lr_wd(constructor(model), expected_layer_wise_wd_lr_beit)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.