code stringlengths 101 5.91M |
|---|
class KRTToRCBijectionTypeA2Odd(KRTToRCBijectionTypeA):
    """Bijection from Kirillov-Reshetikhin tableaux to rigged configurations,
    specializing the type-A bijection for a twisted affine type
    (presumably type A^{(2)} odd case — confirm against the module docs).
    """

    def next_state(self, val):
        """Absorb the next tableau letter ``val`` into the partially built
        rigged configuration ``self.ret_rig_con``.

        Positive letters are delegated unchanged to the type-A algorithm.
        Negative letters insert cells down to partition ``n`` and then back
        up to the current column height, followed by vacancy-number and
        rigging updates.
        """
        n = self.n
        # Height of the first column of the current path minus one.
        tableau_height = (len(self.cur_path[0]) - 1)
        if (val > 0):
            # Positive letters behave exactly as in type A.
            KRTToRCBijectionTypeA.next_state(self, val)
            return
        pos_val = (- val)
        # Seed the maximal insertable cell width from partition pos_val - 1
        # (its first/widest row), or 1 if that partition is empty.
        if (len(self.ret_rig_con[(pos_val - 1)]) > 0):
            max_width = self.ret_rig_con[(pos_val - 1)][0]
        else:
            max_width = 1
        # Insert a cell into every partition from pos_val - 1 through n - 1.
        for a in range((pos_val - 1), n):
            max_width = self.ret_rig_con[a].insert_cell(max_width)
        # Walk back up from n - 2 down to tableau_height, inserting and
        # updating the partition above each insertion point.
        for a in reversed(range(tableau_height, (n - 1))):
            max_width = self.ret_rig_con[a].insert_cell(max_width)
            self._update_vacancy_nums((a + 1))
            self._update_partition_values((a + 1))
        if (tableau_height < n):
            self._update_vacancy_nums(tableau_height)
            self._update_partition_values(tableau_height)
        # Refresh the partitions between pos_val and the column height, plus
        # the single neighbor just outside the touched range.
        if (pos_val <= tableau_height):
            for a in range((pos_val - 1), tableau_height):
                self._update_vacancy_nums(a)
                self._update_partition_values(a)
            if (pos_val > 1):
                self._update_vacancy_nums((pos_val - 2))
                self._update_partition_values((pos_val - 2))
        elif (tableau_height > 0):
            self._update_vacancy_nums((tableau_height - 1))
            self._update_partition_values((tableau_height - 1))
def GenerateSM75_TensorOp_1688(manifest, cuda_version):
    """Register SM75 (Turing) Tensor-Core GEMM and Conv2d operations built on
    the 16x8x8 f16 MMA instruction, for both f32 and f16 accumulators.

    Requires CUDA toolkit >= 10.2; silently does nothing otherwise.
    """
    if (not CudaToolkitVersionSatisfies(cuda_version, 10, 2)):
        return
    # (A, B, C) layout combinations; C is always column-major here.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    # Same 16x8x8 f16 instruction with f32 and f16 accumulation.
    math_instructions = [MathInstruction([16, 8, 8], DataType.f16, DataType.f16, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 8], DataType.f16, DataType.f16, DataType.f16, OpcodeClass.TensorOp, MathOperation.multiply_add)]
    min_cc = 75
    max_cc = 1024
    alignment_constraints = [8, 4, 2, 1]
    for math_inst in math_instructions:
        # Threadblock tile shapes with stage count and warp arrangement.
        tile_descriptions = [TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 64], 2, [1, 2, 2], math_inst, min_cc, max_cc)]
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
        conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
        CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
        # Also emit mixed-precision variants where C keeps the input dtype.
        if (math_inst.element_a != math_inst.element_accumulator):
            data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator]
            CreateGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints)
            CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
        GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst)
def shapley_coefficients(n):
    """Return the n Shapley weights ``1 / (n * C(n - 1, i))`` for i = 0..n-1."""
    subset_sizes = np.arange(n)
    return 1.0 / (n * scipy.special.comb(n - 1, subset_sizes))
def soft_threshold(x, gamma):
    """Elementwise soft-thresholding (proximal operator of ``gamma * ||.||_1``).

    Returns ``sign(x) * max(|x| - gamma, 0)``, which equals the original
    ``max(0, 1 - gamma/|x|) * x`` but avoids the divide-by-zero
    RuntimeWarning the old form emitted whenever an entry of ``x`` was 0.

    Parameters
    ----------
    x : array_like
        Input values.
    gamma : float
        Threshold level.

    Returns
    -------
    np.ndarray
        Thresholded values, same shape as ``x``.
    """
    x = np.asarray(x)
    return np.sign(x) * np.maximum(np.abs(x) - gamma, 0)
def _transform_day(result_str: str, day_token: str, day: int) -> str:
result = deepcopy(result_str)
if (day_token != ''):
if (day == (- 1)):
if (len(day_token) == 2):
result = result.replace(day_token, '--')
elif (len(day_token) == 1):
result = result.replace(day_token, '-')
elif (len(day_token) == 2):
if (day < 10):
result = result.replace(day_token, f'{0}{day}', 1)
else:
result = result.replace(day_token, str(day), 1)
else:
result = result.replace(day_token, str(day))
return result |
def create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob=0.1):
    """Build BERT-style next-sentence-prediction instances from one document.

    Each instance is a tuple ``(tokens_a, tokens_b, is_random_next)`` where
    ``tokens_b`` either continues the document (is_random_next=False) or is
    sampled from a different random document (is_random_next=True).

    Parameters
    ----------
    all_documents : each document is a sequence of token segments (sentences).
    document_index : index of the document to draw segments from.
    max_seq_length : maximum total sequence length; 3 slots are reserved
        (presumably for [CLS]/[SEP]/[SEP] — confirm with the tokenizer).
    short_seq_prob : probability of using a shorter random target length.
    """
    document = all_documents[document_index]
    max_num_tokens = (max_seq_length - 3)
    target_seq_length = max_num_tokens
    # Occasionally use a short target length so the model also sees
    # under-length sequences during pretraining.
    if (random.random() < short_seq_prob):
        target_seq_length = random.randint(2, max_num_tokens)
    instances = []
    current_chunk = []
    current_length = 0
    i = 0
    while (i < len(document)):
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        # Flush the chunk at end of document or once it is long enough.
        if ((i == (len(document) - 1)) or (current_length >= target_seq_length)):
            if current_chunk:
                # a_end: how many leading segments go into tokens_a.
                a_end = 1
                if (len(current_chunk) >= 2):
                    a_end = random.randint(1, (len(current_chunk) - 1))
                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])
                tokens_b = []
                is_random_next = False
                if ((len(current_chunk) == 1) or (random.random() < 0.5)):
                    # "Random next": fill tokens_b from a different document.
                    is_random_next = True
                    target_b_length = (target_seq_length - len(tokens_a))
                    # Up to 10 tries to pick a document other than this one.
                    for _ in range(10):
                        random_document_index = random.randint(0, (len(all_documents) - 1))
                        if (random_document_index != document_index):
                            break
                    random_document = all_documents[random_document_index]
                    random_start = random.randint(0, (len(random_document) - 1))
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if (len(tokens_b) >= target_b_length):
                            break
                    # Segments not consumed by tokens_a are put back by
                    # rewinding the loop index so they are not wasted.
                    num_unused_segments = (len(current_chunk) - a_end)
                    i -= num_unused_segments
                else:
                    # "Actual next": tokens_b continues the same document.
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
                assert (len(tokens_a) >= 1)
                assert (len(tokens_b) >= 1)
                instance = (tokens_a, tokens_b, is_random_next)
                instances.append(instance)
            current_chunk = []
            current_length = 0
        i += 1
    return instances
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    exp_shifted = np.exp(x - np.max(x))
    return exp_shifted / exp_shifted.sum()
def _prepare_caffe2(x):
    """Resolve a caffe2 blob reference to its concrete value via the workspace."""
    from caffe2.python import workspace
    return workspace.FetchBlob(x)
class AttentionLayer(nn.Module):
    """Top-down attention assembled from a modal-combine layer, a transform
    layer, and a normalization setting supplied via keyword config dicts.
    """

    def __init__(self, image_dim, question_dim, **kwargs):
        super().__init__()
        combine_cfg = kwargs['modal_combine']
        combine = ModalCombineLayer(combine_cfg['type'], image_dim, question_dim, **combine_cfg['params'])
        transform_cfg = kwargs['transform']
        transform = TransformLayer(transform_cfg['type'], combine.out_dim, **transform_cfg['params'])
        self.module = TopDownAttention(combine, transform, kwargs['normalization'])
        # Expose the inner module's output dimension when it defines one.
        if hasattr(self.module, 'out_dim'):
            self.out_dim = self.module.out_dim

    def forward(self, *args, **kwargs):
        """Delegate straight to the wrapped TopDownAttention module."""
        return self.module(*args, **kwargs)
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook():
    """DistEvalHook must reject non-DataLoader input and, when registered on
    an IterBasedRunner, call ``dataset.evaluate`` with the collected results.

    Fixes: restored the truncated ``@patch`` decorator line and corrected the
    DataLoader keyword ``num_worker`` -> ``num_workers`` so that the asserted
    TypeError is raised by DistEvalHook (list instead of DataLoader), not by
    DataLoader's constructor rejecting an unknown keyword.
    """
    with pytest.raises(TypeError):
        # dataloader must be a torch DataLoader, not a list of them
        test_dataset = ExampleModel()
        data_loader = [DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)]
        DistEvalHook(data_loader)
    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim, dict(params=model.parameters()))
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = DistEvalHook(data_loader, by_epoch=False, efficient_test=True)
        runner = mmcv.runner.IterBasedRunner(model=model, optimizer=optimizer, work_dir=tmpdir, logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        test_dataset.evaluate.assert_called_with([torch.tensor([1])], logger=runner.logger)
def train(loss_val, var_list):
    """Build the Adam training op over ``var_list``; when FLAGS.debug is set,
    also emit a gradient summary for every (grad, var) pair."""
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    gradient_pairs = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        for grad, var in gradient_pairs:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(gradient_pairs)
class CategoricalEncodingAlgo(abc.ABC):
    """Interface for algorithms that encode categorical log attributes."""

    def fit_transform(self, log_attributes: pd.DataFrame) -> pd.DataFrame:
        """Fit on and transform the given attribute frame.

        NOTE(review): declared on an ABC but not marked ``@abstractmethod``,
        so the default is a no-op returning None — confirm this is intended.
        """
        pass
def load_models(config, mode):
    """Instantiate the generator/discriminator pair for one domain.

    ``mode`` selects which class-count ('source' or 'target') each model is
    built with; the per-domain keys are stripped from the configs before
    loading. Returns ``(generator, discriminator)``.
    """
    gen_conf = deepcopy(config.models['generator'])
    dis_conf = deepcopy(config.models['discriminator'])
    if mode == 'source':
        suffix = 'src'
    elif mode == 'target':
        suffix = 'tgt'
    else:
        raise NotImplementedError
    for conf in (gen_conf, dis_conf):
        args = conf['args']
        # Promote the domain-specific class count, then drop both variants.
        args['n_classes'] = args['n_classes_' + suffix]
        args.pop('n_classes_src')
        args.pop('n_classes_tgt')
    gen = yaml_utils.load_model(gen_conf['fn'], gen_conf['name'], gen_conf['args'])
    dis = yaml_utils.load_model(dis_conf['fn'], dis_conf['name'], dis_conf['args'])
    return (gen, dis)
# NOTE(review): "_node_type()" below looks like a truncated decorator line
# (perhaps "@optplan.register_node_type()") — restore before use.
_node_type()
class GaussianSource(optplan.EmSource):
    """Schema model describing a Gaussian-beam electromagnetic source."""
    type = schema_utils.polymorphic_model_type('source.gaussian_beam')
    # Beam waist radius.
    w0 = types.FloatType()
    # Geometry of the source region: center, beam center, extents, and normal.
    center = optplan.vec3d()
    beam_center = optplan.vec3d()
    extents = optplan.vec3d()
    normal = optplan.vec3d()
    # Beam orientation angles and polarization.
    theta = types.FloatType()
    psi = types.FloatType()
    polarization_angle = types.FloatType()
    # Whether to overwrite the simulation's Bloch vector.
    overwrite_bloch_vector = types.BooleanType()
    # Source power; optionally normalized by a simulation run.
    power = types.FloatType()
    normalize_by_sim = types.BooleanType(default=False)
class XGLMTokenizer(metaclass=DummyObject):
    """Import placeholder: raises a helpful error on instantiation unless the
    `sentencepiece` backend is installed."""
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
def affiliation_precision_distance(Is=[(1, 2), (3, 4), (5, 6)], J=(2, 5.5)):
    """Length-normalized total distance from predicted intervals ``Is`` to the
    ground-truth interval ``J`` (affiliation precision distance).

    Returns NaN when every predicted interval is None (nothing to score).
    NOTE: the mutable default list is kept for interface compatibility; it is
    only read, never mutated.
    """
    if all(I is None for I in Is):
        return math.nan
    total = sum(integral_interval_distance(I, J) for I in Is)
    return total / sum_interval_lengths(Is)
class TFRegNetModel(metaclass=DummyObject):
    """Import placeholder: raises a helpful error on instantiation unless the
    TensorFlow backend is installed."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class FurthestPointSampling(Function):
    """Autograd wrapper around the native furthest-point-sampling kernel.

    Fix: ``torch.autograd.Function``'s ``forward``/``backward`` must be
    ``@staticmethod``s — the decorators were missing (likely stripped), so
    calling via ``.apply`` would have failed.
    """

    @staticmethod
    def forward(ctx, xyz, npoint):
        # Kernel requires a contiguous point tensor.
        return pl.furthest_point_sampling(xyz.contiguous(), npoint)

    @staticmethod
    def backward(xyz, a=None):
        # Sampling indices are not differentiable w.r.t. either input.
        return (None, None)
def test_cross_entropy_no_batch_dim_dense_target():
    """Cross entropy with a dense (soft) target and no batch dim yields a scalar.

    Softmax of the logits is [1, 1, 10, 1, 1, 1] / 15, so with target mass
    0.5 on indices 2 and 5 the expected loss is
    ``-(0.5 * log(10/15) + 0.5 * log(1/15))``.
    """
    logits_raw = torch.tensor([0.0, 0.0, math.log(10.0), 0.0, 0.0, 0.0])
    target_raw = torch.tensor([0.0, 0.0, 0.5, 0.0, 0.0, 0.5])
    classes_dim = Dim(dimension=6)
    logits = Tensor(name='logits', dims=[classes_dim], dtype='float32', raw_tensor=logits_raw)
    target = Tensor(name='target', dims=[classes_dim], dtype='float32', raw_tensor=target_raw)
    cross_entropy = rf.cross_entropy(estimated=logits, target=target, axis=classes_dim, estimated_type='logits')
    # Reducing over the class axis leaves a dimensionless (scalar) tensor.
    assert (not cross_entropy.dims)
    assert (cross_entropy.raw_tensor.tolist() == pytest.approx((((- 0.5) * math.log((10 / 15))) - (0.5 * math.log((1 / 15))))))
class BaseOfflinePolicyLearner(metaclass=ABCMeta):
    """Base class for offline (off-policy) learners over a finite action set.

    NOTE(review): the body uses bare annotated fields plus ``__post_init__``,
    which only runs under ``@dataclass`` — that decorator (and likely
    ``@property`` on ``policy_type`` and ``@abstractmethod`` on
    ``fit``/``predict``) appears to have been stripped; confirm against the
    original source before relying on this definition.
    """

    # Number of actions in the logged bandit feedback (validated >= 2).
    n_actions: int
    # Length of the recommendation list (validated 1 <= len_list <= n_actions).
    len_list: int = 1

    def __post_init__(self) -> None:
        """Validate hyperparameters after (dataclass) initialization."""
        check_scalar(self.n_actions, 'n_actions', int, min_val=2)
        check_scalar(self.len_list, 'len_list', int, min_val=1, max_val=self.n_actions)

    def policy_type(self) -> PolicyType:
        """Type of this policy; always OFFLINE."""
        return PolicyType.OFFLINE

    def fit(self) -> None:
        """Fit the policy on logged data; must be overridden by subclasses."""
        raise NotImplementedError

    def predict(self, context: np.ndarray) -> np.ndarray:
        """Predict actions for the given contexts; must be overridden."""
        raise NotImplementedError
def label2id(image):
    """Map a trainId-encoded label image back to full label ids using the
    module-level ``labels`` table.

    NOTE(review): ``out_array`` starts as ``np.empty``, so any pixel whose
    value matches no label's trainId keeps uninitialized memory — presumably
    every pixel carries a valid trainId; confirm against the callers.
    """
    array = np.array(image)
    out_array = np.empty(array.shape, dtype=array.dtype)
    for l in labels:
        # Skip sentinel trainIds (negatives and the 255 "ignore" value).
        if (0 <= l.trainId < 255):
            out_array[(array == l.trainId)] = l.id
    return Image.fromarray(out_array)
def WriteStatus(num_steps, eval_metric, best_eval_metric):
    """Write a one-line tuning status to the status directory and append the
    same line to the run's 'status' output file."""
    status_dir = os.getenv('GOOGLE_STATUS_DIR') or '/tmp'
    status_path = os.path.join(status_dir, 'STATUS')
    message = ('Parameters: %s | Steps: %d | Tuning score: %.2f%% | Best tuning score: %.2f%%'
               % (FLAGS.params, num_steps, eval_metric, best_eval_metric))
    # Overwrite the STATUS file, but append to the cumulative log.
    with gfile.FastGFile(status_path, 'w') as fout:
        fout.write(message)
    with gfile.FastGFile(OutputPath('status'), 'a') as fout:
        fout.write(message + '\n')
class AnnotatedConvBnModel(torch.nn.Module):
    """Conv + BatchNorm wrapped in quant/dequant stubs and annotated with the
    default qconfig, for eager-mode quantization tests."""

    def __init__(self):
        super().__init__()
        self.qconfig = default_qconfig
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        """quant -> conv -> bn -> dequant."""
        return self.dequant(self.bn(self.conv(self.quant(x))))
def parse(exit_code, log, output):
    """Parse 'key: value' tool-output lines into findings.

    A line whose key ends with 'ruleId' opens a new finding; subsequent
    'severity'/'line'/'column' lines annotate the most recent finding.
    Returns ``(findings, infos, errors, fails)``.

    Fix: attribute lines appearing before any 'ruleId' line previously raised
    ``NameError`` on the unbound ``finding``; they are now ignored.
    """
    (findings, infos) = ([], set())
    (errors, fails) = sb.parse_utils.errors_fails(exit_code, log)
    finding = None
    for line in log:
        i = line.find(': ')
        if (i >= 0):
            k = line[0:i].strip()
            v = line[(i + 2):].strip()
            # Numeric values (e.g. line/column) are stored as ints.
            if v.isdigit():
                v = int(v)
            if k.endswith('ruleId'):
                finding = {'name': v}
                findings.append(finding)
            elif (k in ('severity', 'line', 'column')) and (finding is not None):
                finding[k] = v
    return (findings, infos, errors, fails)
def is_OctalStringMonoidElement(x):
    """Return True iff ``x`` is a StringMonoidElement whose parent is an
    OctalStringMonoid."""
    from .string_monoid import OctalStringMonoid
    if not isinstance(x, StringMonoidElement):
        return False
    return isinstance(x.parent(), OctalStringMonoid)
class RandomActiveLearningNodeMean(LearningNodeMean, RandomActiveLeafRegressor):
    """Active regression leaf that predicts the target mean and restricts
    split candidates to a random subset of features."""

    def __init__(self, initial_stats=None, max_features=2, random_state=None):
        # initial_stats: statistics dict forwarded to LearningNodeMean.
        # max_features: number of randomly chosen candidate features.
        # random_state is stored as given (so it can be reported/cloned),
        # while _random_state holds the validated RandomState instance.
        super().__init__(initial_stats)
        self.max_features = max_features
        self.feature_indices = np.array([])
        self.random_state = random_state
        self._random_state = check_random_state(self.random_state)
def get_alignment_angle_arctan2(left_eye: Union[(list, tuple)], right_eye: Union[(list, tuple)]) -> float:
    """Angle in degrees of the line from left_eye to right_eye, via atan2."""
    dy = right_eye[1] - left_eye[1]
    dx = right_eye[0] - left_eye[0]
    return float(np.degrees(np.arctan2(dy, dx)))
class A064553(SloaneSequence):
    """Sloane sequence A064553: the multiplicative sequence determined by
    a(1) = 1 and a(prime(i)) = i + 1."""

    def __init__(self):
        # The sequence is 1-indexed.
        SloaneSequence.__init__(self, offset=1)

    def _repr_(self):
        """Human-readable statement of the defining recurrence."""
        return 'a(1) = 1, a(prime(i)) = i+1 for i > 0 and a(u*v) = a(u)*a(v) for u,v > 0'

    def _eval(self, n):
        """Evaluate multiplicatively over the prime factorization of n."""
        return prod([((prime_pi(p) + 1) ** e) for (p, e) in arith.factor(n)])
def get_trainer_params():
    """Return the LatentTrainer configuration (learning rates, train/holdout
    cadences, logging and checkpointing intervals) as a ``d`` config object.
    """
    return d(cls=LatentTrainer, params=d(dynamics_learning_rate=0.0001, latent_learning_rate=0.0005, latent_train_every_n_steps=LATENT_TRAIN_EVERY_N, sample_every_n_steps=0, train_every_n_steps=1, holdout_every_n_steps=500, max_steps=100000.0, max_train_data_steps=0, max_holdout_data_steps=0, log_every_n_steps=1000.0, save_every_n_steps=1000.0, checkpoint_model_file=MODEL_FILE, save_checkpoints=True))
# NOTE(review): the bare "()" below looks like a truncated decorator line
# (e.g. a CLI command registration such as "@app.command()") — restore
# before use.
()
def ssh():
    """Interactively pick a provisioned instance and open an SSH session to it.

    Aborts when no instances are found.
    """
    instances = query_instances()
    if (len(instances) == 0):
        typer.secho(f'No instances found', fg='red', err=True)
        raise typer.Abort()
    # Label -> instance map; labels show region, public IP, and state.
    instance_map = {f'{i.region_tag}, {i.public_ip()} ({i.instance_state()})': i for i in instances}
    choices = list(sorted(instance_map.keys()))
    typer.secho('Select an instance:', fg='yellow', bold=True)
    for (i, choice) in enumerate(choices):
        typer.secho(f'{(i + 1)}) {choice}', fg='yellow')
    # Prompt is 1-based; convert back to a 0-based list index.
    choice = IntPrompt.ask('Enter an instance number', choices=list([str(i) for i in range(1, (len(choices) + 1))]), show_choices=False)
    instance = instance_map[choices[(choice - 1)]]
    cmd = instance.get_ssh_cmd()
    logger.info(f'Running SSH command: {cmd}')
    logger.info('It may ask for a private key password, try `skyplane`.')
    proc = subprocess.Popen(split(cmd))
    proc.wait()
@pytest.mark.parametrize('csr_container', CSR_CONTAINERS)
def test_load_offset_exhaustive_splits(csr_container):
    """Loading an svmlight file in two halves split at every possible byte
    offset must reconstruct exactly the original X, y, and query_id.

    Fix: restored the truncated ``@pytest.mark.parametrize`` decorator line.
    """
    rng = np.random.RandomState(0)
    X = np.array([[0, 0, 0, 0, 0, 0], [1, 2, 3, 4, 0, 6], [1, 2, 3, 4, 0, 6], [0, 0, 0, 0, 0, 0], [1, 0, 3, 0, 0, 0], [0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0]])
    X = csr_container(X)
    (n_samples, n_features) = X.shape
    y = rng.randint(low=0, high=2, size=n_samples)
    query_id = (np.arange(n_samples) // 2)
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id)
    f.seek(0)
    size = len(f.getvalue())
    # Split at every byte boundary; the loader must snap to line boundaries.
    for mark in range(size):
        f.seek(0)
        (X_0, y_0, q_0) = load_svmlight_file(f, n_features=n_features, query_id=True, offset=0, length=mark)
        (X_1, y_1, q_1) = load_svmlight_file(f, n_features=n_features, query_id=True, offset=mark, length=(- 1))
        q_concat = np.concatenate([q_0, q_1])
        y_concat = np.concatenate([y_0, y_1])
        X_concat = sp.vstack([X_0, X_1])
        assert_array_almost_equal(y, y_concat)
        assert_array_equal(query_id, q_concat)
        assert_array_almost_equal(X.toarray(), X_concat.toarray())
def worker(gpu, ngpus_per_node, args):
    """Per-GPU entry point: build the (adversarial or base) trainer, set up
    its model and run environments, then train."""
    trainer_cls = AdvTrainer if args.adv else BaseTrainer
    model = trainer_cls(args)
    model.make_model_env(gpu, ngpus_per_node)
    model.make_run_env()
    model.train()
def get_criterion():
    """Build the per-task loss dictionary: semantic segmentation, depth
    regression, and the discriminator's BCE loss."""
    from ctrl.utils.loss_functions import BCELossSS, BerHuLossDepth, CrossEntropy2D
    return {
        'semseg': CrossEntropy2D(),
        'depth': BerHuLossDepth(),
        'disc_loss': BCELossSS(),
    }
def get_checkpoint_callback(fix_config, save_path) -> ModelCheckpoint:
    """Build a ModelCheckpoint that keeps the single best checkpoint (plus the
    last one) under ``save_path``, monitoring the configured metric."""
    filename_pattern = 'Best-{epoch:02d}-{val_loss:.4f}-{val_acc:.4f}'
    return ModelCheckpoint(
        dirpath=save_path,
        filename=filename_pattern,
        save_top_k=1,
        save_last=True,
        monitor=fix_config.monitor.metric,
        mode=fix_config.monitor.mode,
        save_weights_only=True,
        verbose=True,
    )
def get_index_mask(data, index, flattened_too=False, is_data_flattened=False):
    """Mask ``data`` to the lat/lon bounding box of the given region index."""
    lat_bounds, lon_bounds = get_region_bounds(index)
    return cord_mask(data, lat=lat_bounds, lon=lon_bounds,
                     flattened_too=flattened_too, is_flattened=is_data_flattened)
def type_to_python(typename, size=None):
    """Map a C++/ATen type name to its Python type-hint spelling.

    Sized ``IntArrayRef``/``TensorList`` arguments use the list-variant
    spelling. Raises KeyError for unknown type names.
    """
    typename = typename.replace(' ', '')
    if size is not None and typename in {'IntArrayRef', 'TensorList'}:
        typename = typename + '[]'
    mapping = {'Device': 'Device', 'Generator': 'Generator', 'IntegerTensor': 'Tensor', 'Scalar': 'Number', 'ScalarType': '_dtype', 'Storage': 'Storage', 'BoolTensor': 'Tensor', 'IndexTensor': 'Tensor', 'Tensor': 'Tensor', 'MemoryFormat': 'memory_format', 'IntArrayRef': '_size', 'IntArrayRef[]': 'Union[_int, _size]', 'TensorList': 'Union[Tuple[Tensor, ...], List[Tensor]]', 'TensorList[]': 'Union[Tensor, Tuple[Tensor, ...], List[Tensor]]', 'bool': '_bool', 'double': '_float', 'int64_t': '_int', 'accreal': 'Number', 'real': 'Number', 'void*': '_int', 'void': 'None', 'std::string': 'str', 'Dimname': 'Union[str, ellipsis, None]', 'DimnameList': 'Sequence[Union[str, ellipsis, None]]', 'QScheme': '_qscheme', 'ArrayRef<double>': 'Sequence[float]'}
    return mapping[typename]
def parse_args():
    """Parse command-line arguments for training a change detector.

    Handles the deprecated ``--options`` alias for ``--cfg-options`` and
    propagates ``--local_rank`` into the LOCAL_RANK environment variable
    for distributed launchers.
    """
    parser = argparse.ArgumentParser(description='Train a change detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--load-from', help='the checkpoint file to load weights from')
    parser.add_argument('--resume-from', help='the checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
    # --gpus / --gpu-ids / --gpu-id are mutually exclusive GPU selectors.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--gpus', type=int, help='(Deprecated, please use --gpu-id) number of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-id', type=int, default=0, help='id of gpu to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--diff_seed', action='store_true', help='Whether or not set different seeds for different ranks')
    parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='--options is deprecated in favor of --cfg_options\' and it will not be supported in version v0.22.0. Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--auto-resume', action='store_true', help='resume from the latest checkpoint automatically.')
    args = parser.parse_args()
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options and --cfg-options are aliases; reject both, map old to new.
    if (args.options and args.cfg_options):
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
        args.cfg_options = args.options
    return args
def as_markdown(value: Union[(List[str], Dict[(str, str)], str)]) -> Union[(str, Number)]:
    """Render a value as markdown.

    Lists and dicts are delegated to the dedicated helpers; strings and
    numbers are returned unchanged. Raises UnsupportedTypeError otherwise.

    Improvement: guard-clause form with a single combined isinstance check
    for the pass-through types (behavior unchanged).
    """
    if isinstance(value, list):
        return __as_markdown_list(value)
    if isinstance(value, dict):
        return __as_markdown_dict(value)
    if isinstance(value, (str, int, float)):
        return value
    raise UnsupportedTypeError(value)
class GBlock(ControlFlowScope):
    """Control-flow scope rendered with a 'gblock:' header line."""

    def as_string(self, indent: int=0):
        """Render the header at the given indent, then the scope's contents."""
        header = (indent * INDENTATION) + 'gblock:\n'
        return header + super().as_string(indent)
class Updater(object):
    """Drives one training step — forward, backward, optional gradient
    all-reduce, and solver update — with user callbacks around each phase.

    Fixes relative to the original:
      * the pre-forward callback loop invoked ``forward_callback_on_finish``
        (so "finish" ran twice and "start" never ran); it now invokes
        ``forward_callback_on_start``;
      * ``update`` referenced the undefined name ``grads`` instead of
        ``self.grads`` (NameError whenever ``comm`` was set);
      * the mutable default argument ``grads=[]`` is replaced by a ``None``
        sentinel (callers passing a list are unaffected).
    """

    def _force_to_list(self, x):
        """Wrap a single callback in a list; pass lists through unchanged."""
        if (type(x) is list):
            return x
        else:
            return [x]

    def __init__(self, solver=None, loss=None, data_feeder=(lambda: True),
                 forward_callback_on_start=(lambda i: True),
                 forward_callback_on_finish=(lambda i: True),
                 backward_callback_on_start=(lambda i: True),
                 backward_callback_on_finish=(lambda i: True),
                 comm_callback_on_start=(lambda i: True),
                 comm_callback_on_finish=(lambda i: True),
                 update_callback_on_start=(lambda i: True),
                 update_callback_on_finish=(lambda i: True),
                 clear_buffer=True, accum_grad=1, comm=None, grads=None):
        # solver/loss: objects providing zero_grad/update and forward/backward.
        # data_feeder: called once per accumulation step to feed inputs.
        # *_callback_on_*: callable or list of callables, invoked with the
        # iteration index around each phase.
        self.solver = solver
        self.loss = loss
        self.data_feeder = data_feeder
        self.forward_callback_on_start = self._force_to_list(forward_callback_on_start)
        self.forward_callback_on_finish = self._force_to_list(forward_callback_on_finish)
        self.backward_callback_on_start = self._force_to_list(backward_callback_on_start)
        self.backward_callback_on_finish = self._force_to_list(backward_callback_on_finish)
        self.comm_callback_on_start = self._force_to_list(comm_callback_on_start)
        self.comm_callback_on_finish = self._force_to_list(comm_callback_on_finish)
        self.update_callback_on_start = self._force_to_list(update_callback_on_start)
        self.update_callback_on_finish = self._force_to_list(update_callback_on_finish)
        self.clear_buffer = clear_buffer
        self.accum_grad = accum_grad
        self.comm = comm
        self.grads = [] if grads is None else grads

    def update(self, i):
        """Run one optimization step for iteration ``i``."""
        self.solver.zero_grad()
        # Accumulate gradients over accum_grad forward/backward passes.
        for _ in range(self.accum_grad):
            self.data_feeder()
            for callback in self.forward_callback_on_start:
                callback(i)
            self.loss.forward(clear_no_need_grad=self.clear_buffer)
            for callback in self.forward_callback_on_finish:
                callback(i)
            for callback in self.backward_callback_on_start:
                callback(i)
            self.loss.backward(clear_buffer=self.clear_buffer)
            for callback in self.backward_callback_on_finish:
                callback(i)
        # All-reduce accumulated gradients across workers, if configured.
        if (self.comm and (len(self.grads) != 0)):
            for callback in self.comm_callback_on_start:
                callback(i)
            self.comm.all_reduce(self.grads, division=False, inplace=False)
            for callback in self.comm_callback_on_finish:
                callback(i)
        for callback in self.update_callback_on_start:
            callback(i)
        self.solver.update()
        for callback in self.update_callback_on_finish:
            callback(i)
def expected_version(done_fwds, done_bwds, se) -> Tuple[(int, int)]:
    """Pair the current weight version with the expected staleness for stage ``se``."""
    version = my_version(done_bwds, se)
    staleness = expected_staleness(done_fwds, done_bwds, se)
    return (version, staleness)
def LF_definite_left_7_10(span, negex):
    """Labeling function: NEGATED when a 'definite' negation trigger occurs in
    the left context at a token distance of 7-10 from the span; ABSTAIN
    otherwise (or when pseudo-negation follows the trigger).

    Improvement: chained comparison for the distance window (idiom only,
    behavior unchanged).
    """
    left = get_left_span(span, span.sentence)
    trigger = match_regex(negex.rgxs['definite']['left'], left)
    if not trigger:
        return ABSTAIN
    dist = token_distance(trigger, span)
    # Pseudo-negation immediately after the trigger cancels the cue.
    right = get_right_span(trigger, window=2)
    if pseudo_negation.search(right.text):
        return ABSTAIN
    return NEGATED if 7 <= dist <= 10 else ABSTAIN
class SubwordSlotTokenizer(Tokenizer):
    """Tokenizer that interleaves sentencepiece subword ids with slot
    boundary ids (B-/E- markers) for joint ASR + slot-filling.

    Slot ids occupy the id range starting at ``len(self.spm)``.

    NOTE(review): ``load_from_file`` takes ``cls`` and ``vocab_size`` /
    ``token_type`` look like properties — the ``@classmethod``/``@property``
    decorators appear to have been stripped; confirm against the original.
    """

    def __init__(self, spm, slots):
        # The fixed id layout (pad=0, eos=1, unk=2) is required so slot ids
        # can be appended after the sentencepiece vocabulary.
        super().__init__()
        if ((spm.pad_id() != 0) or (spm.eos_id() != 1) or (spm.unk_id() != 2)):
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm
        self.slots = slots
        # Bidirectional maps between slot labels and their appended id range.
        self.slot2id = {self.slots[i]: (i + len(self.spm)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self.spm)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, sent: str, iobs: str) -> List[int]:
        """Encode a transcription plus word-aligned IOB slot tags into ids.

        Emits a B-<slot> id before the first word of each slot run and an
        E-<slot> id after its last word; ends with a single eos id.
        """
        sent = sent.strip('\r\n ')
        iobs = iobs.strip('\r\n ')
        sent = re.sub(' +', ' ', sent).strip(' ')
        sent = sent.split(' ')
        iobs = iobs.split(' ')
        assert (len(sent) == len(iobs)), f'transcription and iobs should have same number of words (split by space)'
        # Drop literal BOS/EOS marker words if present.
        if (sent[0] == 'BOS'):
            sent = sent[1:]
            iobs = iobs[1:]
        if (sent[(- 1)] == 'EOS'):
            sent = sent[:(- 1)]
            iobs = iobs[:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            # Slot run begins here: emit the B- marker.
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            encoded = self.spm.encode_as_ids(wrd)
            # spm is configured to append eos per word; strip it off.
            assert (encoded[(- 1)] == self.eos_idx)
            tokens += encoded[:(- 1)]
            # Slot run ends here: emit the E- marker.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
        assert (tokens[(- 1)] != self.eos_idx)
        tokens.append(self.eos_idx)
        return tokens

    def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
        """Decode ids back to text with slot markers as separate words.

        Stops at eos; optionally collapses adjacent repeated ids (CTC-style).
        """
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                crop_idx.append(idx)
        (sent, ret) = ([], [])
        for (i, x) in enumerate(crop_idx):
            if (x >= len(self.spm)):
                # Slot id: flush any buffered subwords before the marker.
                slot = self.id2slot[x]
                ret.append(slot)
                if (len(sent) > 0):
                    decoded = self.spm.decode_ids(sent)
                    ret.insert((- 1), decoded)
                    sent = []
            else:
                sent.append(x)
        return ' '.join(ret)

    def load_from_file(cls, filepath: str, slots_file: str):
        """Build a tokenizer from a sentencepiece model and a slots list file.

        NOTE(review): uses ``cls`` — presumably a stripped ``@classmethod``.
        """
        import sentencepiece as splib
        spm = splib.SentencePieceProcessor()
        spm.load(filepath)
        spm.set_encode_extra_options(':eos')
        org_slots = open(slots_file).read().split('\n')
        slots = []
        # Expand each non-'O' slot label into its B-/E- marker pair.
        for slot in [slot for slot in org_slots if (slot != 'O')]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(spm, slots)

    def __setstate__(self, state):
        # Re-apply encode options after unpickling (spm state is external).
        # NOTE(review): uses 'eos' here vs ':eos' in load_from_file — confirm
        # whether the difference is intentional.
        self.__dict__.update(state)
        self.spm.set_encode_extra_options('eos')

    def vocab_size(self) -> int:
        """Total vocabulary: subword ids plus appended slot ids."""
        return (len(self.spm) + len(self.slots))

    def token_type(self) -> str:
        """Identifier for this tokenization scheme."""
        return 'subword-slot'
class Resnet_Imb_CB_beta099999_ep100_cifar100_2():
    """Experiment config: ResNet18 on imbalanced CIFAR-100 trained for 100
    epochs with a class-balanced CE loss (beta=0.99999)."""

    def __init__(self):
        self.set_config()

    def set_config(self):
        """Set run naming and (optional) checkpoint path."""
        self.filename_head = (self.__class__.__name__ + '_')
        self.checkpoint_path = None

    def get_model(self):
        """Build a ResNet18 with 100 output classes."""
        model = resnet.ResNet18(num_classes=100)
        return model

    def get_dataset(self, return_target=True):
        """Build train/test CIFAR-100 datasets with augmentation.

        The train split is imbalanced: the first 50 classes keep all samples
        and the last 50 keep 5% (usage_rate below).
        """
        DOWNLOAD = False
        tr_transformer = alb.Compose([albtr.Flip(p=0.5), albtr.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=15, p=0.5), albtr.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)), albToTensor()])
        ts_transformer = alb.Compose([albtr.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)), albToTensor()])
        # Per-class sampling rates: 1.0 for classes 0-49, 0.05 for 50-99.
        usage_rate = (((1,) * 50) + ((0.05,) * 50))
        seed = 2020
        (tr_ds, tr_tg) = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer, usage_rate, seed, return_target)
        (ts_ds, ts_tg) = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer, None, None, return_target)
        if return_target:
            return (tr_ds, ts_ds, tr_tg, ts_tg)
        else:
            return (tr_ds, ts_ds)

    def train_model(self, use_checkpoint=False, fine_turning=False):
        """Train the model, optionally resuming from a checkpoint.

        ``fine_turning`` (sic — likely "fine_tuning"; name kept for interface
        compatibility) restarts the epoch counter and skips loading the
        optimizer state.
        """
        (tr_ds, ts_ds, tr_tg, ts_tg) = self.get_dataset(return_target=True)
        if use_checkpoint:
            CP = get_checkpoint(self.checkpoint_path)
        else:
            CP = None
        model = self.get_model()
        if (CP is not None):
            model.load_state_dict(CP['state_dict'])
        TR_BATCH_SIZE = 128
        TS_BATCH_SIZE = 512
        tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
        ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
        LR = 0.1
        opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=0.0005)
        if (CP is not None):
            if (not fine_turning):
                opt.load_state_dict(CP['optimizer'])
        # Class-balanced CE weighted by the (imbalanced) label distributions.
        tr_criterion = cb_loss.ClassBalanced_CELoss(tr_tg, 100, beta=0.99999)
        vl_criterion = cb_loss.ClassBalanced_CELoss(ts_tg, 100, beta=0.99999)
        grad_accum_steps = 1
        start_epoch = (0 if ((CP is None) or fine_turning) else CP['epoch'])
        EPOCHS = 100
        warmup_epoch = 0
        # LR decays x0.1 at epochs 51, 86, 101.
        step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[51, 86, 101], gamma=0.1)
        model = training.train_model(model, tr_loader, ts_loader, opt, tr_criterion, vl_criterion, grad_accum_steps, start_epoch, EPOCHS, warmup_epoch, step_scheduler, self.filename_head, use_yoto=False)
        return
def get_numpy_iterator(train: NumpyOrSparse, valid: Optional[NumpyOrSparse]=None, n_folds: Optional[int]=None, iterator: Optional[CustomIdxs]=None) -> Union[(FoldsIterator, HoldoutIterator, CustomIterator, DummyIterator)]:
    """Pick the train/validation iterator matching the supplied arguments:
    explicit holdout set > custom index iterator > dataset folds > no split."""
    if valid is not None:
        return HoldoutIterator(train, valid)
    if iterator is not None:
        return CustomIterator(train, iterator)
    if train.folds is not None:
        return FoldsIterator(train, n_folds)
    return DummyIterator(train)
class TargetPlatformModel(ImmutableClass):
    """Immutable description of a target platform's quantization capabilities:
    a default quantization config, named operator sets, and fusing patterns.

    Intended to be used as a context manager; components register themselves
    via ``append_component`` while the model is current.
    """

    def __init__(self, default_qco: QuantizationConfigOptions, name='default_tp_model'):
        super().__init__()
        self.name = name
        self.operator_set = []
        assert isinstance(default_qco, QuantizationConfigOptions)
        # The default options must be unambiguous (exactly one config).
        assert (len(default_qco.quantization_config_list) == 1), f'Default QuantizationConfigOptions must contain only one option'
        self.default_qco = default_qco
        self.fusing_patterns = []
        self.is_simd_padding = False

    def get_config_options_by_operators_set(self, operators_set_name: str) -> QuantizationConfigOptions:
        """Return the options of the named operator set, or the global default
        options when no set with that name is registered."""
        for op_set in self.operator_set:
            if (operators_set_name == op_set.name):
                return op_set.qc_options
        return get_default_quantization_config_options()

    def get_default_op_quantization_config(self) -> OpQuantizationConfig:
        """Return the single default per-op quantization config.

        NOTE(review): duplicates ``get_default_config`` below, and the assert
        message reads from ``get_current_tp_model()`` rather than ``self`` —
        confirm which accessor is canonical.
        """
        assert (len(self.default_qco.quantization_config_list) == 1), f'Default quantization configuration options must contain only one option, but found {len(get_current_tp_model().default_qco.quantization_config_list)} configurations.'
        return self.default_qco.quantization_config_list[0]

    def is_opset_in_model(self, opset_name: str) -> bool:
        """True iff an operator set with this name is registered."""
        return (opset_name in [x.name for x in self.operator_set])

    def get_opset_by_name(self, opset_name: str) -> OperatorsSetBase:
        """Return the named operator set, or None when absent.

        Asserts that operator-set names are unique.
        """
        opset_list = [x for x in self.operator_set if (x.name == opset_name)]
        assert (len(opset_list) <= 1), f'Found more than one OperatorsSet in TargetPlatformModel with the name {opset_name}. OperatorsSet name must be unique.'
        if (len(opset_list) == 0):
            return None
        return opset_list[0]

    def append_component(self, tp_model_component: TargetPlatformModelComponent):
        """Register a component (Fusing pattern or OperatorsSet) on the model."""
        if isinstance(tp_model_component, Fusing):
            self.fusing_patterns.append(tp_model_component)
        elif isinstance(tp_model_component, OperatorsSetBase):
            self.operator_set.append(tp_model_component)
        else:
            raise Exception(f'Trying to append an unfamiliar TargetPlatformModelComponent of type: {type(tp_model_component)}')

    def __enter__(self):
        # Make this model the "current" one so components can self-register.
        _current_tp_model.set(self)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Re-raise any error from the with-block; otherwise validate, detach
        # from the current-model slot, and freeze the instance.
        if (exc_value is not None):
            print(exc_value, exc_value.args)
            raise exc_value
        self.__validate_model()
        _current_tp_model.reset()
        self.initialized_done()
        return self

    def __validate_model(self):
        """Ensure registered operator-set names are unique."""
        opsets_names = [op.name for op in self.operator_set]
        if (len(set(opsets_names)) != len(opsets_names)):
            Logger.error(f'OperatorsSet must have unique names')

    def get_default_config(self) -> OpQuantizationConfig:
        """Return the single default per-op quantization config."""
        assert (len(self.default_qco.quantization_config_list) == 1), f'Default quantization configuration options must contain only one option, but found {len(self.default_qco.quantization_config_list)} configurations.'
        return self.default_qco.quantization_config_list[0]

    def get_info(self) -> Dict[(str, Any)]:
        """Summarize the model (name, default config, op sets, fusings)."""
        return {'Model name': self.name, 'Default quantization config': self.get_default_config().get_info(), 'Operators sets': [o.get_info() for o in self.operator_set], 'Fusing patterns': [f.get_info() for f in self.fusing_patterns]}

    def show(self):
        """Pretty-print the model summary, preserving insertion order."""
        pprint.pprint(self.get_info(), sort_dicts=False)

    def set_simd_padding(self, is_simd_padding: bool):
        """Set whether the platform expects SIMD-aligned padding."""
        self.is_simd_padding = is_simd_padding
def check_output_dir(training_args):
    """Validate the output directory before training starts.

    Raises ValueError when the directory already holds files but no resumable
    checkpoint; logs a resume notice when a checkpoint is found and no explicit
    resume path was given. Returns training_args unchanged.
    """
    needs_inspection = (os.path.isdir(training_args.output_dir)
                       and training_args.do_train
                       and not training_args.overwrite_output_dir)
    if needs_inspection:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None:
            # No checkpoint: any pre-existing content would be clobbered, so refuse.
            if len(os.listdir(training_args.output_dir)) > 0:
                raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif training_args.resume_from_checkpoint is None:
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    return training_args
@pytest.mark.parametrize('observation_shape', [((100,), (200,))])
@pytest.mark.parametrize('batch_size', [32])
def test_tuple_observation_scaler_with_transition_picker(observation_shape: Shape, batch_size: int) -> None:
    """End-to-end check of TupleObservationScaler fitted via a transition picker.

    Verifies the scaler reports built only after fitting, serialization
    round-trips, transform/transform_numpy delegate to the per-element child
    scalers, and reverse_transform(s) invert the forward transforms.

    BUGFIX: the two parametrize lines were detached from the function (bare
    `.parametrize(...)` expressions, a syntax error); restored as decorators.
    """
    observations = create_observations(observation_shape, batch_size)
    actions = np.random.random((batch_size, 1))
    rewards: Float32NDArray = np.random.random(batch_size).astype(np.float32)
    terminals: Float32NDArray = np.zeros(batch_size, dtype=np.float32)
    terminals[(- 1)] = 1.0  # mark the final step as the episode end
    episodes = EpisodeGenerator(observations=observations, actions=actions, rewards=rewards, terminals=terminals)()
    scaler = TupleObservationScaler([MinMaxObservationScaler(), MinMaxObservationScaler()])
    assert (not scaler.built)
    scaler.fit_with_transition_picker(episodes, BasicTransitionPicker())
    assert scaler.built
    # Serialization must round-trip without raising.
    TupleObservationScaler.deserialize_from_dict(scaler.serialize_to_dict())
    torch_observations = convert_to_torch_recursively(observations, 'cpu')
    transformed_observations = scaler.transform(torch_observations)
    transformed_observations_numpy = scaler.transform_numpy(observations)
    for i in range(len(observation_shape)):
        # The tuple scaler must delegate element-wise to its child scalers.
        assert torch.allclose(transformed_observations[i], scaler.observation_scalers[i].transform(torch_observations[i]))
        assert np.allclose(transformed_observations_numpy[i], scaler.observation_scalers[i].transform_numpy(observations[i]))
    reversed_observations = scaler.reverse_transform(transformed_observations)
    reversed_observations_numpy = scaler.reverse_transform_numpy(transformed_observations_numpy)
    for i in range(len(observation_shape)):
        # Reverse transform must recover the original observations.
        assert torch.allclose(reversed_observations[i], torch_observations[i])
        assert np.allclose(reversed_observations_numpy[i], observations[i])
def main():
    """Run an SIS diffusion simulation on the AS-733 graph and plot the results."""
    graph = as_733()
    simulation_params = {
        'model': 'SIS',
        'b': 0.001,
        'd': 0.01,
        'c': 1,
        'runs': 1,
        'steps': 5000,
        'seed': 1,
        'diffusion': 'min',
        'method': 'ns_node',
        'k': 5,
        'plot_transition': True,
        'gif_animation': True,
        'edge_style': 'bundled',
        'node_style': 'force_atlas',
        'fa_iter': 20,
    }
    diffusion = Diffusion(graph, **simulation_params)
    diffusion.plot_results(diffusion.run_simulation())
class MomentumUpdaterHook(Hook):
    """Base hook that updates optimizer momentum each epoch/iteration.

    Supports an optional warmup phase ('constant', 'linear' or 'exp') over the
    first ``warmup_iters`` iterations, and optimizers supplied either directly
    or as a dict of optimizers.

    Args:
        by_epoch (bool): update momentum per epoch (True) or per iteration (False).
        warmup (str | None): warmup mode; one of 'constant', 'linear', 'exp'.
        warmup_iters (int): number of iterations the warmup lasts.
        warmup_ratio (float): warmup starts from regular_momentum / warmup_ratio;
            must lie in (0, 1].
    """

    def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.9):
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # BUGFIX: the old message omitted 'exp' even though it is accepted.
                raise ValueError(f'"{warmup}" is not a supported type for warming up, valid types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, '"warmup_iters" must be a positive integer'
            # BUGFIX: message previously named a non-existent "warmup_momentum" parameter.
            assert 0 < warmup_ratio <= 1.0, '"warmup_ratio" must be in range (0,1]'
        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.base_momentum = []      # per-group initial momentum (list, or dict of lists)
        self.regular_momentum = []   # kept for backward compatibility with subclasses

    def _set_momentum(self, runner, momentum_groups):
        # Write momentum back into each param group; Adam-style optimizers keep
        # it as the first element of 'betas' instead of a 'momentum' key.
        if isinstance(runner.optimizer, dict):
            for (k, optim) in runner.optimizer.items():
                for (param_group, mom) in zip(optim.param_groups, momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for (param_group, mom) in zip(runner.optimizer.param_groups, momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        # Scheduling policy; concrete hooks (step/cyclic/...) implement this.
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        # Evaluate the schedule for every param group (per optimizer when dict).
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [self.get_momentum(runner, _base_momentum) for _base_momentum in self.base_momentum[k]]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [self.get_momentum(runner, _base_momentum) for _base_momentum in self.base_momentum]

    def get_warmup_momentum(self, cur_iters):
        def _get_warmup_momentum(cur_iters, regular_momentum):
            # BUGFIX: this helper previously ignored its `regular_momentum`
            # argument and read self.regular_momentum / self.regular_mom
            # directly. That broke 'constant' warmup (self.regular_momentum is
            # never updated after __init__, so it stayed an empty list) and
            # every mode for dict optimizers (self.regular_mom is a dict there).
            if self.warmup == 'constant':
                return [_momentum / self.warmup_ratio for _momentum in regular_momentum]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)
                return [_momentum / (1 - k) for _momentum in regular_momentum]
            elif self.warmup == 'exp':
                k = self.warmup_ratio ** (1 - cur_iters / self.warmup_iters)
                return [_momentum / k for _momentum in regular_momentum]

        # BUGFIX: dispatch on self.regular_mom (the schedule computed in
        # before_train_epoch/iter), not the stale self.regular_momentum.
        if isinstance(self.regular_mom, dict):
            momentum_groups = {}
            for (key, regular_momentum) in self.regular_mom.items():
                momentum_groups[key] = _get_warmup_momentum(cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_mom)

    def before_run(self, runner):
        # Record each group's initial momentum so schedules have a fixed base;
        # setdefault keeps 'initial_momentum' stable across checkpoint resume.
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for (k, optim) in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [group['initial_momentum'] for group in optim.param_groups]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
                    group.setdefault('initial_momentum', group['momentum'])
                else:
                    group.setdefault('initial_momentum', group['betas'][0])
            self.base_momentum = [group['initial_momentum'] for group in runner.optimizer.param_groups]

    def before_train_epoch(self, runner):
        if not self.by_epoch:
            return
        self.regular_mom = self.get_regular_momentum(runner)
        self._set_momentum(runner, self.regular_mom)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_mom = self.get_regular_momentum(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_momentum(runner, self.regular_mom)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
        elif self.by_epoch:
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                # Warmup just ended: restore the regular epoch-wise momentum.
                self._set_momentum(runner, self.regular_mom)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
def _tensorviewer_from_parmap(par_map, batch_size):
    """Build a tensorviewer whose fields follow the slice order declared in par_map.

    Parameter sets are sorted by the start offset of their slices so the
    resulting view layout matches the flattened parameter tensor.
    """
    ordered = sorted(par_map.items(), key=lambda item: item[1]['slice'].start)
    names = tuple(paramset_name for (paramset_name, _) in ordered)
    slices = tuple(spec['slice'] for (_, spec) in ordered)
    return _tensorviewer_from_slices(slices, names, batch_size)
class Matcher(object):
    """Matches candidate versions against a requirement string such as 'foo (>=1.0,<2.0)'.

    A requirement is parsed into ``(operator, constraint, prefix)`` triples;
    ``match`` evaluates a candidate version against every triple. Subclasses
    must set ``version_class`` to the concrete version type used for parsing
    and comparison.
    """
    # Concrete subclasses set this to a version type (e.g. a Version subclass).
    version_class = None
    # Predicate table keyed by operator token. Each predicate receives
    # (candidate version, constraint, prefix flag). '<=', '>=', and '~=' are
    # written as '== or </>' so version_class only needs __eq__/__lt__/__gt__.
    _operators = {'<': (lambda v, c, p: (v < c)), '>': (lambda v, c, p: (v > c)), '<=': (lambda v, c, p: ((v == c) or (v < c))), '>=': (lambda v, c, p: ((v == c) or (v > c))), '==': (lambda v, c, p: (v == c)), '===': (lambda v, c, p: (v == c)), '~=': (lambda v, c, p: ((v == c) or (v > c))), '!=': (lambda v, c, p: (v != c))}
    def parse_requirement(self, s):
        # Indirection point so subclasses can substitute their own parser.
        return parse_requirement(s)
    def __init__(self, s):
        # Parse the requirement string and precompute the constraint triples.
        if (self.version_class is None):
            raise ValueError('Please specify a version class')
        self._string = s = s.strip()
        r = self.parse_requirement(s)
        if (not r):
            raise ValueError(('Not valid: %r' % s))
        self.name = r.name
        self.key = self.name.lower()
        clist = []
        if r.constraints:
            for (op, s) in r.constraints:
                # '1.2.*' prefix constraints are only legal with == / !=; the
                # constraint is kept as a *string* with prefix=True in that case.
                if s.endswith('.*'):
                    if (op not in ('==', '!=')):
                        raise ValueError(("'.*' not allowed for %r constraints" % op))
                    (vn, prefix) = (s[:(- 2)], True)
                    # Parse purely to validate that the prefix is a well-formed version.
                    self.version_class(vn)
                else:
                    (vn, prefix) = (self.version_class(s), False)
                clist.append((op, vn, prefix))
        self._parts = tuple(clist)
    def match(self, version):
        """Return True if `version` (string or version_class instance) satisfies every constraint."""
        if isinstance(version, string_types):
            version = self.version_class(version)
        for (operator, constraint, prefix) in self._parts:
            f = self._operators.get(operator)
            # Operators may also be registered as method-name strings; resolve them.
            if isinstance(f, string_types):
                f = getattr(self, f)
            if (not f):
                msg = ('%r not implemented for %s' % (operator, self.__class__.__name__))
                raise NotImplementedError(msg)
            if (not f(version, constraint, prefix)):
                return False
        return True
    def exact_version(self):
        # The pinned version when the matcher is exactly one '=='/'===' clause, else None.
        result = None
        if ((len(self._parts) == 1) and (self._parts[0][0] in ('==', '==='))):
            result = self._parts[0][1]
        return result
    def _check_compatible(self, other):
        # Equality is only meaningful between matchers of the same type and name.
        if ((type(self) != type(other)) or (self.name != other.name)):
            raise TypeError(('cannot compare %s and %s' % (self, other)))
    def __eq__(self, other):
        self._check_compatible(other)
        return ((self.key == other.key) and (self._parts == other._parts))
    def __ne__(self, other):
        return (not self.__eq__(other))
    def __hash__(self):
        return (hash(self.key) + hash(self._parts))
    def __repr__(self):
        return ('%s(%r)' % (self.__class__.__name__, self._string))
    def __str__(self):
        return self._string
class global_state():
    """Mutable container for state shared across the application."""

    def __init__(self):
        # Lazily-populated objects and caches all start out empty.
        self.graph = self.analysis_data = None
        self.figure_cache = self.dist_cache = self.weight_cache = None
        self.draggable = None
        self.zIndex = 50
        # MLIR dumps and current input text are empty strings until populated.
        self.f32_mlir = ''
        self.quant_mlir = ''
        self.input = ''
        self.manual_run = False
def load_cython(name):
    """Compile a Cython file and return an executable 'from <mod> import *' line.

    The compiled module's build directory is appended to sys.path so the
    returned import statement can be executed by the caller.
    """
    import sys
    from sage.misc.cython import cython
    module_name, build_dir = cython(name, compile_message=True, use_cache=True)
    sys.path.append(build_dir)
    return 'from {} import *'.format(module_name)
def create_distiller(opt, verbose=True):
    """Instantiate the distiller class selected by ``opt.distiller``.

    Logs the created class name when verbose is True, and returns the instance.
    """
    distiller_cls = find_distiller_using_name(opt.distiller)
    instance = distiller_cls(opt)
    if verbose:
        print('distiller [%s] was created' % type(instance).__name__)
    return instance
def train(args, model, train_loader, eval_loader, num_epochs, output, opt=None, s_epoch=0):
    # Main training loop: gradient accumulation (update_freq), LR warmup then
    # step decay, per-epoch logging, optional evaluation, and checkpointing.
    # NOTE(review): `best_model` is assigned but never returned or used, and
    # the function returns None -- confirm callers rely only on the files
    # written under `output`.
    device = args.device
    lr_default = args.lr
    lr_decay_step = 2
    lr_decay_rate = 0.75
    best_model = ''
    # NOTE(review): both branches of this conditional are identical.
    lr_decay_epochs = (range(10, 20, lr_decay_step) if (eval_loader is not None) else range(10, 20, lr_decay_step))
    gradual_warmup_steps = [(0.5 * lr_default), (1.0 * lr_default), (1.5 * lr_default), (2.0 * lr_default)]
    saving_epoch = 15
    grad_clip = args.clip_norm
    utils.create_dir(output)
    # Only parameters with requires_grad participate in optimization.
    optim = (torch.optim.Adamax(filter((lambda p: p.requires_grad), model.parameters()), lr=lr_default) if (opt is None) else opt)
    criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
    ae_criterion = torch.nn.MSELoss()
    logger = utils.Logger(os.path.join(output, 'log.txt'))
    logger.write(args.__repr__())
    utils.print_model(model, logger)
    logger.write(('optim: adamax lr=%.4f, decay_step=%d, decay_rate=%.2f, grad_clip=%.2f' % (lr_default, lr_decay_step, lr_decay_rate, grad_clip)))
    trainer = Trainer(args, model, criterion, optim, ae_criterion)
    update_freq = int(args.update_freq)
    wall_time_start = time.time()
    best_eval_score = 0
    for epoch in range(s_epoch, num_epochs):
        total_loss = 0
        train_score = 0
        total_norm = 0
        count_norm = 0
        num_updates = 0
        t = time.time()
        N = len(train_loader.dataset)
        num_batches = int(((N / args.batch_size) + 1))
        # LR schedule: warmup for the first few epochs, then step decay.
        if (epoch < len(gradual_warmup_steps)):
            trainer.optimizer.param_groups[0]['lr'] = gradual_warmup_steps[epoch]
            logger.write(('gradual warm up lr: %.4f' % trainer.optimizer.param_groups[0]['lr']))
        elif (epoch in lr_decay_epochs):
            trainer.optimizer.param_groups[0]['lr'] *= lr_decay_rate
            logger.write(('decreased lr: %.4f' % trainer.optimizer.param_groups[0]['lr']))
        else:
            logger.write(('lr: %.4f' % trainer.optimizer.param_groups[0]['lr']))
        for (i, (v, q, a, _, _, _)) in enumerate(train_loader):
            # Reshape the visual inputs for each enabled encoder branch.
            # NOTE(review): input layouts (84x84 maml, 128x128 autoencoder,
            # CLIP 288/250) are assumptions from these reshapes -- confirm
            # against the dataset loader.
            if args.maml:
                v[0] = v[0].reshape(v[0].shape[0], 84, 84).unsqueeze(1)
            if args.autoencoder:
                v[1] = v[1].reshape(v[1].shape[0], 128, 128).unsqueeze(1)
            if args.clip:
                if (args.clip_vision_encoder == 'RN50x4'):
                    v[2] = v[2].reshape(v[2].shape[0], 3, 288, 288)
                else:
                    v[2] = v[2].reshape(v[2].shape[0], 3, 250, 250)
            v[0] = v[0].to(device)
            v[1] = v[1].to(device)
            v[2] = v[2].to(device)
            q = q.to(device)
            a = a.to(device)
            sample = [v, q, a]
            # Gradient accumulation: only step the optimizer every update_freq
            # batches (and always on the last batch of the epoch).
            if ((i < (num_batches - 1)) and (((i + 1) % update_freq) > 0)):
                trainer.train_step(sample, update_params=False)
            else:
                (loss, grad_norm, batch_score) = trainer.train_step(sample, update_params=True)
                total_norm += grad_norm
                count_norm += 1
                total_loss += loss.item()
                train_score += batch_score
                num_updates += 1
                if ((num_updates % int((args.print_interval / update_freq))) == 0):
                    print('Iter: {}, Loss {:.4f}, Norm: {:.4f}, Total norm: {:.4f}, Num updates: {}, Wall time: {:.2f}, ETA: {}'.format((i + 1), (total_loss / (num_updates + 1)), grad_norm, total_norm, num_updates, (time.time() - wall_time_start), utils.time_since(t, (i / num_batches))))
        total_loss /= num_updates
        train_score = ((100 * train_score) / (num_updates * args.batch_size))
        if (eval_loader is not None):
            print('Evaluating...')
            # Switch to eval mode for validation, then back to train mode.
            trainer.model.train(False)
            (eval_score, bound) = evaluate(model, eval_loader, args)
            trainer.model.train(True)
        logger.write(('epoch %d, time: %.2f' % (epoch, (time.time() - t))))
        logger.write(('\ttrain_loss: %.2f, norm: %.4f, score: %.2f' % (total_loss, (total_norm / count_norm), train_score)))
        if (eval_loader is not None):
            logger.write(('\teval score: %.2f (%.2f)' % ((100 * eval_score), (100 * bound))))
        # Checkpoint every epoch past saving_epoch; track the best eval score.
        if (epoch >= saving_epoch):
            model_path = os.path.join(output, ('model_epoch%d.pth' % epoch))
            utils.save_model(model_path, model, epoch, trainer.optimizer)
            if ((eval_loader is not None) and (eval_score > best_eval_score)):
                model_path = os.path.join(output, 'model_epoch_best.pth')
                utils.save_model(model_path, model, epoch, trainer.optimizer)
                best_eval_score = eval_score
                best_model = model
class ModuleList(BaseModule, nn.ModuleList):
    """nn.ModuleList that also carries BaseModule's init_cfg-driven initialization.

    Args:
        modules: optional iterable of modules to register in the list.
        init_cfg: optional initialization config dict consumed by BaseModule.
    """
    def __init__(self, modules: Optional[Iterable]=None, init_cfg: Optional[dict]=None):
        # Initialize both bases explicitly: BaseModule first so init_cfg is
        # recorded before nn.ModuleList registers the child modules.
        BaseModule.__init__(self, init_cfg)
        nn.ModuleList.__init__(self, modules)
@_args('v', 'i', 'i', 'none')
def sort(g, self, dim, decending, out=None):
    """ONNX symbolic for sort, emitted as a TopK over the full dimension size.

    BUGFIX: the argument-spec line was detached from the function (a bare
    `_args(...)` call whose result was discarded); restored as a decorator.

    Note: `decending` is accepted for signature compatibility but is not
    encoded in the emitted op -- presumably TopK's default order matches;
    TODO confirm.
    """
    if out is not None:
        _unimplemented('Sort', 'Out parameter is not supported for sort')
    if not self.isCompleteTensor():
        # A static shape is required to size k for TopK.
        return _unimplemented('Sort', 'input size not accessible')
    return g.op('TopK', self, k_i=self.type().sizes()[dim], axis_i=dim, outputs=2)
@_module()
class NASFCOS(SingleStageDetector):
    """NAS-FCOS single-stage detector: a thin wrapper over SingleStageDetector.

    BUGFIX: the registration call was detached from the class (a bare
    `_module()` expression whose result was discarded); restored as a decorator
    so the detector is actually registered.
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Pure pass-through: all construction is handled by the base class.
        super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
def create_crefs(refs):
    """Apply cook_refs to every reference group in *refs* and collect the results."""
    return [cook_refs(ref) for ref in refs]
def repro_fig_4(gpu=None, interp='bicubic',
                prototxt_path='/home/ruthfong/packages/caffe/models/bvlc_googlenet/deploy_force_backward.prototxt',
                weights_path='/home/ruthfong/packages/caffe/models/bvlc_googlenet/bvlc_googlenet.caffemodel',
                img_path='/home/ruthfong/neural_coding/fnn_images/zeb-ele1.jpg'):
    """Show excitation-backprop heatmaps for the zebra and elephant classes on one image.

    Renders the input image next to the two class-specific heatmaps in a
    single 1x3 figure.

    Args:
        gpu: optional GPU id forwarded to compute_heatmap.
        interp: interpolation used both to resize the maps and to display them.
        prototxt_path: GoogLeNet deploy prototxt (with force_backward enabled).
        weights_path: GoogLeNet caffemodel weights.
        img_path: image containing both classes.

    The path parameters generalize the previously hard-coded locations while
    keeping the original call signature backward-compatible.
    """
    net = caffe.Net(prototxt_path, weights_path, caffe.TEST)
    topName = 'loss3/classifier'
    bottomName = 'pool2/3x3_s2'
    zebra_i = 340     # ILSVRC class index used for 'zebra' -- TODO confirm
    elephant_i = 386  # ILSVRC class index used for 'elephant' -- TODO confirm
    transformer = get_ILSVRC_net_transformer(net)
    zebra_map = compute_heatmap(net=net, transformer=transformer, paths=img_path, labels=zebra_i, heatmap_type='excitation_backprop', topBlobName=topName, topLayerName=topName, outputBlobName=bottomName, outputLayerName=bottomName, gpu=gpu)
    elephant_map = compute_heatmap(net=net, transformer=transformer, paths=img_path, labels=elephant_i, heatmap_type='excitation_backprop', topBlobName=topName, topLayerName=topName, outputBlobName=bottomName, outputLayerName=bottomName, gpu=gpu)
    img = caffe.io.load_image(img_path)
    pylab.rcParams['figure.figsize'] = (12.0, 12.0)
    (f, ax) = plt.subplots(1, 3)
    ax[0].imshow(img)
    ax[1].imshow(overlay_map(img, zebra_map, overlay=False, interp=interp), interpolation=interp)
    ax[2].imshow(overlay_map(img, elephant_map, overlay=False, interp=interp), interpolation=interp)
class TestCost(unittest.TestCase):
    """Tests for Cost: constructor validation and mem_hier accessors."""

    @staticmethod
    def _make_cost(**overrides):
        # Canonical valid arguments; individual tests override one field at a time.
        kwargs = dict(mac_op=1, mem_hier=(200, 6, 2, 1), noc_hop=10, idl_unit=0)
        kwargs.update(overrides)
        return Cost(**kwargs)

    def test_valid_args(self):
        cost = self._make_cost()
        self.assertEqual(cost.mac_op, 1, 'mac_op')
        self.assertEqual(cost.mem_hier, (200, 6, 2, 1), 'mem_hier')
        self.assertEqual(cost.noc_hop, 10, 'noc_hop')
        self.assertEqual(cost.idl_unit, 0, 'idl_unit')

    def test_invalid_mac_op(self):
        with self.assertRaisesRegex(TypeError, 'Cost: .*mac_op.*'):
            _ = self._make_cost(mac_op=(1, 2))

    def test_invalid_mem_hier_type(self):
        # Neither scalars nor (mutable) lists are accepted for mem_hier.
        with self.assertRaisesRegex(TypeError, 'Cost: .*mem_hier.*'):
            _ = self._make_cost(mem_hier=200)
        with self.assertRaisesRegex(TypeError, 'Cost: .*mem_hier.*'):
            _ = self._make_cost(mem_hier=[200, 6, 2, 1])

    def test_invalid_mem_hier_len(self):
        with self.assertRaisesRegex(ValueError, 'Cost: .*mem_hier.*'):
            _ = self._make_cost(mem_hier=(200, 6))

    def test_invalid_noc_hop(self):
        with self.assertRaisesRegex(TypeError, 'Cost: .*noc_hop.*'):
            _ = self._make_cost(noc_hop=[10, 10])

    def test_invalid_idl_unit(self):
        with self.assertRaisesRegex(TypeError, 'Cost: .*idl_unit.*'):
            _ = self._make_cost(idl_unit=set([1, 2]))

    def test_mem_hier_at(self):
        cost = self._make_cost()
        self.assertEqual(cost.mem_hier_at(me.DRAM), 200, 'mem_hier: DRAM')
        self.assertEqual(cost.mem_hier_at(me.GBUF), 6, 'mem_hier: GBUF')
        self.assertEqual(cost.mem_hier_at(me.ITCN), 2, 'mem_hier: ITCN')
        self.assertEqual(cost.mem_hier_at(me.REGF), 1, 'mem_hier: REGF')

    def test_mem_hier_at_error(self):
        # Out-of-range or None levels yield None rather than raising.
        cost = self._make_cost()
        self.assertIsNone(cost.mem_hier_at(me.NUM))
        self.assertIsNone(cost.mem_hier_at(None))
class HParams(tf_contrib.training.HParams):
    """HParams extended with delete/pop helpers and JSON file saving."""
    def del_hparam(self, name):
        # Remove a hyperparameter and its recorded type, if present.
        if hasattr(self, name):
            delattr(self, name)
            del self._hparam_types[name]
    def pop_hparam(self, name):
        # Return the value of `name` and remove it from this HParams object.
        value = getattr(self, name)
        self.del_hparam(name)
        return value
    def get_hparam(self, name):
        # Plain attribute lookup; raises AttributeError for unknown names.
        value = getattr(self, name)
        return value
    def save_to_file(self, filename):
        # NOTE(review): to_json() already returns a JSON string, so json.dump
        # writes it as a quoted/escaped string (double-encoded) -- confirm
        # the loader expects this format.
        with tf.io.gfile.GFile(filename, 'w') as f:
            json.dump(self.to_json(), f)
class TextCNN(nn.Module):
    """CNN sentence encoder: embedding -> parallel conv banks -> max-pool -> linear projection.

    Args:
        vocab_dict: token-to-index mapping; its size fixes the embedding table.
        glove_file: optional GloVe file used to initialize the embeddings.
        emb_dim: dimensionality of the final sentence embedding.
        dropout_p: dropout probability before the output projection.
        word_embed_dim: dimensionality of each word embedding.
    """

    def __init__(self, vocab_dict, glove_file=None, emb_dim=104, dropout_p=0.1, word_embed_dim=50):
        super(TextCNN, self).__init__()
        kernel_sizes = [3, 4, 5]
        in_channels = 1
        out_channels = 1000
        self.embed = nn.Embedding(len(vocab_dict), word_embed_dim)
        if glove_file:
            # Replace the random embedding table with pretrained GloVe vectors.
            pretrained = load_glove_embeddings(glove_file, vocab_dict, embedding_dim=word_embed_dim)
            self.embed.weight = nn.Parameter(pretrained)
        # One conv bank per kernel size, each spanning the full embedding width.
        self.convs1 = nn.ModuleList(
            nn.Conv2d(in_channels, out_channels, (k, word_embed_dim)) for k in kernel_sizes
        )
        self.dropout = nn.Dropout(dropout_p)
        self.fc1 = nn.Linear(len(kernel_sizes) * out_channels, emb_dim)

    def forward(self, x):
        # (batch, seq) -> (batch, 1, seq, word_embed_dim)
        embedded = self.embed(x).unsqueeze(1)
        # Each bank: conv + ReLU, then squeeze the collapsed width dimension.
        conv_outs = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs1]
        # Global max-pool over time for each bank.
        pooled = [F.max_pool1d(c, c.size(2)).squeeze(2) for c in conv_outs]
        features = torch.cat(pooled, 1)
        return self.fc1(self.dropout(features))
def test_parser():
    """parse_args maps the three CLI file flags onto namespace attributes."""
    args = parse_args(['--predictions', 'predictions.tsv', '--ground-truth', 'ground_truth.tsv', '--metrics', 'metrics.tsv'])
    expected = {
        'predictions': 'predictions.tsv',
        'ground_truth': 'ground_truth.tsv',
        'metrics': 'metrics.tsv',
    }
    for attr, value in expected.items():
        actual = getattr(args, attr)
        assert actual is not None
        assert value == actual
def _make_unique_name(seen: Set[str], name: str, min_version: int=0):
assert (name is not None)
i = min_version
x = (('%s_%d' % (name, i)) if i else name)
while (x in seen):
i += 1
x = ('%s_%d' % (name, i))
seen.add(x)
return x |
def test_inclusive_policy_positive_examples_3(digraph, features_1d, labels):
    """positive_examples('2.1') flags exactly the expected nodes."""
    policy = InclusivePolicy(digraph, features_1d, labels)
    expected = [False, False, True, False, True, True, False, False]
    assert_array_equal(expected, policy.positive_examples('2.1'))
def create_pipeline_configuration(DEBUG=False, batch_size=32):
    """Return the hard-coded 8-stage pipeline-parallel partition config for T5.

    The tensor shapes below were traced with batch size 32; the loops at the
    end rewrite the batch dimension of every batched tensor descriptor to
    *batch_size*. When *DEBUG* is true every stage is placed on 'cpu' instead
    of its 'cuda:<stage>' device.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (CrossEntropyLoss, T5LayerNorm, Linear, StatelessEmbedding, Dropout, T5Block), 'model_inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2, 3]}, 'decoder_attention_mask': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [3, 4, 5, 6, 7]}, 'decoder_input_ids': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([32, 64]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [3, 4, 5, 6, 7]}, 'lm_labels': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [7]}}, 'model_outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([32, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_1': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[6]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_1': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[6]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_2': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_2': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_3': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_3': {'shape': torch.Size([32, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_4': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_4': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_4': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[3]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_4': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_4': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_4': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[3]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_5': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_5': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_5': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_5': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_5': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_5': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_6': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_6': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_6': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[13]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_6': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_6': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_6': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[13]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_7': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_7': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_7': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[18]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([32, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'lm_labels': {'shape': torch.Size([32, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_7': {'shape': torch.Size([32, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_7': {'shape': torch.Size([32, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_7': {'shape': torch.Size([32, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[18]': {'shape': torch.Size([32, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}}
    # Rewrite the batch dimension of every batched model input/output to the
    # requested batch size.
    batch_dim = config['batch_dim']
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    # Same rewrite for each stage's input/output descriptors.
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
class AutoProcessor():
    """Factory that resolves and loads the right processor for a checkpoint.

    Never instantiate directly; use ``AutoProcessor.from_pretrained(...)``.
    """

    def __init__(self):
        # Direct construction is forbidden: this class is a pure factory.
        raise EnvironmentError('AutoProcessor is designed to be instantiated using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method.')

    # NOTE(review): bare call at class scope -- upstream transformers applies
    # this as a decorator on from_pretrained; the `@` appears to have been
    # lost in a code transformation. Confirm against the original source.
    _list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)

    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Resolve the processor class for the checkpoint and load it.

        Resolution order: preprocessor config (image-processor dict, then
        feature-extractor dict), tokenizer config, then the model config;
        finally falls back to AutoTokenizer / AutoImageProcessor /
        AutoFeatureExtractor in turn.

        NOTE(review): takes ``cls`` but carries no @classmethod decorator
        here -- presumably stripped by the same transformation; confirm.
        """
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', False)
        kwargs['_from_auto'] = True
        processor_class = None
        processor_auto_map = None
        # Forward only the kwargs that get_file_from_repo understands.
        get_file_from_repo_kwargs = {key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if (key in kwargs)}
        # 1) Look for a processor class in the preprocessor config file.
        preprocessor_config_file = get_file_from_repo(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs)
        if (preprocessor_config_file is not None):
            (config_dict, _) = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
            processor_class = config_dict.get('processor_class', None)
            if ('AutoProcessor' in config_dict.get('auto_map', {})):
                processor_auto_map = config_dict['auto_map']['AutoProcessor']
        # 2) Retry via the feature-extractor dict if nothing was found.
        if ((preprocessor_config_file is not None) and (processor_class is None)):
            (config_dict, _) = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
            processor_class = config_dict.get('processor_class', None)
            if ('AutoProcessor' in config_dict.get('auto_map', {})):
                processor_auto_map = config_dict['auto_map']['AutoProcessor']
        # 3) Fall back to the tokenizer config file.
        if (processor_class is None):
            tokenizer_config_file = get_file_from_repo(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs)
            if (tokenizer_config_file is not None):
                with open(tokenizer_config_file, encoding='utf-8') as reader:
                    config_dict = json.load(reader)
                processor_class = config_dict.get('processor_class', None)
                if ('AutoProcessor' in config_dict.get('auto_map', {})):
                    processor_auto_map = config_dict['auto_map']['AutoProcessor']
        # 4) Last source of truth: the model config itself.
        if (processor_class is None):
            if (not isinstance(config, PretrainedConfig)):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
            processor_class = getattr(config, 'processor_class', None)
            if (hasattr(config, 'auto_map') and ('AutoProcessor' in config.auto_map)):
                processor_auto_map = config.auto_map['AutoProcessor']
        if (processor_class is not None):
            if (processor_auto_map is not None):
                # Custom processor code hosted on the Hub requires opt-in.
                if (not trust_remote_code):
                    raise ValueError(f'Loading {pretrained_model_name_or_path} requires you to execute the feature extractor file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
                processor_class = get_class_from_dynamic_module(processor_auto_map, pretrained_model_name_or_path, **kwargs)
            else:
                processor_class = processor_class_from_name(processor_class)
            return processor_class.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
        # 5) Map the config type directly to a registered processor class.
        if (type(config) in PROCESSOR_MAPPING):
            return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
        # 6) Best-effort fallbacks: tokenizer, image processor, feature extractor.
        try:
            return AutoTokenizer.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
        except Exception:
            try:
                return AutoImageProcessor.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
            except Exception:
                pass
            try:
                return AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
            except Exception:
                pass
        raise ValueError(f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a tokenizer, an image processor or a feature extractor for this model. Make sure the repository containsthe files of at least one of those processing classes.")

    def register(config_class, processor_class):
        """Register *processor_class* as the processor for *config_class*.

        NOTE(review): no self/cls parameter -- upstream this is a
        @staticmethod; the decorator appears stripped here. Confirm.
        """
        PROCESSOR_MAPPING.register(config_class, processor_class)
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.0):
    """Build a diffusion beta schedule by name.

    'linear' returns an evenly spaced float64 schedule whose endpoints are
    scaled relative to a 1000-step reference; 'cosine' delegates to
    betas_for_alpha_bar. Raises NotImplementedError for unknown names.
    """
    if schedule_name == 'linear':
        # Endpoints are calibrated for 1000 steps and rescaled otherwise.
        scale = scale_betas * 1000 / num_diffusion_timesteps
        return np.linspace(scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64)
    if schedule_name == 'cosine':
        alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
        return betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar)
    raise NotImplementedError(f'unknown beta schedule: {schedule_name}')
class FiniteField_pari_ffelt(FiniteField):
    """Finite field GF(p^n) backed by PARI's ``ffelt`` elements.

    *modulus* is the defining polynomial over GF(p); its degree must be at
    least 2 (degree-1 fields are handled by the prime-field implementation).
    """

    def __init__(self, p, modulus, name=None):
        # degree of the extension
        n = modulus.degree()
        if (n < 2):
            raise ValueError('the degree must be at least 2')
        FiniteField.__init__(self, base=GF(p), names=name, normalize=True)
        self._modulus = modulus
        self._degree = n
        # PARI generator of the field, built from the named modulus.
        self._gen_pari = modulus._pari_with_name(self._names[0]).ffgen()
        self._zero_element = self.element_class(self, 0)
        self._one_element = self.element_class(self, 1)
        self._gen = self.element_class(self, self._gen_pari)
        # Lazily grown cache of Frobenius powers; see _pari_frobenius.
        self.__pari_frobenius_powers = []

    # Element class used for all field elements.
    Element = FiniteFieldElement_pari_ffelt

    def __reduce__(self):
        # Pickle through the factory that created this field.
        return self._factory_data[0].reduce_data(self)

    def gen(self, n=0):
        """Return the (unique) generator; only index 0 is valid."""
        if n:
            raise IndexError('only one generator')
        return self._gen

    def characteristic(self):
        """Characteristic p, taken from the prime base field."""
        return self.base_ring().characteristic()

    def degree(self):
        """Degree n of the extension over the prime field."""
        return self._degree

    def _pari_frobenius(self, k=1):
        """Return the PARI map for the k-th power of Frobenius (k != 0 mod n).

        Powers are cached: the first power comes from ``fffrobenius`` and
        higher powers are built by composing with it via ``ffcompomap``.
        """
        k = (k % self.degree())
        if (k == 0):
            raise ValueError('_pari_frobenius requires a non-zero exponent')
        g = self.gen()
        i = len(self.__pari_frobenius_powers)
        if (i == 0):
            # Seed the cache with Frobenius^1.
            self.__pari_frobenius_powers.append(g.__pari__().fffrobenius(1))
            i = 1
        f1 = self.__pari_frobenius_powers[0]
        while (i < k):
            i += 1
            fi = self.__pari_frobenius_powers[(- 1)].ffcompomap(f1)
            self.__pari_frobenius_powers.append(fi)
        return self.__pari_frobenius_powers[(k - 1)]
def corpus_bleu(sys_stream, ref_streams):
    """Corpus-level BLEU score with tokenization disabled."""
    return _corpus_bleu(sys_stream, ref_streams, tokenize='none').score
def mk_zimpl_input(dialog):
    """Write the ZIMPL data/input files for *dialog*; return the input path.

    Emits ./tmp/turn.dat (turn lengths/offsets and EDU->turn indices),
    ./tmp/mlast.dat (the "most recent utterance per speaker" matrix), and
    ./tmp/input.zpl (header parameters followed by the template body).
    """
    data_dir = './tmp'
    edus = dialog['edus']
    turn_len = []
    turn_off = []
    edu_ind = []
    c_off = 0
    # 1-based turn index for every EDU.
    for (i, edu) in enumerate(dialog['edus']):
        edu_ind.append((edu['turn'] + 1))
    # Group consecutive EDUs of the same turn, recording each run's
    # length and starting offset. (Reuses/shadows the loop index above.)
    i = 0
    while (i < len(edus)):
        j = i
        while ((j < len(edus)) and (edus[i]['turn'] == edus[j]['turn'])):
            j += 1
        turn_len.append((j - i))
        turn_off.append(c_off)
        c_off += (j - i)
        i = j
    data_path = fp.join(data_dir, 'turn.dat')
    with open(data_path, 'w') as f_data:
        f_data.write((pretty_data([turn_len, turn_off, edu_ind]) + '\n'))
    # Dense speaker ids in order of first appearance.
    speakers = {}
    for edu in edus:
        speakers[edu['speaker']] = len(speakers)
    # last_mat[p][i] == 1 iff EDU p is the most recent EDU of its speaker
    # at the moment EDU i occurs.
    last_mat = np.zeros((len(edus), len(edus)), dtype=int)
    current_last = {}
    for (i, edu) in enumerate(edus):
        for plast in current_last.values():
            last_mat[plast][i] = 1
        try:
            current_last[edu['speaker']] = i
        except KeyError:
            # NOTE(review): only the edu['speaker'] lookup can raise here
            # (dict assignment never does) -- presumably tolerates EDUs
            # without a speaker key; confirm intent.
            pass
    data_path = fp.join(data_dir, 'mlast.dat')
    with open(data_path, 'w') as f_data:
        f_data.write((pretty_data(last_mat) + '\n'))
    # No subordinating relations are selected here, so RSub is empty.
    subord_idc = []
    header = '\n'.join(('param EDU_COUNT := {0} ;'.format(len(edus)), 'param TURN_COUNT := {0} ;'.format(len(turn_off)), 'param PLAYER_COUNT := {0} ;'.format(len(speakers)), 'param LABEL_COUNT := {0} ;'.format(NUM_LABELS), 'set RSub := {{{0}}} ;'.format(', '.join((str(i) for i in subord_idc))), 'param SUB_LABEL_COUNT := {0} ;'.format(len(subord_idc))))
    # NOTE(review): single-argument join -- resolves to 'template.zpl' in
    # the current working directory, not under data_dir; confirm intent.
    template_path = fp.join('template.zpl')
    input_path = fp.join(data_dir, 'input.zpl')
    with open(template_path) as f_template:
        template = f_template.read()
    with open(input_path, 'w') as f_input:
        f_input.write((header + '\n'))
        f_input.write((template + '\n'))
    return input_path
class CNN3(nn.Module):
    """Three conv -> batch-norm -> ReLU stages, each followed by max pooling.

    Pooling windows shrink across the stages: (4, 4), then (2, 4), then
    (2, 2). All convolutions share the same kernel/stride/padding and have
    no bias (the batch norms supply the affine shift).
    """

    def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)):
        super().__init__()
        shared = dict(kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, **shared)
        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, **shared)
        self.conv3 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, **shared)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.bn3 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Apply the three stages in order and return the pooled features."""
        stages = ((self.conv1, self.bn1, (4, 4)), (self.conv2, self.bn2, (2, 4)), (self.conv3, self.bn3, (2, 2)))
        for conv, bn, pool_window in stages:
            x = F.relu_(bn(conv(x)))
            x = F.max_pool2d(x, kernel_size=pool_window)
        return x
def test_unknowntype():
    """An UnknownType must round-trip through its datashape string form."""
    rendered = str(UnknownType())
    assert str(ak.types.from_datashape(rendered, highlevel=False)) == rendered
class Function_zeta(GinacFunction):
    """GinacFunction wrapper for `zeta`, with interface-name conversions."""

    def __init__(self):
        system_names = {'giac': 'Zeta', 'maple': 'Zeta', 'sympy': 'zeta', 'mathematica': 'Zeta'}
        GinacFunction.__init__(self, 'zeta', conversions=system_names)
def vocab_token_counts(text_filepattern, max_lines):
    """Parse 'token,count' vocab lines into a {token: int count} dict.

    Lines without a comma are logged as malformed and skipped; the count is
    taken from the text after the LAST comma, so tokens may contain commas.
    """
    counts = {}
    for line_no, line in enumerate(_read_filepattern(text_filepattern, max_lines=max_lines)):
        if ',' not in line:
            tf.logging.warning("Malformed vocab line #%d '%s'", line_no, line)
            continue
        token, count = line.rsplit(',', 1)
        counts[_native_to_unicode(token)] = int(count)
    return counts
def _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):
    """Expand the skip-thoughts vocabulary using regressed word2vec vectors.

    A linear map from word2vec space to skip-thoughts space is fit on the
    words shared by both vocabularies, then applied to every single-token
    word2vec entry. Native skip-thoughts embeddings are written last, so
    they take precedence on collisions.
    """
    tf.logging.info('Finding shared words')
    shared_words = [word for word in word2vec.vocab if word in skip_thoughts_vocab]
    tf.logging.info('Selecting embeddings for %d shared words', len(shared_words))
    shared_st_emb = skip_thoughts_emb[[skip_thoughts_vocab[word] for word in shared_words]]
    shared_w2v_emb = word2vec[shared_words]
    tf.logging.info('Training linear regression model')
    model = sklearn.linear_model.LinearRegression()
    model.fit(shared_w2v_emb, shared_st_emb)
    tf.logging.info('Creating embeddings for expanded vocabuary')
    combined_emb = collections.OrderedDict()
    for word in word2vec.vocab:
        if '_' not in word:  # skip multi-word (underscored) phrases
            projected = model.predict(word2vec[word].reshape(1, (- 1)))
            combined_emb[word] = projected.reshape((- 1))
    for word in skip_thoughts_vocab:
        combined_emb[word] = skip_thoughts_emb[skip_thoughts_vocab[word]]
    tf.logging.info('Created expanded vocabulary of %d words', len(combined_emb))
    return combined_emb
class TSInt(object):
    """SWIG-generated wrapper around SNAP's TSInt.

    NOTE(review): auto-generated binding code -- regenerate rather than
    hand-editing the delegation logic below.
    """
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    # Wrapped value, exposed through the SWIG getter/setter pair.
    Val = _swig_property(_snap.TSInt_Val_get, _snap.TSInt_Val_set)

    def __init__(self, *args):
        _snap.TSInt_swiginit(self, _snap.new_TSInt(*args))

    def Load(self, SIn):
        """Delegate to _snap.TSInt_Load (read from input stream SIn)."""
        return _snap.TSInt_Load(self, SIn)

    def Save(self, SOut):
        """Delegate to _snap.TSInt_Save (write to output stream SOut)."""
        return _snap.TSInt_Save(self, SOut)

    def GetPrimHashCd(self):
        """Delegate to _snap.TSInt_GetPrimHashCd."""
        return _snap.TSInt_GetPrimHashCd(self)

    def GetSecHashCd(self):
        """Delegate to _snap.TSInt_GetSecHashCd."""
        return _snap.TSInt_GetSecHashCd(self)

    __swig_destroy__ = _snap.delete_TSInt
def import_statements(*objects, **kwds):
    """Print (or return) the import statements needed to use *objects*.

    Each object may be given directly or by name (comma-separated strings
    are split into several names). Keyword options: ``lazy`` emits
    ``lazy_import`` statements, ``verbose`` prints warnings about ambiguous
    resolutions, ``answer_as_str`` returns the statements instead of
    printing them.
    """
    import itertools
    import inspect
    from sage.misc.lazy_import import LazyImport
    # module name -> list of (name, alias) pairs to import from it.
    answer = defaultdict(list)
    module_name = None
    lazy = kwds.pop('lazy', False)
    verbose = kwds.pop('verbose', True)
    answer_as_str = kwds.pop('answer_as_str', False)
    if kwds:
        raise TypeError("Unexpected '{}' argument".format(next(iter(kwds))))

    def expand_comma_separated_names(obj):
        # Split a string like "(a, b)" into individual stripped names.
        if isinstance(obj, str):
            for w in obj.strip('()').split(','):
                (yield w.strip())
        else:
            (yield obj)
    for obj in itertools.chain.from_iterable((expand_comma_separated_names(object) for object in objects)):
        name = None
        if isinstance(obj, str):
            # Resolve the name in the global Sage namespace, then by search.
            from sage.all import sage_globals
            G = sage_globals()
            name = obj
            if (name in G):
                obj = [G[name]]
            else:
                obj = find_objects_from_name(name, 'sage', include_lazy_imports=True)
                if (not obj):
                    obj = find_objects_from_name(name, include_lazy_imports=True)
            # Unwrap lazy imports, remembering any deprecation they carry.
            i = 0
            deprecation = None
            while (i < len(obj)):
                if isinstance(obj[i], LazyImport):
                    tmp = obj.pop(i)
                    tmp_deprecation = tmp._get_deprecation_issue()
                    if tmp_deprecation:
                        deprecation = tmp_deprecation
                    else:
                        tmp = tmp._get_object()
                        if all(((u is not tmp) for u in obj)):
                            obj.append(tmp)
                else:
                    i += 1
            if (verbose and (len(obj) > 1)):
                # Several distinct objects share this name; warn with modules.
                modules = set()
                for o in obj:
                    modules.update(find_object_modules(o))
                print("# **Warning**: distinct objects with name '{}' in:".format(name))
                for mod in sorted(modules):
                    print('# - {}'.format(mod))
            # Keep the first candidate; fail if the name resolved to nothing.
            try:
                obj = obj[0]
            except IndexError:
                if deprecation:
                    raise LookupError('object named {!r} is deprecated (see github issue {})'.format(name, deprecation))
                else:
                    raise LookupError('no object named {!r}'.format(name))
        if isinstance(obj, LazyImport):
            obj = obj._get_object()
        # A module is imported as-is (no "from ... import ...").
        if inspect.ismodule(obj):
            module_name = obj.__name__
            answer[module_name].append((None, None))
            continue
        # Find the modules in which the object appears, ignoring mains.
        modules = find_object_modules(obj)
        if ('__main__' in modules):
            del modules['__main__']
        if ('__mp_main__' in modules):
            del modules['__mp_main__']
        if (not modules):
            raise ValueError("no import statement found for '{}'.".format(obj))
        if (name is None):
            # Prefer pure-ASCII aliases when any exist.
            def is_ascii(s):
                return all(((ord(c) < 128) for c in s))
            if any((is_ascii(s) for (module_name, obj_names) in modules.items() for s in obj_names)):
                for (module_name, obj_names) in list(modules.items()):
                    if any(((not is_ascii(s)) for s in obj_names)):
                        obj_names = [name for name in obj_names if is_ascii(name)]
                        if (not obj_names):
                            del modules[module_name]
                        else:
                            modules[module_name] = obj_names
        if (len(modules) == 1):
            # Unambiguous module: pick a name/alias pair and move on.
            ((module_name, obj_names),) = modules.items()
            if (name is None):
                if (verbose and (len(obj_names) > 1)):
                    print('# ** Warning **: several names for that object: {}'.format(', '.join(sorted(obj_names))))
                name = alias = obj_names[0]
            elif (name in modules[module_name]):
                alias = name
            else:
                alias = name
                name = obj_names[0]
            answer[module_name].append((name, alias))
            continue
        # Several candidate modules: prefer the one exporting exactly *name*.
        if (name is not None):
            good_modules = []
            for mod in modules:
                if (name in modules[mod]):
                    good_modules.append(mod)
            if (len(good_modules) == 1):
                answer[good_modules[0]].append((name, name))
                continue
        # Class instances are usually exported from the sibling .all module.
        from .sageinspect import isclassinstance
        if isclassinstance(obj):
            module_name = type(obj).__module__
            i = module_name.rfind('.')
            all_module_name = (module_name[:i] + '.all')
            if (all_module_name in modules):
                module_name = all_module_name
                # NOTE(review): bare expression -- raises IndexError if the
                # .all module exports no names; presumably a sanity check.
                modules[module_name][0]
            else:
                module_name = None
        if (module_name is None):
            # Otherwise prefer a non-.all module when one is available.
            all_re = re.compile('.+\\.all(?:_\\w+)?$')
            not_all_modules = [mod for mod in modules if (not all_re.match(mod))]
            if (not not_all_modules):
                print('# ** Warning **: the object {} is only defined in .all modules'.format(obj))
                module_name = next(iter(modules))
            else:
                if (len(not_all_modules) > 1):
                    print('# ** Warning **: several modules for the object {}: {}'.format(obj, ', '.join(sorted(modules))))
                module_name = not_all_modules[0]
        if (name is None):
            alias = name = modules[module_name][0]
        else:
            alias = name
            name = modules[module_name][0]
        answer[module_name].append((name, alias))
    # Render the collected imports, one statement per module.
    res = []
    if lazy:
        res.append('from sage.misc.lazy_import import lazy_import')
    for module_name in sorted(answer):
        res.append(import_statement_string(module_name, answer[module_name], lazy))
    if answer_as_str:
        return '\n'.join(res)
    else:
        print('\n'.join(res))
def get_net_qc_graph(config_file: str):
    """Build the undirected router adjacency from a network config file.

    Quantum routers become graph nodes; two routers sharing the same BSM
    endpoint across quantum channels are linked in both directions.
    """
    with open(config_file, 'r') as fh:
        config = load(fh)
    # One adjacency list per quantum router.
    graph = {}
    for node in config[ParallelRouterNetTopo.ALL_NODE]:
        if node[ParallelRouterNetTopo.TYPE] == ParallelRouterNetTopo.QUANTUM_ROUTER:
            graph[node[ParallelRouterNetTopo.NAME]] = []
    # Pair up the two routers attached to each BSM node.
    bsm_to_router = {}
    for channel in config[ParallelRouterNetTopo.ALL_Q_CHANNEL]:
        router = channel[ParallelRouterNetTopo.SRC]
        bsm = channel[ParallelRouterNetTopo.DST]
        if bsm in bsm_to_router:
            peer = bsm_to_router[bsm]
            graph[peer].append(router)
            graph[router].append(peer)
        else:
            bsm_to_router[bsm] = router
    return graph
def generate_compl_labels(labels):
    """Draw one uniformly random complementary label per example.

    For each entry of the 1-D int tensor *labels*, returns a label sampled
    uniformly from the other K-1 classes (K = max label + 1), as a numpy
    array of the same length.
    """
    num_classes = int(torch.max(labels)) + 1
    n = len(labels)
    # Row i holds all class ids; mask out the true label of example i.
    all_classes = np.tile(np.arange(num_classes), (n, 1))
    keep = np.ones((n, num_classes), dtype=bool)
    keep[np.arange(n), labels.numpy()] = False
    wrong_classes = all_classes[keep].reshape(n, num_classes - 1)
    # Pick one of the K-1 remaining classes uniformly per row.
    picks = np.random.randint(0, num_classes - 1, n)
    return wrong_classes[np.arange(n), picks]
class FakeConstantModel(flexs.Model):
    """Test double whose fitness is one fixed constant for every sequence."""

    def __init__(self, constant):
        super().__init__(name='ConstantModel')
        self.constant = constant

    def _fitness_function(self, sequences):
        # One identical score per input sequence.
        return self.constant * np.ones(len(sequences))

    def train(self, *args, **kwargs):
        # A constant model has nothing to fit.
        pass
def eval_model(args):
    """Evaluate the model saved at ``args.save`` on the test split.

    Prints overall and ONI test statistics, and returns
    (predictions, ground truth, test-split semantic time steps).
    Relies on module-level ``train_dates``/``val_dates``/``test_dates``.
    """
    model = torch.load(args.save)
    model.eval()
    evaluateL2 = nn.MSELoss().to(args.device)
    evaluateL1 = nn.L1Loss().to(args.device)
    Data = DataLoaderS(args, train_dates=train_dates, val_dates=val_dates, test_dates=test_dates)
    (test_acc, test_rae, test_corr, oni_test_stats, preds, Ytrue) = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1, args, return_oni_preds=True)
    print('Exp1 Test stats... OVERALL: rse {:5.4f} , RMSE {:5.4f} , corr {:5.4f} | ONI: RMSE {:5.4f} , corr {:5.4f}'.format(test_acc, test_rae, test_corr, oni_test_stats['RMSE'], oni_test_stats['Corrcoef']))
    return (preds, Ytrue, Data.semantic_time_steps['test'])
def register_Ns3UanChannel_methods(root_module, cls):
    """Register ns3::UanChannel constructors and methods on *cls*.

    NOTE(review): PyBindGen-style generated binding registrations -- keep
    signatures in sync with the ns-3 UanChannel C++ header; do not hand-edit.
    """
    cls.add_constructor([param('ns3::UanChannel const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddDevice', 'void', [param('ns3::Ptr< ns3::UanNetDevice >', 'dev'), param('ns3::Ptr< ns3::UanTransducer >', 'trans')])
    cls.add_method('Clear', 'void', [])
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetNoiseDbHz', 'double', [param('double', 'fKhz')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetNoiseModel', 'void', [param('ns3::Ptr< ns3::UanNoiseModel >', 'noise')])
    cls.add_method('SetPropagationModel', 'void', [param('ns3::Ptr< ns3::UanPropModel >', 'prop')])
    cls.add_method('TxPacket', 'void', [param('ns3::Ptr< ns3::UanTransducer >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txmode')], is_virtual=True)
    # Protected members.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('SendUp', 'void', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')], visibility='protected')
    return
class PyGNodePropPredDataset(InMemoryDataset):
    """PyTorch-Geometric wrapper around a temporal NodePropPredDataset.

    Loads the underlying dataset, converts its numpy arrays to torch
    tensors, and exposes the edge stream as TemporalData objects.

    NOTE(review): the accessors below (num_classes, train_mask, src, ...)
    read as if they were @property-decorated upstream and lost the
    decorator during extraction. As written they must be *called*
    (ds.train_mask()); confirm against the original package before use.
    """
    def __init__(self, name: str, root: str, transform: Optional[Callable]=None, pre_transform: Optional[Callable]=None):
        """Load dataset `name` from `root` and materialize its tensors."""
        self.name = name
        self.root = root
        self.dataset = NodePropPredDataset(name=name, root=root)
        # Split masks arrive as numpy arrays; keep torch views of them.
        self._train_mask = torch.from_numpy(self.dataset.train_mask)
        self._val_mask = torch.from_numpy(self.dataset.val_mask)
        self._test_mask = torch.from_numpy(self.dataset.test_mask)
        self.__num_classes = self.dataset.num_classes
        super().__init__(root, transform, pre_transform)
        self.process_data()
    def num_classes(self) -> int:
        """Number of node classes in the underlying dataset."""
        return self.__num_classes
    def eval_metric(self) -> str:
        """Name of the evaluation metric defined by the dataset."""
        return self.dataset.eval_metric
    def train_mask(self) -> torch.Tensor:
        """Training split mask; raises if the split was never loaded."""
        if (self._train_mask is None):
            raise ValueError("training split hasn't been loaded")
        return self._train_mask
    def val_mask(self) -> torch.Tensor:
        """Validation split mask; raises if the split was never loaded."""
        if (self._val_mask is None):
            raise ValueError("validation split hasn't been loaded")
        return self._val_mask
    def test_mask(self) -> torch.Tensor:
        """Test split mask; raises if the split was never loaded."""
        if (self._test_mask is None):
            raise ValueError("test split hasn't been loaded")
        return self._test_mask
    def src(self) -> torch.Tensor:
        """Source node ids of the edge stream (int64)."""
        return self._src
    def dst(self) -> torch.Tensor:
        """Destination node ids of the edge stream (int64)."""
        return self._dst
    def ts(self) -> torch.Tensor:
        """Edge timestamps (int64)."""
        return self._ts
    def edge_feat(self) -> torch.Tensor:
        """Edge feature matrix (float32)."""
        return self._edge_feat
    def edge_label(self) -> torch.Tensor:
        """Per-edge labels."""
        return self._edge_label
    def process_data(self):
        """Convert the full edge data to tensors with canonical dtypes."""
        src = torch.from_numpy(self.dataset.full_data['sources'])
        dst = torch.from_numpy(self.dataset.full_data['destinations'])
        t = torch.from_numpy(self.dataset.full_data['timestamps'])
        edge_label = torch.from_numpy(self.dataset.full_data['edge_label'])
        msg = torch.from_numpy(self.dataset.full_data['edge_feat'])
        # Normalize dtypes: ids/timestamps to int64, features to float32.
        if (src.dtype != torch.int64):
            src = src.long()
        if (dst.dtype != torch.int64):
            dst = dst.long()
        if (t.dtype != torch.int64):
            t = t.long()
        if (msg.dtype != torch.float32):
            msg = msg.float()
        self._src = src
        self._dst = dst
        self._ts = t
        self._edge_label = edge_label
        self._edge_feat = msg
    def get_TemporalData(self) -> TemporalData:
        """Return the whole edge stream as a single TemporalData object."""
        data = TemporalData(src=self._src, dst=self._dst, t=self._ts, msg=self._edge_feat, y=self._edge_label)
        return data
    def reset_label_time(self) -> None:
        """Reset the underlying dataset's node-label time cursor."""
        self.dataset.reset_label_time()
    def get_node_label(self, cur_t):
        """Return (label_ts, label_srcs, labels) for the next label batch
        at/after `cur_t`, or None when no labels remain."""
        label_tuple = self.dataset.find_next_labels_batch(cur_t)
        if (label_tuple is None):
            return None
        (label_ts, label_srcs, labels) = (label_tuple[0], label_tuple[1], label_tuple[2])
        label_ts = torch.from_numpy(label_ts).long()
        label_srcs = torch.from_numpy(label_srcs).long()
        labels = torch.from_numpy(labels).to(torch.float32)
        return (label_ts, label_srcs, labels)
    def get_label_time(self) -> int:
        """Timestamp of the next pending node-label batch."""
        return self.dataset.return_label_ts()
    def len(self) -> int:
        """Number of edges (PyG Dataset protocol)."""
        return self._src.shape[0]
    def get(self, idx: int) -> TemporalData:
        """Return the single edge at `idx` as a TemporalData (PyG protocol)."""
        data = TemporalData(src=self._src[idx], dst=self._dst[idx], t=self._ts[idx], msg=self._edge_feat[idx], y=self._edge_label[idx])
        return data
    def __repr__(self) -> str:
        return f'{self.name.capitalize()}()'
class PretrainedBartModel():
    """Import-guard placeholder used when PyTorch is not installed.

    Any attempt to construct or load this model reports the missing
    backend via `requires_pytorch` (defined elsewhere in the package).
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream dummy objects usually declare this as a
        # @classmethod so Class.from_pretrained(...) works without an
        # instance -- confirm whether the decorator was lost in extraction.
        requires_pytorch(self)
class LieGroupOps(GroupOps):
    """Dispatch facade for Lie-group operations on registered element types.

    Every method resolves the concrete implementation registered for the
    element's type (via `LieGroupOps.implementation`) and forwards to it.

    NOTE(review): these methods take the element as the first argument and
    never use `self`; upstream they are presumably @staticmethod and the
    decorators were lost during extraction. As written they still work
    when invoked through the class, e.g. LieGroupOps.to_tangent(a).
    """
    def tangent_dim(a: T.ElementOrType) -> int:
        """Dimension of the tangent space of `a`'s type."""
        return LieGroupOps.implementation(get_type(a)).tangent_dim(a)
    def from_tangent(a: T.ElementOrType, vec: T.Sequence[T.Scalar], epsilon: T.Scalar=sf.epsilon()) -> T.Element:
        """Map a tangent-space vector to a group element near `a`."""
        return LieGroupOps.implementation(get_type(a)).from_tangent(a, vec, epsilon)
    def to_tangent(a: T.Element, epsilon: T.Scalar=sf.epsilon()) -> T.List[T.Scalar]:
        """Map a group element to its tangent-space coordinates."""
        type_a = get_type(a)
        return LieGroupOps.implementation(type_a).to_tangent(a, epsilon)
    def retract(a: T.Element, vec: T.Sequence[T.Scalar], epsilon: T.Scalar=sf.epsilon()) -> T.Element:
        """Apply a tangent-space perturbation `vec` to `a`."""
        return LieGroupOps.implementation(get_type(a)).retract(a, vec, epsilon)
    def local_coordinates(a: T.Element, b: T.Element, epsilon: T.Scalar=sf.epsilon()) -> T.List[T.Scalar]:
        """Tangent-space vector taking `a` to `b` (inverse of retract)."""
        return LieGroupOps.implementation(get_type(a)).local_coordinates(a, b, epsilon)
    def interpolate(a: T.Element, b: T.Element, alpha: T.Scalar, epsilon: T.Scalar=sf.epsilon()) -> T.List[T.Scalar]:
        """Interpolate between `a` (alpha=0) and `b` (alpha=1) by scaling
        the local coordinates and retracting."""
        return LieGroupOps.retract(a, [(c * alpha) for c in LieGroupOps.local_coordinates(a, b, epsilon)], epsilon)
    def storage_D_tangent(a: T.Element) -> geo.Matrix:
        """Jacobian of the storage representation w.r.t. the tangent space."""
        try:
            return LieGroupOps.implementation(get_type(a)).storage_D_tangent(a)
        except NotImplementedError:
            logger.error('storage_D_tangent not implemented for {}; use storage_D_tangent.ipynb to compute'.format(get_type(a)))
            raise
    def tangent_D_storage(a: T.Element) -> geo.Matrix:
        """Jacobian of the tangent space w.r.t. the storage representation."""
        try:
            return LieGroupOps.implementation(get_type(a)).tangent_D_storage(a)
        except NotImplementedError:
            logger.error('tangent_D_storage not implemented for {}; use tangent_D_storage.ipynb to compute'.format(get_type(a)))
            raise
class DWConv2d_BN_M(nn.Module):
    """Depthwise-separable convolution block with per-domain BatchNorm.

    A depthwise conv (groups == in_ch, 'same'-style padding for odd
    kernels) followed by a 1x1 pointwise projection, then one of
    `num_domains` BatchNorm layers selected at forward time, then an
    activation.

    Args:
        in_ch / out_ch: channel counts.
        kernel_size, stride: depthwise conv geometry.
        norm_layer: normalization constructor (one instance per domain).
        act_layer: activation constructor, or None for identity.
        bn_weight_init: initial value for the BN scale parameter.
        num_domains: number of domain-specific BN branches.
    """

    def __init__(self, in_ch, out_ch, kernel_size=1, stride=1,
                 norm_layer=nn.BatchNorm2d, act_layer=nn.Hardswish,
                 bn_weight_init=1, num_domains=1):
        super().__init__()
        self.dwconv = nn.Conv2d(in_ch, in_ch, kernel_size, stride,
                                (kernel_size - 1) // 2, groups=in_ch, bias=False)
        self.pwconv = nn.Conv2d(in_ch, out_ch, 1, 1, 0, bias=False)
        # One BN per domain so each domain keeps its own statistics.
        self.bns = nn.ModuleList([norm_layer(out_ch) for _ in range(num_domains)])
        self.act = act_layer() if act_layer is not None else nn.Identity()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He/Kaiming-style init based on fan-out.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(bn_weight_init)
                m.bias.data.zero_()

    def forward(self, x, d=None):
        """Apply the block; `d` selects the domain BN branch.

        Bug fix: the original called int(d) unconditionally, which raised
        TypeError for the declared default d=None. None now selects
        domain 0.
        """
        d = 0 if d is None else int(d)
        x = self.dwconv(x)
        x = self.pwconv(x)
        x = self.bns[d](x)
        x = self.act(x)
        return x
class _CrossEntropy(nn.Module):
def __init__(self, sumit=True):
super(_CrossEntropy, self).__init__()
self.sumit = sumit
def forward(self, p, q):
if self.sumit:
return ((- p) * torch.log(q)).sum(dim=1)
else:
return ((- p) * torch.log(q))
def __str__(self):
return '{}(): Cross-entropy over continuous target. sumit={}.'.format(self.__class__.__name__, self.sumit) |
def test_main_wrapper_loads_from_fsspec():
    # Writes a YAML config into fsspec's in-memory filesystem, then checks
    # that the CLI wrapper reads `project` from the file while `--x 2`
    # overrides the annotated default.
    with fsspec.open('memory://test.yaml', 'w') as f:
        f.write('\n project: test\n ')
    args = ['--config_path', 'memory://test.yaml', '--x', '2']
    class Config():
        # Config schema consumed by the wrapper: `project` is required,
        # `x` defaults to 1.
        project: str
        x: int = 1
    # NOTE(review): the next line lost its decorator target during
    # extraction -- a bare `.main(args=args)` is a syntax error. It
    # presumably read `@<wrapper_object>.main(args=args)` decorating the
    # `main` function below; restore the object name before running.
    .main(args=args)
    def main(config: Config):
        assert (config.project == 'test')
        assert (config.x == 2)
    main()
class MINRES(CPAlgorithm):
    """MINRES-style core-periphery detection via gradient ascent.

    Runs `num_runs` random restarts of an ADAM-driven optimization of a
    continuous core score vector `w` and keeps the best-scoring run.

    Bug fix: the matrix-multiplication operators (`@`) had been stripped
    from `_detect` and `_score` (e.g. `A w` instead of `A @ w`), making
    the class a syntax error; they are restored below.
    """

    def __init__(self, num_runs=10):
        """:param num_runs: number of random restarts; the best run wins."""
        self.num_runs = num_runs
        self.n_jobs = 1

    def detect(self, G):
        """Detect a core-periphery structure in graph G.

        Stores node labels, integer community ids, the continuous core
        scores and the quality value on self.
        """
        A, nodelabel = utils.to_adjacency_matrix(G)

        def _detect(A, maxIt=10000):
            # Gradient ascent on w from a random start; stops early once
            # the relative update falls below 1%.
            w = np.random.rand(A.shape[0])
            adam = ADAM()
            for _it in range(maxIt):
                wnorm = np.linalg.norm(w)
                # Fix: restore the matmul operator (`A w` -> `A @ w`).
                grad = (A @ w) - ((wnorm ** 2) - (w ** 2)) * w
                wnew = adam.update(w, grad, 0, False)
                diff = np.linalg.norm(wnew - w) / wnorm
                w = wnew.copy()
                if diff < 0.01:
                    break
            Q = self._score(A, None, w)
            cids = np.zeros(A.shape[0])
            return {'cids': cids, 'x': w, 'q': Q[0]}

        res = Parallel(n_jobs=self.n_jobs)(delayed(_detect)(A) for i in range(self.num_runs))
        res = max(res, key=lambda x: x['q'])
        cids, x, Q = res['cids'], res['x'], res['q']
        self.nodelabel = nodelabel
        self.c_ = cids.astype(int)
        self.x_ = x
        self.Q_ = Q
        self.qs_ = [Q]

    def _score(self, A, c, x):
        """Quality of core vector x: ||A||_F^2 - 2 x^T A x + |x|^2(|x|^2 - 1).

        `c` is unused (kept for the CPAlgorithm scoring interface).
        """
        Asq = np.sum(np.power(A.data, 2))
        wnorm = np.linalg.norm(x)
        # Fix: restore the two stripped matmul operators in x^T A x.
        Q = (Asq - ((2 * x.reshape((1, -1))) @ A @ x.reshape((-1, 1)))) + (wnorm * wnorm) * ((wnorm * wnorm) - 1)
        Q = Q[0][0]
        return [Q]
def get_frames(video_name):
    """Yield BGR frames from a webcam, a video file, or an image directory.

    :param video_name: falsy -> default webcam; a path ending in
        'avi'/'mp4' -> that video file; otherwise a directory containing
        frame images named ``<index>.jp*``.

    Bug fix: the video-file branch opened the module-global
    ``args.video_name`` instead of the ``video_name`` parameter.
    """
    if not video_name:
        cap = cv2.VideoCapture(0)
        # Discard the first few frames while the sensor warms up.
        for _ in range(5):
            cap.read()
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            yield frame
    elif video_name.endswith('avi') or video_name.endswith('mp4'):
        # Fix: use the parameter, not the global `args`.
        cap = cv2.VideoCapture(video_name)
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            yield frame
    else:
        # Directory of frames; sort numerically by the basename stem.
        images = glob(os.path.join(video_name, '*.jp*'))
        images = sorted(images, key=lambda x: int(x.split('/')[-1].split('.')[0]))
        for img in images:
            yield cv2.imread(img)
def numpy_to_cutlass(inp):
    """Translate a NumPy dtype or scalar type into the matching cutlass dtype.

    Returns None when NumPy is unavailable or the type is unsupported.
    """
    if not numpy_available:
        return None
    conversions = (
        (np.float16, cutlass.float16),
        (np.float32, cutlass.float32),
        (np.float64, cutlass.float64),
        (np.int8, cutlass.int8),
        (np.int32, cutlass.int32),
    )
    for numpy_type, cutlass_type in conversions:
        # `==` (not `is`) so both np.dtype instances and scalar types match.
        if inp == numpy_type:
            return cutlass_type
    return None
class ScriptMaker(object):
    """Create Python scripts (optionally with Windows launchers) from
    export specifications, or copy/adjust existing scripts.

    Fixes relative to the reviewed version:
    * ``_build_shebang``: the /bin/sh trampoline forwarded ``"$"`` instead
      of ``"$@"``, silently dropping all script arguments.
    * ``dry_run``: restored the ``@property`` / ``@dry_run.setter`` pair
      (the bare ``_run.setter`` line raised NameError at class creation).
    * ``_make_script``: version-suffixed variants now use
      ``sys.version_info``, so Python 3.10+ yields 'name-3.10' rather
      than the truncated 'name-3.1' from ``sys.version[:3]``.
    * ``_copy_script``: the empty-file early return no longer leaks the
      open file handle.
    """
    script_template = SCRIPT_TEMPLATE
    executable = None  # set to a path to force a specific interpreter

    def __init__(self, source_dir, target_dir, add_launchers=True,
                 dry_run=False, fileop=None):
        self.source_dir = source_dir
        self.target_dir = target_dir
        self.add_launchers = add_launchers
        self.force = False
        self.clobber = False
        # The executable bit only exists on POSIX (incl. Jython-on-POSIX).
        self.set_mode = (os.name == 'posix') or (os.name == 'java' and
                                                 os._name == 'posix')
        self.variants = set(('', 'X.Y'))
        self._fileop = fileop or FileOperator(dry_run)
        self._is_nt = os.name == 'nt' or (os.name == 'java' and
                                          os._name == 'nt')

    def _get_alternate_executable(self, executable, options):
        """For Windows GUI scripts, swap python.exe for pythonw.exe."""
        if options.get('gui', False) and self._is_nt:
            dn, fn = os.path.split(executable)
            fn = fn.replace('python', 'pythonw')
            executable = os.path.join(dn, fn)
        return executable

    if sys.platform.startswith('java'):
        def _is_shell(self, executable):
            """Return True if `executable` starts with a '#!' shebang."""
            try:
                with open(executable) as fp:
                    return fp.read(2) == '#!'
            except (OSError, IOError):
                logger.warning('Failed to open %s', executable)
                return False

        def _fix_jython_executable(self, executable):
            """Work around Jython's shell-script sys.executable by routing
            through /usr/bin/env where a direct shebang would fail."""
            if self._is_shell(executable):
                import java
                if java.lang.System.getProperty('os.name') == 'Linux':
                    return executable
            elif executable.lower().endswith('jython.exe'):
                return executable
            return '/usr/bin/env %s' % executable

    def _build_shebang(self, executable, post_interp):
        """Build the ``#!`` line as bytes.

        POSIX kernels limit shebang length (and reject spaces in the
        interpreter path); in those cases fall back to a /bin/sh
        trampoline that execs the real interpreter.
        """
        if os.name != 'posix':
            simple_shebang = True
        else:
            # +3 for '#!' prefix and trailing newline.
            shebang_length = len(executable) + len(post_interp) + 3
            if sys.platform == 'darwin':
                max_shebang_length = 512
            else:
                max_shebang_length = 127
            simple_shebang = (b' ' not in executable and
                              shebang_length <= max_shebang_length)
        if simple_shebang:
            result = b'#!' + executable + post_interp + b'\n'
        else:
            result = b'#!/bin/sh\n'
            # Fix: forward all arguments -- the original emitted "$"
            # instead of "$@".
            result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
            result += b"' '''"
        return result

    def _get_shebang(self, encoding, post_interp=b'', options=None):
        """Compute the shebang bytes for a generated script.

        Resolves the interpreter (explicit override, build dir, venv, or
        installed layout), applies GUI/Jython fixups, and validates that
        the result is decodable in both utf-8 and the script encoding.
        """
        enquote = True
        if self.executable:
            executable = self.executable
            enquote = False  # assume the caller quoted it if needed
        elif not sysconfig.is_python_build():
            executable = get_executable()
        elif in_venv():
            executable = os.path.join(sysconfig.get_path('scripts'),
                                      'python%s' % sysconfig.get_config_var('EXE'))
        else:
            executable = os.path.join(
                sysconfig.get_config_var('BINDIR'),
                'python%s%s' % (sysconfig.get_config_var('VERSION'),
                                sysconfig.get_config_var('EXE')))
        if options:
            executable = self._get_alternate_executable(executable, options)
        if sys.platform.startswith('java'):
            executable = self._fix_jython_executable(executable)
        executable = os.path.normcase(executable)
        if enquote:
            executable = _enquote_executable(executable)
        executable = executable.encode('utf-8')
        # IronPython needs frames enabled to run generated scripts.
        if (sys.platform == 'cli' and '-X:Frames' not in post_interp
                and '-X:FullFrames' not in post_interp):
            post_interp += b' -X:Frames'
        shebang = self._build_shebang(executable, post_interp)
        try:
            shebang.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('The shebang (%r) is not decodable from utf-8' % shebang)
        if encoding != 'utf-8':
            try:
                shebang.decode(encoding)
            except UnicodeDecodeError:
                raise ValueError('The shebang (%r) is not decodable from the script encoding (%r)' % (shebang, encoding))
        return shebang

    def _get_script_text(self, entry):
        """Render the wrapper-script source for an export entry."""
        return self.script_template % dict(module=entry.prefix,
                                           func=entry.suffix)

    manifest = _DEFAULT_MANIFEST

    def get_manifest(self, exename):
        """Return the Windows manifest XML for `exename`."""
        base = os.path.basename(exename)
        return self.manifest % base

    def _write_script(self, names, shebang, script_bytes, filenames, ext):
        """Write `script_bytes` under each name in `names`.

        On Windows (with launchers) the payload is launcher exe + shebang
        + zipped __main__.py; elsewhere it is shebang + script. Paths
        written are appended to `filenames`.
        """
        use_launcher = self.add_launchers and self._is_nt
        linesep = os.linesep.encode('utf-8')
        if not use_launcher:
            script_bytes = shebang + linesep + script_bytes
        else:
            if ext == 'py':
                launcher = self._get_launcher('t')  # console launcher
            else:
                launcher = self._get_launcher('w')  # GUI launcher
            stream = BytesIO()
            with ZipFile(stream, 'w') as zf:
                zf.writestr('__main__.py', script_bytes)
            zip_data = stream.getvalue()
            script_bytes = launcher + shebang + linesep + zip_data
        for name in names:
            outname = os.path.join(self.target_dir, name)
            if use_launcher:
                n, e = os.path.splitext(outname)
                if e.startswith('.py'):
                    outname = n
                outname = '%s.exe' % outname
                try:
                    self._fileop.write_binary_file(outname, script_bytes)
                except Exception:
                    # A running executable can't be overwritten in place on
                    # Windows -- move it aside first, then retry.
                    logger.warning('Failed to write executable - trying to use .deleteme logic')
                    dfname = '%s.deleteme' % outname
                    if os.path.exists(dfname):
                        os.remove(dfname)
                    os.rename(outname, dfname)
                    self._fileop.write_binary_file(outname, script_bytes)
                    logger.debug('Able to replace executable using .deleteme logic')
                    try:
                        os.remove(dfname)
                    except Exception:
                        pass  # still in use - ignore error
            else:
                if self._is_nt and not outname.endswith('.' + ext):
                    outname = '%s.%s' % (outname, ext)
                if os.path.exists(outname) and not self.clobber:
                    logger.warning('Skipping existing file %s', outname)
                    continue
                self._fileop.write_binary_file(outname, script_bytes)
                if self.set_mode:
                    self._fileop.set_executable_mode([outname])
            filenames.append(outname)

    def _make_script(self, entry, filenames, options=None):
        """Generate script(s) for an export entry in each configured variant."""
        post_interp = b''
        if options:
            args = options.get('interpreter_args', [])
            if args:
                args = ' %s' % ' '.join(args)
                post_interp = args.encode('utf-8')
        shebang = self._get_shebang('utf-8', post_interp, options=options)
        script = self._get_script_text(entry).encode('utf-8')
        name = entry.name
        scriptnames = set()
        if '' in self.variants:
            scriptnames.add(name)
        if 'X' in self.variants:
            scriptnames.add('%s%s' % (name, sys.version_info[0]))
        if 'X.Y' in self.variants:
            # Fix: sys.version[:3] yields '3.1' on Python >= 3.10; build
            # the X.Y suffix from version_info instead.
            scriptnames.add('%s-%s.%s' % (name, sys.version_info[0],
                                          sys.version_info[1]))
        if options and options.get('gui', False):
            ext = 'pyw'
        else:
            ext = 'py'
        self._write_script(scriptnames, shebang, script, filenames, ext)

    def _copy_script(self, script, filenames):
        """Copy `script` to the target dir, rewriting its shebang if present."""
        adjust = False
        script = os.path.join(self.source_dir, convert_path(script))
        outname = os.path.join(self.target_dir, os.path.basename(script))
        if not self.force and not self._fileop.newer(script, outname):
            logger.debug('not copying %s (up-to-date)', script)
            return
        try:
            f = open(script, 'rb')
        except IOError:
            if not self.dry_run:
                raise
            f = None
        else:
            first_line = f.readline()
            if not first_line:
                logger.warning('%s: %s is an empty file (skipping)', self.get_command_name(), script)
                f.close()  # fix: don't leak the handle on early return
                return
            match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
            if match:
                adjust = True
                post_interp = match.group(1) or b''
        if not adjust:
            # No shebang to rewrite: plain copy.
            if f:
                f.close()
            self._fileop.copy_file(script, outname)
            if self.set_mode:
                self._fileop.set_executable_mode([outname])
            filenames.append(outname)
        else:
            logger.info('copying and adjusting %s -> %s', script, self.target_dir)
            if not self._fileop.dry_run:
                encoding, lines = detect_encoding(f.readline)
                f.seek(0)
                shebang = self._get_shebang(encoding, post_interp)
                if b'pythonw' in first_line:
                    ext = 'pyw'
                else:
                    ext = 'py'
                n = os.path.basename(outname)
                self._write_script([n], shebang, f.read(), filenames, ext)
            if f:
                f.close()

    @property
    def dry_run(self):
        """Proxy for the underlying FileOperator's dry_run flag."""
        return self._fileop.dry_run

    @dry_run.setter
    def dry_run(self, value):
        self._fileop.dry_run = value

    if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'):
        def _get_launcher(self, kind):
            """Return the bytes of the 't' (console) or 'w' (GUI) launcher
            exe matching the current process bitness."""
            if struct.calcsize('P') == 8:  # 64-bit
                bits = '64'
            else:
                bits = '32'
            name = '%s%s.exe' % (kind, bits)
            distlib_package = __name__.rsplit('.', 1)[0]
            result = finder(distlib_package).find(name).bytes
            return result

    # Public API follows

    def make(self, specification, options=None):
        """Make script(s) from `specification`; return the files written.

        An export-entry spec ('name = module:attr') generates a wrapper
        script; anything else is treated as a file to copy.
        """
        filenames = []
        entry = get_export_entry(specification)
        if entry is None:
            self._copy_script(specification, filenames)
        else:
            self._make_script(entry, filenames, options=options)
        return filenames

    def make_multiple(self, specifications, options=None):
        """Make scripts for each specification; return all files written."""
        filenames = []
        for specification in specifications:
            filenames.extend(self.make(specification, options))
        return filenames
def validate_fr_tva(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate French TVA (VAT) numbers.

    Series inputs are validated element-wise; DataFrames validate a single
    column when `column` is given, otherwise every cell; a plain string is
    validated directly.
    """
    series_types = (pd.Series, dd.Series)
    frame_types = (pd.DataFrame, dd.DataFrame)
    if isinstance(df, series_types):
        return df.apply(tva.is_valid)
    if isinstance(df, frame_types):
        if column == '':
            return df.applymap(tva.is_valid)
        return df[column].apply(tva.is_valid)
    return tva.is_valid(df)
# NOTE(review): the bare `_tf` below looks like a decorator that lost its
# '@require' prefix during extraction (upstream uses `@require_tf`); as
# written it raises NameError at import time -- restore before running.
_tf
class TFCLIPVisionModelTest(TFModelTesterMixin, unittest.TestCase):
    """Model tests for TFCLIPVisionModel.

    The vision tower takes pixel_values (no token embeddings), so the
    embeddings-related mixin tests are disabled below.
    """
    all_model_classes = ((TFCLIPVisionModel,) if is_tf_available() else ())
    # Capabilities the shared mixin should skip for this architecture.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCLIPVisionModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """Run the shared configuration sanity tests."""
        self.config_tester.run_common_tests()
    def test_inputs_embeds(self):
        # Not applicable: the model consumes pixel_values, not inputs_embeds.
        pass
    def test_graph_mode_with_inputs_embeds(self):
        # Not applicable for the same reason as test_inputs_embeds.
        pass
    def test_model_common_attributes(self):
        """Input embeddings must be a Keras layer; output embeddings may be absent."""
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            x = model.get_output_embeddings()
            self.assertTrue(((x is None) or isinstance(x, tf.keras.layers.Layer)))
    def test_forward_signature(self):
        """`call` must accept pixel_values as its first argument."""
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """Build and check the model via the tester helper."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        """Attention maps must be returned whether requested via inputs or
        config, with shape [num_heads, seq_len, seq_len] per layer."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        image_size = (self.model_tester.image_size, self.model_tester.image_size)
        patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
        # Sequence = one token per patch plus the class token.
        num_patches = ((image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]))
        seq_len = (num_patches + 1)
        for model_class in self.all_model_classes:
            # Pass 1: request attentions through the inputs dict.
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # Pass 2: request attentions through the config instead.
            del inputs_dict['output_attentions']
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            out_len = len(outputs)
            # Pass 3: attentions + hidden states adds exactly one output.
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            added_hidden_states = 1
            self.assertEqual((out_len + added_hidden_states), len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(self_attentions[0].shape[(- 3):]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        """Hidden states: num_layers + 1 tensors of [seq_len, hidden_size]."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = (outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states)
            expected_num_layers = getattr(self.model_tester, 'expected_num_hidden_layers', (self.model_tester.num_hidden_layers + 1))
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
            num_patches = ((image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]))
            seq_length = (num_patches + 1)
            self.assertListEqual(list(hidden_states[0].shape[(- 2):]), [seq_length, self.model_tester.hidden_size])
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Request via inputs dict, then via config.
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_from_pretrained(self):
        """Smoke-test loading the first published checkpoint."""
        for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCLIPVisionModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
class Wallet():
    """Lightweight HD wallet for EIP-1559 transactions via web3.py.

    Manages mnemonic-derived and imported accounts, connects to a
    JSON-RPC endpoint, and can send transfers, deploy contracts and
    invoke contract functions.

    Fixes relative to the reviewed version:
    * deployContract: ``data.replace(...)`` discarded its result (str is
      immutable), so newlines were never stripped from the bytecode.
    * printTransactionReceipt is now an explicit @staticmethod.
    * importAccountFromKeystore closes the index file via a context
      manager instead of leaking the handle.
    """
    _accounts: list             # mnemonic-derived: {'name': str, 'account': Account}
    _imported_accounts: list    # accounts imported from keys/keystores
    _web3: Web3                 # set by connectToBlockchain
    _default_account: Account   # default sender
    _chain_id: int
    _url: str
    _max_fee: float             # wei (converted from gwei in __init__)
    _max_priority_fee: float    # wei
    _KEY_DERIVATION_PATH = "m/44'/60'/0'/0/{}"
    _DEFAULT_MNEMONIC = 'great amazing fun seed lab protect network system security prevent attack future'
    _mnemonic: str
    _mnemonic_account_index: int

    def __init__(self, mnemonic=None, chain_id=1337, max_fee=3.0, max_tip=2.0):
        """Create an empty wallet; `max_fee`/`max_tip` are in gwei."""
        self._accounts = []
        self._imported_accounts = []
        self._web3 = None
        self._url = None
        self._default_account = None
        self._mnemonic_account_index = 0
        self._chain_id = chain_id
        self._max_fee = Web3.toWei(max_fee, 'gwei')
        self._max_priority_fee = Web3.toWei(max_tip, 'gwei')
        self._mnemonic = mnemonic
        if self._mnemonic is None:
            # Development-only default mnemonic.
            self._mnemonic = self._DEFAULT_MNEMONIC

    def importAccount(self, key: str, name: str):
        """Import an account from a raw private key; returns self."""
        account = Account.from_key(key)
        self._imported_accounts.append({'name': name, 'account': account})
        return self

    def importAccountFromKeystore(self, keystore: str, name: str):
        """Import every account matching `name` from a keystore index file.

        NOTE(review): on multiple matches a '-<counter>' suffix is appended
        to `name`, and the mutated name is reused for later comparisons and
        further suffixing -- confirm that is the intended behavior.
        """
        filename = keystore + '/_index.json'
        # Fix: context manager closes the file; open() raises on a missing
        # path, so the old `assert json_file is not None` could never fire.
        with open(filename) as json_file:
            eth_accounts = json.load(json_file)
        counter = 0
        for address in eth_accounts:
            account = eth_accounts[address]
            if account['name'] == name:
                if counter > 0:
                    name = name + ('-%d' % counter)
                    self.importAccount(account['key'], name)
                else:
                    self.importAccount(account['key'], name)
                counter += 1
        return self

    def importEncryptedAccount(self, encrypted_key: str, name: str, password='admin'):
        """Decrypt a keystore-encrypted key and import it; returns self."""
        private_key = Account.decrypt(encrypted_key, password)
        self.importAccount(private_key, name)
        return self

    def exportEncryptedAccount(self, name: str, password='admin') -> str:
        """Return the encrypted keystore dict for `name`, or None if absent."""
        encrypted = None
        for account in self._accounts + self._imported_accounts:
            if account['name'] == name:
                encrypted = account['account'].encrypt(password=password)
        return encrypted

    def createAccount(self, name: str):
        """Derive the next account from the mnemonic; returns self."""
        Account.enable_unaudited_hdwallet_features()
        path = self._KEY_DERIVATION_PATH.format(self._mnemonic_account_index)
        account = Account.from_mnemonic(self._mnemonic, account_path=path)
        self._accounts.append({'name': name, 'account': account})
        # The first created account becomes the default sender.
        if self._default_account is None:
            self._default_account = account
        self._mnemonic_account_index += 1
        return self

    def setDefaultAccount(self, name: str):
        """Make the named account the default sender; returns self.

        NOTE(review): if `name` is unknown but a default already exists,
        the assert passes and the previous default is silently kept.
        """
        for account in self._accounts + self._imported_accounts:
            if account['name'] == name:
                self._default_account = account['account']
        assert self._default_account is not None, 'The key (%s) cannot be found' % name
        return self

    def connectToBlockchain(self, url: str, isPOA=False):
        """Connect to a JSON-RPC endpoint; inject POA middleware if asked."""
        self._url = url
        self._web3 = Web3(Web3.HTTPProvider(url))
        if isPOA:
            # Geth proof-of-authority chains need this extra middleware.
            self._web3.middleware_onion.inject(geth_poa_middleware, layer=0)
        assert self._web3.isConnected(), 'Connection failed'
        return self

    def sendRawTransaction(self, key, transaction: dict, wait=True, verbose=True):
        """Sign `transaction` with `key`, broadcast it, optionally wait
        for and print the receipt; returns the transaction hash."""
        assert self._web3 is not None
        print(json.dumps(transaction, indent=2))
        signed_tx = self._web3.eth.account.sign_transaction(transaction, key)
        tx_hash = self._web3.eth.sendRawTransaction(signed_tx.rawTransaction)
        print('Transaction Hash: {}'.format(tx_hash.hex()))
        if wait:
            print('Waiting for receipt ...')
            tx_receipt = self._web3.eth.wait_for_transaction_receipt(tx_hash)
            if verbose:
                Wallet.printTransactionReceipt(tx_receipt, short=False)
            else:
                print('Abbreviated transaction ----')
                Wallet.printTransactionReceipt(tx_receipt, short=True)
        return tx_hash

    @staticmethod
    def printTransactionReceipt(tx_receipt: str, short=False):
        """Pretty-print a receipt; `short` limits output to key fields.

        Fix: declared @staticmethod -- it takes no self and is always
        invoked as Wallet.printTransactionReceipt(...).
        """
        tx_dict = dict(tx_receipt)
        if short:
            short_receipt = {}
            selected_fields = ['from', 'to', 'status', 'blockNumber', 'effectiveGasPrice', 'gasUsed', 'contractAddress']
            for field in selected_fields:
                if tx_dict[field] is not None:
                    short_receipt[field] = tx_dict[field]
            print(json.dumps(short_receipt, indent=3))
        else:
            print(tx_receipt)

    def sendTransaction(self, recipient, amount, sender_name='', gas=30000, nonce: int=(- 1), data: str='', maxFeePerGas: float=(- 1), maxPriorityFeePerGas: float=(- 1), wait=True, verbose=True):
        """Send an EIP-1559 transfer; `amount` in ether, fees in gwei.

        nonce / maxFeePerGas / maxPriorityFeePerGas of -1 mean "use the
        chain nonce / the wallet defaults".
        """
        assert self._web3 is not None
        if sender_name == '':
            sender = self._default_account
        else:
            sender = self.__getAccountByName(sender_name)
        assert sender is not None, 'Sender account does not exist'
        if nonce == (- 1):
            nonce = self._web3.eth.getTransactionCount(sender.address)
        if maxFeePerGas < 0:
            maxFeePerGas = self._max_fee
        else:
            maxFeePerGas = Web3.toWei(maxFeePerGas, 'gwei')
        if maxPriorityFeePerGas < 0:
            maxPriorityFeePerGas = self._max_priority_fee
        else:
            maxPriorityFeePerGas = Web3.toWei(maxPriorityFeePerGas, 'gwei')
        transaction = {'nonce': nonce, 'from': sender.address, 'to': recipient, 'value': Web3.toWei(amount, 'ether'), 'chainId': self._chain_id, 'gas': gas, 'maxFeePerGas': maxFeePerGas, 'maxPriorityFeePerGas': maxPriorityFeePerGas, 'data': data}
        tx_hash = self.sendRawTransaction(sender.key, transaction, wait, verbose)
        return tx_hash

    def deployContract(self, contract_file, sender_name='', amount=0, gas=3000000, wait=True, verbose=True):
        """Deploy the hex bytecode stored in `contract_file` (to=None)."""
        with open(contract_file) as contract:
            data = contract.read()
        # Fix: str.replace returns a new string; the original discarded the
        # result, so embedded newlines were never removed.
        data = data.replace('\n', '')
        txhash = self.sendTransaction(None, amount, sender_name, gas=gas, data=data, wait=wait, verbose=verbose)
        return txhash

    def createContract(self, address, abi: str):
        """Return a web3 contract object bound to `address` with `abi`."""
        contract = self._web3.eth.contract(address=address, abi=abi)
        return contract

    def invokeContract(self, function, sender_name='', amount=0, gas=3000000, wait=True, verbose=True):
        """Build, sign and send a bound contract function call."""
        assert self._web3 is not None
        assert function is not None
        if sender_name == '':
            sender = self._default_account
        else:
            sender = self.__getAccountByName(sender_name)
        assert sender is not None, 'Sender account does not exist'
        transaction_info = {'nonce': self._web3.eth.getTransactionCount(sender.address), 'from': sender.address, 'value': Web3.toWei(amount, 'ether'), 'chainId': self._chain_id, 'gas': gas, 'maxFeePerGas': self._max_fee, 'maxPriorityFeePerGas': self._max_priority_fee}
        transaction = function.buildTransaction(transaction_info)
        tx_hash = self.sendRawTransaction(sender.key, transaction, wait, verbose)
        return tx_hash

    def __getAccountByAddress(self, address: str):
        """Return the Account with this address, or None."""
        for account in self._accounts + self._imported_accounts:
            if account['account'].address == address:
                return account['account']
        return None

    def __getAccountByName(self, name: str):
        """Return the Account with this name, or None."""
        for account in self._accounts + self._imported_accounts:
            if account['name'] == name:
                return account['account']
        return None

    def getAccountAddressByName(self, name: str):
        """Return the address of the named account, or None."""
        account = self.__getAccountByName(name)
        if account is not None:
            return account.address
        else:
            return None

    def getBalanceByName(self, name, unit='ether') -> int:
        """Balance of the named account, converted to `unit`."""
        address = self.getAccountAddressByName(name)
        return self.getBalance(address, unit)

    def getBalance(self, address, unit='ether') -> int:
        """Balance of `address`, converted to `unit`."""
        checksum_address = Web3.toChecksumAddress(address)
        balance = self._web3.eth.get_balance(checksum_address)
        return Web3.fromWei(balance, unit)

    def getBalanceForAll(self, unit='ether') -> dict:
        """Map account name -> {'address', 'balance'} over all accounts."""
        all_balance = {}
        for account in self._accounts + self._imported_accounts:
            address = account['account'].address
            balance = float(self.getBalance(address, unit))
            all_balance[account['name']] = {'address': address, 'balance': balance}
        return all_balance

    def getNonce(self, address) -> int:
        """Transaction count (nonce) for `address`."""
        checksum_address = Web3.toChecksumAddress(address)
        return self._web3.eth.getTransactionCount(checksum_address)

    def getNonceByName(self, name: str) -> int:
        """Nonce for the named account."""
        address = self.getAccountAddressByName(name)
        return self.getNonce(address)

    def getNonceForAll(self) -> dict:
        """Map account name -> {'address', 'nonce'} over all accounts."""
        all_nonces = {}
        for account in self._accounts + self._imported_accounts:
            address = account['account'].address
            nonce = self.getNonce(address)
            all_nonces[account['name']] = {'address': address, 'nonce': nonce}
        return all_nonces

    def printAccounts(self):
        """Print address and PRIVATE KEY of every account (dev use only)."""
        for account in self._accounts + self._imported_accounts:
            print('Address: %s' % account['account'].address)
            print('Private key: %s' % account['account'].key.hex())

    def getTransactionReceipt(self, txhash: str) -> dict:
        """Fetch a mined transaction's receipt as a plain dict."""
        receipt = self._web3.eth.get_transaction_receipt(txhash)
        tx_receipt = dict(receipt)
        return tx_receipt

    def getContractAddress(self, txhash: str) -> str:
        """Return the contract address created by a deployment transaction."""
        receipt = self._web3.eth.get_transaction_receipt(txhash)
        tx_receipt = dict(receipt)
        return tx_receipt['contractAddress']
class PlaceSet(UniqueRepresentation, Parent):
    """The infinite set of places of a function field.

    Sage Parent whose elements are places; the concrete element class is
    taken from the field's `_place_class`.
    """
    Element = FunctionFieldPlace
    def __init__(self, field):
        # Override the class-level Element with the field-specific place
        # class before Parent.__init__ caches element_class.
        self.Element = field._place_class
        Parent.__init__(self, category=Sets().Infinite())
        self._field = field
    def _repr_(self):
        """String representation of this set of places."""
        return 'Set of places of {}'.format(self._field)
    def _element_constructor_(self, x):
        """Construct a place from a prime ideal; reject anything else."""
        from .ideal import FunctionFieldIdeal
        if (isinstance(x, FunctionFieldIdeal) and x.is_prime()):
            return self.element_class(self, x)
        else:
            raise ValueError('not a prime ideal')
    def _an_element_(self):
        """Return some place, trying degrees 1, 2, ... until one exists."""
        d = 1
        while True:
            try:
                p = self._field.places(d).pop()
            except IndexError:
                # No place of this degree; try the next one.
                d = (d + 1)
            else:
                break
        return p
    def function_field(self):
        """Return the function field these places belong to."""
        return self._field
class FPN(nn.Module):
    """Feature Pyramid Network over a backbone's multi-scale feature maps.

    Each input level gets a 1x1 lateral ("inner") conv and a 3x3 output
    ("layer") conv; coarser levels are upsampled and added top-down.
    Levels with 0 input channels are skipped. `top_blocks` may append
    extra coarse levels (max-pool or P6/P7).
    """

    def __init__(self, in_channels_list, out_channels, conv_block, top_blocks=None):
        super(FPN, self).__init__()
        # Module attribute names are stored (not the modules) so checkpoint
        # keys keep the fpn_inner{i}/fpn_layer{i} naming.
        self.inner_blocks = []
        self.layer_blocks = []
        for idx, in_channels in enumerate(in_channels_list, 1):
            inner_name = 'fpn_inner{}'.format(idx)
            layer_name = 'fpn_layer{}'.format(idx)
            if in_channels == 0:
                continue  # this backbone stage produces no feature map
            self.add_module(inner_name, conv_block(in_channels, out_channels, 1))
            self.add_module(layer_name, conv_block(out_channels, out_channels, 3, 1))
            self.inner_blocks.append(inner_name)
            self.layer_blocks.append(layer_name)
        self.top_blocks = top_blocks

    def forward(self, x):
        """Compute FPN outputs from features `x` ordered fine -> coarse."""
        # Seed the top-down pathway with the coarsest level.
        last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
        results = [getattr(self, self.layer_blocks[-1])(last_inner)]
        # Walk the remaining levels coarse -> fine, merging top-down.
        levels = zip(x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1])
        for feature, inner_name, layer_name in levels:
            if not inner_name:
                continue
            top_down = F.interpolate(last_inner, scale_factor=2, mode='nearest')
            lateral = getattr(self, inner_name)(feature)
            last_inner = lateral + top_down
            results.insert(0, getattr(self, layer_name)(last_inner))
        # Optional extra coarse levels on top of the pyramid.
        if isinstance(self.top_blocks, LastLevelP6P7):
            results.extend(self.top_blocks(x[-1], results[-1]))
        elif isinstance(self.top_blocks, LastLevelMaxPool):
            results.extend(self.top_blocks(results[-1]))
        return tuple(results)
# NOTE(review): the three lines below are dataset-viewer UI residue, not
# source code; commented out so the file remains parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.