code stringlengths 101 5.91M |
|---|
def main():
    """Merge per-task SCROLLS prediction JSON files into one CSV submission file.

    Reads one predictions JSON per SCROLLS task (paths from the CLI), stacks
    them into a single DataFrame laid out per COLUMNS, writes
    <output_dir>/scrolls_predictions.csv, then re-reads the CSV and asserts
    it round-trips exactly.
    """
    parser = argparse.ArgumentParser(description='Prepare SCROLLS predictions')
    parser.add_argument('--output_dir', type=str, help='Path to output the predictions file', required=True)
    parser.add_argument('--qmsum_file', type=str, help='The path to the qmsum dataset json file containing predictions', required=True)
    parser.add_argument('--qasper_file', type=str, help='The path to the qasper dataset json file containing predictions', required=True)
    parser.add_argument('--summ_screen_file', type=str, help='The path to the summ_screen dataset json file containing predictions', required=True)
    parser.add_argument('--quality_file', type=str, help='The path to the quality dataset json file containing predictions', required=True)
    parser.add_argument('--narrative_qa_file', type=str, help='The path to the narrative_qa dataset json file containing predictions', required=True)
    # Typo fix: the help text used to say "contact_nli".
    parser.add_argument('--contract_nli_file', type=str, help='The path to the contract_nli dataset json file containing predictions', required=True)
    parser.add_argument('--gov_report_file', type=str, help='The path to the gov_report dataset json file containing predictions', required=True)
    args = parser.parse_args()
    tasks_dfs = pd.DataFrame(columns=COLUMNS, data=[])
    for (file_key, task_name) in TASKS_MAPPING.items():
        print(f'Adding predictions for {task_name} from {file_key}...')
        with open(getattr(args, file_key)) as f:
            task_data = json.load(f)
        # Each JSON maps example id -> prediction; the id becomes the 'index' column.
        task_df = pd.DataFrame.from_dict(task_data, orient='index', columns=COLUMNS[-1:]).reset_index(drop=False)
        task_df[COLUMNS[0]] = task_name
        task_df[COLUMNS[1]] = task_df['index']
        tasks_dfs = pd.concat((tasks_dfs, task_df[COLUMNS]))
    os.makedirs(args.output_dir, exist_ok=True)
    outfile = os.path.join(args.output_dir, 'scrolls_predictions.csv')
    print(f'Saving the complete predictions file to: {outfile}')
    tasks_dfs = tasks_dfs.reset_index(drop=True)
    tasks_dfs.to_csv(outfile, index=False)
    print('validating submission file is exactly the same as expected')
    recovered_tasks_dfs = safe_read_csv(outfile)
    assert len(recovered_tasks_dfs) == len(tasks_dfs)
    assert recovered_tasks_dfs.columns.tolist() == tasks_dfs.columns.tolist()
    assert np.all(recovered_tasks_dfs.values == tasks_dfs.values)
    print(f'Your benchmark predictions file is ready. If it contains predictions for the test sets please head over to {SUBMISSION_LINK} to submit to the SCROLLS leaderboard.')
class SSHClient():
    """Thin wrapper over paramiko for running commands and moving files via SSH/SFTP.

    ``ssh_credentials`` is a dict that may hold 'username', 'password' and/or
    'key_filename' (path to an RSA private key; '~' is expanded up front).
    """
    def __init__(self, ip_address, ssh_credentials):
        self.ip_address = ip_address
        self.ssh_credentials = ssh_credentials
        # Lazily built by create_client(); reset to None by close().
        self.ssh_client = None
        if ('key_filename' in self.ssh_credentials):
            fpath = os.path.expanduser(self.ssh_credentials['key_filename'])
            if (not os.path.exists(fpath)):
                raise Exception(f"Private key file {fpath} doesn't exist")
            # Store the expanded path so later opens use it directly.
            self.ssh_credentials['key_filename'] = fpath
    def close(self):
        """Close the underlying paramiko client and drop the reference."""
        self.ssh_client.close()
        self.ssh_client = None
    def create_client(self, timeout=2):
        """Create, connect and return a paramiko.SSHClient.

        Host keys are auto-accepted; agent and default-key lookup are
        disabled, so only the explicit password/key credentials are used.
        Re-raises a clearer message when a supplied private key is rejected.
        """
        try:
            self.ssh_client = paramiko.SSHClient()
            self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            user = self.ssh_credentials.get('username')
            password = self.ssh_credentials.get('password')
            pkey = None
            if self.ssh_credentials.get('key_filename'):
                # NOTE(review): assumes an RSA key — other key types
                # (ed25519, ECDSA) would fail here; confirm if needed.
                with open(self.ssh_credentials['key_filename']) as f:
                    pkey = paramiko.RSAKey.from_private_key(f)
            self.ssh_client.connect(self.ip_address, username=user, password=password, pkey=pkey, timeout=timeout, banner_timeout=200, allow_agent=False, look_for_keys=False)
            logger.debug(f'{self.ip_address} ssh client created')
        except Exception as e:
            pk = self.ssh_credentials.get('key_filename')
            # Re-label a generic auth failure when a key was supplied.
            if (pk and (str(e) == 'Authentication failed.')):
                raise Exception(f'Private key {pk} is not valid')
            raise e
        return self.ssh_client
    def exec_command(self, cmd, timeout=None, run_async=False):
        """Run ``cmd`` remotely; returns paramiko's (stdin, stdout, stderr).

        Retries once with a fresh connection on failure.
        NOTE(review): ``run_async`` is accepted but unused here — confirm
        whether it was meant to change behavior.
        """
        if ((not self.ip_address) or (self.ip_address == '0.0.0.0')):
            raise Exception('Invalid IP Address')
        if (self.ssh_client is None):
            self.ssh_client = self.create_client()
        try:
            return self.ssh_client.exec_command(cmd, timeout=timeout)
        except Exception:
            # One retry on a fresh connection (e.g. after a dropped session).
            self.ssh_client = self.create_client()
            return self.ssh_client.exec_command(cmd, timeout=timeout)
    def run_remote_command(self, cmd, timeout=None, run_async=False):
        """Run ``cmd`` and return its stripped stdout (None when run_async).

        Retries once with a fresh connection on failure, like exec_command.
        """
        if ((not self.ip_address) or (self.ip_address == '0.0.0.0')):
            raise Exception('Invalid IP Address')
        if (self.ssh_client is None):
            self.ssh_client = self.create_client()
        try:
            (stdin, stdout, stderr) = self.ssh_client.exec_command(cmd, timeout=timeout)
        except Exception:
            self.ssh_client = self.create_client()
            (stdin, stdout, stderr) = self.ssh_client.exec_command(cmd, timeout=timeout)
        out = None
        if (not run_async):
            out = stdout.read().decode().strip()
            # stderr is read and discarded — presumably to drain the channel;
            # remote errors are not surfaced to the caller.
            stderr.read().decode().strip()
        return out
    def download_remote_file(self, remote_src, local_dst):
        """SFTP-download ``remote_src`` to ``local_dst``, creating parent dirs."""
        if (self.ssh_client is None):
            self.ssh_client = self.create_client()
        dirname = os.path.dirname(local_dst)
        if (dirname and (not os.path.exists(dirname))):
            os.makedirs(dirname)
        ftp_client = self.ssh_client.open_sftp()
        ftp_client.get(remote_src, local_dst)
        ftp_client.close()
    def upload_local_file(self, local_src, remote_dst):
        """SFTP-upload a single local file to the remote path."""
        if (self.ssh_client is None):
            self.ssh_client = self.create_client()
        ftp_client = self.ssh_client.open_sftp()
        ftp_client.put(local_src, remote_dst)
        ftp_client.close()
    def upload_multiple_local_files(self, file_list):
        """SFTP-upload (local_src, remote_dst) pairs over one SFTP session."""
        if (self.ssh_client is None):
            self.ssh_client = self.create_client()
        ftp_client = self.ssh_client.open_sftp()
        for (local_src, remote_dst) in file_list:
            ftp_client.put(local_src, remote_dst)
        ftp_client.close()
    def upload_data_to_file(self, data, remote_dst):
        """Write in-memory ``data`` directly into a remote file via SFTP."""
        if (self.ssh_client is None):
            self.ssh_client = self.create_client()
        ftp_client = self.ssh_client.open_sftp()
        with ftp_client.open(remote_dst, 'w') as f:
            f.write(data)
        ftp_client.close()
def _standardize_domains_of_(systems):
    """Coerce the given dynamical systems onto a common base ring and domain.

    Mutates and returns ``systems`` so every entry shares one base ring (the
    compositum of the number fields involved when all base rings are number
    fields but not all QQ; otherwise the largest ring under coercion) and,
    when possible, identical coordinate rings/domains.

    Raises:
        ValueError: if no common ring exists under coercion, or the domains
            have different dimensions.
    """
    # (Dead code removed: an initial `identical_domains` scan whose result
    # was unconditionally recomputed further below before first use.)
    over_number_fields = True
    all_over_QQ = True
    for ds in systems:
        if (ds.base_ring() not in NumberFields()):
            over_number_fields = False
        if (ds.base_ring() is not QQ):
            all_over_QQ = False
    biggest_ring = None
    if (over_number_fields and (not all_over_QQ)):
        # All base rings are number fields: take the compositum of the
        # non-rational ones.
        number_fields = []
        for ds in systems:
            number_fields.append(ds.base_ring())
        minimal_composite_field = None
        for field in number_fields:
            if (field is not QQ):
                if (minimal_composite_field is None):
                    minimal_composite_field = field
                else:
                    minimal_composite_field = minimal_composite_field.composite_fields(field)[0]
        biggest_ring = minimal_composite_field
    else:
        # Otherwise pick the ring into which every other base ring coerces.
        for ds in systems:
            if (biggest_ring is None):
                biggest_ring = ds.base_ring()
            elif ds.base_ring().has_coerce_map_from(biggest_ring):
                biggest_ring = ds.base_ring()
            elif biggest_ring.has_coerce_map_from(ds.base_ring()):
                pass
            else:
                raise ValueError('given dynamical systems are not automorphic under global composition')
    for i in range(len(systems)):
        if (systems[i].base_ring() != biggest_ring):
            systems[i] = systems[i].change_ring(biggest_ring)
    domain = systems[0].domain()
    identical_domains = all(((ds.domain() == systems[0].domain()) for ds in systems))
    if (not identical_domains):
        # Domains may differ as objects, but must all have the same dimension.
        for ds in systems:
            if (ds.domain().dimension() != systems[0].domain().dimension()):
                raise ValueError("domains of 'DynamicalSystem' objects must be of the same dimension")
    gens = systems[0].domain().ambient_space().gens()
    for i in range(len(systems)):
        if (systems[i].domain().coordinate_ring() != systems[0].domain().coordinate_ring()):
            # Rewrite this system's defining polynomials in the first
            # system's coordinate generators, then rebuild it on `domain`.
            sub_dict = {}
            old_gens = systems[i].domain().ambient_space().gens()
            for j in range(len(old_gens)):
                sub_dict[old_gens[j]] = gens[j]
            new_polys = []
            for poly in systems[i].defining_polynomials():
                new_polys.append(poly.subs(sub_dict))
            systems[i] = DynamicalSystem(new_polys, domain)
    return systems
# NOTE(review): the line below looks like the tail of a stripped decorator
# (likely `@registry.register_builder('webvid2m_caption_instruct')`) — as
# written it is a bare call to `_builder` and will raise NameError at import
# time; confirm against the original source.
_builder('webvid2m_caption_instruct')
class WebVid2MCapInstructBuilder(BaseDatasetBuilder):
    """Dataset builder for the WebVid-2M caption-instruction dataset."""
    # Dataset class used for the training split.
    train_dataset_cls = WebVideoCaptionInstructDataset
    # Maps config names to the YAML file describing the dataset layout.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/webvid/defaults_cap_instruct.yaml'}
def unflatten_linear_layers(prefix, statedict: StateDict, layer: hnn.Linear, out_dims_first_in_dict: Optional[bool]) -> StateDict:
    """Convert flat ('__OUT__', '__IN__') weight/bias arrays in ``statedict``
    back into the named, possibly multi-axis shapes of the hnn.Linear leaves
    of ``layer``.

    Args:
        prefix: state-dict key prefix for ``layer``.
        statedict: flat state dict holding 'weight' and optional 'bias' arrays.
        layer: (pytree of) hnn.Linear module(s) supplying target axis specs.
        out_dims_first_in_dict: whether the flat weight stores the output dim
            first; if None, each layer's own ``out_first`` flag is used.

    Returns:
        A new state dict mapping the same keys to unflattened raw arrays.
    """
    ret_dict: StateDict = {}
    def _unflatten_linear(layer, prefix):
        nonlocal out_dims_first_in_dict
        # Non-Linear leaves pass through untouched.
        if (not isinstance(layer, hnn.Linear)):
            return layer
        weight = statedict[apply_prefix(prefix, 'weight')]
        bias = statedict.get(apply_prefix(prefix, 'bias'), None)
        Out = ensure_tuple(layer.Out)
        In = ensure_tuple(layer.In)
        InOut = (In + Out)
        # Axes of the stored weight that are neither input nor output axes.
        extra_dims = tuple((ax for ax in layer.weight.axes if (ax not in InOut)))
        if (out_dims_first_in_dict is None):
            out_dims_first_in_dict = layer.out_first
        if out_dims_first_in_dict:
            weight = hax.named(weight, hax.concat_axis_specs(extra_dims, ('__OUT__', '__IN__')))
        else:
            weight = hax.named(weight, hax.concat_axis_specs(extra_dims, ('__IN__', '__OUT__')))
        # Reorder to the layer's preferred order, then split the two flat
        # axes back into the layer's named Out/In axes.
        if layer.out_first:
            weight = weight.rearrange((..., '__OUT__', '__IN__'))
        else:
            weight = weight.rearrange((..., '__IN__', '__OUT__'))
        weight = weight.unflatten_axis('__OUT__', layer.Out).unflatten_axis('__IN__', layer.In)
        if (bias is not None):
            bias = hax.named(bias, hax.concat_axis_specs(extra_dims, ('__OUT__',)))
            bias = bias.unflatten_axis('__OUT__', layer.Out)
        # Results are accumulated via closure; tree_map's return value is unused.
        ret_dict[apply_prefix(prefix, 'weight')] = weight.array
        if (bias is not None):
            ret_dict[apply_prefix(prefix, 'bias')] = bias.array
        return ret_dict
    tree_prefixes = leaf_key_paths(layer, prefix, is_leaf=(lambda x: isinstance(x, hnn.Linear)), use_state_dict_keys=True)
    jax.tree_map(_unflatten_linear, layer, tree_prefixes, is_leaf=(lambda x: isinstance(x, hnn.Linear)))
    return ret_dict
def clone_model(model, input_tensors=None):
    """Clone ``model``, dispatching on whether it is Sequential or functional."""
    clone_fn = _clone_sequential_model if isinstance(model, Sequential) else _clone_functional_model
    return clone_fn(model, input_tensors=input_tensors)
def test_incompatible_shapes_raise_valueerror():
    """Mismatched shape combinations must raise, in either argument order."""
    cases = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
    ]
    for shapes in cases:
        assert_incompatible_shapes_raise(shapes)
        assert_incompatible_shapes_raise(list(reversed(shapes)))
def test_divmod():
    """divmod on a proxied int matches the raw value and records arg types."""
    raw = 7
    wrapped = tt.ObjectProxy(raw)
    assert divmod(raw, 3) == divmod(wrapped, 3)
    trace = tt.UsageTraceNode.from_proxy(wrapped)
    assert int in trace.children['__divmod__'].arg_types[0]
def curves_with_j_0_char3(K):
    """Return representatives of the classes of elliptic curves with
    j-invariant 0 over the finite field ``K`` of characteristic 3.

    Raises ValueError unless ``K`` is finite of characteristic 3.
    """
    if ((not K.is_finite()) or (K.characteristic() != 3)):
        raise ValueError('field must be finite of characteristic 3')
    # Pick b with nonzero trace.
    b = None
    while ((not b) or (not b.trace())):
        b = K.random_element()
    if (K.degree() % 2):
        # Odd-degree extension: four representatives suffice.
        return [EllipticCurve(K, a4a6) for a4a6 in [[1, 0], [(- 1), 0], [(- 1), b], [(- 1), (- b)]]]
    a = K.gen()
    q2 = ((K.cardinality() - 1) // 2)
    # Pick a nonzero non-square a (a ** ((q-1)/2) != 1).
    while ((not a) or ((a ** q2) == 1)):
        a = K.random_element()
    x = polygen(K)
    # In even-degree extensions of GF(3), x^2 + 1 splits; i is a root of -1.
    i = ((x ** 2) + 1).roots()[0][0]
    # Pick c so that x^3 + a^2*x + c has no roots in K.
    c = None
    while ((not c) or (((x ** 3) + ((a ** 2) * x)) + c).roots()):
        c = K.random_element()
    return [EllipticCurve(K, a4a6) for a4a6 in [[1, 0], [1, (i * b)], [a, 0], [(a ** 2), 0], [(a ** 2), c], [(a ** 3), 0]]]
class MutualInformation(ConfusionMatrixMetric):
    """Mutual information between predicted and reference binary labels,
    computed from the confusion matrix as MI = H(ref) + H(pred) - H(ref, pred).
    """
    def __init__(self, metric: str='MUTINF'):
        super().__init__(metric)
    def calculate(self):
        """Return the mutual information in bits, or -inf when a marginal is
        degenerate (all positives or all negatives), which needs log2(0)."""
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        n = self.confusion_matrix.n
        fn_tp = (fn + tp)  # reference positives
        fp_tp = (fp + tp)  # predicted positives
        if ((fn_tp == 0) or ((fn_tp / n) == 1) or (fp_tp == 0) or ((fp_tp / n) == 1)):
            warnings.warn('Unable to compute mutual information due to log2 of 0, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        # Marginal entropies of the reference (h1) and prediction (h2) labelings.
        h1 = (- (((fn_tp / n) * math.log2((fn_tp / n))) + ((1 - (fn_tp / n)) * math.log2((1 - (fn_tp / n))))))
        h2 = (- (((fp_tp / n) * math.log2((fp_tp / n))) + ((1 - (fp_tp / n)) * math.log2((1 - (fp_tp / n))))))
        # Joint cell probabilities; zero cells are mapped to 1 so their
        # log2 term contributes 0 (the usual 0*log(0) = 0 convention).
        p00 = (1 if (tn == 0) else (tn / n))
        p01 = (1 if (fn == 0) else (fn / n))
        p10 = (1 if (fp == 0) else (fp / n))
        p11 = (1 if (tp == 0) else (tp / n))
        # Joint entropy of (reference, prediction).
        h12 = (- (((((tn / n) * math.log2(p00)) + ((fn / n) * math.log2(p01))) + ((fp / n) * math.log2(p10))) + ((tp / n) * math.log2(p11))))
        mi = ((h1 + h2) - h12)
        return mi
# NOTE(review): stray residue of a stripped decorator (likely
# `@cached_function` or similar) — as a bare expression this raises
# NameError at import time; confirm against the original source.
_function
def get_cython_cache_dir():
    """Return the directory Cython should use for its cache.

    Resolution order: the CYTHON_CACHE_DIR environment variable, then the
    platform cache directory (~/Library/Caches on macOS, $XDG_CACHE_HOME on
    other POSIX systems) with a 'cython' subdirectory, then ~/.cython.
    """
    explicit = os.environ.get('CYTHON_CACHE_DIR')
    if explicit is not None:
        return explicit
    if os.name == 'posix':
        if sys.platform == 'darwin':
            cache_root = os.path.expanduser('~/Library/Caches')
        else:
            cache_root = os.environ.get('XDG_CACHE_HOME')
        if cache_root and os.path.isdir(cache_root):
            return os.path.join(cache_root, 'cython')
    return os.path.expanduser(os.path.join('~', '.cython'))
# NOTE(review): the line below looks like the argument list of a stripped
# Flask route decorator (likely `@app.route('/list_combiners_data',
# methods=['POST'])`) — as written it is a syntax error; confirm against
# the original source.
('/list_combiners_data', methods=['POST'])
def list_combiners_data():
    """POST endpoint: return data for the combiners named in the JSON body.

    Reads an optional 'combiners' key from the request JSON; returns the
    API response, or a 400 JSON error when the API raises TypeError.
    """
    json_data = request.get_json()
    combiners = json_data.get('combiners', None)
    try:
        response = api.list_combiners_data(combiners)
    except TypeError as e:
        return (jsonify({'success': False, 'message': str(e)}), 400)
    return response
def create_split_tone_node(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
    """Add a 'SplitTone' compositor group node wired to the split-tone node group."""
    split_group = add_split_tone_node_group()
    group_node = node_tree.nodes.new(type='CompositorNodeGroup')
    group_node.name = 'SplitTone'
    group_node.node_tree = split_group
    return group_node
def posat(context, builder, pos, offset):
    """Emit IR computing pos + offset, with offset folded into an intp constant."""
    const_offset = context.get_constant(numba.intp, offset)
    return builder.add(pos, const_offset)
def subst(pattern: List[str], rule_symbol: str, substitute_str: str) -> List[str]:
    """Return a copy of ``pattern`` with every occurrence of ``rule_symbol``
    replaced by ``substitute_str``.

    ``rule_symbol`` must occur at least once in ``pattern``.
    """
    assert (rule_symbol in pattern)
    return [substitute_str if token == rule_symbol else token for token in pattern]
class WFRadiationMeshXMin(RadiationField):
    """Wavefront mesh xMin field, stored at glossary path 'params/Mesh/xMin'."""
    glossary_name = 'params/Mesh/xMin'
    def __init__(self, wf):
        super(WFRadiationMeshXMin, self).__init__(wf)
    # NOTE(review): the two methods below share the name `value`, so the
    # second definition shadows the first and the getter is unreachable as
    # written. This looks like a stripped @property / @value.setter pair —
    # confirm against the original source.
    def value(self):
        """Return mesh.xStart when the wavefront is in R-space, else None (with a warning)."""
        if (self._wf.params.wSpace == 'R-space'):
            return self._wf._srwl_wf.mesh.xStart
        else:
            warnings.warn('params/Mesh/xMin not defined if NOT params/wSpace==R-space')
            return None
    def value(self, val):
        """Set mesh.xStart (as float); warns when the wavefront is not in R-space."""
        if (not (self._wf.params.wSpace == 'R-space')):
            warnings.warn('params/Mesh/xMin not defined if NOT params/wSpace==R-space')
        self._wf._srwl_wf.mesh.xStart = float(val)
class CustomTextDatasetForGenLatentSpace(Dataset):
    """Text dataset for latent-space generation backed by a DataFrame.

    Wraps TextDFDatasetForGen and re-tokenizes each example through an
    encode -> decode -> encode round trip to normalize the token stream.
    """
    def __init__(self, df, tokenizer, split: str, in_memory: bool=False, train_ratio: float=1, omitted_labels=None, reduced_labels=None, reduced_labels_keep_num=None):
        self.tokenizer = tokenizer
        # NOTE: the original computed a local `file_prefix` from `split`
        # here but never used it; removed as dead code.
        self.data = TextDFDatasetForGen(df, in_memory, split, train_ratio, omitted_labels=omitted_labels, reduced_labels=reduced_labels, reduced_labels_keep_num=reduced_labels_keep_num)
        self.omitted_labels = omitted_labels
        self.reduced_labels = reduced_labels
        self.reduced_labels_keep_num = reduced_labels_keep_num
    def __len__(self) -> int:
        return len(self.data)
    def __getitem__(self, index: int):
        """Return (input_ids, labels) as int64 numpy arrays."""
        item = self.data[index]
        # encode -> decode -> encode round trip normalizes tokenization
        # (tokens that re-encode differently collapse to a canonical form).
        input_ids = self.tokenizer.encode(self.tokenizer.decode(self.tokenizer.encode(item['input_ids'])))
        labels = self.tokenizer.encode(self.tokenizer.decode(self.tokenizer.encode(item['labels'])))
        input_ids = np.array(input_ids, np.int64)
        labels = np.array(labels, np.int64)
        return (input_ids, labels)
    def collate_fn(self, batch: typing.List[typing.Tuple[(typing.Any, ...)]]) -> typing.Dict[(str, torch.Tensor)]:
        """Pad a batch of (input_ids, labels) pairs to a common length."""
        (input_ids, labels) = tuple(zip(*batch))
        input_ids = torch.from_numpy(pad_sequences(input_ids, 0))
        labels = torch.from_numpy(pad_sequences(labels, 0))
        return {'input_ids': input_ids, 'labels': labels}
def arg_parse():
    """Parse command-line options for AD-GCL training on ZINC."""
    parser = argparse.ArgumentParser(description='AD-GCL ZINC')
    option_specs = [
        ('--dataset', dict(type=str, default='zinc', help='Dataset')),
        ('--full', dict(default=False, action='store_true', help='Flag to use full zinc dataset')),
        ('--model_lr', dict(type=float, default=0.001, help='Model Learning rate.')),
        ('--view_lr', dict(type=float, default=0.001, help='View Learning rate.')),
        ('--num_gc_layers', dict(type=int, default=5, help='Number of GNN layers before pooling')),
        ('--pooling_type', dict(type=str, default='standard', help='GNN Pooling Type Standard/Layerwise')),
        ('--emb_dim', dict(type=int, default=100, help='embedding dimension')),
        ('--mlp_edge_model_dim', dict(type=int, default=64, help='embedding dimension')),
        ('--batch_size', dict(type=int, default=32, help='batch size')),
        ('--drop_ratio', dict(type=float, default=0.0, help='Dropout Ratio / Probability')),
        ('--epochs', dict(type=int, default=50, help='Train Epochs')),
        ('--reg_lambda', dict(type=float, default=0.0, help='View Learner Edge Perturb Regularization Strength')),
        ('--seed', dict(type=int, default=0)),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def multihead_attention(queries, keys, values, num_units=None, num_heads=1, dropout_keep_prob=1, is_training=True, has_residual=True):
    """Scaled dot-product multi-head attention using the TF1 `tf.layers` API.

    Args:
        queries: [batch, time_q, C_q] tensor.
        keys:    [batch, time_k, C_k] tensor.
        values:  [batch, time_k, C_v] tensor.
        num_units: total attention size; defaults to queries' last dimension.
            Must be divisible by num_heads.
        num_heads: number of attention heads.
        dropout_keep_prob: keep probability applied to the attention weights.
        is_training: enables dropout when true.
        has_residual: adds a dense projection of ``values`` to the output.

    Returns:
        A [batch, time_q, num_units] tensor after ReLU and normalize().
    """
    if (num_units is None):
        # Bug fix: as_list is a method — the original `as_list[-1]` indexed
        # the bound-method object itself, raising TypeError.
        num_units = queries.get_shape().as_list()[-1]
    Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)
    K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
    V = tf.layers.dense(values, num_units, activation=tf.nn.relu)
    if has_residual:
        V_res = tf.layers.dense(values, num_units, activation=tf.nn.relu)
    # Split channels into heads and fold the heads into the batch dimension.
    Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)
    K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)
    V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)
    # Scaled dot-product attention.
    weights = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))
    weights = (weights / (K_.get_shape().as_list()[-1] ** 0.5))
    weights = tf.nn.softmax(weights)
    weights = tf.layers.dropout(weights, rate=(1 - dropout_keep_prob), training=tf.convert_to_tensor(is_training))
    outputs = tf.matmul(weights, V_)
    # Restore the heads back onto the channel dimension.
    outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)
    if has_residual:
        outputs += V_res
    outputs = tf.nn.relu(outputs)
    outputs = normalize(outputs)
    return outputs
class LeNet5(nn.Module):
    """LeNet-5-style CNN: two (conv 5x5 -> BN -> ReLU -> maxpool 2x2) stages
    followed by a two-layer classifier head.

    Args:
        input_channels: number of channels of the input images.
        imsize: input height/width (square, must be even).
        output_dim: number of output classes.
    """
    def __init__(self, input_channels, imsize, output_dim):
        super(LeNet5, self).__init__()
        self.input_channels = input_channels
        self.imsize = imsize
        self.output_dim = output_dim
        assert ((imsize % 2) == 0)
        self.cnn = nn.Sequential(OrderedDict([('conv1', nn.Conv2d(input_channels, 20, 5)), ('bn1', nn.BatchNorm2d(20, affine=False)), ('relu1', nn.ReLU(inplace=True)), ('pool1', nn.MaxPool2d(2, 2)), ('conv2', nn.Conv2d(20, 50, 5)), ('bn2', nn.BatchNorm2d(50, affine=False)), ('relu2', nn.ReLU(inplace=True)), ('pool2', nn.MaxPool2d(2, 2))]))
        # Spatial size after the two conv/pool stages. Bug fix: use integer
        # division — the original true division produced a float, which
        # nn.Linear rejects as an in_features value.
        self.ftsize = (((imsize - 4) // 2) - 4) // 2
        self.fc = nn.Sequential(OrderedDict([('fc1', nn.Linear((50 * self.ftsize) * self.ftsize, 500)), ('bn3', nn.BatchNorm1d(500, affine=False)), ('relu3', nn.ReLU(inplace=True)), ('fc2', nn.Linear(500, output_dim))]))
    def forward(self, x):
        """Return class logits of shape [batch, output_dim]."""
        x = self.cnn(x)
        x = x.view(-1, (50 * self.ftsize) * self.ftsize)
        x = self.fc(x)
        return x
def eval_default_scale_factor(actf, lay):
    """Return the default weight-scale factor for activation ``actf``.

    ``lay`` is kept for interface compatibility: the original conditionals
    on it returned the same value in both branches (no-op ternaries), so
    the factor depends only on the activation name.

    Returns 2.0 for 'linear'/'relu'/'sin'/'cos' and 1.0 otherwise
    (including 'tanh'/'sigmoid').
    """
    if actf in ('linear', 'relu', 'sin', 'cos'):
        return 2.0
    return 1.0
class _data_matrix(spmatrix):
    """Base class for sparse matrices whose nonzeros live in a flat ``.data``
    array (CSR/CSC/COO/DIA style); element-wise operations are implemented by
    transforming ``.data`` and rebuilding the matrix via ``_with_data``.
    """
    def __init__(self):
        spmatrix.__init__(self)
    def _get_dtype(self):
        return self.data.dtype
    def _set_dtype(self, newtype):
        self.data.dtype = newtype
    dtype = property(fget=_get_dtype, fset=_set_dtype)
    def _deduped_data(self):
        # COO-style formats may hold duplicate entries; canonicalize first
        # so element-wise transforms see the summed values.
        if hasattr(self, 'sum_duplicates'):
            self.sum_duplicates()
        return self.data
    def __abs__(self):
        return self._with_data(abs(self._deduped_data()))
    def __round__(self, ndigits=0):
        return self._with_data(np.around(self._deduped_data(), decimals=ndigits))
    def _real(self):
        return self._with_data(self.data.real)
    def _imag(self):
        return self._with_data(self.data.imag)
    def __neg__(self):
        # Boolean arrays have no meaningful unary minus.
        if (self.dtype.kind == 'b'):
            raise NotImplementedError('negating a sparse boolean matrix is not supported')
        return self._with_data((- self.data))
    def __imul__(self, other):
        # In-place scalar multiply only; anything else falls back to Python.
        if isscalarlike(other):
            self.data *= other
            return self
        else:
            return NotImplemented
    def __itruediv__(self, other):
        if isscalarlike(other):
            # Multiply by the reciprocal (matches NumPy true-divide results).
            recip = (1.0 / other)
            self.data *= recip
            return self
        else:
            return NotImplemented
    def astype(self, dtype, casting='unsafe', copy=True):
        dtype = np.dtype(dtype)
        if (self.dtype != dtype):
            return self._with_data(self._deduped_data().astype(dtype, casting=casting, copy=copy), copy=copy)
        elif copy:
            return self.copy()
        else:
            return self
    astype.__doc__ = spmatrix.astype.__doc__
    def conj(self, copy=True):
        if np.issubdtype(self.dtype, np.complexfloating):
            return self._with_data(self.data.conj(), copy=copy)
        elif copy:
            return self.copy()
        else:
            # Real dtypes are their own conjugate.
            return self
    conj.__doc__ = spmatrix.conj.__doc__
    def copy(self):
        return self._with_data(self.data.copy(), copy=True)
    copy.__doc__ = spmatrix.copy.__doc__
    def count_nonzero(self):
        return np.count_nonzero(self._deduped_data())
    count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
    def power(self, n, dtype=None):
        """Element-wise power of the stored values (implicit zeros stay zero)."""
        if (not isscalarlike(n)):
            raise NotImplementedError('input is not scalar')
        data = self._deduped_data()
        if (dtype is not None):
            data = data.astype(dtype)
        return self._with_data((data ** n))
    def _mul_scalar(self, other):
        return self._with_data((self.data * other))
def train(net, optimizer, trainloader):
    """Run one training epoch over ``trainloader`` and return an epoch loss total.

    NOTE(review): ``loss_all`` accumulates ``losses.avg`` — the running mean
    so far — after every batch, rather than the per-batch loss; confirm this
    is the intended epoch metric.
    """
    net.train()
    losses = AverageMeter()
    torch.cuda.empty_cache()
    loss_all = 0
    for (data, labels, _) in tqdm(trainloader):
        (data, labels) = (data.cuda(), labels.cuda())
        optimizer.zero_grad()
        # Second positional arg True presumably asks the model to also
        # return the embedding — confirm against the net's forward signature.
        (embedding, logits) = net(data, True)
        # `criterion` is a module-level callable defined elsewhere in this file.
        (_, loss) = criterion(logits, labels, net.net.fc, embedding)
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), data.size(0))
        loss_all += losses.avg
    return loss_all
def rollout(env_name, num_steps=128, use_expert=False, seed=1):
    """Roll out a policy in a Brax environment for ``num_steps`` and visualize it.

    When ``use_expert`` is False, loads direct-optimization policy params
    from '<env_name>_params.pkl'; otherwise loads a pre-trained SAC
    (humanoid) or PPO expert from the brax_task expert directory.
    """
    env_fn = envs.create_fn(env_name)
    env = env_fn(batch_size=1, episode_length=(num_steps * 2), auto_reset=False)
    env.step = jax.jit(env.step)
    if (not use_expert):
        parametric_action_distribution = distribution.NormalTanhDistribution(event_size=env.action_size)
        policy_model = make_direct_optimization_model(parametric_action_distribution, env.observation_size)
        policy_model.apply = jax.jit(policy_model.apply)
        with open(f'{env_name}_params.pkl', 'rb') as f:
            (normalizer_params, params) = pickle.load(f)
    else:
        # Humanoid experts were trained with SAC, all others with PPO.
        if (env_name == 'humanoid'):
            inference = sac.make_inference_fn(env.observation_size, env.action_size, True)
        else:
            inference = ppo.make_inference_fn(env.observation_size, env.action_size, True)
        inference = jax.jit(inference)
        with open(f'{my_path}/../brax_task/expert_multi_traj/{env_name}_params.pickle', 'rb') as f:
            decoded_params = pickle.load(f)
    (_, _, obs_normalizer_apply_fn) = normalization.create_observation_normalizer(env.observation_size, True, num_leading_batch_dims=2, pmap_to_devices=1)
    key = jax.random.PRNGKey(seed)
    state = env.reset(jax.random.PRNGKey(seed))
    def do_one_step_eval(carry, unused_target_t):
        # One scan step: sample an action for the current state, step the env.
        (state, key) = carry
        # NOTE(review): key_sample is split off but never used; the carried
        # `key` is reused for sampling below — confirm this is intended.
        (key, key_sample) = jax.random.split(key)
        if (not use_expert):
            normalized_obs = obs_normalizer_apply_fn(normalizer_params, state.obs)
            logits = policy_model.apply(params, normalized_obs)
            action = parametric_action_distribution.sample(logits, key)
        else:
            action = inference(decoded_params, state.obs, key)
        nstate = env.step(state, action)
        return ((nstate, key), state)
    (_, state_list) = jax.lax.scan(do_one_step_eval, (state, key), (), length=num_steps)
    print(f'{env_name} reward: {state_list.reward.sum():.2f}')
    visualize(state_list, env_name, num_steps)
def test_contains():
    """`in` works on a proxied list and the membership probe's arg type is traced."""
    wrapped = tt.ObjectProxy([42])
    assert 42 in wrapped
    trace = tt.UsageTraceNode.from_proxy(wrapped)
    assert int in trace.children['__contains__'].arg_types[0]
def construct_beta_hats(opt_beta, sensitivity, eps_list, max_norm):
    """Generate noise-reduction estimates and project each onto the max_norm ball."""
    raw_estimates = noise_reduc.gen_list(opt_beta, sensitivity, eps_list)
    return [project.two_norm_project(estimate, max_norm) for estimate in raw_estimates]
class MyMultiSectionFactory(MultiSectionFactory):
    """Code-sink factory that splits pybindgen output into per-module sections.

    Writes the main file, a shared precompiled header (pch/ns3module.h) and
    one 'ns3_module_<name>.cc' sink per module in ``modules``.
    """
    def __init__(self, main_file_name, modules):
        super(MyMultiSectionFactory, self).__init__()
        self.main_file_name = main_file_name
        self.main_sink = FileCodeSink(open(main_file_name, 'wt'))
        self.header_name = 'ns3module.h'
        header_file_name = os.path.join(os.path.dirname(self.main_file_name), 'pch', self.header_name)
        self.header_sink = FileCodeSink(open(header_file_name, 'wt'))
        self.section_sinks = {'__main__': self.main_sink}
        for module in modules:
            # Dashes are not valid in C identifiers / section names.
            section_name = ('ns3_module_%s' % module.replace('-', '_'))
            file_name = os.path.join(os.path.dirname(self.main_file_name), ('%s.cc' % section_name))
            sink = FileCodeSink(open(file_name, 'wt'))
            self.section_sinks[section_name] = sink
    def get_section_code_sink(self, section_name):
        return self.section_sinks[section_name]
    def get_main_code_sink(self):
        return self.main_sink
    def get_common_header_code_sink(self):
        return self.header_sink
    def get_common_header_include(self):
        return ('"%s"' % self.header_name)
    def close(self):
        """Close every underlying file (header, main and all section sinks)."""
        self.header_sink.file.close()
        self.main_sink.file.close()
        # Bug fix: dict.itervalues() is Python 2-only and raises
        # AttributeError on Python 3; .values() works on both.
        for sink in self.section_sinks.values():
            sink.file.close()
def test_gmm_wrong_descriptor_format_3():
    """learn_gmm must reject descriptor lists whose entries have mismatched ranks."""
    bad_descriptors = [np.zeros((5, 10)), np.zeros((4, 10, 1))]
    with pytest.raises(DescriptorException):
        learn_gmm(bad_descriptors, n_modes=1)
def new():
    """Construct and return a PLY lexer for a MATLAB/Octave-like grammar.

    NOTE(review): the bare parenthesized expressions scattered below —
    ``(mos)``, ``(('(\\.%s)?%s' % (ws0, id)))``, ``((ws0 + '\\]'))`` and
    similar — appear to be the argument lists of stripped ``@TOKEN(...)``
    decorators that attach a regex to the following ``t_*`` rule; as written
    they are no-op expressions, and several rules (e.g. t_TRANSPOSE,
    t_NUMBER, t_NEWLINE) have lost their patterns entirely. Confirm against
    the original source.
    """
    # Operator token regexes; PLY collects every local name starting with t_.
    t_AND = '\\&'
    t_ANDAND = '\\&\\&'
    t_ANDEQ = '\\&='
    t_BACKSLASH = '\\\\'
    t_COLON = ':'
    t_DIV = '\\/'
    t_DIVEQ = '\\/='
    t_DOT = '\\.'
    t_DOTDIV = '\\./'
    t_DOTDIVEQ = '\\./='
    t_DOTEXP = '\\.\\^'
    t_DOTMUL = '\\.\\*'
    t_DOTMULEQ = '\\.\\*='
    t_EQ = '='
    t_EQEQ = '=='
    t_EXP = '\\^'
    t_EXPEQ = '\\^='
    t_GE = '>='
    t_GT = '\\>'
    t_HANDLE = '\\'
    t_LE = '<='
    t_LT = '\\<'
    t_MINUS = '\\-'
    t_MINUSEQ = '\\-='
    t_MINUSMINUS = '\\--'
    t_MUL = '\\*'
    t_POW = '\\*\\*'
    t_MULEQ = '\\*='
    t_NE = '(~=)|(!=)'
    t_NEG = '\\~|\\!'
    t_OR = '\\|'
    t_OREQ = '\\|='
    t_OROR = '\\|\\|'
    t_PLUS = '\\+'
    t_PLUSEQ = '\\+='
    t_PLUSPLUS = '\\+\\+'
    # Lexer states: 'matrix' is entered inside [...] / {...} literals,
    # 'afterkeyword' right after keywords that may be followed by a string.
    states = (('matrix', 'inclusive'), ('afterkeyword', 'exclusive'))
    # NOTE(review): duplicated assignment — identical to the line above.
    states = (('matrix', 'inclusive'), ('afterkeyword', 'exclusive'))
    # Whitespace (including '...' line continuations) and string regexes.
    ws = '(\\s|\\.\\.\\..*\\n|\\\\\\n)'
    ws1 = (ws + '+')
    ws0 = (ws + '*')
    ms = "'([^']|(''))*'"
    # NOTE: this local `os` (double-quoted string regex) shadows the os module.
    os = '"([^"\\a\\b\\f\\r\\t\\0\\v\\n\\\\]|(\\\\[abfn0vtr\\"\\n\\\\])|(""))*"'
    mos = ('(%s)|(%s)' % (os, ms))
    id = '[a-zA-Z_][a-zA-Z_0-9]*'
    def unescape(s):
        # Strip surrounding quotes; '' -> ' in single-quoted strings,
        # escape sequences decoded in double-quoted strings.
        if (s[0] == "'"):
            return s[1:(- 1)].replace("''", "'")
        else:
            try:
                # NOTE(review): str.decode('string_escape') is Python 2-only.
                return s[1:(- 1)].decode('string_escape')
            except:
                return s[1:(- 1)]
    (mos)
    def t_afterkeyword_STRING(t):
        t.value = unescape(t.value)
        t.lexer.begin('INITIAL')
        return t
    def t_afterkeyword_error(t):
        t_error(t)
    def t_TRANSPOSE(t):
        return t
    (mos)
    def t_STRING(t):
        t.value = unescape(t.value)
        return t
    (('(\\.%s)?%s' % (ws0, id)))
    def t_IDENT(t):
        # Identifier / keyword disambiguation, including MATLAB's
        # context-dependent 'end' (index expression vs. block terminator).
        if (t.value == 'parfor'):
            t.value = 'for'
        if (t.value == 'classdef'):
            raise_exception(SyntaxError, ('Not implemented: %s' % t.value), t.lexer)
        t.lexer.lineno += t.value.count('\n')
        if (t.value[0] == '.'):
            t.type = 'FIELD'
            return t
        if ((t.value == 'end') and ((t.lexer.parens > 0) or (t.lexer.brackets > 0) or (t.lexer.braces > 0))):
            t.type = 'END_EXPR'
            return t
        if (t.value in ('end', 'endif', 'endfunction', 'endwhile', 'endfor', 'endswitch', 'end_try_catch')):
            # Match the terminator against the most recent opening keyword.
            keyword = t.lexer.stack.pop()
            if (keyword == 'function'):
                t.type = 'END_FUNCTION'
            else:
                t.type = 'END_STMT'
            return t
        else:
            t.type = reserved.get(t.value, 'IDENT')
            if (t.value in ('if', 'function', 'while', 'for', 'switch', 'try')):
                t.lexer.stack.append(t.value)
            if ((t.type != 'IDENT') and (t.lexer.lexdata[t.lexer.lexpos] == "'")):
                t.lexer.begin('afterkeyword')
            return t
    def t_LPAREN(t):
        t.lexer.parens += 1
        return t
    def t_RPAREN(t):
        t.lexer.parens -= 1
        return t
    ((ws0 + '\\]'))
    def t_RBRACKET(t):
        t.lexer.lineno += t.value.count('\n')
        t.lexer.brackets -= 1
        if ((t.lexer.brackets + t.lexer.braces) == 0):
            t.lexer.begin('INITIAL')
        return t
    (('\\[' + ws0))
    def t_LBRACKET(t):
        t.lexer.lineno += t.value.count('\n')
        t.lexer.brackets += 1
        if ((t.lexer.brackets + t.lexer.braces) == 1):
            t.lexer.begin('matrix')
        return t
    ((ws0 + '\\}'))
    def t_RBRACE(t):
        t.lexer.lineno += t.value.count('\n')
        t.lexer.braces -= 1
        if ((t.lexer.braces + t.lexer.brackets) == 0):
            t.lexer.begin('INITIAL')
        return t
    (('\\{' + ws0))
    def t_LBRACE(t):
        t.lexer.lineno += t.value.count('\n')
        t.lexer.braces += 1
        if ((t.lexer.brackets + t.lexer.braces) == 1):
            t.lexer.begin('matrix')
        return t
    ((',' + ws0))
    def t_COMMA(t):
        t.lexer.lineno += t.value.count('\n')
        # A comma at statement level acts as a statement separator.
        if ((t.lexer.brackets == 0) and (t.lexer.parens == 0) and (t.lexer.braces == 0)):
            t.type = 'SEMI'
            return t
        return t
    (('\\;' + ws0))
    def t_SEMI(t):
        t.lexer.lineno += t.value.count('\n')
        return t
    def t_NUMBER(t):
        # MATLAB imaginary suffix 'i' -> Python 'j', then eval the numeric
        # literal (eval only ever sees text matched by the NUMBER regex).
        if (t.value[(- 1)] == 'i'):
            t.value = (t.value[:(- 1)] + 'j')
        t.value = eval(t.value)
        return t
    def t_NEWLINE(t):
        t.lexer.lineno += len(t.value)
        # Newlines outside parens/braces terminate the statement.
        if ((not t.lexer.parens) and (not t.lexer.braces)):
            t.value = ';'
            t.type = 'SEMI'
            return t
    def t_ERROR_STMT(t):
        t.lexer.lineno += 1
    def t_COMMENT(t):
        t.lexer.lineno += t.value.count('\n')
        t.type = 'COMMENT'
        return t
    def t_comment(t):
        # Skip to end of line unless the comment is a '!' shell escape.
        if (t.value[(- 1)] != '!'):
            t.lexer.lexpos = t.lexer.lexdata.find('\n', t.lexer.lexpos)
    ((('(?<=\\w)' + ws1) + '(?=\\()'))
    def t_matrix_BAR(t):
        pass
    # Inside [...]: whitespace between a value-ending token (tend) and a
    # value-starting token (tbeg) separates elements, i.e. acts as COMMA.
    tend = '(?<=[])}\'\\".]|\\w)'
    tbeg = '(?=[-+]?([[({\'\\"]|\\w|\\.\\d))'
    (((tend + ws1) + tbeg))
    def t_matrix_FOO(t):
        t.lexer.lineno += t.value.count('\n')
        t.type = 'COMMA'
        return t
    def t_ELLIPSIS(t):
        t.lexer.lineno += 1
        pass
    def t_SPACES(t):
        pass
    def t_error(t):
        raise_exception(SyntaxError, ('Unexpected "%s" (lexer)' % t.value), t.lexer)
    # Build the lexer and attach bracket-nesting / keyword-stack state used
    # by the rules above.
    lexer = lex.lex(reflags=re.MULTILINE)
    lexer.brackets = 0
    lexer.parens = 0
    lexer.braces = 0
    lexer.stack = []
    return lexer
def AUROC(open_set_preds, open_set_labels):
    """Area under the ROC curve for open-set scores against binary labels."""
    return roc_auc_score(open_set_labels, open_set_preds)
def horizontally_flip_bbox(bbox: BoundingBox) -> BoundingBox:
    """Mirror an (x, y, w, h) box with [0, 1]-normalized coordinates about
    the vertical axis; only x changes, to 1 - (x + w)."""
    x, y, w, h = bbox
    return (1 - (x + w), y, w, h)
class MatFile5Writer():
    """Writer for MATLAB 5-format (.mat) files."""
    def __init__(self, file_stream, do_compression=False, unicode_strings=False, global_vars=None, long_field_names=False, oned_as='row'):
        """Args:
            file_stream: writable binary stream to receive the MAT data.
            do_compression: zlib-compress each variable's byte stream.
            unicode_strings: write string arrays in a unicode encoding.
            global_vars: iterable of names to flag as MATLAB global variables.
            long_field_names: permit long struct field names.
            oned_as: write 1-D arrays as 'row' or 'column' matrices.
        """
        self.file_stream = file_stream
        self.do_compression = do_compression
        self.unicode_strings = unicode_strings
        if global_vars:
            self.global_vars = global_vars
        else:
            self.global_vars = []
        self.long_field_names = long_field_names
        self.oned_as = oned_as
        # Created fresh on each put_variables() call.
        self._matrix_writer = None
    def write_file_header(self):
        """Write the MAT-file header (description, version, endian indicator)."""
        hdr = np.zeros((), NDT_FILE_HDR)
        hdr['description'] = f'MATLAB 5.0 MAT-file Platform: {os.name}, Created on: {time.asctime()}'
        # 0x0100 marks MAT-file version 5.
        hdr['version'] = 256
        # 19785 == 0x4D49 ('MI'); readers inspect these two bytes ('MI' vs
        # 'IM') to detect the writer's byte order.
        hdr['endian_test'] = np.ndarray(shape=(), dtype='S2', buffer=np.uint16(19785))
        self.file_stream.write(hdr.tobytes())
    def put_variables(self, mdict, write_header=None):
        """Write each (name, array) pair in ``mdict`` as a MAT-file variable.

        Names starting with '_' are skipped (not representable in MATLAB).
        The file header is written automatically when the stream is at
        position 0, unless ``write_header`` explicitly overrides that.
        """
        if (write_header is None):
            write_header = (self.file_stream.tell() == 0)
        if write_header:
            self.write_file_header()
        self._matrix_writer = VarWriter5(self)
        for (name, var) in mdict.items():
            if (name[0] == '_'):
                continue
            is_global = (name in self.global_vars)
            if self.do_compression:
                # Serialize to memory, compress, then emit a miCOMPRESSED
                # tag followed by the deflated payload.
                stream = BytesIO()
                self._matrix_writer.file_stream = stream
                self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
                out_str = zlib.compress(stream.getvalue())
                tag = np.empty((), NDT_TAG_FULL)
                tag['mdtype'] = miCOMPRESSED
                tag['byte_count'] = len(out_str)
                self.file_stream.write(tag.tobytes())
                self.file_stream.write(out_str)
            else:
                self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
class ClusterGCN(GCN):
    """Deprecated alias for GCN (Cluster-GCN support was folded into GCN).

    Kept for backward compatibility: emits a DeprecationWarning and forwards
    all arguments to GCN with ``squeeze_output_batch=False`` to preserve the
    old output shape.
    """
    def __init__(self, layer_sizes, activations, generator, bias=True, dropout=0.0, kernel_initializer='glorot_uniform', kernel_regularizer=None, kernel_constraint=None, bias_initializer='zeros', bias_regularizer=None, bias_constraint=None):
        warnings.warn('ClusterGCN has been replaced by GCN with little functionality change (the GCN class removes the batch dimension in some cases)', DeprecationWarning, stacklevel=2)
        super().__init__(layer_sizes=layer_sizes, generator=generator, bias=bias, dropout=dropout, activations=activations, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, kernel_constraint=kernel_constraint, bias_initializer=bias_initializer, bias_regularizer=bias_regularizer, bias_constraint=bias_constraint, squeeze_output_batch=False)
def test_IndexedArray_RecordArray_NumpyArray():
    """Indexing and slicing a record field through an IndexedArray."""
    index = ak.index.Index(np.array([2, 2, 0, 1, 4, 5, 4]))
    records = ak.contents.recordarray.RecordArray(
        [ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))],
        ['nest'],
    )
    a = ak.contents.indexedarray.IndexedArray(index, records)
    assert a.to_typetracer().form == a.form
    assert a.to_typetracer().form.type == a.form.type
    field = a['nest']
    assert len(field) == 7
    assert a.to_typetracer()['nest'].form == field.form
    # Element i follows the index into the underlying payload; negative
    # positions address the same elements from the end.
    expected = [3.3, 3.3, 1.1, 2.2, 5.5, 6.6, 5.5]
    for pos, value in enumerate(expected):
        assert field[pos] == value
        assert field[pos - 7] == value
    with pytest.raises(IndexError):
        field[7]
    with pytest.raises(IndexError):
        field[-8]
    tail = field[3:]
    assert isinstance(tail, ak.contents.indexedarray.IndexedArray)
    assert a.to_typetracer()['nest'][3:].form == tail.form
    # Out-of-range stop values are clipped, so every window below has 4 items.
    for window in (tail, field[-4:], field[3:100], field[-4:100]):
        assert len(window) == 4
    assert tail[1] == 5.5
    assert field[-4:][1] == 5.5
    with pytest.raises(IndexError):
        field['bad']
class AbstractLanguage(Parent):
    """Base class for languages over a (possibly infinite) alphabet.

    The alphabet may be given as an integer n (interpreted as {1, ..., n}),
    a named alphabet string, or anything ``build_alphabet`` accepts.
    """

    def __init__(self, alphabet=None, category=None):
        if isinstance(alphabet, (int, Integer)):
            from sage.sets.integer_range import IntegerRange
            alphabet = IntegerRange(1, alphabet + 1)
        elif alphabet in ('integers', 'positive integers', 'natural numbers'):
            alphabet = build_alphabet(name=alphabet)
        else:
            alphabet = build_alphabet(alphabet)
        self._alphabet = alphabet
        # Default: sort letters by their rank in the alphabet.
        self.sortkey_letters = self._sortkey_letters
        size = alphabet.cardinality()
        if size == Infinity:
            self.sortkey_letters = self._sortkey_trivial
        elif size < 36:
            # Small alphabets: if letters are already strictly increasing,
            # the identity key is enough (letters may not be comparable,
            # hence the TypeError guard).
            try:
                if all(alphabet.unrank(i) > alphabet.unrank(j)
                       for i in range(size) for j in range(i)):
                    self.sortkey_letters = self._sortkey_trivial
            except TypeError:
                pass
        if category is None:
            category = Sets()
        Parent.__init__(self, category=category)

    def alphabet(self):
        """Return the alphabet of this language."""
        return self._alphabet

    def identity_morphism(self):
        """Return the identity word morphism on this alphabet."""
        if self.alphabet().cardinality() not in ZZ:
            raise NotImplementedError('size of alphabet must be finite')
        from sage.combinat.words.morphism import WordMorphism
        return WordMorphism({letter: letter for letter in self.alphabet()})

    def _check(self, w, length=40):
        """Check that the first ``length`` letters of ``w`` are in the alphabet."""
        limit = None if length is None else int(length)
        for letter in itertools.islice(w, limit):
            if letter not in self.alphabet():
                raise ValueError('%s not in alphabet' % letter)

    def _sortkey_trivial(self, letter1):
        """Identity sort key: letters compare directly."""
        return letter1

    def _sortkey_letters(self, letter1):
        """Sort key: the rank of the letter in the alphabet."""
        return self.alphabet().rank(letter1)

    def __eq__(self, other):
        if self is other:
            return True
        return type(self) is type(other) and self.alphabet() == other.alphabet()

    def __ne__(self, other):
        return not (self == other)
class SimpleModel2(Model):
    """Simple MLP policy model for tests.

    Args:
        output_dim: Size of the action output.
        hidden_sizes: Hidden layer widths of the MLP.
        name: Optional model name.
    """

    def __init__(self, output_dim=2, hidden_sizes=(4, 4), name=None):
        super().__init__(name)
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes

    def _build(self, obs_input, name=None):
        # The name argument is part of the Model interface but unused here.
        del name
        return mlp(obs_input, self._output_dim, self._hidden_sizes, 'state')
class Parser(object):
    """Factory for the command-line interface shared by train/infer/verify runs."""

    def getParser(self):
        """Return an argparse.ArgumentParser configured with every CLI option."""
        parser = argparse.ArgumentParser()
        # (flag, kwargs) pairs, registered in the order they appear in --help.
        options = [
            ('--train', dict(action='store_true')),
            ('--infer', dict(action='store_true')),
            ('--verify', dict(action='store_true')),
            ('--word_label', dict(action='store_true')),
            ('--dir', dict(type=str, default=None, required=True)),
            ('--data', dict(type=str, default=None, choices=['yelp', 'sst', 'cifar', 'mnist'], required=True)),
            ('--base_dir', dict(type=str, default='data/model_base')),
            ('--seed', dict(type=int, default=0)),
            ('--use_tsv', dict(action='store_true')),
            ('--vocab_size', dict(type=int, default=50000)),
            ('--small', dict(action='store_true')),
            ('--debug', dict(action='store_true')),
            ('--use_dev', dict(action='store_true')),
            ('--num_classes', dict(type=int, default=2)),
            ('--cpu', dict(action='store_true')),
            ('--cpus', dict(type=int, default=32)),
            ('--display_interval', dict(type=int, default=50)),
            ('--num_epoches', dict(type=int, default=3)),
            ('--batch_size', dict(type=int, default=32)),
            ('--max_sent_length', dict(type=int, default=128)),
            ('--lr', dict(type=float, default=0.0001)),
            ('--num_labels', dict(type=int, default=2)),
            ('--num_layers', dict(type=int, default=12)),
            ('--num_attention_heads', dict(type=int, default=4)),
            ('--hidden_size', dict(type=int, default=256)),
            ('--intermediate_size', dict(type=int, default=512)),
            ('--warmup', dict(type=float, default=-1)),
            ('--hidden_act', dict(type=str, default='relu')),
            ('--weight_decay', dict(type=float, default=0.01)),
            ('--min_word_freq', dict(type=int, default=50)),
            ('--layer_norm', dict(type=str, default='no_var', choices=['standard', 'no', 'no_var'])),
            ('--samples', dict(type=int, default=10)),
            ('--p', dict(type=int, default=2)),
            ('--eps', dict(type=float, default=1e-05)),
            ('--max_eps', dict(type=float, default=0.01)),
            ('--verbose', dict(action='store_true')),
            ('--debug_pos', dict(type=int, default=1)),
            ('--log', dict(type=str, default='log.txt')),
            ('--res', dict(type=str, default='res.json')),
            ('--max_verify_length', dict(type=int, default=32)),
            ('--method', dict(type=str, default='baf', choices=['baf', 'backward', 'forward', 'ibp', 'discrete'])),
            ('--num_verify_iters', dict(type=int, default=10)),
            ('--view_embed_dist', dict(action='store_true')),
            ('--empty_cache', dict(action='store_true')),
            ('--perturbed_words', dict(type=int, default=1, choices=[1, 2])),
        ]
        for flag, kwargs in options:
            parser.add_argument(flag, **kwargs)
        return parser
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, drop_last=False, pin_memory=True, persistent_workers=True, **kwargs):
    """Build a PyTorch DataLoader for (optionally distributed) training.

    Args:
        dataset: Dataset to load from.
        samples_per_gpu: Batch size on each GPU.
        workers_per_gpu: Worker processes per GPU.
        num_gpus: Number of GPUs (only used in non-distributed mode).
        dist: If True, use a DistributedSampler and per-GPU batch size.
        shuffle: Whether to shuffle (handled by the sampler when dist=True).
        seed: Optional seed forwarded to each worker via worker_init_fn.
        drop_last: Drop the last incomplete batch.
        pin_memory: Use pinned host memory.
        persistent_workers: Keep workers alive between epochs
            (only supported by torch >= 1.8).
        **kwargs: Extra keyword arguments forwarded to DataLoader.

    Returns:
        torch.utils.data.DataLoader
    """
    rank, world_size = get_dist_info()
    if dist:
        sampler = DistributedSampler(dataset, world_size, rank, shuffle=shuffle)
        # The sampler already shuffles; DataLoader must not shuffle again.
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu
    init_fn = partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if seed is not None else None
    # BUG FIX: the original compared version *strings* lexicographically
    # (torch.__version__ >= '1.8.0'), which is wrong for torch >= 1.10
    # ('1.10.0' < '1.8.0' as strings).  Compare numeric (major, minor) tuples.
    try:
        torch_version = tuple(int(part) for part in torch.__version__.split('+')[0].split('.')[:2])
    except ValueError:
        # Unparsable version string: conservatively skip persistent_workers.
        torch_version = (0, 0)
    common = dict(
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
    )
    if torch_version >= (1, 8):
        # persistent_workers was added to DataLoader in torch 1.7/1.8.
        data_loader = DataLoader(dataset, persistent_workers=persistent_workers, **common, **kwargs)
    else:
        data_loader = DataLoader(dataset, **common, **kwargs)
    return data_loader
def _group_str(names: List[str]) -> str:
    """Collapse names sharing a common prefix into 'prefix{a,b,c}' form.

    Batch-norm parameter groups are additionally abbreviated to 'bn_*'.
    """
    prefix = _longest_common_prefix_str(names)
    suffixes = ','.join(name[len(prefix):] for name in names)
    grouped = '%s{%s}' % (prefix, suffixes)
    grouped = grouped.replace('bn_{beta,running_mean,running_var,gamma}', 'bn_*')
    grouped = grouped.replace('bn_beta,bn_running_mean,bn_running_var,bn_gamma', 'bn_*')
    return grouped
def install_lightautoml():
    """Build and install the local lightautoml wheel via poetry (shell side effects only)."""
    # NOTE(review): the curl command pipes an empty URL into the bundled
    # python interpreter — the installer URL (presumably the poetry install
    # script) appears to have been stripped; confirm before running.
    os.system('curl -sSL | ../../bin/python -')
    # Build the wheel with the freshly installed poetry.
    os.system('/root/.local/bin/poetry build')
    # Install the built wheel; the version is hard-coded and must match
    # the version declared in pyproject.toml.
    os.system('../../bin/pip install ./dist/lightautoml-0.3.7.4-py3-none-any.whl')
def test_custom_record():
    """Records tagged 'Dummy' dispatch to the registered numba typer/lowerer."""
    behavior = {
        ('__numba_typer__', 'Dummy'): dummy_typer,
        ('__numba_lower__', 'Dummy'): dummy_lower,
    }
    array = ak.highlevel.Array(
        [{'x': 1.1, 'y': 100}, {'x': 2.2, 'y': 200}, {'x': 3.3, 'y': 300}],
        behavior=behavior,
        check_valid=True,
    )
    # Tag the record type so the custom behavior applies to its items.
    array.layout.parameters['__record__'] = 'Dummy'

    def lookup(data, position):
        return data[position]

    assert lookup(array, 1) == 202.2
    assert lookup(array, 2) == 303.3
class LatentWidget:
    """Viewer widget that controls the latent (seed) position.

    State lives in ``self.latent``: ``x``/``y`` are continuous coordinates
    in seed space, ``anim`` toggles automatic drift and ``speed`` its rate.
    Each frame, ``__call__`` draws the controls and publishes up to four
    (seed, weight) pairs in ``viz.args.w0_seeds`` for bilinear blending
    between neighboring integer seeds.
    """

    def __init__(self, viz):
        self.viz = viz
        # Continuous latent position plus animation settings.
        self.latent = dnnlib.EasyDict(x=1, y=0, anim=False, speed=0.25)
        self.latent_def = dnnlib.EasyDict(self.latent)  # defaults, for Reset
        self.step_y = 100  # one vertical step advances the seed by 100

    def drag(self, dx, dy):
        """Translate a pixel drag into latent-space motion (font-size scaled)."""
        viz = self.viz
        self.latent.x += ((dx / viz.font_size) * 0.04)
        self.latent.y += ((dy / viz.font_size) * 0.04)

    # BUG FIX: this line was the bare expression ``_utils.scoped_by_object_id``
    # (a NameError at class-creation time) — clearly a mangled decorator;
    # restored as the imgui_utils decorator applied to __call__.
    @imgui_utils.scoped_by_object_id
    def __call__(self, show=True):
        viz = self.viz
        if show:
            imgui.text('Latent')
            imgui.same_line(viz.label_w)
            # Integer seed derived from the rounded (x, y) grid position.
            seed = (round(self.latent.x) + (round(self.latent.y) * self.step_y))
            with imgui_utils.item_width((viz.font_size * 8)):
                (changed, seed) = imgui.input_int('##seed', seed, step=0)
                if changed:
                    # Typing a seed resets to that exact grid point.
                    self.latent.x = seed
                    self.latent.y = 0
            imgui.same_line(((viz.label_w + (viz.font_size * 8)) + viz.spacing))
            # Fractional offsets from the nearest integer seed.
            frac_x = (self.latent.x - round(self.latent.x))
            frac_y = (self.latent.y - round(self.latent.y))
            with imgui_utils.item_width((viz.font_size * 5)):
                (changed, (new_frac_x, new_frac_y)) = imgui.input_float2('##frac', frac_x, frac_y, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE)
                if changed:
                    self.latent.x += (new_frac_x - frac_x)
                    self.latent.y += (new_frac_y - frac_y)
            imgui.same_line(((viz.label_w + (viz.font_size * 13)) + (viz.spacing * 2)))
            (_clicked, dragging, dx, dy) = imgui_utils.drag_button('Drag', width=viz.button_w)
            if dragging:
                self.drag(dx, dy)
            imgui.same_line((((viz.label_w + (viz.font_size * 13)) + viz.button_w) + (viz.spacing * 3)))
            (_clicked, self.latent.anim) = imgui.checkbox('Anim', self.latent.anim)
            imgui.same_line(round((viz.font_size * 28.7)))
            with imgui_utils.item_width((((- 2) - (viz.button_w * 2)) - (viz.spacing * 2))), imgui_utils.grayed_out((not self.latent.anim)):
                (changed, speed) = imgui.slider_float('##speed', self.latent.speed, (- 5), 5, format='Speed %.3f', power=3)
                if changed:
                    self.latent.speed = speed
            imgui.same_line()
            # Snap to the nearest integer seed; enabled only when off-grid.
            snapped = dnnlib.EasyDict(self.latent, x=round(self.latent.x), y=round(self.latent.y))
            if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.latent != snapped)):
                self.latent = snapped
            imgui.same_line()
            if imgui_utils.button('Reset', width=(- 1), enabled=(self.latent != self.latent_def)):
                self.latent = dnnlib.EasyDict(self.latent_def)
        if self.latent.anim:
            self.latent.x += (viz.frame_delta * self.latent.speed)
        # Publish the four surrounding integer seeds with bilinear weights.
        viz.args.w0_seeds = []
        for (ofs_x, ofs_y) in [[0, 0], [1, 0], [0, 1], [1, 1]]:
            seed_x = (np.floor(self.latent.x) + ofs_x)
            seed_y = (np.floor(self.latent.y) + ofs_y)
            seed = ((int(seed_x) + (int(seed_y) * self.step_y)) & ((1 << 32) - 1))
            weight = ((1 - abs((self.latent.x - seed_x))) * (1 - abs((self.latent.y - seed_y))))
            if (weight > 0):
                viz.args.w0_seeds.append([seed, weight])
class TestFFTShift(object):
    """Tests for fft.fftshift / fft.ifftshift."""

    def test_definition(self):
        # Zero frequency moves to the centre, for odd and even lengths.
        x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
        y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
        assert_array_almost_equal(fft.fftshift(x), y)
        assert_array_almost_equal(fft.ifftshift(y), x)
        x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
        y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
        assert_array_almost_equal(fft.fftshift(x), y)
        assert_array_almost_equal(fft.ifftshift(y), x)

    def test_inverse(self):
        # ifftshift undoes fftshift for both odd and even lengths.
        for n in [1, 4, 9, 100, 211]:
            x = np.random.random((n,))
            assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)

    def test_axes_keyword(self):
        # int, tuple and default axes arguments are all equivalent.
        freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
        shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
        assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
        assert_array_almost_equal(fft.fftshift(freqs, axes=0), fft.fftshift(freqs, axes=(0,)))
        assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
        assert_array_almost_equal(fft.ifftshift(shifted, axes=0), fft.ifftshift(shifted, axes=(0,)))
        assert_array_almost_equal(fft.fftshift(freqs), shifted)
        assert_array_almost_equal(fft.ifftshift(shifted), freqs)

    def test_uneven_dims(self):
        # Non-square input: shift each axis independently and together.
        freqs = [[0, 1], [2, 3], [4, 5]]
        shift_dim0 = [[4, 5], [0, 1], [2, 3]]
        assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
        assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
        assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
        assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
        shift_dim1 = [[1, 0], [3, 2], [5, 4]]
        assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
        assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
        shift_dim_both = [[5, 4], [1, 0], [3, 2]]
        assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
        assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
        assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
        assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)

    def test_equal_to_original(self):
        """Compare against a reference implementation.

        FIX: the reference previously imported from ``numpy.compat``
        (``integer_types``, equal to ``(int,)`` on Python 3) and
        ``numpy.core`` — both deprecated and removed in NumPy 2.0.  The
        public ``np`` namespace and plain ``int`` are exact equivalents.
        """
        def original_fftshift(x, axes=None):
            tmp = np.asarray(x)
            ndim = tmp.ndim
            if axes is None:
                axes = list(range(ndim))
            elif isinstance(axes, int):
                axes = (axes,)
            y = tmp
            for k in axes:
                n = tmp.shape[k]
                p2 = (n + 1) // 2
                mylist = np.concatenate((np.arange(p2, n), np.arange(p2)))
                y = np.take(y, mylist, k)
            return y

        def original_ifftshift(x, axes=None):
            tmp = np.asarray(x)
            ndim = tmp.ndim
            if axes is None:
                axes = list(range(ndim))
            elif isinstance(axes, int):
                axes = (axes,)
            y = tmp
            for k in axes:
                n = tmp.shape[k]
                p2 = n - ((n + 1) // 2)
                mylist = np.concatenate((np.arange(p2, n), np.arange(p2)))
                y = np.take(y, mylist, k)
            return y

        # Exhaustive small shapes (including empty axes) and axes keywords.
        for i in range(16):
            for j in range(16):
                for axes_keyword in [0, 1, None, (0,), (0, 1)]:
                    inp = np.random.rand(i, j)
                    assert_array_almost_equal(fft.fftshift(inp, axes_keyword), original_fftshift(inp, axes_keyword))
                    assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), original_ifftshift(inp, axes_keyword))
def init_pos():
    # Lay an (N+1) x (N+1) particle grid over the square [0.45, 0.7]^2,
    # all particles at rest.
    for i, j in ti.ndrange(N + 1, N + 1):
        idx = i * (N + 1) + j
        pos[idx] = ti.Vector([i, j]) / N * 0.25 + ti.Vector([0.45, 0.45])
        vel[idx] = ti.Vector([0, 0])
    # Precompute each triangle's inverse rest-shape matrix for FEM.
    for e in range(NF):
        ia, ib, ic = f2v[e]
        a, b, c = pos[ia], pos[ib], pos[ic]
        rest_shape = ti.Matrix.cols([a - c, b - c])
        B[e] = rest_shape.inverse()
class Bottleneck(nn.Module):
    """Identity-shaped bottleneck that optionally threads features through
    a stack of convolutional RNN cells (configured globally via ``config``).
    """

    def __init__(self, tensor_shape):
        super(Bottleneck, self).__init__()
        c, h, w = tensor_shape
        self.in_shape = tensor_shape
        self.out_shape = tensor_shape
        if config.refine_net_use_rnn:
            # Map the configured cell type name to its class; unknown names
            # silently contribute no cell (matching the original elif chain).
            cell_types = {'CRNN': CRNNCell, 'CLSTM': CLSTMCell, 'CGRU': CGRUCell}
            cells = []
            for _ in range(config.refine_net_rnn_num_cells):
                cell_cls = cell_types.get(config.refine_net_rnn_type)
                if cell_cls is not None:
                    cells.append(cell_cls(input_size=config.refine_net_num_features, hidden_size=config.refine_net_num_features))
            self.rnn_cells = nn.ModuleList(cells)

    def forward(self, bottleneck_features, output_dict, previous_output_dict):
        """Run the RNN stack (if enabled), persisting per-cell states in
        output_dict and reading the previous frame's states when available."""
        if not config.refine_net_use_rnn:
            return bottleneck_features
        features = bottleneck_features
        for idx, cell in enumerate(self.rnn_cells):
            key = 'refinenet_rnn_states' + ('_%d' % idx)
            prev_states = previous_output_dict[key] if previous_output_dict is not None else None
            states = cell(features, prev_states)
            output_dict[key] = states
            # LSTM-style cells return (output, ...) tuples; others return
            # the output tensor directly.
            features = states[0] if isinstance(states, tuple) else states
        return features
class CriterionDSN(nn.Module):
    """Deep-supervision segmentation loss.

    Cross-entropy on the main prediction (preds[0]) plus 0.4x cross-entropy
    on the auxiliary prediction (preds[1]), both bilinearly resized to the
    target resolution.

    Args:
        ignore_index: Label value excluded from the loss.
        use_weight: Accepted for interface compatibility; not used here.
        reduce: Legacy flag — True averages the loss, False returns
            per-element losses.
    """

    def __init__(self, ignore_index=255, use_weight=True, reduce=True):
        super(CriterionDSN, self).__init__()
        self.ignore_index = ignore_index
        # FIX: the legacy `reduce=` argument of CrossEntropyLoss is
        # deprecated; `reduction=` is the exact equivalent mapping.
        self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, reduction='mean' if reduce else 'none')
        if (not reduce):
            print('disabled the reduce.')

    def forward(self, preds, target):
        """Compute loss for a list of two predictions against integer labels.

        Args:
            preds: [main_logits, aux_logits], each (N, C, h', w').
            target: (N, H, W) integer class labels.
        """
        h, w = target.size(1), target.size(2)
        # FIX: F.upsample is deprecated; F.interpolate is the drop-in
        # replacement with identical semantics.
        scale_pred = F.interpolate(input=preds[0], size=(h, w), mode='bilinear', align_corners=True)
        loss1 = self.criterion(scale_pred, target)
        scale_pred = F.interpolate(input=preds[1], size=(h, w), mode='bilinear', align_corners=True)
        loss2 = self.criterion(scale_pred, target)
        return loss1 + loss2 * 0.4
def CremonaRichmondConfiguration():
    """Return the Cremona-Richmond configuration as an incidence structure.

    Built from the Tutte-Coxeter graph: one side of its bipartition gives
    the blocks (each block is the neighborhood of a vertex), relabeled to
    integer points.
    """
    from sage.graphs.generators.smallgraphs import TutteCoxeterGraph
    from sage.combinat.designs.incidence_structures import IncidenceStructure
    graph = TutteCoxeterGraph()
    blocks = [graph.neighbors(v) for v in graph.bipartite_sets()[0]]
    structure = IncidenceStructure(blocks)
    structure.relabel()
    return structure
class CComplexBaseTypeNode(CBaseTypeNode):
    """Type node combining a base type with a declarator (e.g. pointers)."""

    child_attrs = ['base_type', 'declarator']

    def analyse(self, env, could_be_name=False):
        """Resolve the declared type by analysing base type then declarator."""
        analysed_base = self.base_type.analyse(env, could_be_name)
        _, declared_type = self.declarator.analyse(analysed_base, env)
        return declared_type
class TestParser(unittest.TestCase):
    """Unit tests for the ``parse`` loaders (CSV, GraphML, edge/adjacency lists).

    Each test writes a small fixture file (or builds an in-memory list),
    parses it, checks the resulting sparse matrix via its CSR internals
    (indices/indptr/data), then removes the fixture.
    """

    def test_unlabeled_unweighted(self):
        """Integer-id CSV edge list with no weights parses to a 0/1 adjacency."""
        self.stub_data_1 = 'stub_1.txt'
        with open(self.stub_data_1, 'w') as text_file:
            text_file.write('%stub\n1 3\n4 5\n0 2')
        adjacency = parse.from_csv(self.stub_data_1)
        self.assertTrue((adjacency.indices == [2, 3, 0, 1, 5, 4]).all())
        self.assertTrue((adjacency.indptr == [0, 1, 2, 3, 4, 5, 6]).all())
        self.assertTrue((adjacency.data == [1, 1, 1, 1, 1, 1]).all())
        remove(self.stub_data_1)

    def test_labeled_weighted(self):
        """String-labeled, weighted CSV: names are sorted and weights kept."""
        self.stub_data_2 = 'stub_2.txt'
        with open(self.stub_data_2, 'w') as text_file:
            text_file.write('%stub\nf, e, 5\na, d, 6\nc, b, 1')
        graph = parse.from_csv(self.stub_data_2)
        adjacency = graph.adjacency
        names = graph.names
        self.assertTrue((adjacency.indices == [4, 3, 5, 1, 0, 2]).all())
        self.assertTrue((adjacency.indptr == [0, 1, 2, 3, 4, 5, 6]).all())
        self.assertTrue((adjacency.data == [1, 6, 5, 6, 1, 5]).all())
        # Note: second-column labels keep their leading space from the CSV.
        self.assertTrue((names == [' b', ' d', ' e', 'a', 'c', 'f']).all())
        remove(self.stub_data_2)

    def test_auto_reindex(self):
        """Sparse integer ids are reindexed; original ids survive as names."""
        self.stub_data_4 = 'stub_4.txt'
        with open(self.stub_data_4, 'w') as text_file:
            text_file.write('%stub\n14 31\n42 50\n0 12')
        graph = parse.from_csv(self.stub_data_4)
        adjacency = graph.adjacency
        names = graph.names
        self.assertTrue((adjacency.data == [1, 1, 1, 1, 1, 1]).all())
        self.assertTrue((names == [0, 12, 14, 31, 42, 50]).all())
        remove(self.stub_data_4)

    def test_wrong_format(self):
        """A non-numeric third column is rejected with ValueError."""
        self.stub_data_3 = 'stub_3.txt'
        with open(self.stub_data_3, 'w') as text_file:
            text_file.write('%stub\n1 3 a\n4 5 b\n0 2 e')
        self.assertRaises(ValueError, parse.from_csv, self.stub_data_3)
        remove(self.stub_data_3)

    def test_graphml_basic(self):
        """Minimal directed GraphML with one weighted edge."""
        self.stub_data_5 = 'stub_5.graphml'
        with open(self.stub_data_5, 'w') as graphml_file:
            graphml_file.write('<?xml version=\'1.0\' encoding=\'utf-8\'?>\n <graphml xmlns=" xmlns:xsi=" xsi:schemaLocation=" <key id="d0" for="edge" attr.name="weight" attr.type="int"/>\n <graph edgedefault="directed">\n <node id="node1"/>\n <node id="node2"/>\n <edge source="node1" target="node2">\n <data key="d0">1</data>\n </edge></graph></graphml>')
        graph = parse.from_graphml(self.stub_data_5)
        adjacency = graph.adjacency
        names = graph.names
        self.assertTrue((adjacency.indices == [1]).all())
        self.assertTrue((adjacency.indptr == [0, 1, 1]).all())
        self.assertTrue((adjacency.data == [1]).all())
        self.assertTrue((names == ['node1', 'node2']).all())
        remove(self.stub_data_5)

    def test_graphml_refined(self):
        """GraphML with defaults, node/edge attributes, mixed edge directions
        and metadata descriptions; checks attribute defaults are applied."""
        self.stub_data_6 = 'stub_6.graphml'
        with open(self.stub_data_6, 'w') as graphml_file:
            graphml_file.write('<?xml version=\'1.0\' encoding=\'utf-8\'?>\n <graphml xmlns=" xmlns:xsi=" xsi:schemaLocation=" <desc>Some file</desc>\n <key id="d0" for="edge" attr.name="weight" attr.type="int"/>\n <key id="d1" for="node" attr.name="color" attr.type="string">\n <desc>Color</desc>\n <default>blue</default>\n </key>\n <key id="d2" for="edge" attr.name="distance" attr.type="double">\n <desc>Distance</desc>\n <default>1.5</default>\n </key>\n <graph edgedefault="undirected" parse.nodeids="canonical"\n parse.nodes="3" parse.edges="4">\n <node id="n0">\n <data key="d1">green</data>\n </node>\n <node id="n1"/>\n <node id="n2"/>\n <edge source="n0" target="n1">\n <data key="d0">1</data>\n <data key="d2">7.2</data>\n </edge>\n <edge source="n1" target="n2" directed=\'true\'>\n <data key="d0">1</data>\n </edge>\n <edge source="n0" target="n2" directed=\'false\'>\n <data key="d0">1</data>\n </edge></graph></graphml>')
        graph = parse.from_graphml(self.stub_data_6)
        adjacency = graph.adjacency
        colors = graph.node_attribute.color
        distances = graph.edge_attribute.distance
        self.assertTrue((adjacency.indices == [1, 2, 0, 2, 0]).all())
        self.assertTrue((adjacency.indptr == [0, 2, 4, 5]).all())
        self.assertTrue((adjacency.data == [1, 1, 1, 1, 1]).all())
        self.assertTrue((colors == ['green', 'blue', 'blue']).all())
        self.assertTrue((distances == [7.2, 7.2, 1.5, 1.5, 1.5]).all())
        self.assertEqual(graph.meta.description, 'Some file')
        self.assertEqual(graph.meta.attributes.node.color, 'Color')
        self.assertEqual(graph.meta.attributes.edge.distance, 'Distance')
        remove(self.stub_data_6)

    def test_no_graphml(self):
        """A GraphML file without a <graph> element is rejected."""
        self.stub_data_7 = 'stub_7.graphml'
        with open(self.stub_data_7, 'w') as graphml_file:
            graphml_file.write('<?xml version=\'1.0\' encoding=\'utf-8\'?>\n <graphml xmlns=" xmlns:xsi=" xsi:schemaLocation=" <key id="d0" for="edge" attr.name="weight" attr.type="int"/>\n </graphml>')
        self.assertRaises(ValueError, parse.from_graphml, self.stub_data_7)
        remove(self.stub_data_7)

    def test_csv_adjacency(self):
        """One-value-per-line CSV is read as an adjacency list."""
        self.stub_data_8 = 'stub_8.txt'
        with open(self.stub_data_8, 'w') as text_file:
            text_file.write('%stub\n2\n3\n0\n1\n5\n4')
        adjacency = parse.from_csv(self.stub_data_8)
        self.assertTupleEqual(adjacency.shape, (6, 6))
        self.assertTrue((adjacency.indices == [2, 3, 0, 1, 5, 4]).all())
        self.assertTrue((adjacency.indptr == [0, 1, 2, 3, 4, 5, 6]).all())
        self.assertTrue((adjacency.data == [2, 2, 2, 2, 2, 2]).all())
        graph = parse.from_csv(self.stub_data_8, matrix_only=False)
        adjacency = graph.adjacency
        self.assertTupleEqual(adjacency.shape, (6, 6))
        remove(self.stub_data_8)
        # Same content without the leading comment line parses identically.
        self.stub_data_8 = 'stub_8.txt'
        with open(self.stub_data_8, 'w') as text_file:
            text_file.write('2\n3\n0\n1\n5\n4')
        adjacency = parse.from_csv(self.stub_data_8)
        self.assertTupleEqual(adjacency.shape, (6, 6))
        remove(self.stub_data_8)

    def test_csv_bipartite(self):
        """bipartite=True yields a biadjacency matrix (rows vs columns)."""
        self.stub_data_9 = 'stub_9.txt'
        with open(self.stub_data_9, 'w') as text_file:
            text_file.write('#stub\n1 3\n4 5\n0 3')
        graph = parse.from_csv(self.stub_data_9, bipartite=True)
        biadjacency = graph.biadjacency
        self.assertTrue((biadjacency.indices == [0, 0, 1]).all())
        self.assertTrue((biadjacency.indptr == [0, 1, 2, 3]).all())
        self.assertTrue((biadjacency.data == [1, 1, 1]).all())
        remove(self.stub_data_9)

    def test_csv_adjacency_bipartite(self):
        """Adjacency-list CSV combined with bipartite=True."""
        self.stub_data_10 = 'stub_10.txt'
        with open(self.stub_data_10, 'w') as text_file:
            text_file.write('%stub\n3\n3\n0')
        graph = parse.from_csv(self.stub_data_10, bipartite=True)
        biadjacency = graph.biadjacency
        self.assertTupleEqual(biadjacency.shape, (3, 2))
        self.assertTrue((biadjacency.data == [1, 1, 1]).all())
        remove(self.stub_data_10)

    def test_edge_list(self):
        """In-memory edge lists: unweighted, weighted, directed, duplicates."""
        edge_list_1 = [('Alice', 'Bob'), ('Carol', 'Alice')]
        graph = parse.from_edge_list(edge_list_1)
        adjacency = graph.adjacency
        names = graph.names
        self.assertTrue((names == ['Alice', 'Bob', 'Carol']).all())
        self.assertTupleEqual(adjacency.shape, (3, 3))
        self.assertTrue((adjacency.indptr == [0, 2, 3, 4]).all())
        self.assertTrue((adjacency.indices == [1, 2, 0, 0]).all())
        self.assertTrue((adjacency.data == [1, 1, 1, 1]).all())
        edge_list_2 = [('Alice', 'Bob', 4), ('Carol', 'Alice', 6)]
        graph = parse.from_edge_list(edge_list_2)
        adjacency = graph.adjacency
        names = graph.names
        self.assertTrue((names == ['Alice', 'Bob', 'Carol']).all())
        self.assertTupleEqual(adjacency.shape, (3, 3))
        self.assertTrue((adjacency.indptr == [0, 2, 3, 4]).all())
        self.assertTrue((adjacency.indices == [1, 2, 0, 0]).all())
        self.assertTrue((adjacency.data == [4, 6, 4, 6]).all())
        # Duplicate edges sum by default; weighted=False caps them at 1.
        edge_list_3 = [('Alice', 'Bob'), ('Carol', 'Alice'), ('Alice', 'Bob')]
        graph = parse.from_edge_list(edge_list_3, directed=True)
        adjacency = graph.adjacency
        names = graph.names
        self.assertTrue((names == ['Alice', 'Bob', 'Carol']).all())
        self.assertTupleEqual(adjacency.shape, (3, 3))
        self.assertTrue((adjacency.data == [2, 1]).all())
        adjacency = parse.from_edge_list(edge_list_3, directed=True, matrix_only=True)
        self.assertTupleEqual(adjacency.shape, (3, 3))
        graph = parse.from_edge_list(edge_list_3, directed=True, weighted=False)
        adjacency = graph.adjacency
        self.assertTrue((adjacency.data == [1, 1]).all())
        edge_list = np.array([[0, 1, 1], [1, 2, 2]])
        adjacency = parse.from_edge_list(edge_list, weighted=True, matrix_only=True)
        self.assertTrue((adjacency.data == np.array([1, 1, 2, 2])).all())
        # sum_duplicates=False keeps the first weight of duplicated edges.
        edge_list = np.array([[0, 1, 2], [0, 1, 1], [1, 2, 1]])
        adjacency = parse.from_edge_list(edge_list, weighted=True, matrix_only=True, sum_duplicates=False)
        self.assertTrue((adjacency.data[0] == 2))

    def test_adjacency_list(self):
        """Adjacency lists given as dict-of-lists or list-of-lists."""
        edge_list_4 = {'Alice': ['Bob', 'Carol'], 'Bob': ['Carol']}
        graph = parse.from_adjacency_list(edge_list_4, directed=True)
        adjacency = graph.adjacency
        names = graph.names
        self.assertTrue((names == ['Alice', 'Bob', 'Carol']).all())
        self.assertTupleEqual(adjacency.shape, (3, 3))
        self.assertTrue((adjacency.data == [1, 1, 1]).all())
        edge_list_5 = [[0, 1, 2], [2, 3]]
        adjacency = parse.from_adjacency_list(edge_list_5, directed=True)
        self.assertTupleEqual(adjacency.shape, (4, 4))
        self.assertTrue((adjacency.data == [1, 1, 1, 1, 1]).all())
        # Sets are not an accepted container type.
        self.assertRaises(TypeError, parse.from_adjacency_list, {2, 3})

    def test_bad_format_edge_list(self):
        """3-D arrays and plain strings are rejected as edge lists."""
        edge_list_2 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        self.assertRaises(ValueError, parse.from_edge_list, edge_list_2)
        edge_list_3 = 'ab cd'
        self.assertRaises(TypeError, parse.from_edge_list, edge_list_3)

    def test_is_number(self):
        """is_number accepts numerics and rejects non-numeric strings."""
        self.assertTrue(parse.is_number(3))
        self.assertFalse(parse.is_number('a'))
def generate_categories(features, definition_df):
    """Build {feature -> {numeric code -> label}} mappings from a
    data-dictionary dataframe.

    Parameters
    ----------
    features : iterable of str
        Feature names to build category mappings for.
    definition_df : pandas.DataFrame
        Data dictionary; column 0 is the row kind ('VAL' for value rows),
        column 1 the feature name, column 2 the type code, column 4 the
        raw value code and column 6 the human-readable label.
        NOTE(review): column layout inferred from usage here — confirm
        against the dictionary file's actual schema.

    Returns
    -------
    dict
        Mapping of feature name to {code: label}; numeric features and
        PUMA identifiers are skipped.
    """
    categories = {}
    for feature in features:
        if ('PUMA' in feature):
            # PUMA codes are geographic identifiers, not categories.
            continue
        # Dictionary rows describing this feature's permitted values.
        coll_definition = definition_df[((definition_df[0] == 'VAL') & (definition_df[1] == feature))]
        coll_type = coll_definition.iloc[0][2]
        if (coll_type == 'N'):
            # Numeric (continuous) feature: no category mapping needed.
            continue
        # Codes parsed as numbers; unparseable codes collapse to a -0.0
        # sentinel via fillna.
        # NOTE(review): -0.0 == 0.0, so a legitimate code of 0 would collide
        # with the sentinel — verify the dictionaries never use code 0 here.
        mapped_col = pd.to_numeric(coll_definition[4], errors='coerce').fillna((- .0))
        mapping_dict = dict(zip(mapped_col.tolist(), coll_definition[6].tolist()))
        if ((- .0) not in mapping_dict):
            mapping_dict[(- .0)] = 'N/A'
        # Re-key the sentinel as NaN so missing data maps to the 'N/A' label.
        # NOTE(review): a float('nan') dict key can only be found via this
        # exact object (NaN != NaN); confirm downstream lookups go through
        # pandas (which special-cases NaN) rather than plain dict access.
        mapping_dict[float('nan')] = mapping_dict[(- .0)]
        del mapping_dict[(- .0)]
        categories[feature] = mapping_dict
    return categories
class BaseResponse(object):
    """Base class for WSGI response objects (class body continues below).

    Subclasses may override the class-level defaults that follow.
    """
    # Charset used to encode/decode body text.
    charset = 'utf-8'
    # Status code used when none is given to the constructor.
    default_status = 200
    # Content type used when none is given to the constructor.
    default_mimetype = 'text/plain'
    # Allow implicit conversion of a streamed body into a buffered sequence.
    implicit_sequence_conversion = True
    # Rewrite relative Location headers into absolute URLs.
    autocorrect_location_header = True
    # Derive Content-Length automatically for buffered bodies.
    automatically_set_content_length = True
    # Warn when a Set-Cookie header exceeds this many bytes.
    max_cookie_size = 4093
def __init__(self, response=None, status=None, headers=None, mimetype=None, content_type=None, direct_passthrough=False):
    """Initialize the response.

    Parameters
    ----------
    response : None, str/bytes, or iterable
        The body: None means empty, strings/bytes are buffered, anything
        else is treated as an iterable of byte chunks.
    status : int or str
        Status code or full status line; defaults to ``default_status``.
    headers : Headers, dict or list of pairs
        Initial headers.
    mimetype, content_type : str
        Content type; an explicit ``content_type`` wins over ``mimetype``.
    direct_passthrough : bool
        If True, the body iterable is passed to the server untouched.
    """
    if isinstance(headers, Headers):
        self.headers = headers
    elif (not headers):
        self.headers = Headers()
    else:
        self.headers = Headers(headers)
    if (content_type is None):
        if ((mimetype is None) and ('content-type' not in self.headers)):
            # Nothing given and no header present: use the class default.
            mimetype = self.default_mimetype
        if (mimetype is not None):
            # Adds charset parameters for text mimetypes.
            mimetype = get_content_type(mimetype, self.charset)
        content_type = mimetype
    if (content_type is not None):
        self.headers['Content-Type'] = content_type
    if (status is None):
        status = self.default_status
    if isinstance(status, integer_types):
        self.status_code = status
    else:
        self.status = status
    self.direct_passthrough = direct_passthrough
    self._on_close = []
    # Body handling: None -> empty, text/bytes -> buffered via set_data,
    # everything else kept as-is (assumed iterable of byte chunks).
    if (response is None):
        self.response = []
    elif isinstance(response, (text_type, bytes, bytearray)):
        self.set_data(response)
    else:
        self.response = response
def call_on_close(self, func):
    """Register ``func`` to be run when close() is called.

    Returns the function, so this can be used as a decorator.
    """
    self._on_close.append(func)
    return func
def __repr__(self):
    """Debug representation including body size (or streamed marker) and status."""
    if self.is_sequence:
        # Buffered body: report the total encoded size.
        body_info = ('%d bytes' % sum(map(len, self.iter_encoded())))
    else:
        body_info = ('streamed' if self.is_streamed else 'likely-streamed')
    return ('<%s %s [%s]>' % (self.__class__.__name__, body_info, self.status))
# BUG FIX(review): the first parameter is ``cls`` and the method rebinds
# ``response.__class__ = cls`` — the @classmethod decorator was clearly
# stripped; restored here.
@classmethod
def force_type(cls, response, environ=None):
    """Coerce ``response`` (a BaseResponse or WSGI app) into an instance of ``cls``.

    A non-response object is treated as a WSGI application and executed
    against ``environ`` (required in that case) to obtain its output.
    """
    if (not isinstance(response, BaseResponse)):
        if (environ is None):
            raise TypeError('cannot convert WSGI application into response objects without an environ')
        response = BaseResponse(*_run_wsgi_app(response, environ))
    # Rebind the class in place so subclass behavior applies.
    response.__class__ = cls
    return response
# BUG FIX(review): first parameter is ``cls`` and it is called as a
# constructor factory — restored the stripped @classmethod decorator.
@classmethod
def from_app(cls, app, environ, buffered=False):
    """Create a response by running WSGI app ``app`` against ``environ``."""
    return cls(*_run_wsgi_app(app, environ, buffered))
@property
def status_code(self):
    """The HTTP status code as an integer."""
    return self._status_code

# BUG FIX: the getter above was missing @property and the setter's
# decorator line had been mangled into the bare expression
# ``_code.setter`` (a NameError at class-creation time); restored the
# standard property getter/setter pair.
@status_code.setter
def status_code(self, code):
    self._status_code = code
    try:
        # Derive the full status line, e.g. 200 -> '200 OK'.
        self._status = ('%d %s' % (code, HTTP_STATUS_CODES[code].upper()))
    except KeyError:
        self._status = ('%d UNKNOWN' % code)
@property
def status(self):
    """The HTTP status line as a string, e.g. '200 OK'."""
    return self._status

# BUG FIX: both defs were plain methods named ``status`` (the second
# silently shadowed the first); restored the property getter/setter pair
# so ``self.status = value`` in __init__ goes through this setter.
@status.setter
def status(self, value):
    try:
        self._status = to_native(value)
    except AttributeError:
        raise TypeError('Invalid status argument')
    try:
        # Keep status_code in sync with the leading integer of the line.
        self._status_code = int(self._status.split(None, 1)[0])
    except ValueError:
        # No leading integer: record code 0 and prefix the line with it.
        self._status_code = 0
        self._status = ('0 %s' % self._status)
    except IndexError:
        raise ValueError('Empty status argument')
def get_data(self, as_text=False):
    """Return the buffered response body.

    The body is first coerced into a sequence (may raise RuntimeError in
    passthrough mode); with ``as_text`` the bytes are decoded using the
    response charset.
    """
    self._ensure_sequence()
    body = b''.join(self.iter_encoded())
    return body.decode(self.charset) if as_text else body
def set_data(self, value):
    """Replace the body with a single buffered string/bytes value.

    Text is encoded with the response charset; Content-Length is updated
    unless ``automatically_set_content_length`` is disabled.
    """
    if isinstance(value, text_type):
        value = value.encode(self.charset)
    else:
        value = bytes(value)
    self.response = [value]
    if self.automatically_set_content_length:
        self.headers['Content-Length'] = str(len(value))

# Read/write accessor pairing get_data and set_data.
data = property(get_data, set_data, doc='A descriptor that calls :meth:`get_data` and :meth:`set_data`.')
def calculate_content_length(self):
    """Return the body length in bytes, or None when it cannot be buffered."""
    try:
        self._ensure_sequence()
    except RuntimeError:
        # Passthrough or conversion disabled: length is unknown.
        return None
    return sum(len(chunk) for chunk in self.iter_encoded())
def _ensure_sequence(self, mutable=False):
    """Make sure the body is a buffered sequence, or raise RuntimeError.

    With ``mutable=True`` the sequence is additionally guaranteed to be
    a list (tuples are converted).
    """
    if self.is_sequence:
        # Already buffered; only coerce tuple -> list if mutability is needed.
        if (mutable and (not isinstance(self.response, list))):
            self.response = list(self.response)
        return
    if self.direct_passthrough:
        raise RuntimeError('Attempted implicit sequence conversion but the response object is in direct passthrough mode.')
    if (not self.implicit_sequence_conversion):
        raise RuntimeError('The response object required the iterable to be a sequence, but the implicit conversion was disabled. Call make_sequence() yourself.')
    self.make_sequence()
def make_sequence(self):
    """Buffer a streamed body into a list of encoded chunks.

    The original iterable's ``close`` (if any) is preserved by registering
    it as an on-close callback, since ``list()`` consumes the iterator.
    """
    if (not self.is_sequence):
        # Grab close() before self.response is replaced below.
        close = getattr(self.response, 'close', None)
        self.response = list(self.iter_encoded())
        if (close is not None):
            self.call_on_close(close)
def iter_encoded(self):
    """Iterate over the body with every chunk encoded to bytes."""
    if __debug__:
        # Warn if the body is a bare string (common mistake: iterating
        # a string yields characters, not chunks).
        _warn_if_string(self.response)
    return _iter_encoded(self.response, self.charset)
def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, samesite=None):
    """Append a Set-Cookie header for ``key``.

    Cookie attributes (max_age, expires, path, domain, secure, samesite)
    are forwarded to ``dump_cookie`` along with the response charset and
    the class's ``max_cookie_size`` limit.
    """
    self.headers.add('Set-Cookie', dump_cookie(key, value=value, max_age=max_age, expires=expires, path=path, domain=domain, secure=secure, charset=self.charset, max_size=self.max_cookie_size, samesite=samesite))
def delete_cookie(self, key, path='/', domain=None):
    """Delete a cookie by setting it expired (expires=0, max_age=0)."""
    self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
# BUG FIX: restored the stripped @property decorator — __repr__ reads
# ``self.is_streamed`` as an attribute, which as a bound method was
# always truthy.
@property
def is_streamed(self):
    """True if the body has no length (a streamed iterable)."""
    try:
        len(self.response)
    except (TypeError, AttributeError):
        return True
    return False
# BUG FIX: restored the stripped @property decorator — __repr__,
# _ensure_sequence, make_sequence and get_wsgi_headers all read
# ``self.is_sequence`` as an attribute; as a bound method it was always
# truthy, silently breaking the streamed-body paths.
@property
def is_sequence(self):
    """True if the body is a buffered sequence (list or tuple)."""
    return isinstance(self.response, (tuple, list))
def close(self):
    """Close the underlying body iterable (if closable), then run callbacks."""
    close_method = getattr(self.response, 'close', None)
    if close_method is not None:
        close_method()
    for callback in self._on_close:
        callback()
def __enter__(self):
    """Context manager entry: yield the response itself."""
    return self
def __exit__(self, exc_type, exc_value, tb):
    """Context manager exit: close the response (exceptions propagate)."""
    self.close()
def freeze(self):
    """Buffer the body fully and pin Content-Length to the buffered size."""
    self.response = list(self.iter_encoded())
    self.headers['Content-Length'] = str(sum(map(len, self.response)))
def get_wsgi_headers(self, environ):
    """Return a copy of the headers, fixed up for delivery over WSGI.

    Adjustments: IRI->URI conversion of Location / Content-Location,
    optional absolutization of relative redirects, and Content-Length
    handling for bodiless status codes.  The original headers object is
    not mutated.
    """
    headers = Headers(self.headers)
    location = None
    content_location = None
    content_length = None
    status = self.status_code
    # One pass to collect the headers that may need rewriting.
    for (key, value) in headers:
        ikey = key.lower()
        if (ikey == u'location'):
            location = value
        elif (ikey == u'content-location'):
            content_location = value
        elif (ikey == u'content-length'):
            content_length = value
    if (location is not None):
        old_location = location
        if isinstance(location, text_type):
            # Make the Location header pure ASCII (IRI -> URI).
            location = iri_to_uri(location, safe_conversion=True)
        if self.autocorrect_location_header:
            # Resolve relative redirects against the current request URL.
            current_url = get_current_url(environ, strip_querystring=True)
            if isinstance(current_url, text_type):
                current_url = iri_to_uri(current_url)
            location = url_join(current_url, location)
        if (location != old_location):
            headers['Location'] = location
    if ((content_location is not None) and isinstance(content_location, text_type)):
        headers['Content-Location'] = iri_to_uri(content_location)
    # 1xx and 204 must not carry Content-Length; 304 strips entity headers.
    if ((100 <= status < 200) or (status == 204)):
        headers.remove('Content-Length')
    elif (status == 304):
        remove_entity_headers(headers)
    if (self.automatically_set_content_length and self.is_sequence and (content_length is None) and (status not in (204, 304)) and (not (100 <= status < 200))):
        try:
            content_length = sum((len(to_bytes(x, 'ascii')) for x in self.response))
        except UnicodeError:
            # Non-ASCII text chunks: length is unknown until encoding happens.
            pass
        else:
            headers['Content-Length'] = str(content_length)
    return headers
def get_app_iter(self, environ):
    """Return the application iterator handed to the WSGI server.

    HEAD requests and bodiless statuses (1xx, 204, 304) get an empty
    iterable; direct-passthrough mode returns the raw body untouched.
    """
    status = self.status_code
    if ((environ['REQUEST_METHOD'] == 'HEAD') or (100 <= status < 200) or (status in (204, 304))):
        iterable = ()
    elif self.direct_passthrough:
        if __debug__:
            _warn_if_string(self.response)
        # Passthrough: hand the body to the server without any wrapping.
        return self.response
    else:
        iterable = self.iter_encoded()
    # Ensure close() and registered callbacks run when the server is done.
    return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
    """Return the ``(app_iter, status, header_list)`` triple for WSGI."""
    wsgi_headers = self.get_wsgi_headers(environ)
    body_iter = self.get_app_iter(environ)
    return body_iter, self.status, wsgi_headers.to_wsgi_list()
def __call__(self, environ, start_response):
    """WSGI entry point: start the response, return the body iterable."""
    (app_iter, status, headers) = self.get_wsgi_response(environ)
    start_response(status, headers)
    return app_iter
class PoseDataset(Dataset):
    """Wrap a single ``Pose`` recording as a one-item torch ``Dataset``.

    Each frame's keypoints are flattened into a row of ``self.points``.
    Confidences are stacked twice along the last axis before flattening
    (presumably one copy per (x, y) coordinate so they align with
    ``self.points`` -- TODO confirm against the Pose data layout).
    """

    def __init__(self, pose: 'Pose'):
        # 'Pose' is a quoted forward reference so the class stays importable
        # even when the Pose type is only available at call sites.
        super().__init__()
        self.points = torch.tensor([p.flatten() for p in np.array(pose.body.data)], dtype=torch.float32)
        self.confidence = torch.tensor([np.stack([c, c], axis=(- 1)).flatten() for c in np.array(pose.body.confidence)], dtype=torch.float32)
        self.coords = PoseDataset.get_coords(time=(len(self.points) / pose.body.fps), fps=pose.body.fps)

    @staticmethod
    def get_coords(time: float, fps: float):
        """Return a (frames, 1) tensor of timestamps ``i / fps``.

        Declared ``@staticmethod`` (it never touches instance state and is
        already invoked on the class); this also makes ``self.get_coords(...)``
        safe for future callers.
        """
        return torch.tensor([[(i / fps)] for i in range(int((fps * time)))], dtype=torch.float32)

    def __len__(self):
        # The entire recording is exposed as a single sample.
        return 1

    def __getitem__(self, idx):
        if (idx > 0):
            raise IndexError
        return (self.coords, self.points, self.confidence)
# NOTE(review): the bare '.mlir' below looks like the remnant of a stripped
# '@pytest.mark.mlir' decorator -- confirm against version control.
.mlir
def test_mlir_tasklet_no_entry():
    """MLIR tasklets lacking an entry point must be rejected with SyntaxError."""
    # 1-element int32 arrays with known values for the tasklet call.
    A = dace.ndarray((1,), dace.int32)
    B = dace.ndarray((1,), dace.int32)
    C = dace.ndarray((1,), dace.int32)
    A[:] = 5
    B[:] = 2
    C[:] = 15
    with pytest.raises(SyntaxError):
        mlir_tasklet_no_entry(A, B, C)
    with pytest.raises(SyntaxError):
        mlir_tasklet_no_entry_generic(A, B, C)
class Logger():
    """Experiment logger: manages the run directory, a text log file, and a
    TensorBoard SummaryWriter.  Instances are context managers; entering
    opens ``log.txt``, exiting closes it."""

    def __init__(self, cfg):
        # Run directory layout: ../experiment/<save>/<ablation>
        self.path = path.join('..', 'experiment', cfg.save, cfg.ablation)
        if cfg.reset:
            # Only prompt when something would actually be deleted.
            if path.isdir(self.path):
                response = input('Do you want to remove the existing directory? [Y/N]: ')
                is_reset = (response.lower() == 'y')
            else:
                is_reset = True
            if is_reset:
                shutil.rmtree(self.path, ignore_errors=True)
        os.makedirs(self.path, exist_ok=True)
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        # Append the full command line and config to config.txt for provenance.
        with open(self.get_path('config.txt'), 'a') as f:
            f.write((now + '\n'))
            f.write((('python ' + ' '.join(sys.argv)) + '\n\n'))
            for (k, v) in vars(cfg).items():
                f.write('{}: {}\n'.format(k, v))
            f.write((('-' * 80) + '\n\n'))
        # global.txt (in the CWD) indexes every run by timestamp and path.
        with open('global.txt', 'a') as f:
            f.write('{}:\t{}\n'.format(now, self.path))
        self.writer = tensorboard.SummaryWriter(log_dir=self.path)

    def __enter__(self):
        self.open_log()
        return self

    def __exit__(self, *args, **kwargs):
        self.log_file.close()

    def __call__(self, obj, display=True, refresh=False, clip=0, filename=None):
        """Log ``obj`` to stdout and either log.txt or a named file.

        NOTE(review): when ``display`` is False, ``obj`` is written without
        being converted to str / joined -- confirm callers only pass strings
        in that case.
        """
        if display:
            if isinstance(obj, (list, tuple)):
                for s in obj:
                    _print(s)
                obj = '\n'.join(obj)
            else:
                obj = str(obj)
                if (clip > 0):
                    # Show only the first `clip` lines on screen.
                    clip_obj = obj.splitlines()
                    clip_obj = (clip_obj[:clip] + ['...'])
                    clip_obj = '\n'.join(clip_obj)
                    _print(clip_obj)
                else:
                    _print(obj)
        if (not filename):
            # Default destination: the open log.txt (if any).
            if (self.log_file is None):
                return
            try:
                self.log_file.write((obj + '\n'))
            except Exception as e:
                # On write failure, disable file logging rather than crash.
                _print('Cannot write log!')
                _print(e)
                self.log_file = None
                return
            if refresh:
                try:
                    self.log_file.flush()
                except Exception as e:
                    _print('An error occured on log.txt!')
                    _print(e)
                    self.log_file = None
                    return
        else:
            # Explicit filename: overwrite that file with this entry only.
            with open(self.get_path(filename), 'w') as f:
                f.write((obj + '\n'))

    def get_path(self, *subdirs):
        """Join ``subdirs`` onto the run directory."""
        return path.join(self.path, *subdirs)

    def open_log(self, save_as=None):
        """Open the append-mode text log (default log.txt); never raises."""
        try:
            if (save_as is None):
                save_as = 'log.txt'
            self.log_file = open(self.get_path(save_as), 'a')
        except Exception as e:
            _print('Cannot open log.txt!')
            _print(e)
            self.log_file = None
def get_envs(variant):
    """Build the (possibly VAE-wrapped) environment described by ``variant``.

    Resolution order for the VAE: checkpoint dir (`ckpt`) first, then
    `vae_path` (either a directory or a direct reference), then the raw
    `vae_path` value itself.  For non-state experiments the env is wrapped
    in ImageEnv + VAEWrappedEnv, goals may be presampled, and train/eval
    reward modes are configured.
    """
    from multiworld.core.image_env import ImageEnv
    from railrl.envs.vae_wrappers import VAEWrappedEnv
    from railrl.misc.asset_loader import load_local_or_remote_file
    render = variant.get('render', False)
    vae_path = variant.get('vae_path', None)
    reproj_vae_path = variant.get('reproj_vae_path', None)
    ckpt = variant.get('ckpt', None)
    reward_params = variant.get('reward_params', dict())
    init_camera = variant.get('init_camera', None)
    do_state_exp = variant.get('do_state_exp', False)
    presample_goals = variant.get('presample_goals', False)
    presample_image_goals_only = variant.get('presample_image_goals_only', False)
    presampled_goals_path = variant.get('presampled_goals_path', None)
    # 1) Try loading the VAE from a training checkpoint directory.
    if ((not do_state_exp) and (type(ckpt) is str)):
        vae = load_local_or_remote_file(osp.join(ckpt, 'vae.pkl'))
        if (vae is not None):
            from railrl.core import logger
            logger.save_extra_data(vae, 'vae.pkl', mode='pickle')
    else:
        vae = None
    # 2) Fall back to vae_path (directory of params, or a loadable reference).
    if ((vae is None) and (type(vae_path) is str)):
        vae = load_local_or_remote_file(osp.join(vae_path, 'vae_params.pkl'))
        from railrl.core import logger
        logger.save_extra_data(vae, 'vae.pkl', mode='pickle')
    elif (vae is None):
        vae = vae_path
    if (type(vae) is str):
        vae = load_local_or_remote_file(vae)
    else:
        vae = vae
    # Optional secondary VAE used for reprojection.
    if (type(reproj_vae_path) is str):
        reproj_vae = load_local_or_remote_file(osp.join(reproj_vae_path, 'vae_params.pkl'))
    else:
        reproj_vae = None
    # Base environment: gym id or explicit class + kwargs.
    if ('env_id' in variant):
        import gym
        env = gym.make(variant['env_id'])
    else:
        env = variant['env_class'](**variant['env_kwargs'])
    if (not do_state_exp):
        # Ensure we have an image observation env, then wrap with the VAE.
        if isinstance(env, ImageEnv):
            image_env = env
        else:
            image_env = ImageEnv(env, variant.get('imsize'), init_camera=init_camera, transpose=True, normalize=True)
        vae_env = VAEWrappedEnv(image_env, vae, imsize=image_env.imsize, decode_goals=render, render_goals=render, render_rollouts=render, reward_params=reward_params, reproj_vae=reproj_vae, **variant.get('vae_wrapped_env_kwargs', {}))
        if presample_goals:
            if (presampled_goals_path is None):
                # Goals are generated on the fly, so raw goal images are unused.
                image_env.non_presampled_goal_img_is_garbage = True
                presampled_goals = variant['generate_goal_dataset_fctn'](image_env=image_env, **variant['goal_generation_kwargs'])
            else:
                presampled_goals = load_local_or_remote_file(presampled_goals_path).item()
                presampled_goals = {'state_desired_goal': presampled_goals['next_obs_state'], 'image_desired_goal': presampled_goals['next_obs']}
            image_env.set_presampled_goals(presampled_goals)
            vae_env.set_presampled_goals(presampled_goals)
            print('Presampling all goals')
        elif presample_image_goals_only:
            presampled_goals = variant['generate_goal_dataset_fctn'](image_env=vae_env.wrapped_env, **variant['goal_generation_kwargs'])
            image_env.set_presampled_goals(presampled_goals)
            print('Presampling image goals only')
        else:
            print('Not using presampled goals')
        env = vae_env
    if (not do_state_exp):
        # Configure reward/rendering modes for each phase of training.
        training_mode = variant.get('training_mode', 'train')
        testing_mode = variant.get('testing_mode', 'test')
        env.add_mode('eval', testing_mode)
        env.add_mode('train', training_mode)
        env.add_mode('relabeling', training_mode)
        env.add_mode('video_vae', 'video_vae')
        env.add_mode('video_env', 'video_env')
    return env
class NoFilter(FilterBase):
    """Pass-through filter: return every file matching the configured glob."""

    def __call__(self):
        # Resolve the folder relative to the configured root.
        folder_path = Path(self.root_folder) / self.folder_path
        assert folder_path.exists(), f'Folder {folder_path} does not exist'
        matches = folder_path.glob(self.extension)
        return sorted(matches)
def test_export_sequence(exportable_test_case, tmp_path):
    """Exporting the same chromosome twice must yield two identical test
    functions (test_case_0 and test_case_1) in the generated module."""
    path = (tmp_path / 'generated.py')
    exporter = export.PyTestChromosomeToAstVisitor()
    # Visit the same test case twice so the module contains two copies.
    exportable_test_case.accept(exporter)
    exportable_test_case.accept(exporter)
    export.save_module_to_file(exporter.to_module(), path)
    # Exact-text comparison against the expected generated source.
    assert (path.read_text() == (export._PYNGUIN_FILE_HEADER + 'import pytest\nimport tests.fixtures.accessibles.accessible as module_0\n\n\ndef test_case_0():\n    int_0 = 5\n    some_type_0 = module_0.SomeType(int_0)\n    assert some_type_0 == 5\n    float_0 = 42.23\n    float_1 = module_0.simple_function(float_0)\n    assert float_1 == pytest.approx(42.23, abs=0.01, rel=0.01)\n\n\ndef test_case_1():\n    int_0 = 5\n    some_type_0 = module_0.SomeType(int_0)\n    assert some_type_0 == 5\n    float_0 = 42.23\n    float_1 = module_0.simple_function(float_0)\n    assert float_1 == pytest.approx(42.23, abs=0.01, rel=0.01)\n'))
class SimpleDicomReader(object):
    """Minimal DICOM parser for uncompressed transfer syntaxes.

    Reads the file meta header, collects data elements it knows about into
    ``self._info``, defers Pixel Data loading, and exposes the pixels as a
    numpy array via :meth:`get_numpy_array`.
    """

    def __init__(self, file):
        # Accept either a filename or an already-open binary file object.
        if isinstance(file, str):
            self._filename = file
            self._file = open(file, 'rb')
        else:
            self._filename = '<unknown file>'
            self._file = file
        self._pixel_data_loc = None  # (offset, length) of deferred pixel data
        self.is_implicit_VR = False
        self.is_little_endian = True
        self._unpackPrefix = '<'  # struct byte-order prefix, fixed after header
        self._info = {}
        # VR -> value converter; unknown VRs are stored as raw bytes.
        self._converters = {'US': (lambda x: self._unpack('H', x)), 'UL': (lambda x: self._unpack('L', x)), 'DS': (lambda x: self._splitValues(x, float, '\\')), 'IS': (lambda x: self._splitValues(x, int, '\\')), 'AS': (lambda x: x.decode('ascii', 'ignore').strip('\x00')), 'DA': (lambda x: x.decode('ascii', 'ignore').strip('\x00')), 'TM': (lambda x: x.decode('ascii', 'ignore').strip('\x00')), 'UI': (lambda x: x.decode('ascii', 'ignore').strip('\x00')), 'LO': (lambda x: x.decode('utf-8', 'ignore').strip('\x00').rstrip()), 'CS': (lambda x: self._splitValues(x, float, '\\')), 'PN': (lambda x: x.decode('utf-8', 'ignore').strip('\x00').rstrip())}
        self._read()

    def info(self):
        """Return the dict of parsed data elements."""
        return self._info

    def _splitValues(self, x, type, splitter):
        """Decode bytes and parse as `type`, splitting multi-valued fields.

        Returns the raw decoded string when parsing fails.
        """
        s = x.decode('ascii').strip('\x00')
        try:
            if (splitter in s):
                return tuple([type(v) for v in s.split(splitter) if v.strip()])
            else:
                return type(s)
        except ValueError:
            return s

    def _unpack(self, fmt, value):
        # Single scalar unpack honoring the file's endianness.
        return struct.unpack((self._unpackPrefix + fmt), value)[0]

    def __iter__(self):
        return iter(self._info.keys())

    def __getattr__(self, key):
        # Expose parsed elements as attributes (e.g. self.Rows).
        info = object.__getattribute__(self, '_info')
        if (key in info):
            return info[key]
        return object.__getattribute__(self, key)

    def _read(self):
        """Parse the whole file: preamble, header, data elements, shape."""
        f = self._file
        f.seek(128)  # skip the 128-byte preamble
        if (f.read(4) != b'DICM'):
            raise NotADicomFile('Not a valid DICOM file.')
        self._read_header()
        self._read_data_elements()
        self._get_shape_and_sampling()
        # Close only files we opened ourselves (we know the path exists then).
        if os.path.isfile(self._filename):
            self._file.close()
            self._file = None

    def _readDataElement(self):
        """Read one (group, element, value) triple at the current position."""
        f = self._file
        group = self._unpack('H', f.read(2))
        element = self._unpack('H', f.read(2))
        if self.is_implicit_VR:
            vl = self._unpack('I', f.read(4))
        else:
            vr = f.read(2)
            # These VRs use a 2-byte reserved field and a 4-byte length.
            if (vr in (b'OB', b'OW', b'SQ', b'UN')):
                reserved = f.read(2)
                vl = self._unpack('I', f.read(4))
            else:
                vl = self._unpack('H', f.read(2))
        if ((group == 32736) and (element == 16)):
            # (7FE0,0010) Pixel Data: defer loading; remember location, skip.
            here = f.tell()
            self._pixel_data_loc = (here, vl)
            f.seek((here + vl))
            return (group, element, b'Deferred loading of pixel data')
        else:
            # FIX(review): the literal was lost here; 0xFFFFFFFF marks
            # undefined length per DICOM PS3.5.
            if (vl == 0xFFFFFFFF):
                value = self._read_undefined_length_value()
            else:
                value = f.read(vl)
            return (group, element, value)

    def _read_undefined_length_value(self, read_size=128):
        """Scan forward for the sequence delimiter and return the bytes before it."""
        fp = self._file
        # Rewind margin so a delimiter split across chunks is still found.
        search_rewind = 3
        bytes_to_find = struct.pack((self._unpackPrefix + 'HH'), SequenceDelimiterTag[0], SequenceDelimiterTag[1])
        found = False
        value_chunks = []
        while (not found):
            chunk_start = fp.tell()
            bytes_read = fp.read(read_size)
            if (len(bytes_read) < read_size):
                new_bytes = fp.read((read_size - len(bytes_read)))
                bytes_read += new_bytes
                if (len(bytes_read) < read_size):
                    raise EOFError('End of file reached before sequence delimiter found.')
            index = bytes_read.find(bytes_to_find)
            if (index != (- 1)):
                found = True
                value_chunks.append(bytes_read[:index])
                fp.seek(((chunk_start + index) + 4))
                length = fp.read(4)
                if (length != b'\x00\x00\x00\x00'):
                    logger.warning('Expected 4 zero bytes after undefined length delimiter')
            else:
                fp.seek((fp.tell() - search_rewind))
                value_chunks.append(bytes_read[:(- search_rewind)])
        return b''.join(value_chunks)

    def _read_header(self):
        """Parse the group-2 file meta header and set VR/endianness flags."""
        f = self._file
        TransferSyntaxUID = None
        try:
            while True:
                fp_save = f.tell()
                (group, element, value) = self._readDataElement()
                if (group == 2):
                    if ((group == 2) and (element == 16)):
                        TransferSyntaxUID = value.decode('ascii').strip('\x00')
                else:
                    # First non-meta element: rewind and stop.
                    f.seek(fp_save)
                    break
        except (EOFError, struct.error):
            raise RuntimeError('End of file reached while still in header.')
        self._info['TransferSyntaxUID'] = TransferSyntaxUID
        if (TransferSyntaxUID is None):
            (is_implicit_VR, is_little_endian) = (False, True)
        elif (TransferSyntaxUID == '1.2.840.10008.1.2.1'):
            (is_implicit_VR, is_little_endian) = (False, True)
        elif (TransferSyntaxUID == '1.2.840.10008.1.2.2'):
            (is_implicit_VR, is_little_endian) = (False, False)
        elif (TransferSyntaxUID == '1.2.840.10008.1.2'):
            (is_implicit_VR, is_little_endian) = (True, True)
        elif (TransferSyntaxUID == '1.2.840.10008.1.2.1.99'):
            # Deflated explicit little endian: decompress the remainder.
            (is_implicit_VR, is_little_endian) = (False, True)
            self._inflate()
        else:
            # Compressed syntaxes are not supported; report which one it was.
            (t, extra_info) = (TransferSyntaxUID, '')
            if ('1.2.840.10008.1.2.4.50' <= t < '1.2.840.10008.1.2.4.99'):
                extra_info = ' (JPEG)'
            if ('1.2.840.10008.1.2.4.90' <= t < '1.2.840.10008.1.2.4.99'):
                extra_info = ' (JPEG 2000)'
            if (t == '1.2.840.10008.1.2.5'):
                extra_info = ' (RLE)'
            if (t == '1.2.840.10008.1.2.6.1'):
                extra_info = ' (RFC 2557)'
            raise CompressedDicom(('The dicom reader can only read files with uncompressed image data - not %r%s. You can try using dcmtk or gdcm to convert the image.' % (t, extra_info)))
        self.is_implicit_VR = is_implicit_VR
        self.is_little_endian = is_little_endian
        self._unpackPrefix = '><'[is_little_endian]

    def _read_data_elements(self):
        """Read data elements until EOF, converting known (group, element)s."""
        info = self._info
        try:
            while True:
                (group, element, value) = self._readDataElement()
                if (group in GROUPS):
                    key = (group, element)
                    (name, vr) = MINIDICT.get(key, (None, None))
                    if name:
                        converter = self._converters.get(vr, (lambda x: x))
                        info[name] = converter(value)
        except (EOFError, struct.error):
            pass  # normal termination at end of file

    def get_numpy_array(self):
        """Return the pixel data as a numpy array (rescaled if applicable)."""
        if ('PixelData' not in self):
            raise TypeError('No pixel data found in this dataset.')
        # Load the deferred pixel data if it was skipped during parsing.
        if (self._pixel_data_loc and (len(self.PixelData) < 100)):
            close_file = False
            if (self._file is None):
                close_file = True
                self._file = open(self._filename, 'rb')
            self._file.seek(self._pixel_data_loc[0])
            # FIX(review): lost literal restored; 0xFFFFFFFF = undefined length.
            if (self._pixel_data_loc[1] == 0xFFFFFFFF):
                value = self._read_undefined_length_value()
            else:
                value = self._file.read(self._pixel_data_loc[1])
            if close_file:
                self._file.close()
                self._file = None
            self._info['PixelData'] = value
        data = self._pixel_data_numpy()
        data = self._apply_slope_and_offset(data)
        # Drop the raw bytes now that they live in the array.
        self._info['PixelData'] = (b'Data converted to numpy array, ' + b'raw data removed to preserve memory')
        return data

    def _get_shape_and_sampling(self):
        """Derive array shape and pixel sampling from the parsed elements."""
        if (('NumberOfFrames' in self) and (self.NumberOfFrames > 1)):
            if (self.SamplesPerPixel > 1):
                shape = (self.SamplesPerPixel, self.NumberOfFrames, self.Rows, self.Columns)
            else:
                shape = (self.NumberOfFrames, self.Rows, self.Columns)
        elif ('SamplesPerPixel' in self):
            if (self.SamplesPerPixel > 1):
                if (self.BitsAllocated == 8):
                    shape = (self.SamplesPerPixel, self.Rows, self.Columns)
                else:
                    raise NotImplementedError('DICOM plugin only handles SamplesPerPixel > 1 if Bits Allocated = 8')
            else:
                shape = (self.Rows, self.Columns)
        else:
            raise RuntimeError('DICOM file has no SamplesPerPixel (perhaps this is a report?)')
        if ('PixelSpacing' in self):
            sampling = (float(self.PixelSpacing[0]), float(self.PixelSpacing[1]))
        else:
            sampling = (1.0, 1.0)
        if ('SliceSpacing' in self):
            sampling = ((abs(self.SliceSpacing),) + sampling)
        # Pad sampling with 1.0s so it always matches the rank of `shape`.
        sampling = (((1.0,) * (len(shape) - len(sampling))) + sampling[(- len(shape)):])
        self._info['shape'] = shape
        self._info['sampling'] = sampling

    def _pixel_data_numpy(self):
        """Decode the raw PixelData bytes into a correctly shaped array."""
        if ('PixelData' not in self):
            raise TypeError('No pixel data found in this dataset.')
        need_byteswap = (self.is_little_endian != sys_is_little_endian)
        # PixelRepresentation: 0 -> unsigned ('uintN'), 1 -> signed ('intN').
        format_str = ('%sint%d' % (('u', '')[self.PixelRepresentation], self.BitsAllocated))
        try:
            numpy_format = np.dtype(format_str)
        except TypeError:
            # FIX(review): message used `numpy_format`, which is unbound when
            # np.dtype raises; report the offending format string instead.
            raise TypeError(("Data type not understood by NumPy: format='%s', PixelRepresentation=%d, BitsAllocated=%d" % (format_str, self.PixelRepresentation, self.BitsAllocated)))
        arr = np.frombuffer(self.PixelData, numpy_format).copy()
        if need_byteswap:
            arr.byteswap(True)
        arr = arr.reshape(*self._info['shape'])
        return arr

    def _apply_slope_and_offset(self, data):
        """Apply RescaleSlope/RescaleIntercept, widening the dtype as needed."""
        (slope, offset) = (1, 0)
        (needFloats, needApplySlopeOffset) = (False, False)
        if ('RescaleSlope' in self):
            needApplySlopeOffset = True
            slope = self.RescaleSlope
        if ('RescaleIntercept' in self):
            needApplySlopeOffset = True
            offset = self.RescaleIntercept
        if ((int(slope) != slope) or (int(offset) != offset)):
            needFloats = True
        if (not needFloats):
            (slope, offset) = (int(slope), int(offset))
        if needApplySlopeOffset:
            if (data.dtype in [np.float32, np.float64]):
                pass
            elif needFloats:
                data = data.astype(np.float32)
            else:
                # Pick the smallest integer dtype that can hold the rescaled range.
                (minReq, maxReq) = (data.min(), data.max())
                minReq = min([minReq, ((minReq * slope) + offset), ((maxReq * slope) + offset)])
                maxReq = max([maxReq, ((minReq * slope) + offset), ((maxReq * slope) + offset)])
                dtype = None
                if (minReq < 0):
                    maxReq = max([(- minReq), maxReq])
                    if (maxReq < (2 ** 7)):
                        dtype = np.int8
                    elif (maxReq < (2 ** 15)):
                        dtype = np.int16
                    elif (maxReq < (2 ** 31)):
                        dtype = np.int32
                    else:
                        dtype = np.float32
                elif (maxReq < (2 ** 8)):
                    dtype = np.int8
                elif (maxReq < (2 ** 16)):
                    dtype = np.int16
                elif (maxReq < (2 ** 32)):
                    dtype = np.int32
                else:
                    dtype = np.float32
                if (dtype != data.dtype):
                    data = data.astype(dtype)
            data *= slope
            data += offset
        return data

    def _inflate(self):
        """Replace the file object with the zlib-inflated remainder (deflated syntax)."""
        import zlib
        from io import BytesIO
        zipped = self._file.read()
        # Negative wbits: raw deflate stream, no zlib header.
        unzipped = zlib.decompress(zipped, (- zlib.MAX_WBITS))
        self._file = BytesIO(unzipped)
def convert_module_to_f16(l):
    """Cast a conv layer's weight (and bias, if present) to float16 in place.

    Non-conv modules are left untouched.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    bias = l.bias
    if bias is not None:
        bias.data = bias.data.half()
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    """Initialize a layer in place: orthogonal weights (gain ``std``) and a
    constant bias.  Returns the same layer for chaining."""
    init = torch.nn.init
    init.orthogonal_(layer.weight, std)
    init.constant_(layer.bias, bias_const)
    return layer
def GenusSix():
    """Return the genus-six simplicial complex; each 3-character code below
    names one triangular facet by its vertex labels (hex digits 0-b)."""
    facet_codes = ['014', '018', '023', '027', '036', '049', '056', '05b', '07a', '08a', '09b', '125', '126', '137', '139', '147', '15a', '16b', '18b', '19a', '23b', '248', '24a', '258', '269', '279', '2ab', '345', '34b', '35a', '367', '389', '38a', '459', '46a', '46b', '478', '568', '579', '57b', '67a', '689', '78b', '9ab']
    facets = [list(code) for code in facet_codes]
    return SimplicialComplex(facets)
class MinMaxResize():
    """Resize a PIL image so its short side reaches ``shorter`` without the
    long side exceeding ``longer``, then floor both dims to multiples of 32."""

    def __init__(self, shorter=800, longer=1333):
        self.min = shorter
        self.max = longer

    def __call__(self, x):
        w, h = x.size
        # Scale so the short side hits self.min, preserving aspect ratio.
        scale = self.min / min(w, h)
        if h < w:
            newh, neww = self.min, scale * w
        else:
            newh, neww = scale * h, self.min
        # If the long side overshoots the cap, shrink both to fit.
        if max(newh, neww) > self.max:
            shrink = self.max / max(newh, neww)
            newh = newh * shrink
            neww = neww * shrink
        # Round to nearest pixel, then snap down to a multiple of 32.
        newh = (int(newh + 0.5) // 32) * 32
        neww = (int(neww + 0.5) // 32) * 32
        return x.resize((neww, newh), resample=Image.BICUBIC)
def group_identifier(tlist):
    """Group runs of tokens in ``tlist`` into ``sql.Identifier`` nodes.

    Recurses into sublists first, then repeatedly finds an identifier start
    token and consumes the alternating name/punctuation pattern after it.
    """

    def _consume_cycle(tl, i):
        # Alternate between "continuation" tokens (dots, operators) and
        # "name-like" tokens; whitespace is passed through without advancing
        # the cycle.
        x = itertools.cycle(((lambda y: (y.match(T.Punctuation, '.') or (y.ttype in (T.Operator, T.Wildcard, T.Name)) or isinstance(y, sql.SquareBrackets))), (lambda y: ((y.ttype in (T.String.Symbol, T.Name, T.Wildcard, T.Literal.String.Single, T.Literal.Number.Integer, T.Literal.Number.Float)) or isinstance(y, (sql.Parenthesis, sql.SquareBrackets, sql.Function))))))
        for t in tl.tokens[i:]:
            if (t.ttype is T.Whitespace):
                (yield t)
                continue
            if next(x)(t):
                (yield t)
            else:
                if (isinstance(t, sql.Comment) and t.is_multiline()):
                    (yield t)
                # FIX: 'raise StopIteration' inside a generator becomes a
                # RuntimeError under PEP 479 (Python 3.7+); 'return' is the
                # correct way to end the generator.
                return

    def _next_token(tl, i):
        # Earliest candidate among name-like tokens and function/parenthesis
        # groups from position i onward.
        t1 = tl.token_next_by_type(i, (T.String.Symbol, T.Name, T.Literal.Number.Integer, T.Literal.Number.Float))
        t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis))
        if (t1 and t2):
            i1 = tl.token_index(t1)
            i2 = tl.token_index(t2)
            if (i1 > i2):
                return t2
            else:
                return t1
        elif t1:
            return t1
        else:
            return t2

    # Recurse into sublists that are not already identifiers.
    [group_identifier(sgroup) for sgroup in tlist.get_sublists() if (not isinstance(sgroup, sql.Identifier))]
    idx = 0
    token = _next_token(tlist, idx)
    while token:
        identifier_tokens = ([token] + list(_consume_cycle(tlist, (tlist.token_index(token) + 1))))
        # Trailing whitespace is not part of the identifier.
        if (identifier_tokens and (identifier_tokens[(- 1)].ttype is T.Whitespace)):
            identifier_tokens = identifier_tokens[:(- 1)]
        # A lone function/parenthesis or bare number is not an identifier.
        if (not ((len(identifier_tokens) == 1) and (isinstance(identifier_tokens[0], (sql.Function, sql.Parenthesis)) or (identifier_tokens[0].ttype in (T.Literal.Number.Integer, T.Literal.Number.Float))))):
            group = tlist.group_tokens(sql.Identifier, identifier_tokens)
            idx = (tlist.token_index(group) + 1)
        else:
            idx += 1
        token = _next_token(tlist, idx)
class TestMLPModel():
    """Tests for MLPModule: output values, pickling, invalid nonlinearities,
    and gradient flow through learnable activations.

    NOTE(review): the bare '.parametrize(...)' lines below look like stripped
    '@pytest.mark.parametrize' decorators -- confirm against version control.
    """
    .parametrize('input_dim, output_dim, hidden_sizes', [(5, 1, (1,)), (5, 1, (2,)), (5, 2, (3,)), (5, 2, (1, 1)), (5, 3, (2, 2))])
    def test_output_values(self, input_dim, output_dim, hidden_sizes):
        # With all-ones weights and an all-ones input, each ReLU layer
        # multiplies the activation by its width.
        input_val = torch.ones([1, input_dim], dtype=torch.float32)
        module_with_nonlinear_function_and_module = MLPModule(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.relu, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.nn.ReLU)
        module_with_nonlinear_module_instance_and_function = MLPModule(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.nn.ReLU(), hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.relu)
        output1 = module_with_nonlinear_function_and_module(input_val)
        output2 = module_with_nonlinear_module_instance_and_function(input_val)
        expected_output = torch.full([1, output_dim], fill_value=(5 * np.prod(hidden_sizes)), dtype=torch.float32)
        assert torch.all(torch.eq(expected_output, output1))
        assert torch.all(torch.eq(expected_output, output2))
    .parametrize('input_dim, output_dim, hidden_sizes', [(5, 1, (1,)), (5, 1, (2,)), (5, 2, (3,)), (5, 2, (1, 1)), (5, 3, (2, 2))])
    def test_is_pickleable(self, input_dim, output_dim, hidden_sizes):
        # A round trip through pickle must preserve the forward pass exactly.
        input_val = torch.ones([1, input_dim], dtype=torch.float32)
        module = MLPModule(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.relu, hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.nn.ReLU)
        output1 = module(input_val)
        h = pickle.dumps(module)
        model_pickled = pickle.loads(h)
        output2 = model_pickled(input_val)
        assert np.array_equal(torch.all(torch.eq(output1, output2)), True)
    .parametrize('hidden_nonlinear, output_nonlinear', [(torch.nn.ReLU, 'test'), ('test', torch.relu), (object(), torch.tanh), (torch.tanh, object())])
    def test_no_head_invalid_settings(self, hidden_nonlinear, output_nonlinear):
        # Non-callable / unsupported nonlinearities must raise ValueError.
        expected_msg = 'Non linear function .* is not supported'
        with pytest.raises(ValueError, match=expected_msg):
            MLPModule(input_dim=3, output_dim=5, hidden_sizes=(2, 3), hidden_nonlinearity=hidden_nonlinear, output_nonlinearity=output_nonlinear)
    def test_mlp_with_learnable_non_linear_function(self):
        # PReLU parameters must receive nonzero gradients after backward().
        (input_dim, output_dim, hidden_sizes) = (1, 1, (3, 2))
        input_val = (- torch.ones([1, input_dim], dtype=torch.float32))
        module = MLPModule(input_dim=input_dim, output_dim=output_dim, hidden_nonlinearity=torch.nn.PReLU(init=10.0), hidden_sizes=hidden_sizes, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, output_nonlinearity=torch.nn.PReLU(init=1.0))
        output = module(input_val)
        output.sum().backward()
        for tt in module.parameters():
            assert torch.all(torch.ne(tt.grad, 0))
class MetaDictSetting(Setting):
    """Setting backed by a dict of meta information.

    Parameters
    ----------
    meta_dict : dict
        The meta-information mapping this setting wraps.
    mandatory_fields : list, optional
        Field names that must be present; defaults to an empty list.
    """

    def __init__(self, meta_dict: dict, mandatory_fields: list = None):
        self.meta_dict = meta_dict
        # Fix: avoid the shared mutable-default-argument pitfall -- each
        # instance gets its own fresh list when the caller omits the argument.
        self.mandatory_fields = mandatory_fields if mandatory_fields is not None else []
def t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe():
    """Return the config dict for the stateless, tied-LM-head t5-3b setup."""
    # Flags forced explicitly regardless of the model's own defaults.
    explicit = {
        'output_only': True,
        'output_attentions': False,
        'precomputed_masks': True,
        'output_hidden_states': False,
    }
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': explicit,
        'stateless_tied': True,
    }
def get_type_line(source):
    """Return the '# type:' comment line that follows the def header in
    ``source``, or None if the line after the header is not a type comment."""

    def _strip_comment(line):
        # Keep only the code part; comments start at the first '#'.
        if '#' in line:
            return line[:line.index('#')]
        return line

    lines = source.split('\n')
    idx = 0
    # Scan forward for the end of the def header (per _def_end_regex).
    while not _def_end_regex.match(_strip_comment(lines[idx])):
        idx += 1
    candidate = lines[idx + 1].strip()
    return candidate if candidate.startswith('# type:') else None
def lift_to_sl2_Ok(N, c, d):
    """Lift the bottom row (c, d) to a matrix [a, b, c, d] in SL2(O_k) mod N.

    ``N`` is an ideal of a number field k; (c, d) must generate an ideal
    coprime to N.  Returns [a, b, c, d] with ad - bc = 1 (up to the
    congruence conditions used here -- intricate Sage ideal arithmetic).
    """
    k = N.number_field()
    if (c.is_zero() and d.is_zero()):
        raise ValueError(('Cannot lift (%s, %s) to an element of Sl2(Ok).' % (c, d)))
    if (not N.is_coprime(k.ideal(c, d))):
        raise ValueError(('<%s> + <%s> and the %s are not coprime.' % (c, d, N)))
    # Easy cases: c or d congruent to 1 mod N.
    if ((c - 1) in N):
        return [k(0), k((- 1)), 1, d]
    if ((d - 1) in N):
        return [k(1), k(0), c, 1]
    if c.is_zero():
        # Find a degree-one prime q coprime to d with q*N principal, and use
        # its generator m as the replacement for c.
        it = k.primes_of_degree_one_iter()
        q = k.ideal(1)
        while (not (q.is_coprime(d) and (q * N).is_principal())):
            q = next(it)
        m = (q * N).gens_reduced()[0]
        B = k.ideal(m).element_1_mod(k.ideal(d))
        return [((1 - B) / d), ((- B) / m), m, d]
    if d.is_zero():
        # Symmetric construction with the roles of c and d exchanged.
        it = k.primes_of_degree_one_iter()
        q = k.ideal(1)
        while (not (q.is_coprime(c) and (q * N).is_principal())):
            q = next(it)
        m = (q * N).gens_reduced()[0]
        B = k.ideal(c).element_1_mod(k.ideal(m))
        return [((1 - B) / m), ((- B) / c), c, m]
    # General case: first make c and d coprime as elements, then solve
    # B = 1 mod <c>, B = 0 mod <d> and read off a and b.
    (c, d) = make_coprime(N, c, d)
    B = k.ideal(c).element_1_mod(k.ideal(d))
    b = ((- B) / c)
    a = ((1 - B) / d)
    return [a, b, c, d]
class DictAction(Action):
    """argparse action that parses ``KEY=VALUE`` pairs into a dict.

    Values are parsed into int / float / bool when possible, and bracketed
    or comma-separated values become (nested) tuples and lists.
    """

    @staticmethod
    def _parse_int_float_bool(val):
        """Parse a scalar string as int, then float, then bool; else return it.

        Declared ``@staticmethod`` (takes no self). Without the decorator,
        ``self._parse_iterable(val)`` in __call__ would pass the bound
        instance as ``val`` and crash on ``val.strip``.
        """
        try:
            return int(val)
        except ValueError:
            pass
        try:
            return float(val)
        except ValueError:
            pass
        if (val.lower() in ['true', 'false']):
            return (True if (val.lower() == 'true') else False)
        return val

    @staticmethod
    def _parse_iterable(val):
        """Recursively parse a string into a scalar, list, or tuple."""

        def find_next_comma(string):
            # Index of the first comma at bracket depth zero.
            assert ((string.count('(') == string.count(')')) and (string.count('[') == string.count(']'))), f'Imbalanced brackets exist in {string}'
            end = len(string)
            for (idx, char) in enumerate(string):
                pre = string[:idx]
                if ((char == ',') and (pre.count('(') == pre.count(')')) and (pre.count('[') == pre.count(']'))):
                    end = idx
                    break
            return end

        val = val.strip('\'"').replace(' ', '')
        is_tuple = False
        if (val.startswith('(') and val.endswith(')')):
            is_tuple = True
            val = val[1:(- 1)]
        elif (val.startswith('[') and val.endswith(']')):
            val = val[1:(- 1)]
        elif (',' not in val):
            # No brackets, no commas: a plain scalar.
            return DictAction._parse_int_float_bool(val)
        values = []
        while (len(val) > 0):
            comma_idx = find_next_comma(val)
            element = DictAction._parse_iterable(val[:comma_idx])
            values.append(element)
            val = val[(comma_idx + 1):]
        if is_tuple:
            values = tuple(values)
        return values

    def __call__(self, parser, namespace, values, option_string=None):
        """Collect all KEY=VALUE pairs into a dict stored on the namespace."""
        options = {}
        for kv in values:
            (key, val) = kv.split('=', maxsplit=1)
            options[key] = self._parse_iterable(val)
        setattr(namespace, self.dest, options)
# NOTE(review): the bare '.parametrize(...)' below looks like a stripped
# '@pytest.mark.parametrize' decorator -- confirm against version control.
.parametrize('ratio, y, type, err_msg', [(0.5, binary_target, 'clean-sampling', "'clean-sampling' methods do let the user specify the sampling ratio"), (0.1, np.array((([0] * 10) + ([1] * 20))), 'over-sampling', 'remove samples from the minority class while trying to generate new'), (0.1, np.array((([0] * 10) + ([1] * 20))), 'under-sampling', 'generate new sample in the majority class while trying to remove')])
def test_check_sampling_strategy_float_error(ratio, y, type, err_msg):
    """A float sampling strategy must be rejected with a matching ValueError."""
    with pytest.raises(ValueError, match=err_msg):
        check_sampling_strategy(ratio, y, type)
# NOTE(review): the bare '.parametrize(...)' below looks like a stripped
# '@pytest.mark.parametrize' decorator -- confirm against version control.
.parametrize('dim_context, action_noise, reward_noise, min_action_value, max_action_value, random_state, err, description', invalid_input_of_init)
def test_synthetic_continuous_init_using_invalid_inputs(dim_context, action_noise, reward_noise, min_action_value, max_action_value, random_state, err, description):
    """Invalid constructor args must raise `err` with a matching message."""
    with pytest.raises(err, match=f'{description}*'):
        _ = SyntheticContinuousBanditDataset(dim_context=dim_context, action_noise=action_noise, reward_noise=reward_noise, min_action_value=min_action_value, max_action_value=max_action_value, random_state=random_state)
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Rebuild a pytz tzinfo during unpickling.

    Without offsets, returns the bare timezone.  Otherwise tries an exact
    (utcoffset, dstoffset, tzname) match, then a match on the offsets alone
    (tzname may have changed between tz database releases), and finally
    fabricates a new localized instance and caches it.
    """
    tz = pytz.timezone(zone)
    if utcoffset is None:
        return tz
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    key = (utcoffset, dstoffset, tzname)
    try:
        return tz._tzinfos[key]
    except KeyError:
        pass
    # Fall back: same offsets under a (possibly renamed) tzname.
    for candidate in tz._tzinfos.values():
        if candidate._utcoffset == utcoffset and candidate._dst == dstoffset:
            return candidate
    # No match at all: create and memoize a fresh localized instance.
    tz._tzinfos[key] = tz.__class__(key, tz._tzinfos)
    return tz._tzinfos[key]
def evaluate_interaction_sample(sample, model, max_generation_length, name='', gold_forcing=False, metrics=None, total_num=(- 1), database_username='', database_password='', database_timeout=0, use_predicted_queries=False, write_results=False, use_gpu=False, compute_metrics=False, bool_progressbar=True):
    """Evaluate ``model`` on a sample of interactions, accumulating metrics
    and optionally writing per-utterance predictions to ``<name>_predictions.json``.

    Returns (averaged metrics dict, list of raw predictions).
    """
    predictions_file = open((name + '_predictions.json'), 'w')
    print(('Predicting with file ' + str((name + '_predictions.json'))))
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.0
    if bool_progressbar:
        progbar = get_progressbar(name, len(sample))
        progbar.start()
    num_utterances = 0
    predictions = []
    # NOTE(review): this overwrites the use_gpu parameter based on sys.argv --
    # the parameter value is effectively ignored; confirm this is intended.
    use_gpu = (not (('--no_gpus' in sys.argv) or ('--no_gpus=1' in sys.argv)))
    model.eval()
    for (i, interaction) in enumerate(sample):
        try:
            with torch.no_grad():
                # Either condition on the model's own previous predictions,
                # or feed the gold queries (teacher forcing).
                if use_predicted_queries:
                    example_preds = model.predict_with_predicted_queries(interaction, max_generation_length)
                else:
                    example_preds = model.predict_with_gold_queries(interaction, max_generation_length, feed_gold_query=gold_forcing)
                torch.cuda.empty_cache()
        except RuntimeError as exception:
            # Typically CUDA OOM; report the interaction and abort the run.
            print(('Failed on interaction: ' + str(interaction.identifier)))
            print(exception)
            print('\n\n')
            exit()
        predictions.extend(example_preds)
        assert ((len(example_preds) == len(interaction.interaction.utterances)) or (not example_preds))
        for (j, pred) in enumerate(example_preds):
            num_utterances += 1
            (sequence, loss, token_accuracy, _, decoder_results) = pred
            # Look up the gold data for this utterance, depending on whether
            # the model ran on its own predictions or on gold context.
            if use_predicted_queries:
                item = interaction.processed_utterances[j]
                original_utt = interaction.interaction.utterances[item.index]
                gold_query = original_utt.gold_query_to_use
                original_gold_query = original_utt.original_gold_query
                gold_table = original_utt.gold_sql_results
                gold_queries = [q[0] for q in original_utt.all_gold_queries]
                gold_tables = [q[1] for q in original_utt.all_gold_queries]
                index = item.index
            else:
                item = interaction.gold_utterances()[j]
                gold_query = item.gold_query()
                original_gold_query = item.original_gold_query()
                gold_table = item.gold_table()
                gold_queries = item.original_gold_queries()
                gold_tables = item.gold_tables()
                index = item.utterance_index
            if loss:
                # Normalize the loss per gold-query token.
                loss = (loss / len(gold_query))
            flat_sequence = item.flatten_sequence(sequence)
            if write_results:
                write_prediction(predictions_file, identifier=interaction.identifier, input_seq=item.input_sequence(), probability=decoder_results.probability, prediction=sequence, flat_prediction=flat_sequence, gold_query=gold_query, flat_gold_queries=gold_queries, gold_tables=gold_tables, index_in_interaction=index, database_username=database_username, database_password=database_password, database_timeout=database_timeout, compute_metrics=compute_metrics)
            update_sums(metrics, metrics_sums, sequence, flat_sequence, gold_query, original_gold_query, gold_forcing, loss, token_accuracy, database_username=database_username, database_password=database_password, database_timeout=database_timeout, gold_table=gold_table)
        if bool_progressbar:
            progbar.update(i)
    if bool_progressbar:
        progbar.finish()
    if (total_num < 0):
        # Default: average over the number of utterances actually evaluated.
        total_num = num_utterances
    predictions_file.close()
    return (construct_averages(metrics_sums, total_num), predictions)
def get_map(num_classes=16):
    """Return the SYNTHIA label-id -> contiguous train-id mapping.

    Only the 16-class setup is supported; any other count raises
    NotImplementedError.
    """
    if num_classes != 16:
        raise NotImplementedError(f'Not yet supported {num_classes} classes')
    # SYNTHIA ids listed in train-id order; enumerate assigns 0..15.
    synthia_ids = [3, 4, 2, 21, 5, 7, 15, 9, 6, 1, 10, 17, 8, 19, 12, 11]
    return {sid: tid for tid, sid in enumerate(synthia_ids)}
class D(nn.Module):
    """Maxout-MLP discriminator with a conditional or unconditional logit head."""

    class Maxout(nn.Module):
        """Linear projection followed by a max over `pool_size` feature pieces."""

        def __init__(self, d_in, d_out, pool_size=5):
            super().__init__()
            self.d_in = d_in
            self.d_out = d_out
            self.pool_size = pool_size
            # Project to d_out * pool_size features; forward maxes over each group.
            self.lin = nn.Linear(d_in, d_out * pool_size)

        def forward(self, inputs):
            projected = self.lin(inputs)
            # Reshape last dim to (d_out, pool_size) and max over the pieces.
            view_shape = list(inputs.size())[:-1] + [self.d_out, self.pool_size]
            maxed, _ = projected.view(*view_shape).max(len(view_shape) - 1)
            return maxed

        def max(self, out, dim=5):
            return out.view(out.size(0), -1, dim).max(2)[0]

    def __init__(self, conditioning, k_value, act_dim=200, x_dim=2):
        super().__init__()
        self.fc1 = self.Maxout(x_dim, act_dim)
        self.fc2 = self.Maxout(act_dim, act_dim)
        self.fc3 = self.Maxout(act_dim, act_dim)
        # Logit head depends on whether class conditioning is used.
        if conditioning == 'unconditional':
            self.fc4 = LinearUnconditionalLogits(act_dim)
        elif conditioning == 'conditional':
            self.fc4 = LinearConditionalMaskLogits(act_dim, k_value)
        else:
            raise NotImplementedError()

    def forward(self, x, y=None, get_features=False):
        features = self.fc3(self.fc2(self.fc1(x)))
        if get_features:
            return features
        return self.fc4(features, y, get_features=get_features)
def load_model_config(config_f):
    """Read a JSON model-config file, echo its path and contents, and return it."""
    print(config_f)
    with open(config_f, 'r') as handle:
        config = json.load(handle)
    print(config)
    return config
def LF_non(x):
    """Labeling function: vote NEGATIVE when 'non'/'non-' precedes the pain span."""
    pattern = re.compile('(non)[-]*', re.I)
    # Look in a two-token window to the left of the pain mention.
    left_text = get_left_span(x.pain, window=2).text
    if pattern.search(left_text) is not None:
        return NEGATIVE
    return ABSTAIN
class SyntheticProjectCheckout(ProjectCheckout):
    """Checkout backed by a synthetic (locally stored) repository template."""

    def __init__(self, name: str, version: str, data_path: str, base_path: str):
        super().__init__('-synthetic-', join(base_path, name), version)
        self.name = name
        self.version = version
        self.data_path = data_path

    def exists(self) -> bool:
        # The checkout exists once its directory has been materialized.
        return exists(self.checkout_dir)

    def delete(self) -> None:
        self._logger.debug('Delete %s', self.checkout_dir)
        remove_tree(self.checkout_dir)

    def _create(self) -> None:
        if not exists(self.checkout_dir):
            self._logger.debug('Create checkout directory %s', self.checkout_dir)
            makedirs(self.checkout_dir)
        # Populate the checkout from the synthetic repo template on disk.
        copy_tree(join(self.data_path, 'repo'), self.checkout_dir)

    def __str__(self):
        return f'synthetic:{self.name}.{self.version}'
def _get_data(modality, output_folder_name, in_memory_directory):
data = {}
if output_folder_name:
for filename in os.listdir(output_folder_name):
if filename.endswith('.csv'):
table_name = Path(filename).stem
data_path = os.path.join(output_folder_name, filename)
data[table_name] = pd.read_csv(data_path)
else:
for (filename, file_) in in_memory_directory.items():
if filename.endswith('.csv'):
table_name = Path(filename).stem
data[table_name] = pd.read_csv(io.StringIO(file_.decode()))
if (modality != 'multi_table'):
data = data.popitem()[1]
return data |
def add_roi_Xconv1fc_head(model, blob_in, dim_in, spatial_scale):
    """Add a RoI box head: a stack of 3x3 convs followed by one FC layer.

    Returns the output blob name and its dimension ('fc6', MLP_HEAD_DIM).
    """
    hidden_dim = cfg.FAST_RCNN.CONV_HEAD_DIM
    roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
    roi_feat = model.RoIFeatureTransform(
        blob_in,
        'roi_feat',
        blob_rois='rois',
        method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
        resolution=roi_size,
        sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale,
    )
    # Re-weight RoI features by objectness scores before the conv stack.
    current = model.net.RoIFeatureBoost([roi_feat, 'obn_scores'], 'roi_feat_boost')
    for conv_idx in range(cfg.FAST_RCNN.NUM_STACKED_CONVS):
        current = model.Conv(
            current,
            'head_conv' + str(conv_idx + 1),
            dim_in,
            hidden_dim,
            3,
            stride=1,
            pad=1,
            weight_init=('MSRAFill', {}),
            bias_init=('ConstantFill', {'value': 0.0}),
            no_bias=0,
        )
        current = model.Relu(current, current)
        dim_in = hidden_dim
    fc_dim = cfg.FAST_RCNN.MLP_HEAD_DIM
    model.FC(current, 'fc6', dim_in * roi_size * roi_size, fc_dim)
    model.Relu('fc6', 'fc6')
    return ('fc6', fc_dim)
def get_elapsed_time():
    """Return total CPU time used so far (user + system, incl. children).

    Raises NotImplementedError on Windows, where the child-process fields
    of os.times() are not meaningful.
    """
    if os.name == 'nt':
        raise NotImplementedError('cannot use get_elapsed_time() on Windows')
    times = os.times()
    # user + system + children_user + children_system
    return times[0] + times[1] + times[2] + times[3]
@register_model
def tf_efficientnet_b7_ns(pretrained=False, **kwargs):
    """EfficientNet-B7 NoisyStudent, TensorFlow-compatible variant.

    Forces TF defaults ('same' padding and the TF batch-norm epsilon)
    before delegating to the generic EfficientNet builder.

    NOTE(review): line above the def was garbled to a bare `_model`
    expression (a NameError as written); restored the standard
    `@register_model` registration decorator used by these builders.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet('tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
    return model
class EmissionModel(nn.Module):
    """Gaussian emission model: per-state Normal distributions over frames."""

    def __init__(self):
        super().__init__()
        self.distribution_function = tdist.normal.Normal

    def sample(self, means, stds, sampling_temp=1.0):
        """Draw a temperature-scaled sample; return the means when temp <= 0."""
        if sampling_temp > 0:
            return self.distribution_function(means, stds * sampling_temp).sample()
        return means

    def forward(self, x_t, means, stds, state_lengths):
        """Log-likelihood of frame x_t under each state, masked by state_lengths."""
        T_max = means.shape[1]
        dists = self.distribution_function(means, stds)
        frame = x_t.unsqueeze(1)
        log_probs = dists.log_prob(frame)
        # Zero out contributions from padded states beyond each sequence's count.
        arange = torch.arange(T_max, out=frame.new_zeros(T_max))
        mask = (arange.expand(len(state_lengths), T_max) < state_lengths.unsqueeze(1)).unsqueeze(2)
        return torch.sum(log_probs * mask, dim=2)
@pytest.fixture(scope='module')
def source_2bin_2channel_coupledhistosys():
    """Module-scoped fixture: the 2-bin / 2-channel coupled-histosys validation spec.

    NOTE(review): the decorator line was garbled to the bare expression
    `(scope='module')` — a syntax error; restored the standard
    `@pytest.fixture(scope='module')` form.
    """
    with open('validation/data/2bin_2channel_coupledhisto.json', encoding='utf-8') as read_json:
        return json.load(read_json)
class IntBlock(nn.Module):
    """Residual block wrapper for fixed-point (quantized) inference.

    Wraps a conv ``body`` (and an optional ``shortcut`` projection) and
    performs the residual addition in fixed-point arithmetic.  Activation
    tensors carry an ``output_fraclen`` attribute (number of fractional
    bits); before the add, operands are aligned to a common fraclen and the
    sum is clamped to the signed 32-bit range.

    Two execution modes:
      * ``int_op_only``: pure integer ops (bit shifts, integer adds);
      * otherwise: float emulation of the same fixed-point math
        (quantize, scale by 2**fraclen, add, clamp, rescale).
    """
    def __init__(self, body, shortcut=None):
        super(IntBlock, self).__init__()
        self.body = body
        # A missing shortcut means an identity residual connection.
        self.residual_connection = (shortcut is None)
        if (not self.residual_connection):
            self.shortcut = shortcut
        self.post_relu = nn.ReLU(inplace=True)
        # Propagate the body's integer-only capability flag.
        self.int_op_only = getattr(body, 'int_op_only', False)
    def forward(self, x):
        # Only valid when global integer-inference mode is enabled.
        # NOTE(review): assumes the incoming x already carries an
        # output_fraclen attribute set by the previous layer — confirm.
        assert getattr(FLAGS, 'int_infer', False)
        if getattr(self, 'int_op_only', False):
            # ---- pure integer path ----
            res = x
            for layer_ in self.body:
                if isinstance(layer_, nn.Conv2d):
                    # Re-quantize the running activation into the conv's
                    # 8-bit input format before applying the conv.
                    res = int_op_only_fix_quant(res, 8, layer_.input_fraclen.item(), res.output_fraclen, layer_.input_symmetric)
                    res = layer_(res)
                    # Conv output fraclen = weight fraclen + input fraclen.
                    output_fraclen = (layer_.weight_fraclen + layer_.input_fraclen).item()
                    setattr(res, 'output_fraclen', output_fraclen)
                else:
                    res = layer_(res)
            if self.residual_connection:
                res_fraclen = res.output_fraclen
                x_fraclen = x.output_fraclen
                # Shift the lower-precision operand up to the larger fraclen,
                # add, then saturate to the signed 32-bit integer range.
                if (res_fraclen > x_fraclen):
                    x = (x << (res_fraclen - x_fraclen))
                    res += x
                    res.clamp_(max=((1 << 31) - 1), min=((- (1 << 31)) + 1))
                    output_fraclen = res_fraclen
                    setattr(res, 'output_fraclen', output_fraclen)
                else:
                    res = (res << (x_fraclen - res_fraclen))
                    res += x
                    res.clamp_(max=((1 << 31) - 1), min=((- (1 << 31)) + 1))
                    output_fraclen = x_fraclen
                    setattr(res, 'output_fraclen', output_fraclen)
            else:
                # Projected shortcut: quantize its input, apply it, and tag
                # the result with the shortcut's combined fraclen.
                x = int_op_only_fix_quant(x, 8, self.shortcut[0].input_fraclen.item(), x.output_fraclen, self.shortcut[0].input_symmetric)
                x = self.shortcut(x)
                output_fraclen = (self.shortcut[(- 1)].weight_fraclen + self.shortcut[(- 1)].input_fraclen).item()
                setattr(x, 'output_fraclen', output_fraclen)
                res_fraclen = res.output_fraclen
                x_fraclen = x.output_fraclen
                # Same align-add-clamp sequence as the identity case above.
                if (res_fraclen > x_fraclen):
                    x = (x << (res_fraclen - x_fraclen))
                    res += x
                    res.clamp_(max=((1 << 31) - 1), min=((- (1 << 31)) + 1))
                    output_fraclen = res_fraclen
                    setattr(res, 'output_fraclen', output_fraclen)
                else:
                    res = (res << (x_fraclen - res_fraclen))
                    res += x
                    res.clamp_(max=((1 << 31) - 1), min=((- (1 << 31)) + 1))
                    output_fraclen = x_fraclen
                    setattr(res, 'output_fraclen', output_fraclen)
            res = self.post_relu(res)
        else:
            # ---- float emulation of the fixed-point math ----
            res = x
            for layer_ in self.body:
                if isinstance(layer_, nn.Conv2d):
                    # Quantize to the conv's input grid, run the conv on the
                    # integer-valued tensor, then rescale by the combined fraclen.
                    res = (fix_quant(res, 8, layer_.input_fraclen, 1, layer_.input_symmetric)[0] * (2 ** layer_.input_fraclen)).int().float()
                    res = layer_(res)
                    res.div_((2 ** (layer_.weight_fraclen + layer_.input_fraclen)))
                else:
                    res = layer_(res)
            setattr(res, 'output_fraclen', (self.body[(- 1)].weight_fraclen + self.body[(- 1)].input_fraclen))
            if self.residual_connection:
                res_fraclen = res.output_fraclen
                x_fraclen = x.output_fraclen
                # Emulate the integer add on the common (max) fraclen grid
                # with int32 saturation, then scale back to float range.
                output_fraclen = max(res_fraclen, x_fraclen)
                res = (res * (2 ** output_fraclen))
                x = (x * (2 ** output_fraclen))
                res += x
                res = torch.clamp(res, max=((1 << 31) - 1), min=((- (1 << 31)) + 1))
                res = (res / (2 ** output_fraclen))
            else:
                # Projected shortcut in float-emulation form.
                x = (fix_quant(x, 8, self.shortcut[0].input_fraclen, 1, self.shortcut[0].input_symmetric)[0] * (2 ** self.shortcut[0].input_fraclen)).int().float()
                x = self.shortcut(x)
                setattr(x, 'output_fraclen', (self.shortcut[(- 1)].weight_fraclen + self.shortcut[(- 1)].input_fraclen))
                x.div_((2 ** x.output_fraclen))
                res_fraclen = res.output_fraclen
                x_fraclen = x.output_fraclen
                output_fraclen = max(res_fraclen, x_fraclen)
                res = (res * (2 ** output_fraclen))
                x = (x * (2 ** output_fraclen))
                res += x
                res = torch.clamp(res, max=((1 << 31) - 1), min=((- (1 << 31)) + 1))
                res = (res / (2 ** output_fraclen))
            res = self.post_relu(res)
            setattr(res, 'output_fraclen', output_fraclen)
        return res
def dataio_prepare(hparams):
    """Prepare train/valid/test DynamicItemDatasets for LM training.

    Loads the text files listed in ``hparams``, attaches a text -> token
    pipeline that yields the raw text plus BOS- and EOS-prefixed token
    tensors, and sets the dataset output keys.

    Returns (train_data, valid_data, test_data).
    """
    logging.info('generating datasets...')
    datasets = load_dataset('text', data_files={'train': hparams['lm_train_data'], 'valid': hparams['lm_valid_data'], 'test': hparams['lm_test_data']})
    train_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(datasets['train'])
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(datasets['valid'])
    test_data = sb.dataio.dataset.DynamicItemDataset.from_arrow_dataset(datasets['test'])
    datasets = [train_data, valid_data, test_data]
    tokenizer = hparams['tokenizer']

    # NOTE(review): the decorator lines were garbled ('.data_pipeline.takes')
    # — a syntax error; restored the standard SpeechBrain dynamic-item
    # pipeline decorators.
    @sb.utils.data_pipeline.takes('text')
    @sb.utils.data_pipeline.provides('text', 'tokens_bos', 'tokens_eos')
    def text_pipeline(text):
        yield text
        tokens_list = tokenizer.encode_as_ids(text)
        tokens_bos = torch.LongTensor([hparams['bos_index']] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams['eos_index']])
        yield tokens_eos

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    sb.dataio.dataset.set_output_keys(datasets, ['id', 'text', 'tokens_bos', 'tokens_eos'])
    return (train_data, valid_data, test_data)
class TestDeriavtives(TestCase):
    """Checks analytical RNEA/gravity/static-torque derivatives against pin.rnea."""

    def setUp(self):
        self.model = pin.buildSampleModelHumanoidRandom()
        self.data = self.model.createData()
        q_bound = np.full((self.model.nq, 1), np.pi)
        self.q = pin.randomConfiguration(self.model, -q_bound, q_bound)
        self.v = np.random.rand(self.model.nv)
        self.a = np.random.rand(self.model.nv)
        # One random external force per joint.
        self.fext = [pin.Force.Random() for _ in range(self.model.njoints)]

    def test_rnea_derivatives(self):
        derivatives = pin.computeRNEADerivatives(self.model, self.data, self.q, self.v, self.a)
        self.assertTrue(len(derivatives) == 3)
        ref_data = self.model.createData()
        pin.rnea(self.model, ref_data, self.q, self.v, self.a)
        self.assertApprox(self.data.ddq, ref_data.ddq)
        # Same check with external forces applied.
        derivatives = pin.computeRNEADerivatives(self.model, self.data, self.q, self.v, self.a, self.fext)
        self.assertTrue(len(derivatives) == 3)
        pin.rnea(self.model, ref_data, self.q, self.v, self.a, self.fext)
        self.assertApprox(self.data.ddq, ref_data.ddq)

    def test_generalized_gravity_derivatives(self):
        res = pin.computeGeneralizedGravityDerivatives(self.model, self.data, self.q)
        ref_data = self.model.createData()
        # Gravity derivatives equal RNEA derivatives at zero velocity/acceleration.
        ref, _, _ = pin.computeRNEADerivatives(self.model, ref_data, self.q, self.v * 0, self.a * 0)
        self.assertApprox(res, ref)

    def test_static_torque_derivatives(self):
        res = pin.computeStaticTorqueDerivatives(self.model, self.data, self.q, self.fext)
        ref_data = self.model.createData()
        ref, _, _ = pin.computeRNEADerivatives(self.model, ref_data, self.q, self.v * 0, self.a * 0, self.fext)
        self.assertApprox(res, ref)
def build_processors(processors_config: DictConfig, registry_key: str=None, *args, **kwargs):
    """Build a dict of processors from config, reusing registry instances when possible."""
    from mmf.datasets.processors.processors import Processor
    built = {}
    for processor_key, processor_params in processors_config.items():
        # Skip empty/disabled processor entries.
        if not processor_params:
            continue
        instance = None
        if registry_key is not None:
            # Try to reuse a previously registered processor instance.
            instance = registry.get(registry_key.format(processor_key), no_warning=True)
        if instance is None:
            instance = Processor(processor_params, *args, **kwargs)
        built[processor_key] = instance
    return built
def hear_scene_trainvaltest(target_dir: str, cache_dir: str, dataset_root: str, get_path_only: bool=False):
    """Prepare train/valid/test CSV manifests for a HEAR scene-classification task.

    Resamples the corpus to 16 kHz, then writes one CSV per split with
    columns id / wav_path / labels (multiple labels joined by ' ; ').
    Returns (train_csv, valid_csv, [test_csv]); with ``get_path_only`` the
    CSVs are returned without being (re)generated.
    """
    target_dir = Path(target_dir)
    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = dataset_root / '16000'
    train_csv = target_dir / 'train.csv'
    valid_csv = target_dir / 'valid.csv'
    # BUG FIX: was (target_dir / 'test_csv') — missing the '.csv' extension,
    # inconsistent with the train/valid manifest names.
    test_csv = target_dir / 'test.csv'
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])

    def load_json(filepath):
        with open(filepath, 'r') as fp:
            return json.load(fp)

    def split_to_df(split: str) -> pd.DataFrame:
        # Split metadata maps wav filename -> list of labels.
        meta = load_json(dataset_root / f'{split}.json')
        data = defaultdict(list)
        for k in list(meta.keys()):
            data['id'].append(k)
            data['wav_path'].append((wav_root / split) / k)
            data['labels'].append(' ; '.join([str(label).strip() for label in meta[k]]))
        return pd.DataFrame(data=data)

    split_to_df('train').to_csv(train_csv, index=False)
    split_to_df('valid').to_csv(valid_csv, index=False)
    split_to_df('test').to_csv(test_csv, index=False)
    return (train_csv, valid_csv, [test_csv])
def basic_blocks(dim, index, layers, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=GroupNorm, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-05):
    """Build the stage-`index` nn.Sequential of PoolFormer blocks.

    Drop-path rate decays linearly with global block depth across stages.
    """
    total_blocks = sum(layers)
    blocks_before = sum(layers[:index])
    stage = []
    for block_idx in range(layers[index]):
        # Global (cross-stage) depth position determines this block's drop-path.
        dpr = drop_path_rate * (block_idx + blocks_before) / (total_blocks - 1)
        stage.append(PoolFormerBlock(dim, pool_size=pool_size, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer, drop_rate=drop_rate, drop_path_rate=dpr, use_layer_scale=use_layer_scale, layer_scale_init_value=layer_scale_init_value))
    return nn.Sequential(*stage)
def calc_map_mesh(testfile, predfile):
    """Join test rows with prediction scores and write them sorted by score.

    Reads tab-separated rows from ``testfile`` and one float score per line
    from ``predfile``, sorts the (row, score) pairs by descending score, and
    writes '<col0>\\t<col1>\\t<score>' lines to ``mfwtest.out7.txt`` in the
    current directory.
    """
    with open(testfile, 'r') as ftest, open(predfile, 'r') as fpred:
        data = [line.strip().split('\t') for line in ftest]
        pred = [float(line.strip()) for line in fpred]
    ranked = sorted(zip(data, pred), key=(lambda d: d[1]), reverse=True)
    # BUG FIX: the output file handle was never closed; use a context manager.
    # Also removed dead locals (oneq/pre/mapscore/excnt) that were never used.
    with open('mfwtest.out7.txt', 'w') as fout:
        for row, score in ranked:
            fout.write(row[0] + '\t' + row[1] + '\t' + str(score) + '\n')
def apply_statistics_correction(transformed_graph: Graph, representative_data_gen: Callable, core_config: CoreConfig, fw_info: FrameworkInfo, fw_impl: FrameworkImplementation, tb_w: TensorboardWriter=None) -> Graph:
    """Apply the enabled statistics-based corrections to the quantized graph.

    Runs second-moment correction and/or bias correction according to the
    quantization config, optionally logging the result to TensorBoard.
    """
    quant_cfg = core_config.quantization_config
    if quant_cfg.weights_second_moment_correction:
        transformed_graph = apply_second_moment_correction_to_graph(transformed_graph, representative_data_gen, core_config, fw_info, fw_impl)
    if quant_cfg.weights_bias_correction:
        transformed_graph = apply_bias_correction_to_graph(transformed_graph, core_config, fw_impl=fw_impl)
    if tb_w is not None:
        # Log the corrected graph for later inspection.
        tb_w.add_graph(transformed_graph, 'after_statistics_correction')
    return transformed_graph
def test():
    """Concatenating named record arrays and relabeling yields the new name in the type."""
    one = ak.highlevel.Array([[{'x': 1}], [], [{'x': 2}]], with_name='One')
    two = ak.highlevel.Array([[{'x': 1.1}], [], [{'x': 2.2}]], with_name='Two')
    merged = ak.operations.concatenate([one, two], axis=1)
    assert str(ak.operations.with_name(merged, 'All').type) == '3 * var * All[x: float64]'
    # Same relabeling holds on a sliced (shorter) pair of arrays.
    merged_tail = ak.operations.concatenate([one[1:], two[1:]], axis=1)
    assert str(ak.operations.with_name(merged_tail, 'All').type) == '2 * var * All[x: float64]'
class AdaptiveAggregateAlarms(AggregateAlarms):
    """Aggregate-alarm rule whose threshold adapts from a score histogram.

    Delegates ``bin_sz`` and ``default_hist_gap_thres`` to the wrapped
    AdaptiveThreshold via property/setter pairs.
    """
    threshold_class = AdaptiveThreshold

    def __init__(self, alm_threshold: float=None, abs_score=True, min_alm_in_window: int=2, alm_window_minutes: float=60, alm_suppress_minutes: float=120, bin_sz: int=10, default_hist_gap_thres: float=1.2):
        super().__init__(alm_threshold=alm_threshold, abs_score=abs_score, min_alm_in_window=min_alm_in_window, alm_window_minutes=alm_window_minutes, alm_suppress_minutes=alm_suppress_minutes)
        self.threshold = AdaptiveThreshold(alm_threshold=alm_threshold, abs_score=abs_score, bin_sz=bin_sz, default_hist_gap_thres=default_hist_gap_thres)

    # BUG FIX: the property decorators were garbled ('_sz.setter',
    # '_hist_gap_thres.setter', missing '@property') — as written the
    # setters referenced undefined names and the getters were shadowed;
    # restored the standard property/setter pairs.
    @property
    def bin_sz(self):
        return self.threshold.bin_sz

    @bin_sz.setter
    def bin_sz(self, x):
        self.threshold.bin_sz = x

    @property
    def default_hist_gap_thres(self):
        return self.threshold.default_hist_gap_thres

    @default_hist_gap_thres.setter
    def default_hist_gap_thres(self, x):
        self.threshold.default_hist_gap_thres = x
class consume():
    """Iterable that drains a deque (LIFO, via pop from the right) while a condition holds.

    The default condition consumes until the stream is empty.
    """

    def __init__(self, stream: Deque[T], processing_elements: int=1, condition: Optional[Callable[([], bool)]]=None):
        self.stream = stream
        self.pes = processing_elements
        if condition is None:
            # Default: keep consuming while the stream has elements.
            condition = lambda: len(stream) > 0
        self.condition = condition

    def __iter__(self) -> Generator[(T, None, None)]:
        while self.condition():
            yield self.stream.pop()
def copy_conllu(tokenizer_dir, mwt_dir, short_name, dataset, particle):
    """Copy the gold tokenizer conllu file into the MWT dir under its particle name."""
    src = f'{tokenizer_dir}/{short_name}.{dataset}.gold.conllu'
    dst = f'{mwt_dir}/{short_name}.{dataset}.{particle}.conllu'
    shutil.copyfile(src, dst)
class BackendIPythonCommandline(BackendIPython):
    """Rich-output display backend for the IPython command line.

    Text-like rich output is rendered inline as plain text; images and 3-D
    scenes are saved to files and opened in an external viewer, with only a
    short launch message shown in the terminal.
    """
    def default_preferences(self):
        """Return display preferences: no supplemental plots on the command line."""
        from sage.repl.rich_output.preferences import DisplayPreferences
        return DisplayPreferences(supplemental_plot='never')
    def _repr_(self):
        """Short description of this backend."""
        return 'IPython command line'
    def supported_output(self):
        """Return the set of rich-output container classes this backend handles."""
        return set([OutputPlainText, OutputAsciiArt, OutputUnicodeArt, OutputLatex, OutputImagePng, OutputImageGif, OutputImagePdf, OutputImageDvi, OutputSceneJmol, OutputSceneWavefront, OutputSceneThreejs])
    def displayhook(self, plain_text, rich_output):
        """Convert rich output to an IPython (format_data, metadata) pair.

        Text-like outputs become the 'text/plain' payload directly;
        graphical outputs are handed to an external viewer and the payload
        is the viewer's launch message.
        """
        if isinstance(rich_output, OutputPlainText):
            return ({'text/plain': rich_output.text.get_str()}, {})
        elif isinstance(rich_output, OutputAsciiArt):
            return ({'text/plain': rich_output.ascii_art.get_str()}, {})
        elif isinstance(rich_output, OutputUnicodeArt):
            return ({'text/plain': rich_output.unicode_art.get_str()}, {})
        elif isinstance(rich_output, OutputLatex):
            return ({'text/plain': rich_output.latex.get_str()}, {})
        elif isinstance(rich_output, OutputImagePng):
            msg = self.launch_viewer(rich_output.png.filename(ext='png'), plain_text.text.get_str())
            return ({'text/plain': msg}, {})
        elif isinstance(rich_output, OutputImageGif):
            msg = self.launch_viewer(rich_output.gif.filename(ext='gif'), plain_text.text.get_str())
            return ({'text/plain': msg}, {})
        elif isinstance(rich_output, OutputImagePdf):
            msg = self.launch_viewer(rich_output.pdf.filename(ext='pdf'), plain_text.text.get_str())
            return ({'text/plain': msg}, {})
        elif isinstance(rich_output, OutputImageDvi):
            msg = self.launch_viewer(rich_output.dvi.filename(ext='dvi'), plain_text.text.get_str())
            return ({'text/plain': msg}, {})
        elif isinstance(rich_output, OutputSceneJmol):
            msg = self.launch_jmol(rich_output, plain_text.text.get_str())
            return ({'text/plain': msg}, {})
        elif isinstance(rich_output, OutputSceneWavefront):
            msg = self.launch_sage3d(rich_output, plain_text.text.get_str())
            return ({'text/plain': msg}, {})
        elif isinstance(rich_output, OutputSceneThreejs):
            msg = self.launch_viewer(rich_output.html.filename(ext='html'), plain_text.text.get_str())
            return ({'text/plain': msg}, {})
        else:
            raise TypeError('rich_output type not supported')
    def display_immediately(self, plain_text, rich_output):
        """Format the rich output and print its plain-text payload right away."""
        (formatdata, metadata) = self.displayhook(plain_text, rich_output)
        print(formatdata['text/plain'])
    def launch_viewer(self, image_file, plain_text):
        """Open ``image_file`` with the configured viewer for its extension.

        Falls back to the web browser when no viewer is configured; no
        process is spawned under doctests. Returns a launch message.
        """
        (base, dot_ext) = os.path.splitext(image_file)
        ext = dot_ext.lstrip(os.path.extsep)
        from sage.misc.viewer import viewer
        command = viewer(ext)
        if (not command):
            command = viewer.browser()
        from sage.doctest import DOCTEST_MODE
        if (not DOCTEST_MODE):
            # Detach the viewer process and silence its output.
            os.system('{0} {1} 2>/dev/null 1>/dev/null &'.format(command, image_file))
        return 'Launched {0} viewer for {1}'.format(ext, plain_text)
    def launch_jmol(self, output_jmol, plain_text):
        """Launch the Jmol 3-D viewer on the scene's launch script.

        Raises ``RuntimeError`` when Jmol/Java is unavailable (outside of
        doctests). Returns a launch message.
        """
        from sage.doctest import DOCTEST_MODE
        from sage.interfaces.jmoldata import JmolData
        jdata = JmolData()
        if ((not jdata.is_jmol_available()) and (not DOCTEST_MODE)):
            raise RuntimeError('jmol cannot run, no suitable java version found')
        launch_script = output_jmol.launch_script_filename()
        jmol_cmd = 'jmol'
        if (not DOCTEST_MODE):
            os.system('{0} {1} 2>/dev/null 1>/dev/null &'.format(jmol_cmd, launch_script))
        return 'Launched jmol viewer for {0}'.format(plain_text)
    def is_in_terminal(self):
        """The command-line backend always runs in a terminal."""
        return True
    def threejs_offline_scripts(self):
        """Return the <script> tag embedding the locally installed three.js."""
        from sage.env import THREEJS_DIR
        from sage.repl.rich_output.display_manager import _required_threejs_version
        script = os.path.join(THREEJS_DIR, '{}/three.min.js'.format(_required_threejs_version()))
        return '\n<script src="{0}"></script>'.format(script)
def register_Ns3Object_methods(root_module, cls):
    """Register the ns3::Object API on its PyBindGen wrapper class.

    Auto-generated binding registration: declares the public constructors
    and methods, the protected copy constructor, and the protected virtual
    DoDispose/DoInitialize/NotifyNewAggregate hooks.
    """
    cls.add_constructor([])
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    # Template instantiation of GetObject<ns3::NetDevice>.
    cls.add_method('GetObject', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True, template_parameters=[u'ns3::NetDevice'])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Initialize', 'void', [])
    cls.add_method('IsInitialized', 'bool', [], is_const=True)
    # Protected copy constructor and virtual lifecycle hooks.
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
class FreeGradedModuleMorphism(FPModuleMorphism):
    """A homogeneous morphism between finitely generated free graded modules.

    The degree of the morphism is derived from the degree shifts of the
    generator images; the zero morphism has no well-defined degree.
    """
    def __init__(self, parent, values):
        """Initialize from the homset ``parent`` and generator images ``values``.

        Raises ``TypeError`` if ``parent`` is not a free-module homset and
        ``ValueError`` if the nonzero generator images do not all shift
        degrees by the same amount.
        """
        from .free_homspace import FreeGradedModuleHomspace
        if (not isinstance(parent, FreeGradedModuleHomspace)):
            raise TypeError(('the parent (%s) must be a f.p. free module homset' % parent))
        # A free-module morphism is its own lift.
        self._free_morphism = self
        FPModuleMorphism.__init__(self, parent, values, check=False)
        if all((v.is_zero() for v in self._values)):
            # The zero morphism: no well-defined degree.
            degree = None
        else:
            degrees = []
            gen_deg = parent.domain().generator_degrees()
            for (i, val) in enumerate(self._values):
                if val:
                    x = val.degree()
                    xx = gen_deg[i]
                    # Degree shift contributed by this generator's image.
                    degrees.append((x - xx))
            degree = min(degrees)
            if (degree != max(degrees)):
                # All nonzero images must shift degrees by the same amount.
                raise ValueError('ill-defined homomorphism: degrees do not match')
        self._degree = degree
    def degree(self):
        """Return the morphism's degree; raise ``ValueError`` for the zero morphism."""
        if (self._degree is None):
            raise ValueError('the zero morphism does not have a well-defined degree')
        return self._degree
    def __call__(self, x):
        """Evaluate the morphism on ``x`` by sending generators to their images."""
        if (x.parent() != self.domain()):
            raise ValueError('cannot evaluate morphism on element not in the domain')
        value = self.codomain().linear_combination(zip(self._values, x.dense_coefficient_list()))
        return value
    def fp_module(self):
        """Return this morphism viewed as one between finitely presented modules.

        Raises ``ValueError`` when the codomain has relations (i.e. is not free).
        """
        if self.codomain().has_relations():
            raise ValueError('this is not a morphism between free modules')
        try:
            # Prefer the base ring's own f.p. graded module class when present.
            FPModule = self.base_ring()._fp_graded_module_class
        except AttributeError:
            from .module import FPModule
        return FPModule(self)
def upsample_bilinear(input, size=None, scale_factor=None):
    """Deprecated alias: delegates to interpolate(mode='bilinear', align_corners=True)."""
    warnings.warn('nn.functional.upsample_bilinear is deprecated. Use nn.functional.interpolate instead.')
    return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.