code stringlengths 101 5.91M |
|---|
def test_random_multi_image():
    # Smoke test: image_plot must accept a list of per-class SHAP arrays
    # together with pixel values, without opening a display window.
    shap_values = [np.random.randn(3, 20, 20) for _ in range(3)]
    pixel_values = np.random.randn(3, 20, 20)
    shap.image_plot(shap_values, pixel_values, show=False)
def set_cycles(args):
    """Configure Blender's Cycles render engine from parsed CLI options.

    Reads ``args.n_samples``, ``args.use_gpu``, ``args.gpus``, ``args.res``
    and ``args.color_depth``; mutates the active scene and world in place.
    """
    scene = bpy.context.scene
    scene.render.engine = 'CYCLES'
    cycles = scene.cycles
    cycles.use_progressive_refine = True
    cycles.samples = args.n_samples
    # Bounce budget: full diffuse, limited glossy, no volume scattering;
    # refractive caustics disabled to cut noise.
    cycles.max_bounces = 8
    cycles.caustics_reflective = True
    cycles.caustics_refractive = False
    cycles.diffuse_bounces = 8
    cycles.glossy_bounces = 4
    cycles.volume_bounces = 0
    world = bpy.data.worlds['World']
    world.cycles.sample_as_light = True
    cycles.blur_glossy = 2.0
    cycles.sample_clamp_indirect = 10.0  # clamp indirect light to reduce fireflies
    world.use_nodes = True
    if args.use_gpu:
        if (args.gpus is not None):
            select_devices('CUDA', args.gpus)
        bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
        bpy.context.scene.cycles.device = 'GPU'
        # get_devices() refreshes the device list so the CUDA setting takes effect.
        devices = bpy.context.preferences.addons['cycles'].preferences.get_devices()
    bpy.context.scene.render.use_persistent_data = True
    bpy.context.scene.world.cycles.sampling_method = 'MANUAL'
    bpy.context.scene.world.cycles.sample_map_resolution = 1024
    bpy.context.scene.view_layers['View Layer'].cycles.use_denoising = True
    # Larger tiles render faster on GPU, smaller tiles on CPU.
    scene.render.tile_x = (256 if args.use_gpu else 16)
    scene.render.tile_y = (256 if args.use_gpu else 16)
    scene.render.resolution_x = args.res
    scene.render.resolution_y = args.res
    scene.render.resolution_percentage = 100
    scene.render.use_file_extension = True
    scene.render.image_settings.file_format = 'PNG'
    scene.render.image_settings.color_depth = str(args.color_depth)
class NetworkImageNet(nn.Module):
    """DARTS-style evaluation network for ImageNet built from a fixed genotype.

    Stacks 3*N+2 cells on top of a two-stage stem: N normal cells at C
    channels, a reduction cell to 2C, N normal cells, a reduction cell to 4C,
    and N final normal cells. An auxiliary classification head can be attached
    at the second (4C) reduction cell.
    """

    def __init__(self, C, N, auxiliary, genotype, num_classes):
        super(NetworkImageNet, self).__init__()
        self._C = C
        self._layerN = N
        # Per-cell channel counts and reduction flags for the 3*N+2 cells.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        self.stem0 = nn.Sequential(nn.Conv2d(3, (C // 2), kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d((C // 2)), nn.ReLU(inplace=True), nn.Conv2d((C // 2), C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        self.stem1 = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        # reduction_prev starts True because stem1 halves the resolution again.
        (C_prev_prev, C_prev, C_curr, reduction_prev) = (C, C, C, True)
        self.cells = nn.ModuleList()
        self.auxiliary_index = None
        for (i, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            cell = InferCell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            (C_prev_prev, C_prev) = (C_prev, (cell._multiplier * C_curr))
            # Remember where the 4C reduction happens: the aux head goes there.
            if (reduction and (C_curr == (C * 4))):
                C_to_auxiliary = C_prev
                self.auxiliary_index = i
        self._NNN = len(self.cells)
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        else:
            self.auxiliary_head = None
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)
        # Sentinel: drop-path disabled until update_drop_path() is called.
        self.drop_path_prob = (- 1)

    def update_drop_path(self, drop_path_prob):
        # Scheduled by the trainer (typically annealed over epochs).
        self.drop_path_prob = drop_path_prob

    def extra_repr(self):
        # Formats directly from instance attributes via __dict__.
        return '{name}(C={_C}, N=[{_layerN}, {_NNN}], aux-index={auxiliary_index}, drop-path={drop_path_prob})'.format(name=self.__class__.__name__, **self.__dict__)

    def get_message(self):
        return self.extra_repr()

    def auxiliary_param(self):
        # Parameters of the auxiliary head only (empty list when disabled).
        if (self.auxiliary_head is None):
            return []
        else:
            return list(self.auxiliary_head.parameters())

    def forward(self, inputs):
        s0 = self.stem0(inputs)
        s1 = self.stem1(s0)
        logits_aux = None
        for (i, cell) in enumerate(self.cells):
            (s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
            # Auxiliary logits only during training, at the 4C reduction cell.
            if ((i == self.auxiliary_index) and self.auxiliary_head and self.training):
                logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), (- 1)))
        # Returns (pooled features, logits); logits becomes a 2-element list
        # when auxiliary logits were produced.
        if (logits_aux is None):
            return (out, logits)
        else:
            return (out, [logits, logits_aux])
def create_argparser():
    """Build and parse the command-line arguments for training/evaluating
    the sub-encoder model. Returns the parsed argparse.Namespace."""
    ap = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Experiment bookkeeping.
    ap.add_argument('--project_name', type=str, default='sub-encoder', help='Name of a wandb project, checkpoints are saved under this directory')
    ap.add_argument('--experiment_id', type=str, default=None, help='Experiment identifier for an experiment group')
    ap.add_argument('--output_dir', type=str, default='output/', help='Output directory to save model, arguments, and results')
    # Run modes.
    ap.add_argument('--train', action='store_true', default=False, help='Run training')
    ap.add_argument('--validate', action='store_true', default=False, help='validate during training (after epochs)')
    ap.add_argument('--evaluate', action='store_true', default=False, help='Evaluate on the test set')
    ap.add_argument('--validate_every', type=int, default=1, help='Validate every N epochs')
    ap.add_argument('--random_seed', type=int, default=42, help='Random seed for everything')
    ap.add_argument('--sanity', type=int, default=None, help='Subsamples N examples from the dataset, used for debugging')
    ap.add_argument('--gpus', type=int, default=1, help='Number of GPUs to use for training')
    ap.add_argument('--period', type=int, default=2, help='Periodicity to save checkpoints when not validating')
    # Model and optimization.
    ap.add_argument('--model_name', type=str, default='sentence-transformers/all-MiniLM-L6-v2', help='Name of the huggingface transformer model to use')
    ap.add_argument('--learning_rate', type=float, default=2e-05, help='Specifies learning rate')
    ap.add_argument('--train_batch_size', type=int, default=32, help='Per GPU batch size')
    ap.add_argument('--val_batch_size', type=int, default=32, help='Per GPU validation batch size')
    ap.add_argument('--loss_type', type=str, default='sup_con', choices=list(LOSS_CLASSES.keys()), help='Type of loss / training objective for training the model. Affects how dataloader works.')
    ap.add_argument('--load_checkpoint', default=False, action='store_true', help='If True, will load the latest checkpoint')
    ap.add_argument('--precision', default='16', type=str, help='Precision of model weights')
    ap.add_argument('--num_workers', type=int, default=20, help='Number of workers to prefetch data')
    ap.add_argument('--num_epoch', type=int, default=20, help='Number of epochs')
    ap.add_argument('--warmup_steps', type=int, default=10, help='Number of warmup steps')
    ap.add_argument('--gradient_checkpointing', default=False, action='store_true', help='If True, activates Gradient Checkpointing')
    ap.add_argument('--temperature', type=float, default=0.01, help='Temperature to use for SupCon loss')
    ap.add_argument('--save_top_k_ckpts', default=1, type=int, help='Number of checkpoints to save')
    # Data paths and model dimensions.
    ap.add_argument('--train_data_path', type=str, default='data/comp_sents_prop_train.jsonl', help='training data')
    ap.add_argument('--test_data_path', type=str, default='data/comp_sents_prop_test.jsonl', help='testing data')
    ap.add_argument('--val_data_path', type=str, default='data/comp_sents_prop_val.jsonl', help='validation data')
    ap.add_argument('--max_seq_length', default=None, type=int, help='Maximum input sequence length of inputs to the encoder model.')
    ap.add_argument('--mlp_hidden_dim', default=None, type=int, help='Dimension of mlp layer after pooling. If None, match the encoder output dim.')
    ap.add_argument('--final_output_dim', default=None, type=int, help='Dimension of mlp layer after pooling. If None, match the encoder output dim.')
    ap.add_argument('--learning_rate_decay_gamma', type=float, default=0.9, help='Gamma for exponential decay after each epoch.')
    return ap.parse_args()
def log_config_to_file(cfg, pre='cfg', logger=None):
    """Recursively pretty-print a (possibly nested) EasyDict config
    through print_log, one line per leaf value."""
    for key, val in cfg.items():
        if isinstance(val, EasyDict):
            # Announce the nested section, then descend into it.
            print_log(f'{pre}.{key} = edict()', logger=logger)
            log_config_to_file(val, pre=((pre + '.') + key), logger=logger)
        else:
            print_log(f'{pre}.{key} : {val}', logger=logger)
def to_torch_imgs(img: np.ndarray, mean: Tensor, std: Tensor) -> Tensor:
    """Convert an HWC numpy image to a normalized CHW torch tensor.

    Args:
        img: image array of shape (H, W, C).
        mean: per-channel (or scalar) mean, broadcastable against (C, H, W).
        std: per-channel (or scalar) std, broadcastable against (C, H, W).

    Returns:
        Normalized tensor of shape (C, H, W): (img.transpose - mean) / std.
    """
    t_img: Tensor = torch.from_numpy(np.transpose(img, (2, 0, 1)))
    # BUG FIX: torch.from_numpy shares memory with `img`, so the original
    # in-place `-=` / `/=` silently mutated the caller's array (and raised
    # a RuntimeError for integer-dtype inputs). Use out-of-place arithmetic
    # so the input is never modified.
    return (t_img - mean) / std
class OuterProductOperation(pm.SingleStateTransformation):
map_entry = pm.PatternNode(nodes.MapEntry)
def expressions(cls):
return [sdutil.node_path_graph(cls.map_entry)]
def can_be_applied(self, graph: dace.SDFGState, expr_index: int, sdfg: dace.SDFG, permissive: bool=False):
map_entry = self.map_entry
map_exit = graph.exit_node(map_entry)
params = [dace.symbol(p) for p in map_entry.map.params]
inputs = dict()
for (_, _, _, _, m) in graph.out_edges(map_entry):
if (not m.data):
continue
desc = sdfg.arrays[m.data]
if (desc not in inputs.keys()):
inputs[desc] = []
inputs[desc].append(m.subset)
outer_product_found = False
for (desc, accesses) in inputs.items():
if isinstance(desc, dace.data.Scalar):
continue
elif isinstance(desc, (dace.data.Array, dace.data.View)):
if (list(desc.shape) == [1]):
continue
for a in accesses:
indices = a.min_element()
unmatched_indices = set(params)
for idx in indices:
if (not isinstance(idx, sympy.Symbol)):
return False
if (idx in unmatched_indices):
unmatched_indices.remove(idx)
if (len(unmatched_indices) == 0):
return False
outer_product_found = True
else:
return False
outputs = dict()
for (_, _, _, _, m) in graph.in_edges(map_exit):
if m.wcr:
return False
desc = sdfg.arrays[m.data]
if (desc not in outputs.keys()):
outputs[desc] = []
outputs[desc].append(m.subset)
for (desc, accesses) in outputs.items():
if isinstance(desc, (dace.data.Array, dace.data.View)):
for a in accesses:
if (a.num_elements() != 1):
return False
indices = a.min_element()
unmatched_indices = set(params)
for idx in indices:
if (idx in unmatched_indices):
unmatched_indices.remove(idx)
if (len(unmatched_indices) > 0):
return False
else:
return False
return outer_product_found
def apply(self, graph: dace.SDFGState, sdfg: dace.SDFG):
pass |
def extmodtest(A: dace.float32[(W, H)], result: dace.float32[1]):
    # DaCe program: transpose A into tmp via an external module, then copy a
    # single element of the transpose into the scalar `result`.
    # NOTE(review): presumably decorated with @dace.program upstream; the
    # decorator appears stripped from this dump.
    tmp = np.ndarray([H, W], dace.float32)
    external_module.transpose(A, tmp)
    with dace.tasklet:
        # Tasklet connectors: `<<` reads tmp[1, 2] into a, `>>` writes b
        # out to result[0].
        (a << tmp[(1, 2)])
        (b >> result[0])
        b = a
def compute_model(E, name):
    """Return elliptic curve *E* transformed into the model named *name*.

    Supported names: 'minimal' (semi-global minimal model, number fields
    only), 'short_weierstrass', and 'montgomery'. Raises TypeError for
    non-curves and NotImplementedError for unknown model names.
    """
    if not isinstance(E, ell_generic.EllipticCurve_generic):
        raise TypeError('not an elliptic curve')
    if name == 'minimal':
        # Minimal models are only defined over number fields.
        from sage.rings.number_field.number_field_base import NumberField
        if not isinstance(E.base_field(), NumberField):
            raise ValueError('can only compute minimal model for curves over number fields')
        return E.global_minimal_model(semi_global=True)
    elif name == 'short_weierstrass':
        return E.short_weierstrass_model()
    elif name == 'montgomery':
        return E.montgomery_model()
    raise NotImplementedError(f'cannot compute {name} model')
def register_Ns3YansWifiPhy_methods(root_module, cls):
    """Register pybindgen bindings for ns3::YansWifiPhy.

    Generated-style binding code: the declaration order below is preserved
    deliberately, since it determines the order in the generated module.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::YansWifiChannel > const', 'channel')])
    cls.add_method('StartTx', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::WifiTxVector', 'txVector'), param('ns3::Time', 'txDuration')], is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    # Copy constructor.
    cls.add_constructor([param('ns3::YansWifiPhy const &', 'arg0')])
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def main(args):
    """Train and then evaluate a model-based RL agent on a Gym environment.

    Expects args to provide: seed, env_config_file, agent_config_file,
    agent, exploration ('optimistic' / 'thompson' / other), beta,
    train_episodes, test_episodes, render.
    """
    set_random_seed(args.seed)
    env_config = parse_config_file(args.env_config_file)
    environment = GymEnvironment(env_config['name'], ctrl_cost_weight=env_config['action_cost'], seed=args.seed)
    reward_model = environment.env.reward_model()
    if (args.exploration == 'optimistic'):
        # Optimistic exploration: hallucinated control inputs on the model.
        dynamical_model = HallucinatedModel.default(environment, beta=args.beta)
        environment.add_wrapper(HallucinationWrapper)
    else:
        dynamical_model = TransformedModel.default(environment)
    kwargs = parse_config_file(args.agent_config_file)
    # Resolve agent class by name: args.agent='MPC' -> rllib.agent.MPCAgent.
    agent = getattr(importlib.import_module('rllib.agent'), f'{args.agent}Agent').default(environment=environment, dynamical_model=dynamical_model, reward_model=reward_model, thompson_sampling=(args.exploration == 'thompson'), **kwargs)
    train_agent(agent=agent, environment=environment, max_steps=env_config['max_steps'], num_episodes=args.train_episodes, render=args.render, print_frequency=1)
    evaluate_agent(agent=agent, environment=environment, max_steps=env_config['max_steps'], num_episodes=args.test_episodes)
class CosPlace(nn.Module):
    """CosPlace aggregation head: L2-normalize, GeM-pool, flatten, project
    with a linear layer, then L2-normalize the resulting descriptor."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.gem = GeM()
        self.fc = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        # normalize -> GeM pool -> flatten -> linear -> normalize
        pooled = self.gem(F.normalize(x, p=2, dim=1)).flatten(1)
        return F.normalize(self.fc(pooled), p=2, dim=1)
class SimpleTrainer(TrainerBase):
    """Minimal training loop: pull one batch, forward, sum the loss dict,
    backward, write metrics, step the optimizer.

    Assumes `model(data)` returns a dict of scalar loss tensors.
    """

    def __init__(self, model, data_loader, optimizer):
        super().__init__()
        # Put the model in train mode up front; run_step asserts it stays there.
        model.train()
        self.model = model
        self.data_loader = data_loader
        self._data_loader_iter = iter(data_loader)
        self.optimizer = optimizer

    def run_step(self):
        """Execute one iteration: load data, compute losses, backprop, step."""
        assert self.model.training, '[SimpleTrainer] model was changed to eval mode!'
        start = time.perf_counter()
        data = next(self._data_loader_iter)
        data_time = (time.perf_counter() - start)
        loss_dict = self.model(data)
        losses = sum(loss_dict.values())
        self.optimizer.zero_grad()
        losses.backward()
        # Metrics are written on a side CUDA stream so that the potentially
        # synchronizing .item() calls overlap with the backward pass.
        with (torch.cuda.stream(torch.cuda.Stream()) if (losses.device.type == 'cuda') else _nullcontext()):
            metrics_dict = loss_dict
            metrics_dict['data_time'] = data_time
            self._write_metrics(metrics_dict)
            self._detect_anomaly(losses, loss_dict)
        # NOTE(review): the string below is a no-op statement acting as a
        # comment in the original source; preserved verbatim.
        '\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method. But it is\n suboptimal as explained in Sec 3.2.4\n '
        self.optimizer.step()

    def _detect_anomaly(self, losses, loss_dict):
        # Fail fast on NaN/Inf losses with the full loss breakdown.
        if (not torch.isfinite(losses).all()):
            raise FloatingPointError('Loss became infinite or NaN at iteration={}!\nloss_dict = {}'.format(self.iter, loss_dict))

    def _write_metrics(self, metrics_dict: dict):
        # Convert tensors to python floats, gather across workers, and log
        # the per-key means plus the total loss on the main process.
        metrics_dict = {k: (v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)) for (k, v) in metrics_dict.items()}
        all_metrics_dict = comm.gather(metrics_dict)
        if comm.is_main_process():
            if ('data_time' in all_metrics_dict[0]):
                # data_time varies across workers; report the slowest one.
                data_time = np.max([x.pop('data_time') for x in all_metrics_dict])
                self.storage.put_scalar('data_time', data_time)
            metrics_dict = {k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()}
            total_losses_reduced = sum((loss for loss in metrics_dict.values()))
            self.storage.put_scalar('total_loss', total_losses_reduced)
            if (len(metrics_dict) > 1):
                self.storage.put_scalars(**metrics_dict)
class TemplatePlaceholderType(CType):
    """An unresolved C++ template parameter (e.g. the T in vector<T>)
    used as a stand-in type during template analysis."""

    def __init__(self, name, optional=False):
        self.name = name
        self.optional = optional

    def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
        # A placeholder declares as "<name> <entity>"; bare name otherwise.
        return ((self.name + ' ') + entity_code) if entity_code else self.name

    def specialize(self, values):
        # Substitute the deduced concrete type when available.
        return values[self] if (self in values) else self

    def deduce_template_params(self, actual):
        # A bare placeholder trivially deduces to the actual type.
        return {self: actual}

    def same_as_resolved_type(self, other_type):
        if not isinstance(other_type, TemplatePlaceholderType):
            return 0
        return self.name == other_type.name

    def __hash__(self):
        return hash(self.name)

    def __cmp__(self, other):
        # Python 2 ordering: by name among placeholders, else by type.
        if not isinstance(other, TemplatePlaceholderType):
            return cmp(type(self), type(other))
        return cmp(self.name, other.name)

    def __eq__(self, other):
        return isinstance(other, TemplatePlaceholderType) and (self.name == other.name)
def cross_attn_mask_generation(from_mask, to_mask, mutual=True, head_num=None, name=None):
    """Build a boolean cross-attention mask of shape [bs, (head,) slf, slt].

    mutual=True: position (i, j) is valid iff both from_mask[:, i] and
    to_mask[:, j] are set. mutual=False: only to_mask is broadcast over the
    "from" axis. If head_num is an int, the mask is tiled with a head
    dimension inserted at axis 1.
    """
    with tf.name_scope((name or 'attention_mask_generation')):
        # NOTE(review): bs and slt are computed but never used; slf is only
        # needed in the non-mutual branch. get_shape_list may have side
        # effects (rank checks) — confirm before removing these calls.
        (bs, slf) = get_shape_list(from_mask, 2)[:2]
        slt = get_shape_list(to_mask, 2)[1]
        if mutual:
            # Outer product of the masks: [bs, slf, 1] * [bs, 1, slt].
            res_mask = tf.cast((tf.expand_dims(tf.cast(from_mask, tf.int32), 2) * tf.expand_dims(tf.cast(to_mask, tf.int32), 1)), tf.bool)
        else:
            res_mask = tf.tile(tf.expand_dims(to_mask, 1), [1, slf, 1])
        if isinstance(head_num, int):
            # Insert and tile a head dimension at axis 1.
            res_mask = tf.expand_dims(res_mask, 1)
            tile_multiples = ([1] * len(get_shape_list(res_mask)))
            tile_multiples[1] = head_num
            res_mask = tf.tile(res_mask, tile_multiples)
        return res_mask
def get_data_collator(tokenizer, return_tensors='pt', do_padding=False, max_length=1024):
    """Return a collator that builds LM training batches from feature dicts.

    With do_padding=False it first tries a fast path (stacking equal-length
    features with torch.tensor); on any failure it falls back to
    tokenizer.pad, keeping domain_id/domain_ids fields out of the pad call
    and re-attaching them afterwards. Labels default to a copy of input_ids
    with pad positions set to -100, and domain_id is renamed to domain_ids.
    """
    def data_collator(features):
        if (not do_padding):
            try:
                # Fast path: assumes every feature has the same length.
                batch = {k: torch.tensor([f[k] for f in features]) for k in features[0].keys()}
            except Exception:
                # Ragged features: pad, excluding non-token metadata fields.
                batch = tokenizer.pad([{k: v for (k, v) in f.items() if (k not in {'domain_id', 'domain_ids'})} for f in features], return_tensors=return_tensors, pad_to_multiple_of=max_length)
                if ('domain_id' in features[0]):
                    batch['domain_id'] = torch.tensor([f['domain_id'] for f in features])
                elif ('domain_ids' in features[0]):
                    batch['domain_ids'] = torch.tensor([f['domain_ids'] for f in features])
        else:
            batch = tokenizer.pad(features, return_tensors=return_tensors, pad_to_multiple_of=max_length)
        batch['input_ids'] = batch['input_ids'].long()
        if ('attention_mask' not in batch):
            batch['attention_mask'] = torch.ones_like(batch['input_ids']).long()
        else:
            batch['attention_mask'] = batch['attention_mask'].long()
        # special_tokens_mask is only needed at tokenization time.
        batch.pop('special_tokens_mask', None)
        if ('labels' not in batch):
            labels = batch['input_ids'].clone()
            batch['labels'] = labels
            # Ignore pad tokens in the LM loss.
            if (tokenizer.pad_token_id is not None):
                batch['labels'][(batch['labels'] == tokenizer.pad_token_id)] = (- 100)
        # Normalize the field name: downstream expects 'domain_ids'.
        if (('domain_ids' not in batch) and ('domain_id' in batch)):
            batch['domain_ids'] = batch['domain_id']
            batch.pop('domain_id')
        return batch
    return data_collator
def make_objective(eps: goos.Shape, stage: str, params: Options):
    """Build the FDFD simulation and scalar objective for one optimization
    stage of a grating-coupler design.

    The objective is (1 - |overlap|)^2, driving power coupled from the
    angled Gaussian source into the fundamental waveguide mode toward 1.
    Returns (objective node, simulation node).
    """
    solver = 'local_direct'
    # Simulation region: waveguide on the left, coupler plus buffer on the
    # right, padded by 10 grid cells of PML in x and z (periodic in y).
    sim_left_x = (- params.wg_len)
    sim_right_x = (params.coupler_len + params.buffer_len)
    pml_thick = (params.dx * 10)
    sim_z_center = ((((params.wg_thickness / 2) + params.beam_dist) - params.box_size) / 2)
    sim_z_extent = ((((params.wg_thickness + params.beam_dist) + params.box_size) + 2000) + (pml_thick * 2))
    # Gaussian beam source above the coupler; overlap monitor in the waveguide.
    sim = maxwell.fdfd_simulation(name='sim_{}'.format(stage), wavelength=params.wlen, eps=eps, solver=solver, sources=[maxwell.GaussianSource(w0=(params.beam_width / 2), center=[(params.coupler_len / 2), 0, ((params.wg_thickness / 2) + params.beam_dist)], extents=[params.beam_extents, 0, 0], normal=[0, 0, (- 1)], power=1, theta=np.deg2rad(params.source_angle_deg), psi=(np.pi / 2), polarization_angle=0, normalize_by_sim=True)], simulation_space=maxwell.SimulationSpace(mesh=maxwell.UniformMesh(dx=params.dx), sim_region=goos.Box3d(center=[((sim_left_x + sim_right_x) / 2), 0, sim_z_center], extents=[(sim_right_x - sim_left_x), 0, sim_z_extent]), pml_thickness=[pml_thick, pml_thick, 0, 0, pml_thick, pml_thick]), background=goos.material.Material(index=params.eps_bg), outputs=[maxwell.Epsilon(name='eps'), maxwell.ElectricField(name='field'), maxwell.WaveguideModeOverlap(name='overlap', center=[((- params.wg_len) / 2), 0, 0], extents=[0, 1000, 2000], normal=[(- 1), 0, 0], mode_num=0, power=1)])
    obj = ((1 - goos.abs(sim['overlap'])) ** 2)
    obj = goos.rename(obj, name='obj_{}'.format(stage))
    return (obj, sim)
class IndexedArray(IndexedMeta[Content], Content):
    """Integer indirection over `content`: element i is content[index[i]].

    NOTE(review): several defs below (`index`, `length`, `simplified`) are
    used elsewhere in this class as attributes/classmethods (e.g.
    `self.length`, `IndexedArray.simplified(...)`); their decorators appear
    to have been stripped from this dump — confirm against upstream.
    """

    def __init__(self, index, content, *, parameters=None):
        # index must be a 32/64-bit Index; content must not itself be
        # union (unless categorical), indexed, or option type — use
        # `simplified` to flatten those cases instead.
        if (not (isinstance(index, Index) and (index.dtype in (np.dtype(np.int32), np.dtype(np.uint32), np.dtype(np.int64))))):
            raise TypeError("{} 'index' must be an Index with dtype in (int32, uint32, int64), not {}".format(type(self).__name__, repr(index)))
        if (not isinstance(content, Content)):
            raise TypeError("{} 'content' must be a Content subtype, not {}".format(type(self).__name__, repr(content)))
        is_cat = ((parameters is not None) and (parameters.get('__array__') == 'categorical'))
        if ((content.is_union and (not is_cat)) or content.is_indexed or content.is_option):
            raise TypeError("{0} cannot contain a union-type (unless categorical), option-type, or indexed 'content' ({1}); try {0}.simplified instead".format(type(self).__name__, type(content).__name__))
        assert (index.nplike is content.backend.index_nplike)
        self._index = index
        self._content = content
        self._init(parameters, content.backend)

    def index(self):
        # Accessor for the indirection index.
        return self._index
    form_cls: Final = IndexedForm

    def copy(self, index=UNSET, content=UNSET, *, parameters=UNSET):
        # Shallow copy with optional field replacement (UNSET keeps current).
        return IndexedArray((self._index if (index is UNSET) else index), (self._content if (content is UNSET) else content), parameters=(self._parameters if (parameters is UNSET) else parameters))

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo):
        return self.copy(index=copy.deepcopy(self._index, memo), content=copy.deepcopy(self._content, memo), parameters=copy.deepcopy(self._parameters, memo))
    def simplified(cls, index, content, *, parameters=None):
        """Construct an IndexedArray while flattening nested indirection:
        indexing a union carries the index into it; indexing indexed or
        option content composes the two index layers with a kernel."""
        is_cat = ((parameters is not None) and (parameters.get('__array__') == 'categorical'))
        if (content.is_union and (not is_cat)):
            return content._carry(index, allow_lazy=False).copy(parameters=parameters_union(content._parameters, parameters))
        elif (content.is_indexed or content.is_option):
            backend = content.backend
            if content.is_indexed:
                inner = content.index
            else:
                inner = content.to_IndexedOptionArray64().index
            # Compose outer and inner indices into one 64-bit index.
            result = ak.index.Index64.empty(index.length, nplike=backend.index_nplike)
            backend.maybe_kernel_error(backend[('awkward_IndexedArray_simplify', result.dtype.type, index.dtype.type, inner.dtype.type)](result.data, index.data, index.length, inner.data, inner.length))
            if isinstance(content, ak.contents.IndexedArray):
                return ak.contents.IndexedArray(result, content.content, parameters=parameters_union(content._parameters, parameters))
            else:
                # Option-type content: keep missing values representable.
                return ak.contents.IndexedOptionArray(result, content.content, parameters=parameters_union(content._parameters, parameters))
        else:
            return cls(index, content, parameters=parameters)
    def _form_with_key(self, getkey: Callable[([Content], (str | None))]) -> IndexedForm:
        # Build the Form tree, assigning buffer keys via getkey.
        form_key = getkey(self)
        return self.form_cls(self._index.form, self._content._form_with_key(getkey), parameters=self._parameters, form_key=form_key)

    def _to_buffers(self, form: Form, getkey: Callable[([Content, Form, str], str)], container: MutableMapping[(str, ArrayLike)], backend: Backend, byteorder: str):
        # Serialize the index buffer (in the requested byte order), then
        # recurse into the content.
        assert isinstance(form, self.form_cls)
        key = getkey(self, form, 'index')
        container[key] = ak._util.native_to_byteorder(self._index.raw(backend.index_nplike), byteorder)
        self._content._to_buffers(form.content, getkey, container, backend, byteorder)

    def _to_typetracer(self, forget_length: bool) -> Self:
        # Shape-only stand-in of this array for tracing.
        index = self._index.to_nplike(TypeTracer.instance())
        return IndexedArray((index.forget_length() if forget_length else index), self._content._to_typetracer(forget_length), parameters=self._parameters)

    def _touch_data(self, recursive: bool):
        self._index._touch_data()
        if recursive:
            self._content._touch_data(recursive)

    def _touch_shape(self, recursive: bool):
        self._index._touch_shape()
        if recursive:
            self._content._touch_shape(recursive)

    def length(self) -> ShapeItem:
        # The array's length is the index length, not the content length.
        return self._index.length
    def __repr__(self):
        return self._repr('', '', '')

    def _repr(self, indent, pre, post):
        # XML-ish debug rendering shared by all Content subclasses.
        out = [indent, pre, '<IndexedArray len=']
        out.append(repr(str(self.length)))
        out.append('>')
        out.extend(self._repr_extra((indent + ' ')))
        out.append('\n')
        out.append(self._index._repr((indent + ' '), '<index>', '</index>\n'))
        out.append(self._content._repr((indent + ' '), '<content>', '</content>\n'))
        out.append((indent + '</IndexedArray>'))
        out.append(post)
        return ''.join(out)

    def to_IndexedOptionArray64(self) -> IndexedOptionArray:
        # Reinterpret as option-type; all entries of a plain IndexedArray
        # are valid, so the same index is reused unchanged.
        return ak.contents.IndexedOptionArray(self._index, self._content, parameters=self._parameters)

    def mask_as_bool(self, valid_when: bool=True) -> ArrayLike:
        # Negative index entries denote missing values in option form.
        if valid_when:
            return (self._index.data >= 0)
        else:
            return (self._index.data < 0)
    def _getitem_nothing(self):
        # Canonical empty result for this layout.
        return self._content._getitem_range(0, 0)

    def _getitem_at(self, where: IndexType):
        if (not self._backend.nplike.known_data):
            # Typetracer pass: record the access and defer to content.
            self._touch_data(recursive=False)
            return self._content._getitem_at(where)
        if (where < 0):
            where += self.length
        if (self._backend.nplike.known_data and (not (0 <= where < self.length))):
            raise ak._errors.index_error(self, where)
        # Resolve the indirection: content[index[where]].
        return self._content._getitem_at(self._index[where])

    def _getitem_range(self, start: IndexType, stop: IndexType) -> Content:
        if (not self._backend.nplike.known_data):
            self._touch_shape(recursive=False)
            return self
        # Slicing only slices the index; the content buffer is shared.
        return IndexedArray(self._index[start:stop], self._content, parameters=self._parameters)

    def _getitem_field(self, where: (str | SupportsIndex), only_fields: tuple[(str, ...)]=()) -> Content:
        return IndexedArray.simplified(self._index, self._content._getitem_field(where, only_fields), parameters=None)

    def _getitem_fields(self, where: list[(str | SupportsIndex)], only_fields: tuple[(str, ...)]=()) -> Content:
        return IndexedArray.simplified(self._index, self._content._getitem_fields(where, only_fields), parameters=None)
    def _carry(self, carry: Index, allow_lazy: bool) -> IndexedArray:
        # Carrying an IndexedArray is cheap: gather the index, keep content.
        assert isinstance(carry, ak.index.Index)
        try:
            nextindex = self._index[carry.data]
        except IndexError as err:
            raise ak._errors.index_error(self, carry.data, str(err)) from err
        return IndexedArray(nextindex, self._content, parameters=self._parameters)

    def _getitem_next_jagged_generic(self, slicestarts, slicestops, slicecontent, tail):
        if (self._backend.nplike.known_data and (slicestarts.length != self.length)):
            raise ak._errors.index_error(self, ak.contents.ListArray(slicestarts, slicestops, slicecontent, parameters=None), 'cannot fit jagged slice with length {} into {} of size {}'.format(slicestarts.length, type(self).__name__, self.length))
        # Project through the index, then apply the jagged slice to content.
        nextcarry = ak.index.Index64.empty(self.length, self._backend.index_nplike)
        assert ((nextcarry.nplike is self._backend.index_nplike) and (self._index.nplike is self._backend.index_nplike))
        self._maybe_index_error(self._backend[('awkward_IndexedArray_getitem_nextcarry', nextcarry.dtype.type, self._index.dtype.type)](nextcarry.data, self._index.data, self._index.length, self._content.length), slicer=ak.contents.ListArray(slicestarts, slicestops, slicecontent))
        next = self._content._carry(nextcarry, False)
        return next._getitem_next_jagged(slicestarts, slicestops, slicecontent, tail)

    def _getitem_next_jagged(self, slicestarts: Index, slicestops: Index, slicecontent: Content, tail) -> Content:
        return self._getitem_next_jagged_generic(slicestarts, slicestops, slicecontent, tail)
    def _getitem_next(self, head: (SliceItem | tuple), tail: tuple[(SliceItem, ...)], advanced: (Index | None)) -> Content:
        """Dispatch one slice item: resolve the indirection into the content,
        then let the projected content handle the actual item."""
        if (head is NO_HEAD):
            return self
        elif (is_integer_like(head) or isinstance(head, (slice, ak.index.Index64, ak.contents.ListOffsetArray))):
            (nexthead, nexttail) = ak._slicing.head_tail(tail)
            nextcarry = ak.index.Index64.empty(self._index.length, self._backend.index_nplike)
            assert ((nextcarry.nplike is self._backend.index_nplike) and (self._index.nplike is self._backend.index_nplike))
            self._maybe_index_error(self._backend[('awkward_IndexedArray_getitem_nextcarry', nextcarry.dtype.type, self._index.dtype.type)](nextcarry.data, self._index.data, self._index.length, self._content.length), slicer=head)
            next = self._content._carry(nextcarry, False)
            # NOTE(review): nexthead/nexttail are computed but unused; the
            # original head/tail are forwarded — confirm this is intended.
            return next._getitem_next(head, tail, advanced)
        elif isinstance(head, str):
            return self._getitem_next_field(head, tail, advanced)
        elif isinstance(head, list):
            return self._getitem_next_fields(head, tail, advanced)
        elif (head is np.newaxis):
            return self._getitem_next_newaxis(tail, advanced)
        elif (head is Ellipsis):
            return self._getitem_next_ellipsis(tail, advanced)
        elif isinstance(head, ak.contents.IndexedOptionArray):
            return self._getitem_next_missing(head, tail, advanced)
        else:
            raise AssertionError(repr(head))
def project(self, mask=None):
if (mask is not None):
if (self._backend.nplike.known_data and (self._index.length != mask.length)):
raise ValueError('mask length ({}) is not equal to {} length ({})'.format(mask.length(), type(self).__name__, self._index.length))
nextindex = ak.index.Index64.empty(self._index.length, self._backend.index_nplike)
assert ((nextindex.nplike is self._backend.index_nplike) and (mask.nplike is self._backend.index_nplike) and (self._index.nplike is self._backend.index_nplike))
self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_overlay_mask', nextindex.dtype.type, mask.dtype.type, self._index.dtype.type)](nextindex.data, mask.data, self._index.data, self._index.length))
next = ak.contents.IndexedOptionArray(nextindex, self._content, parameters=self._parameters)
return next.project()
else:
nextcarry = ak.index.Index64.empty(self.length, self._backend.index_nplike)
assert ((nextcarry.nplike is self._backend.index_nplike) and (self._index.nplike is self._backend.index_nplike))
self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_getitem_nextcarry', nextcarry.dtype.type, self._index.dtype.type)](nextcarry.data, self._index.data, self._index.length, self._content.length))
next = self._content._carry(nextcarry, False)
return next.copy(parameters=parameters_union(next._parameters, self._parameters, exclude=(('__array__', 'categorical'),)))
    def _offsets_and_flattened(self, axis: int, depth: int) -> tuple[(Index, Content)]:
        posaxis = maybe_posaxis(self, axis, depth)
        if ((posaxis is not None) and ((posaxis + 1) == depth)):
            raise AxisError('axis=0 not allowed for flatten')
        else:
            # Remove the indirection first, then flatten the projection.
            return self.project()._offsets_and_flattened(axis, depth)

    def _mergeable_next(self, other: Content, mergebool: bool) -> bool:
        # Unions and identity-like arrays always merge; otherwise compare
        # contents, looking through other option/indexed layers.
        if (other.is_identity_like or other.is_union):
            return True
        elif (other.is_option or other.is_indexed):
            return self._content._mergeable_next(other.content, mergebool)
        else:
            return self._content._mergeable_next(other, mergebool)
    def _merging_strategy(self, others):
        """Split [self] + others into (head, tail): head is merged directly;
        tail begins at the first UnionArray, which needs special handling."""
        if (len(others) == 0):
            raise ValueError("to merge this array with 'others', at least one other must be provided")
        head = [self]
        tail = []
        it_others = iter(others)
        for other in it_others:
            if isinstance(other, ak.contents.UnionArray):
                # Everything from the first union onward goes into tail.
                tail.append(other)
                tail.extend(it_others)
                break
            else:
                head.append(other)
        # Mixing typetracer and concrete backends is not supported.
        if (any((x.backend.nplike.known_data for x in (head + tail))) and (not all((x.backend.nplike.known_data for x in (head + tail))))):
            raise RuntimeError
        return (head, tail)
    def _reverse_merge(self, other):
        """Merge with `other` prepended (other's elements come first)."""
        if isinstance(other, ak.contents.EmptyArray):
            return self
        if (other.is_indexed and (other.parameter('__array__') == self.parameter('__array__') == 'categorical')):
            raise NotImplementedError('merging categorical arrays is currently not implemented. Use `ak.enforce_type` to drop the categorical type and use general merging.')
        theirlength = other.length
        mylength = self.length
        index = ak.index.Index64.empty((theirlength + mylength), self._backend.index_nplike)
        # Combined content is other's elements followed by ours.
        content = other._mergemany([self._content])
        assert (index.nplike is self._backend.index_nplike)
        # First part of the index: identity 0..theirlength-1 into other.
        self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_fill_count', index.dtype.type)](index.data, 0, theirlength, 0))
        assert (index.nplike is self._backend.index_nplike)
        # Second part: our indices shifted past other's content.
        self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_fill', index.dtype.type, self.index.dtype.type)](index.data, theirlength, self.index.data, mylength, theirlength))
        if (other.is_option or other.is_indexed):
            parameters = parameters_union(self._parameters, other._parameters)
        else:
            parameters = self._parameters
        return ak.contents.IndexedArray.simplified(index, content, parameters=parameters)
def _mergemany(self, others: Sequence[Content]) -> Content:
    """Concatenate this array with ``others``.

    Flattens any indexed/option layers in the non-union ``head`` into a
    single combined index over the concatenated contents, then hands the
    result to the first union-typed ``tail`` element (if any) via
    ``_reverse_merge``.
    """
    if (len(others) == 0):
        return self
    (head, tail) = self._merging_strategy(others)
    total_length = 0
    for array in head:
        total_length += array.length
    contents = []
    contentlength_so_far = 0
    length_so_far = 0
    nextindex = ak.index.Index64.empty(total_length, nplike=self._backend.index_nplike)
    parameters = self._parameters
    for array in head:
        if isinstance(array, ak.contents.EmptyArray):
            # Contributes nothing to the merge.
            continue
        # Normalize masked layouts to an indexed-option form first.
        if isinstance(array, (ak.contents.ByteMaskedArray, ak.contents.BitMaskedArray, ak.contents.UnmaskedArray)):
            array = array.to_IndexedOptionArray64()
        if isinstance(array, (ak.contents.IndexedOptionArray, ak.contents.IndexedArray)):
            # Parameters survive only if shared by every indexed piece.
            parameters = parameters_intersect(parameters, array._parameters)
            contents.append(array.content)
            array_index = array.index
            assert ((nextindex.nplike is self._backend.index_nplike) and (array_index.nplike is self._backend.index_nplike))
            # Copy this array's index, offset into the concatenated content.
            self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_fill', nextindex.dtype.type, array_index.dtype.type)](nextindex.data, length_so_far, array_index.data, array.length, contentlength_so_far))
            contentlength_so_far += array.content.length
            length_so_far += array.length
        else:
            contents.append(array)
            assert (nextindex.nplike is self._backend.index_nplike)
            # Plain content: the index just counts straight through it.
            self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_fill_count', nextindex.dtype.type)](nextindex.data, length_so_far, array.length, contentlength_so_far))
            contentlength_so_far += array.length
            length_so_far += array.length
    if ((parameters is not None) and (parameters.get('__array__') == 'categorical')):
        # Categorical-ness cannot survive merging distinct dictionaries.
        parameters = {**parameters}
        del parameters['__array__']
    tail_contents = contents[1:]
    nextcontent = contents[0]._mergemany(tail_contents)
    if ((parameters is not None) and (parameters.get('__array__') == 'categorical')):
        # NOTE(review): appears unreachable — '__array__' was just deleted
        # above under the same condition; confirm against upstream.
        raise NotImplementedError('merging categorical arrays is currently not implemented. Use `ak.enforce_type` to drop the categorical type and use general merging.')
    if any((x.is_option for x in head)):
        next = ak.contents.IndexedOptionArray(nextindex, nextcontent, parameters=parameters)
    else:
        next = ak.contents.IndexedArray(nextindex, nextcontent, parameters=parameters)
    if (len(tail) == 0):
        return next
    # Union-typed remainders merge themselves in front of our result.
    reversed = tail[0]._reverse_merge(next)
    if (len(tail) == 1):
        return reversed
    else:
        return reversed._mergemany(tail[1:])
def _fill_none(self, value: Content) -> Content:
    """Replace missing values in the content with the length-1 ``value``."""
    if value.backend.nplike.known_data and value.length != 1:
        raise ValueError(f'fill_none value length ({value.length}) is not equal to 1')
    filled_content = self._content._fill_none(value)
    return IndexedArray(self._index, filled_content, parameters=self._parameters)
def _local_index(self, axis, depth):
    """Local index along ``axis``; deeper axes go through a projection."""
    posaxis = maybe_posaxis(self, axis, depth)
    if posaxis is not None and posaxis + 1 == depth:
        return self._local_index_axis0()
    return self.project()._local_index(axis, depth)
def _unique_index(self, index, sorted=True):
    """Return the distinct entries of this array's index as an Index64.

    If ``sorted`` is False the index buffer is sorted first (in place, via
    the sort kernel) before the unique kernel runs; otherwise a copying
    unique kernel is used directly.

    NOTE(review): the ``index`` parameter is never read — the kernels
    operate on ``self._index``; confirm this matches the callers' intent.
    """
    next = ak.index.Index64.zeros(self.length, nplike=self._backend.index_nplike)
    length = ak.index.Index64.zeros(1, nplike=self._backend.index_nplike)
    if (not sorted):
        next = self._index
        # A single "list" covering the whole buffer, for the sort kernel.
        offsets = ak.index.Index64.empty(2, self._backend.index_nplike)
        offsets[0] = 0
        offsets[1] = next.length
        assert ((next.nplike is self._backend.index_nplike) and (offsets.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_sort', next.dtype.type, next.dtype.type, offsets.dtype.type)](next.data, next.data, offsets[1], offsets.data, 2, offsets[1], True, False))
        assert ((next.nplike is self._backend.index_nplike) and (length.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_unique', next.dtype.type, length.dtype.type)](next.data, self._index.length, length.data))
    else:
        assert ((self._index.nplike is self._backend.index_nplike) and (next.nplike is self._backend.index_nplike) and (length.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_unique_copy', self._index.dtype.type, next.dtype.type, length.dtype.type)](self._index.data, next.data, self._index.length, length.data))
    # The kernel reports how many unique entries were written into `next`.
    return next[0:length[0]]
def _numbers_to_type(self, name, including_unknown):
    """Cast numeric content to type ``name``; the index is untouched."""
    converted = self._content._numbers_to_type(name, including_unknown)
    return ak.contents.IndexedArray(self._index, converted, parameters=self._parameters)
def _is_unique(self, negaxis, starts, parents, outlength):
    """True if the values reachable through the index are all distinct."""
    if self._index.length == 0:
        return True
    deduplicated = self._unique_index(self._index)
    # Repeated index entries alias the same content element.
    if len(deduplicated) != len(self._index):
        return False
    carried = self._content._carry(deduplicated, False)
    return carried._is_unique(negaxis, starts, parents, outlength)
def _unique(self, negaxis, starts, parents, outlength):
    """Reduction-style unique along ``negaxis``.

    Resolves the index into a carry of the content, recurses, and then
    re-wraps the result depending on whether the reduction happens at a
    branching depth, exactly at this depth, or deeper inside lists.
    """
    if (self._index.length == 0):
        return self
    (branch, depth) = self.branch_depth
    index_length = self._index.length
    parents_length = parents.length
    next_length = index_length
    nextcarry = ak.index.Index64.empty(index_length, self._backend.index_nplike)
    nextparents = ak.index.Index64.empty(index_length, self._backend.index_nplike)
    outindex = ak.index.Index64.empty(index_length, self._backend.index_nplike)
    assert ((nextcarry.nplike is self._backend.index_nplike) and (nextparents.nplike is self._backend.index_nplike) and (outindex.nplike is self._backend.index_nplike) and (self._index.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
    # Compute the carry/parents/outindex triple for reducing through an index.
    self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_reduce_next_64', nextcarry.dtype.type, nextparents.dtype.type, outindex.dtype.type, self._index.dtype.type, parents.dtype.type)](nextcarry.data, nextparents.data, outindex.data, self._index.data, parents.data, index_length))
    next = self._content._carry(nextcarry, False)
    unique = next._unique(negaxis, starts, nextparents, outlength)
    if (branch or ((negaxis is not None) and (negaxis != depth))):
        # Reduction happens deeper: re-wrap with a locally prepared out-index.
        nextoutindex = ak.index.Index64.empty(parents_length, self._backend.index_nplike)
        assert ((nextoutindex.nplike is self._backend.index_nplike) and (starts.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike) and (nextparents.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_local_preparenext', nextoutindex.dtype.type, starts.dtype.type, parents.dtype.type, nextparents.dtype.type)](nextoutindex.data, starts.data, parents.data, parents_length, nextparents.data, next_length))
        return ak.contents.IndexedOptionArray.simplified(nextoutindex, unique, parameters=self._parameters)
    if ((not branch) and (negaxis == depth)):
        # Reduction happened exactly here; the recursion's result is final.
        return unique
    else:
        if isinstance(unique, ak.contents.RegularArray):
            unique = unique.to_ListOffsetArray64(True)
        if isinstance(unique, ak.contents.ListOffsetArray):
            if (starts.nplike.known_data and (starts.length > 0) and (starts[0] != 0)):
                raise AssertionError(f'reduce_next with unbranching depth > negaxis expects a ListOffsetArray64 whose offsets start at zero ({starts[0]})')
            # Rebuild list offsets to match the (possibly shorter) output.
            outoffsets = ak.index.Index64.empty((starts.length + 1), self._backend.index_nplike)
            assert ((outoffsets.nplike is self._backend.index_nplike) and (starts.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_reduce_next_fix_offsets_64', outoffsets.dtype.type, starts.dtype.type)](outoffsets.data, starts.data, starts.length, self._index.length))
            tmp = ak.contents.IndexedArray(outindex, unique._content, parameters=None)
            return ak.contents.ListOffsetArray(outoffsets, tmp, parameters=None)
        elif isinstance(unique, ak.contents.NumpyArray):
            # Flat result: identity index wraps it as an option layout.
            nextoutindex = ak.index.Index64(self._backend.index_nplike.arange(unique.length, dtype=np.int64), nplike=self._backend.index_nplike)
            return ak.contents.IndexedOptionArray.simplified(nextoutindex, unique, parameters=self._parameters)
    raise NotImplementedError
def _argsort_next(self, negaxis, starts, shifts, parents, outlength, ascending, stable):
    """Resolve the index, then delegate argsort to the carried content."""
    projected = self._content._carry(self._index, False)
    return projected._argsort_next(negaxis, starts, shifts, parents, outlength, ascending, stable)
def _sort_next(self, negaxis, starts, parents, outlength, ascending, stable):
    """Resolve the index, then delegate sorting to the carried content."""
    projected = self._content._carry(self._index, False)
    return projected._sort_next(negaxis, starts, parents, outlength, ascending, stable)
def _combinations(self, n, replacement, recordlookup, parameters, axis, depth):
    """n-combinations along ``axis``; deeper axes go through a projection."""
    posaxis = maybe_posaxis(self, axis, depth)
    if posaxis is not None and posaxis + 1 == depth:
        return self._combinations_axis0(n, replacement, recordlookup, parameters)
    return self.project()._combinations(n, replacement, recordlookup, parameters, axis, depth)
def _reduce_next(self, reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior):
    """Resolve the index, then delegate the reduction to the carried content."""
    projected = self._content._carry(self._index, False)
    return projected._reduce_next(reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior)
def _validity_error(self, path):
    """Return a description of the first structural problem, or delegate.

    Checks the categorical-uniqueness invariant and that every index entry
    is within the content's bounds (negative/missing entries disallowed for
    a non-option IndexedArray), then recurses into the content.
    """
    if (self.parameter('__array__') == 'categorical'):
        if (not ak._do.is_unique(self._content)):
            return 'at {} ("{}"): __array__ = "categorical" requires contents to be unique'.format(path, type(self))
    # Final False: -1 (missing) entries are not allowed in a non-option layout.
    error = self._backend[('awkward_IndexedArray_validity', self.index.dtype.type)](self.index.data, self.index.length, self._content.length, False)
    if (error.str is not None):
        if (error.filename is None):
            filename = ''
        else:
            filename = (' (in compiled code: ' + error.filename.decode(errors='surrogateescape').lstrip('\n').lstrip('('))
        message = error.str.decode(errors='surrogateescape')
        # Bug fix: `filename` was computed but never used — the message
        # previously hardcoded the literal '(unknown)'.
        return f'at {path} ("{type(self)}"): {message} at i={error.id}{filename}'
    else:
        return self._content._validity_error((path + '.content'))
def _nbytes_part(self):
    """Bytes owned by this node: the index buffer plus the content."""
    index_bytes = self.index._nbytes_part()
    content_bytes = self.content._nbytes_part()
    return index_bytes + content_bytes
def _pad_none(self, target, axis, depth, clip):
    """Pad with missing values along ``axis`` up to length ``target``."""
    posaxis = maybe_posaxis(self, axis, depth)
    if posaxis is not None and posaxis + 1 == depth:
        # Padding applies at this level.
        return self._pad_none_axis0(target, clip)
    if posaxis is not None and posaxis + 1 == depth + 1:
        # One level down: project first so the child sees plain content.
        return self.project()._pad_none(target, axis, depth, clip)
    padded = self._content._pad_none(target, axis, depth, clip)
    return ak.contents.IndexedArray(self._index, padded, parameters=self._parameters)
def _to_arrow(self, pyarrow: Any, mask_node: (Content | None), validbytes: (Content | None), length: int, options: ToArrowOptions):
    """Convert this layout to a pyarrow array.

    Categorical arrays become pyarrow ``DictionaryArray``s (unless the
    ``categorical_as_dictionary`` option is off, in which case the
    categorical marker is dropped and conversion retried); otherwise the
    index is resolved by carrying the content.
    """
    if ((not options['categorical_as_dictionary']) and (self.parameter('__array__') == 'categorical')):
        # Strip the categorical marker and retry as a plain indexed array.
        next_parameters = dict(self._parameters)
        del next_parameters['__array__']
        next = IndexedArray(self._index, self._content, parameters=next_parameters)
        return next._to_arrow(pyarrow, mask_node, validbytes, length, options)
    index = self._index.raw(numpy)
    if (self.parameter('__array__') == 'categorical'):
        dictionary = self._content._to_arrow(pyarrow, None, None, self._content.length, options)
        # `validbytes` marks valid entries; pyarrow expects a null mask.
        out = pyarrow.DictionaryArray.from_arrays(index, dictionary, (None if (validbytes is None) else (~ validbytes)))
        if options['extensionarray']:
            return ak._connect.pyarrow.AwkwardArrowArray.from_storage(ak._connect.pyarrow.to_awkwardarrow_type(out.type, options['extensionarray'], options['record_is_scalar'], mask_node, self), out)
        else:
            return out
    else:
        if (self._content.length == 0):
            # Empty content: a carry would be out of bounds, pass it through.
            next = self._content
        else:
            next = self._content._carry(ak.index.Index(index), False)
        next2 = next.copy(parameters=parameters_union(next._parameters, self._parameters))
        return next2._to_arrow(pyarrow, mask_node, validbytes, length, options)
def _to_backend_array(self, allow_missing, backend):
    """Materialize as a backend (e.g. NumPy) array via projection."""
    projected = self.project()
    return projected._to_backend_array(allow_missing, backend)
def _remove_structure(self, backend: Backend, options: RemoveStructureOptions) -> list[Content]:
    """Flatten away nested structure after resolving the index."""
    projected = self.project()
    return projected._remove_structure(backend, options)
def _recursively_apply(self, action: ImplementsApplyAction, depth: int, depth_context: (Mapping[(str, Any)] | None), lateral_context: (Mapping[(str, Any)] | None), options: ApplyActionOptions) -> (Content | None):
    """Apply ``action`` to this node, recursing into the content.

    When data is known, the content is first sliced down to the window
    actually referenced by the index (and the index re-based to zero) so
    the recursion never touches unreachable elements.
    """
    # NOTE(review): the known_data test is written twice; the second
    # occurrence was probably meant to check a different property
    # (e.g. known shape) — confirm against upstream.
    if (self._backend.nplike.known_data and self._backend.nplike.known_data and (self._index.length != 0)):
        npindex = self._index.data
        indexmin = self._backend.index_nplike.min(npindex)
        # Re-base the index to start at zero over the sliced content window.
        index = ak.index.Index((npindex - indexmin), nplike=self._backend.index_nplike)
        content = self._content[indexmin:(npindex.max() + 1)]
    else:
        if (not self._backend.nplike.known_data):
            self._touch_data(recursive=False)
        (index, content) = (self._index, self._content)
    if options['return_array']:
        if options['return_simplified']:
            make = IndexedArray.simplified
        else:
            make = IndexedArray
        def continuation():
            # Rebuild this node around the recursively transformed content.
            return make(index, content._recursively_apply(action, depth, copy.copy(depth_context), lateral_context, options), parameters=(self._parameters if options['keep_parameters'] else None))
    else:
        def continuation():
            # Side-effect-only traversal; nothing is returned.
            content._recursively_apply(action, depth, copy.copy(depth_context), lateral_context, options)
    result = action(self, depth=depth, depth_context=depth_context, lateral_context=lateral_context, continuation=continuation, backend=self._backend, options=options)
    if isinstance(result, Content):
        return result
    elif (result is None):
        return continuation()
    else:
        raise AssertionError(result)
def to_packed(self) -> Self:
    """Return a memory-compact equivalent of this layout."""
    if self.parameter('__array__') != 'categorical':
        # Non-categorical indexed arrays pack best by projecting the index away.
        return self.project().to_packed()
    # Categorical arrays must keep their index/dictionary structure.
    return IndexedArray(self._index, self._content.to_packed(), parameters=self._parameters)
def _to_list(self, behavior, json_conversions):
    """Convert to a Python list, resolving the index first."""
    if not self._backend.nplike.known_data:
        raise TypeError('cannot convert typetracer arrays to Python lists')
    # Behavior-specific conversion takes precedence when available.
    custom = self._to_list_custom(behavior, json_conversions)
    if custom is not None:
        return custom
    raw_index = self._index.raw(numpy)
    carried = self._content._carry(ak.index.Index(raw_index), False)
    return carried._to_list(behavior, json_conversions)
def _to_backend(self, backend: Backend) -> Self:
    """Move the index and content buffers onto ``backend``."""
    moved_index = self._index.to_nplike(backend.index_nplike)
    moved_content = self._content.to_backend(backend)
    return IndexedArray(moved_index, moved_content, parameters=self._parameters)
def _push_inside_record_or_project(self) -> (Self | ak.contents.RecordArray):
    """Push the index into each record field, or project it away entirely."""
    if not self.content.is_record:
        return self.project()
    # Wrap every field with (a simplified copy of) this array's index.
    indexed_fields = [
        ak.contents.IndexedArray.simplified(self._index, field_content)
        for field_content in self.content.contents
    ]
    return ak.contents.RecordArray(
        contents=indexed_fields,
        fields=self.content._fields,
        length=self.length,
        backend=self._backend,
        parameters=parameters_union(self.content._parameters, self._parameters),
    )
def _is_equal_to(self, other: Self, index_dtype: bool, numpyarray: bool, all_parameters: bool) -> bool:
    """Structural equality: generic checks, then the index, then the content."""
    if not self._is_equal_to_generic(other, all_parameters):
        return False
    if not self._index.is_equal_to(other.index, index_dtype, numpyarray):
        return False
    return self._content._is_equal_to(other.content, index_dtype, numpyarray, all_parameters)
class TextureLoss(tnn.Module):
    """Per-sample weighted cross-entropy for texture masks.

    The positive class (label 1) is up-weighted by ``pos_weight`` and
    label 2 marks ignored pixels. The unreduced loss is averaged over
    dim 1, yielding one scalar per batch element.
    """
    def __init__(self, pos_weight=10):
        super(TextureLoss, self).__init__()
        class_weights = torch.Tensor([1, pos_weight])
        self.loss = tnn.CrossEntropyLoss(weight=class_weights, ignore_index=2, reduction='none')
    def forward(self, preds, targs):
        """Return the per-sample mean of the weighted per-element loss."""
        per_element = self.loss(preds, targs)
        return torch.mean(per_element, 1)
def test_demo_start_subprocess_patched():
    """Re-run this file's subprocess demo with the atfork-patch lib preloaded."""
    from returnn.util.basic import get_patch_atfork_lib
    from subprocess import check_call
    patched_env = os.environ.copy()
    patched_env['LD_PRELOAD'] = get_patch_atfork_lib()
    print('LD_PRELOAD:', get_patch_atfork_lib())
    cmd = [sys.executable, __file__, 'patched_check_demo_start_subprocess']
    check_call(cmd, env=patched_env)
class FunctionFieldCompletion(Map):
    """Completion map of a function field at a place.

    Sends an element of the function field to its Laurent series
    expansion at ``place``, over the residue field of the place.
    With ``prec == infinity`` the codomain is a lazy Laurent series ring.
    """
    def __init__(self, field, place, name=None, prec=None, gen_name=None):
        """
        INPUT:

        - ``field`` -- the function field to complete
        - ``place`` -- the place at which elements are expanded
        - ``name`` -- variable name of the series ring (default ``'s'``)
        - ``prec`` -- default precision; ``infinity`` selects lazy series
        - ``gen_name`` -- residue-field generator name (default ``'a'``)
        """
        if (name is None):
            name = 's'
        if (gen_name is None):
            gen_name = 'a'
        (k, from_k, to_k) = place.residue_field(name=gen_name)
        self._place = place
        self._gen_name = gen_name
        if (prec == infinity):
            from sage.rings.lazy_series_ring import LazyLaurentSeriesRing
            codomain = LazyLaurentSeriesRing(k, name)
            self._precision = infinity
        else:
            # `prec` may be None; the ring then supplies its default precision.
            from sage.rings.laurent_series_ring import LaurentSeriesRing
            codomain = LaurentSeriesRing(k, name=name, default_prec=prec)
            self._precision = codomain.default_prec()
        Map.__init__(self, field, codomain)
    def _repr_type(self) -> str:
        """Type label used in the map's string representation."""
        return 'Completion'
    def _call_(self, f):
        """Expand ``f`` with the map's default precision."""
        if (self._precision == infinity):
            return self._expand_lazy(f)
        else:
            return self._expand(f, prec=None)
    def _call_with_args(self, f, args, kwds):
        """Expand ``f``, forwarding extra arguments (e.g. ``prec``)."""
        if (self._precision == infinity):
            return self._expand_lazy(f, *args, **kwds)
        else:
            return self._expand(f, *args, **kwds)
    def _expand(self, f, prec=None):
        """Return the Laurent series expansion of ``f`` with ``prec`` terms."""
        if (prec is None):
            prec = self._precision
        place = self._place
        F = place.function_field()
        der = F.higher_derivation()
        (k, from_k, to_k) = place.residue_field(name=self._gen_name)
        sep = place.local_uniformizer()
        val = f.valuation(place)
        # Shift f to valuation 0; the valuation is restored in the codomain call.
        e = (f * (sep ** (- val)))
        coeffs = [to_k(der._derive(e, i, sep)) for i in range(prec)]
        return self.codomain()(coeffs, val).add_bigoh((prec + val))
    def _expand_lazy(self, f):
        """Return the lazy (infinite-precision) expansion of ``f``."""
        place = self._place
        F = place.function_field()
        der = F.higher_derivation()
        (k, from_k, to_k) = place.residue_field(name=self._gen_name)
        sep = place.local_uniformizer()
        val = f.valuation(place)
        e = (f * (sep ** (- val)))
        def coeff(s, n):
            # Coefficient of the n-th term, computed on demand.
            return to_k(der._derive(e, (n - val), sep))
        return self.codomain().series(coeff, valuation=val)
    def default_precision(self):
        """Return the default expansion precision (possibly ``infinity``)."""
        return self._precision
def compute_huber_loss(y: torch.Tensor, target: torch.Tensor, beta: float=1.0) -> torch.Tensor:
    """Element-wise Huber (smooth-L1) loss between ``y`` and ``target``.

    Quadratic for residuals smaller than ``beta``, linear beyond, with the
    two pieces matching in value and slope at ``|residual| == beta``.
    """
    residual = target - y
    # Detach the branch condition so it carries no gradient.
    is_small = residual.detach().abs() < beta
    quadratic = 0.5 * residual ** 2
    linear = beta * (residual.abs() - 0.5 * beta)
    return torch.where(is_small, quadratic, linear)
class CLIPTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for CLIP.

    Text is normalized (via ``ftfy`` when available, otherwise a
    ``BasicTokenizer``), split with a CLIP-specific regex, byte-encoded,
    and merged with BPE ranks loaded from ``merges_file``. Word ends are
    marked with a ``</w>`` suffix.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|endoftext|>', **kwargs):
        """Load the vocabulary and BPE merges; configure special tokens."""
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        try:
            # Prefer ftfy for text normalization; fall back to BasicTokenizer.
            import ftfy
            self.fix_text = ftfy.fix_text
        except ImportError:
            logger.info('ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.')
            self.nlp = BasicTokenizer(do_lower_case=True)
            self.fix_text = None
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Skip the version header line; keep only the fixed number of merges.
            bpe_merges = merges_handle.read().strip().split('\n')[1:(((49152 - 256) - 2) + 1)]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Special tokens bypass BPE via the cache.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
    # NOTE(review): upstream transformers defines vocab_size as a @property;
    # confirm whether callers here invoke it as a method or attribute.
    def vocab_size(self):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)
    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Wrap sequence(s) in BOS/EOS: ``bos A eos`` or ``bos A eos eos B eos``."""
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]
        if (token_ids_1 is None):
            return ((bos_token + token_ids_0) + eos_token)
        return (((((bos_token + token_ids_0) + eos_token) + eos_token) + token_ids_1) + eos_token)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return ((((([1] + ([0] * len(token_ids_0))) + [1]) + [1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """CLIP uses no token types: return all zeros of the right length."""
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]
        if (token_ids_1 is None):
            return (len(((bos_token + token_ids_0) + eos_token)) * [0])
        return (len((((((bos_token + token_ids_0) + eos_token) + eos_token) + token_ids_1) + eos_token)) * [0])
    def bpe(self, token):
        """Apply BPE merges to ``token`` and return space-joined subwords."""
        if (token in self.cache):
            return self.cache[token]
        # Mark the final character as a word end.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the lowest-ranked (most frequent) pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Normalize, regex-split, byte-encode, and BPE-merge ``text``."""
        bpe_tokens = []
        if (self.fix_text is None):
            text = ' '.join(self.nlp.tokenize(text))
        else:
            text = whitespace_clean(self.fix_text(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Map a token string to its id (falling back to the UNK id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Map an id back to its token string."""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """Byte-decode tokens and turn ``</w>`` markers back into spaces."""
        text = ''.join(tokens)
        byte_array = bytearray([self.byte_decoder[c] for c in text])
        text = byte_array.decode('utf-8', errors=self.errors).replace('</w>', ' ').strip()
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary JSON and merges file into ``save_directory``."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Write merges in rank order, warning if ranks are not contiguous.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
class BasicBlockIR(nn.Module):
    """Improved-residual basic block (IR-Net / face-recognition style).

    The residual path is BN -> 3x3 conv -> BN -> PReLU -> strided 3x3
    conv -> BN; the shortcut is a stride-matching max-pool when channel
    counts agree, otherwise a 1x1 conv + BN projection.
    """
    def __init__(self, c1, c2, s) -> None:
        super().__init__()
        if c1 == c2:
            # Identity channels: a 1x1 max-pool just applies the stride.
            self.shortcut_layer = nn.MaxPool2d(1, s)
        else:
            self.shortcut_layer = nn.Sequential(
                nn.Conv2d(c1, c2, 1, s, bias=False),
                nn.BatchNorm2d(c2),
            )
        self.res_layer = nn.Sequential(
            nn.BatchNorm2d(c1),
            nn.Conv2d(c1, c2, 3, 1, 1, bias=False),
            nn.BatchNorm2d(c2),
            nn.PReLU(c2),
            nn.Conv2d(c2, c2, 3, s, 1, bias=False),
            nn.BatchNorm2d(c2),
        )
    def forward(self, x: Tensor) -> Tensor:
        """Return the sum of the residual and shortcut paths."""
        identity = self.shortcut_layer(x)
        out = self.res_layer(x)
        return out + identity
def main():
    """Entry point: prepare output dir and run rendering, parallel or not."""
    args = parse_args()
    os.makedirs(args.out_dir, exist_ok=True)
    global_setup(args)
    # Pick the driver based on whether a worker pool was requested.
    runner = _main_parallel if args.pool else _main_sequential
    runner(args)
    print('finished rendering')
def setEduCovered(n, eduIds, eduCovered):
    """Depth-first collect nodes whose ``_id`` is in ``eduIds``.

    Matching nodes are appended to ``eduCovered`` in pre-order
    (parent before children); the list is mutated in place.
    """
    if n._id in eduIds:
        eduCovered.append(n)
    for child in n.nodelist:
        setEduCovered(child, eduIds, eduCovered)
# Bug fix: the decorator lines were garbled to bare `.parametrize(...)`
# calls, which is a SyntaxError; restore the pytest parametrize markers.
@pytest.mark.parametrize('observation_size', [4])
@pytest.mark.parametrize('action_size', [2])
def test_transition(observation_size: int, action_size: int) -> None:
    """Transition signatures must reflect the stored arrays' shapes/dtypes."""
    transition = Transition(observation=np.random.random(observation_size).astype(np.float32), action=np.random.random(action_size).astype(np.float32), reward=np.random.random(1).astype(np.float32), next_observation=np.random.random(observation_size).astype(np.float32), return_to_go=np.random.random(1).astype(np.float32), terminal=0.0, interval=1)
    assert (transition.observation_signature.shape[0] == (observation_size,))
    assert (transition.observation_signature.dtype[0] == np.float32)
    assert (transition.action_signature.shape[0] == (action_size,))
    assert (transition.action_signature.dtype[0] == np.float32)
    assert (transition.reward_signature.shape[0] == (1,))
    assert (transition.reward_signature.dtype[0] == np.float32)
class TernaryEnsemble(Ensemble):
    """Ensemble of M x N matrices with i.i.d. entries in {-s, 0, +s}.

    Entries take the values -1, 0, +1 with probabilities ``p_neg``,
    ``p_zero``, ``p_pos`` and are scaled by ``s = 1/sqrt(N)``.
    """
    def __init__(self, M, N, p_pos=0.33, p_neg=0.33):
        self.M = M
        self.N = N
        self.p_pos = p_pos
        self.p_neg = p_neg
        self.repr_init()
        self.p_zero = (1 - (self.p_pos + self.p_neg))
    def generate(self):
        """Sample one M x N ternary matrix (float dtype).

        Bug fix: sampling from integer values ``[-1, 0, 1]`` produced an
        int64 array, so the in-place ``X *= sigma_x`` with a float scale
        raised a casting TypeError. Sampling float values keeps the
        in-place scaling valid.
        """
        p = [self.p_neg, self.p_zero, self.p_pos]
        X = np.random.choice([-1.0, 0.0, 1.0], size=(self.M, self.N), replace=True, p=p)
        sigma_x = (1 / np.sqrt(self.N))
        X *= sigma_x
        return X
class ProductProjectiveSpaces_finite_field(ProductProjectiveSpaces_field):
    """Product of projective spaces over a finite field."""
    def _point(self, *args, **kwds):
        """Construct a point of this product space."""
        return ProductProjectiveSpaces_point_finite_field(*args, **kwds)
    def __iter__(self):
        """Iterate over all rational points of the product.

        Works like an odometer: each component keeps its own point
        iterator; component 0 advances fastest, and when a component is
        exhausted it is reset and the next component carries by one.
        """
        iters = [iter(T) for T in self._components]
        L = []
        for x in iters:
            # One starting point per component.
            L.append(next(x))
        (yield self(L))
        j = 0
        while (j < self.num_components()):
            try:
                L[j] = next(iters[j])
                (yield self(L))
                # Successful advance: go back to the fastest wheel.
                j = 0
            except StopIteration:
                # Component j exhausted: restart it and carry into j + 1.
                iters[j] = iter(self[j])
                L[j] = next(iters[j])
                j += 1
    def rational_points(self, F=None):
        """List the rational points, optionally after base extension to ``F``."""
        if (F is None):
            return list(self)
        elif (not isinstance(F, FiniteField)):
            raise TypeError(('second argument (= %s) must be a finite field' % F))
        return list(self.base_extend(F))
def add_present_time_to_history(current_time: List[Dict[(str, Any)]], history: History) -> History:
    """Append each current-frame annotation to its instance's track.

    Annotations are keyed by ``instance_token``; unseen tokens start a
    new single-element track. ``history`` is mutated in place and also
    returned for convenience.
    """
    for annotation in current_time:
        token = annotation['instance_token']
        history.setdefault(token, []).append(annotation)
    return history
def set_pox_opts(components, info_level, logfile_opts, log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
    """Build the POX controller command-line options string.

    ``info_level`` is upper-cased for the ``log.level`` switch; the
    returned string ends with ``&`` so POX runs in the background.
    """
    level = info_level.upper()
    template = '{} log.level --{} log --file={} --format="{}" &'
    return template.format(components, level, logfile_opts, log_format)
def finetune_net_one_epoch(run_manager, args, epoch, warmup_epochs=0, warmup_lr=0, subnet_settings=None):
    """Fine-tune the dynamic supernet for one epoch on a fixed subnet config.

    Runs a distributed training epoch with optional LR warmup, mixup, and
    knowledge distillation from ``args.teacher_model``; gradients are
    synchronized across workers manually after backward.

    Returns (mean loss, top-1 accuracy, top-5 accuracy) for the epoch.
    """
    dynamic_net = run_manager.net
    dynamic_net.train()
    # Re-seed the distributed sampler so shuffling differs per epoch.
    run_manager.run_config.train_loader.sampler.set_epoch(epoch)
    MyRandomResizedCrop.EPOCH = epoch
    nBatch = len(run_manager.run_config.train_loader)
    data_time = AverageMeter()
    losses = DistributedMetric('train_loss')
    top1 = DistributedMetric('train_top1')
    top5 = DistributedMetric('train_top5')
    end = time.time()
    for (i, (images, labels)) in enumerate(run_manager.run_config.train_loader):
        data_time.update((time.time() - end))
        # Warmup LR schedule for the first `warmup_epochs`, then the regular one.
        if (epoch < warmup_epochs):
            new_lr = run_manager.run_config.warmup_adjust_learning_rate(run_manager.optimizer, (warmup_epochs * nBatch), nBatch, epoch, i, warmup_lr)
        else:
            new_lr = run_manager.run_config.adjust_learning_rate(run_manager.optimizer, (epoch - warmup_epochs), i, nBatch)
        (images, labels) = (images.cuda(), labels.cuda())
        target = labels
        if isinstance(run_manager.run_config.mixup_alpha, float):
            # Deterministic mixup coefficient per (iteration, epoch) pair.
            random.seed(int(('%d%.3d' % (i, epoch))))
            lam = random.betavariate(run_manager.run_config.mixup_alpha, run_manager.run_config.mixup_alpha)
            images = mix_images(images, lam)
            labels = mix_labels(labels, lam, run_manager.run_config.data_provider.n_classes, run_manager.run_config.label_smoothing)
        if (args.kd_ratio > 0):
            # Teacher forward pass (no grad) for knowledge distillation.
            args.teacher_model.train()
            with torch.no_grad():
                soft_logits = args.teacher_model(images).detach()
                soft_label = F.softmax(soft_logits, dim=1)
        run_manager.optimizer.zero_grad()
        (loss_of_subnets, acc1_of_subnets, acc5_of_subnets) = ([], [], [])
        subnet_str = ''
        for _ in range(args.dynamic_batch_size):
            # Seed subnet sampling either independently per worker or in lockstep.
            if args.independent_distributed_sampling:
                subnet_seed = (os.getpid() + time.time())
            else:
                subnet_seed = int(('%d%.3d%.3d' % (((epoch * nBatch) + i), _, 0)))
            random.seed(subnet_seed)
            dynamic_net.set_active_subnet(ks=subnet_settings['ks'], e=subnet_settings['e'], d=subnet_settings['d'])
            subnet_str += ((('%d: ' % _) + ','.join([('%s_%s' % (key, (('%.1f' % subset_mean(val, 0)) if isinstance(val, list) else val))) for (key, val) in subnet_settings.items()])) + ' || ')
            output = run_manager.net(images)
            if (args.kd_ratio == 0):
                loss = run_manager.train_criterion(output, labels)
                loss_type = 'ce'
            else:
                if (args.kd_type == 'ce'):
                    kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
                else:
                    kd_loss = F.mse_loss(output, soft_logits)
                # Blend distillation and CE losses, then renormalize the scale.
                loss = ((args.kd_ratio * kd_loss) + run_manager.train_criterion(output, labels))
                loss = (loss * (2 / (args.kd_ratio + 1)))
                loss_type = ('%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type))
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            loss_of_subnets.append(loss)
            acc1_of_subnets.append(acc1[0])
            acc5_of_subnets.append(acc5[0])
            # Scale before backward; gradients are summed across workers below.
            loss = (loss / distributed.get_world_size())
            loss.backward()
        distributed.sync_grad_sum(run_manager.net)
        run_manager.optimizer.step()
        losses.update(list_mean(loss_of_subnets), images.size(0))
        top1.update(list_mean(acc1_of_subnets), images.size(0))
        top5.update(list_mean(acc5_of_subnets), images.size(0))
        # Rank-0 progress logging every 100 iterations.
        if (((i % 100) == 0) and (torch.distributed.get_rank() == 0)):
            string = f'Train Epoch [{epoch}] Iter [{i}/{nBatch}] '
            for (key, value) in {'task': args.task, 'phase': args.phase, 'loss': '{:.3f}'.format(losses.avg.item()), 'top1': '{:.3f}'.format(top1.avg.item()), 'top5': '{:.3f}'.format(top5.avg.item()), 'R': images.size(2), 'lr': '{:.3f}'.format(new_lr), 'loss_type': loss_type, 'seed': str(subnet_seed), 'data_time': '{:.3f}'.format(data_time.avg)}.items():
                string += f'{key}: {value}, '
            print(string)
        end = time.time()
    return (losses.avg.item(), top1.avg.item(), top5.avg.item())
_module()  # NOTE(review): looks like the tail of a garbled registry decorator (e.g. `@MODELS.register_module()` from mmseg) for STDCHead below — as written this is a bare call to an undefined name; confirm against upstream
class STDCHead(FCNHead):
    """FCN head supervised by Laplacian-derived boundary targets (STDC).

    The segmentation label is turned into binary boundary maps at three
    scales with a fixed Laplacian kernel; the scales are fused with fixed
    6:3:1 weights and used as the target of the parent head's loss.
    """
    def __init__(self, boundary_threshold=0.1, **kwargs):
        """``boundary_threshold``: responses above this count as boundary."""
        super(STDCHead, self).__init__(**kwargs)
        self.boundary_threshold = boundary_threshold
        # 3x3 Laplacian edge detector, stored as a non-trainable buffer.
        self.register_buffer('laplacian_kernel', torch.tensor([(- 1), (- 1), (- 1), (- 1), 8, (- 1), (- 1), (- 1), (- 1)], dtype=torch.float32, requires_grad=False).reshape((1, 1, 3, 3)))
        # Fixed fusion weights for the 1x / 2x / 4x boundary pyramids.
        self.fusion_kernel = torch.nn.Parameter(torch.tensor([[(6.0 / 10)], [(3.0 / 10)], [(1.0 / 10)]], dtype=torch.float32).reshape(1, 3, 1, 1), requires_grad=False)
    def losses(self, seg_logit, seg_label):
        """Compute the loss of ``seg_logit`` against fused boundary targets."""
        seg_label = seg_label.to(self.laplacian_kernel)
        # Full-resolution boundary response, binarized at the threshold.
        boundary_targets = F.conv2d(seg_label, self.laplacian_kernel, padding=1)
        boundary_targets = boundary_targets.clamp(min=0)
        boundary_targets[(boundary_targets > self.boundary_threshold)] = 1
        boundary_targets[(boundary_targets <= self.boundary_threshold)] = 0
        # Strided convolutions give 1/2- and 1/4-resolution responses.
        boundary_targets_x2 = F.conv2d(seg_label, self.laplacian_kernel, stride=2, padding=1)
        boundary_targets_x2 = boundary_targets_x2.clamp(min=0)
        boundary_targets_x4 = F.conv2d(seg_label, self.laplacian_kernel, stride=4, padding=1)
        boundary_targets_x4 = boundary_targets_x4.clamp(min=0)
        # Upsample back to full resolution, then binarize.
        boundary_targets_x4_up = F.interpolate(boundary_targets_x4, boundary_targets.shape[2:], mode='nearest')
        boundary_targets_x2_up = F.interpolate(boundary_targets_x2, boundary_targets.shape[2:], mode='nearest')
        boundary_targets_x2_up[(boundary_targets_x2_up > self.boundary_threshold)] = 1
        boundary_targets_x2_up[(boundary_targets_x2_up <= self.boundary_threshold)] = 0
        boundary_targets_x4_up[(boundary_targets_x4_up > self.boundary_threshold)] = 1
        boundary_targets_x4_up[(boundary_targets_x4_up <= self.boundary_threshold)] = 0
        # Fuse the three scales with the fixed 6:3:1 kernel; binarize once more.
        boundary_targets_pyramids = torch.stack((boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up), dim=1)
        boundary_targets_pyramids = boundary_targets_pyramids.squeeze(2)
        boundary_targets_pyramid = F.conv2d(boundary_targets_pyramids, self.fusion_kernel)
        boundary_targets_pyramid[(boundary_targets_pyramid > self.boundary_threshold)] = 1
        boundary_targets_pyramid[(boundary_targets_pyramid <= self.boundary_threshold)] = 0
        loss = super(STDCHead, self).losses(seg_logit, boundary_targets_pyramid.long())
        return loss
def get_state_embedding_network_args(env, embedding_dim):
    """Return the kwargs used to build the state-embedding CNN.

    The convolutional architecture is fixed; only the input shape (taken
    from ``env``'s observation space) and the output embedding size vary.
    """
    return dict(
        name='state_embedding_network',
        input_shape=env.observation_space.shape,
        output_dim=embedding_dim,
        conv_filters=(16, 32),
        conv_filter_sizes=(8, 4),
        conv_strides=(4, 2),
        conv_pads=('VALID', 'VALID'),
        hidden_sizes=(256,),
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=None,
        batch_normalization=False,
    )
def accuracy(logit, y):
    """Fraction of rows where argmax(logit) equals argmax(y), as a TF scalar."""
    predicted_labels = tf.argmax(logit, 1)
    target_labels = tf.argmax(y, 1)
    hits = tf.to_float(tf.equal(predicted_labels, target_labels))
    return tf.reduce_mean(hits)
class Device(object):
    """Base class for a simulated industrial control device.

    A device couples a state backend -- SQLite or Redis, chosen from the
    extension of ``state['path']`` -- with an optional industrial protocol
    stack (``enip`` or ``modbus``). Subclasses implement the device's
    behaviour by overriding ``_start`` and ``_stop``.
    """

    def __init__(self, name, protocol, state, disk=None, memory=None):
        """Validate inputs, then wire up the state and protocol backends.

        :param name: non-empty device name (str)
        :param protocol: ``None`` for a device without networking, or a
            dict with exactly the keys ``name`` ('enip' or 'modbus'),
            ``mode`` (non-negative int) and ``server``
        :param state: dict with exactly the keys ``name`` and ``path``;
            ``path`` must end in ``.sqlite`` or ``.redis``
        :param disk: optional dict describing disk contents
        :param memory: optional dict describing memory contents
        """
        # Fix: avoid mutable default arguments ({} in the signature), which
        # would be shared by every Device created without disk/memory.
        if disk is None:
            disk = {}
        if memory is None:
            memory = {}
        self._validate_inputs(name, protocol, state, disk, memory)
        self.name = name
        self.state = state
        self.protocol = protocol
        self.memory = memory
        self.disk = disk
        self._init_state()
        self._init_protocol()
        # NOTE(review): both lifecycle hooks run during construction;
        # calling _stop right after _start mirrors the original code --
        # confirm this is the intended framework contract.
        self._start()
        self._stop()

    def _validate_inputs(self, name, protocol, state, disk, memory):
        """Raise TypeError/ValueError/KeyError on malformed constructor input."""
        # name: non-empty string
        if type(name) is not str:
            raise TypeError('Name must be a string.')
        elif not name:
            raise ValueError('Name string cannot be empty.')
        # state: dict with exactly the keys 'path' and 'name', str values
        if type(state) is not dict:
            raise TypeError('State must be a dict.')
        else:
            state_keys = state.keys()
            if (not state_keys) or (len(state_keys) != 2):
                raise KeyError('State must contain 2 keys.')
            else:
                for key in state_keys:
                    if (key != 'path') and (key != 'name'):
                        raise KeyError('%s is an invalid key.' % key)
                state_values = state.values()
                for val in state_values:
                    if type(val) is not str:
                        raise TypeError('state values must be strings.')
                # The path extension selects the state backend.
                (subpath, extension) = splitext(state['path'])
                if (extension != '.redis') and (extension != '.sqlite'):
                    raise ValueError('%s extension not supported.' % extension)
                if type(state['name']) is not str:
                    raise TypeError('State name must be a string.')
        # protocol: None, or a dict with exactly name/mode/server
        if type(protocol) is not dict:
            if protocol is not None:
                raise TypeError('Protocol must be either None or a dict.')
        else:
            protocol_keys = protocol.keys()
            if (not protocol_keys) or (len(protocol_keys) != 3):
                raise KeyError('Protocol must contain 3 keys.')
            else:
                for key in protocol_keys:
                    if (key != 'name') and (key != 'mode') and (key != 'server'):
                        raise KeyError('%s is an invalid key.' % key)
                if type(protocol['name']) is not str:
                    raise TypeError('Protocol name must be a string.')
                else:
                    name = protocol['name']
                    if (name != 'enip') and (name != 'modbus'):
                        # Fix: report the offending protocol *name*; the
                        # original interpolated the whole protocol dict.
                        raise ValueError('%s protocol not supported.' % name)
                if type(protocol['mode']) is not int:
                    raise TypeError('Protocol mode must be a int.')
                else:
                    mode = protocol['mode']
                    if mode < 0:
                        raise ValueError('Protocol mode must be positive.')

    def _init_state(self):
        """Instantiate the state backend matching the path extension."""
        (subpath, extension) = splitext(self.state['path'])
        if extension == '.sqlite':
            self._state = SQLiteState(self.state)
        elif extension == '.redis':
            self._state = RedisState(self.state)
        else:
            # Unreachable after _validate_inputs; kept as a safety net.
            print('ERROR: %s backend not supported.' % self.state)

    def _init_protocol(self):
        """Instantiate the protocol backend, or skip when protocol is None."""
        if self.protocol is None:
            print('DEBUG: %s has no networking capabilities.' % self.name)
            pass
        else:
            name = self.protocol['name']
            if name == 'enip':
                self._protocol = EnipProtocol(self.protocol)
            elif name == 'modbus':
                self._protocol = ModbusProtocol(self.protocol)
            else:
                print('ERROR: %s protocol not supported.' % self.protocol)

    def _start(self):
        """Device main-loop hook; subclasses override."""
        print('TODO _start: please override me')

    def _stop(self):
        """Device shutdown hook; subclasses override."""
        print('TODO _stop: please override me')

    def set(self, what, value):
        """Write ``value`` to the state entry keyed by tuple ``what``."""
        if type(what) is not tuple:
            raise TypeError('Parameter must be a tuple.')
        else:
            return self._state._set(what, value)

    def get(self, what):
        """Read the state entry keyed by tuple ``what``."""
        if type(what) is not tuple:
            raise TypeError('Parameter must be a tuple.')
        else:
            return self._state._get(what)

    def send(self, what, value, address, **kwargs):
        """Send ``value`` for tag ``what`` to ``address`` over the protocol."""
        if type(what) is not tuple:
            raise TypeError('Parameter must be a tuple.')
        else:
            return self._protocol._send(what, value, address, **kwargs)

    def send_multiple(self, what, value, address, **kwargs):
        """Send multiple values; no tuple check (mirrors the original API)."""
        return self._protocol._send_multiple(what, value, address, **kwargs)

    def receive(self, what, address, **kwargs):
        """Receive the value of tag ``what`` from ``address``."""
        if type(what) is not tuple:
            raise TypeError('Parameter must be a tuple.')
        else:
            return self._protocol._receive(what, address, **kwargs)

    def receive_multiple(self, what, address, **kwargs):
        """Receive multiple values; no tuple check (mirrors the original API)."""
        return self._protocol._receive_multiple(what, address, **kwargs)
def update_learning_rate_att(optimizer, cur_lr, new_lr):
    """Update every param group's learning rate to ``new_lr``.

    Bias groups (indices 1 and 3) may get double lr, the first two groups
    (backbone) are scaled by BACKBONE_LR_SCALAR, and SGD momentum buffers
    are rescaled when the lr jump is large enough.
    """
    if cur_lr == new_lr:
        return
    ratio = _get_lr_change_ratio(cur_lr, new_lr)
    if ratio > cfg.SOLVER.LOG_LR_CHANGE_THRESHOLD:
        logger.info('Changing learning rate %.6f -> %.6f', cur_lr, new_lr)
    param_keys = []
    for group_idx, group in enumerate(optimizer.param_groups):
        # Groups 1 and 3 hold biases; optionally give them double lr.
        if group_idx in (1, 3) and cfg.SOLVER.BIAS_DOUBLE_LR:
            lr = new_lr * 2
        else:
            lr = new_lr
        # The first two groups belong to the backbone and get scaled down.
        if group_idx <= 1:
            lr = cfg.SOLVER.BACKBONE_LR_SCALAR * lr
        group['lr'] = lr
        param_keys += group['params']
    scale_momentum = (cfg.SOLVER.TYPE in ['SGD'] and cfg.SOLVER.SCALE_MOMENTUM
                      and cur_lr > 1e-07 and ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD)
    if scale_momentum:
        _CorrectMomentum(optimizer, param_keys, new_lr / cur_lr)
def convert(data, quantity, per):
    """Convert a per-atom ``quantity`` to a different normalization basis.

    :param data: object whose ``aux`` dict provides ``n_atoms`` and
        ``n_non_O`` counts
    :param quantity: per-atom value to convert
    :param per: target basis -- ``'atom'``/``None`` (unchanged),
        a per-structure alias, or a per-cation alias
    :returns: the converted quantity
    :raises ValueError: for an unrecognized ``per`` value (the original
        silently returned ``None`` here, hiding typos in the unit name)
    """
    if (per == 'atom') or (per is None):
        return quantity
    if per in ['structure', 'cell', 'struc', 'molecule', 'mol']:
        # per structure: scale up by the number of atoms in the cell
        return quantity * data.aux['n_atoms']
    if per in ['cat', 'sub', 'non_O', 'cation']:
        # per cation: per-structure value divided by the non-oxygen count
        return (quantity * data.aux['n_atoms']) / data.aux['n_non_O']
    raise ValueError('Unknown normalization basis: %r' % (per,))
# NOTE(review): bare '_to_string' below looks like a decorator that lost its
# '@' and name prefix (Jinja2 decorates this class with
# '@implements_to_string') -- confirm against the upstream source.
_to_string
class Undefined(object):
    """Default placeholder object for undefined template values.

    "Active" operations (arithmetic, calling, indexing, attribute access)
    raise ``self._undefined_exception`` with a descriptive message, while
    "passive" operations (str, len, iteration, truth testing, equality)
    succeed with empty/falsey results.
    """
    # __slots__ keeps instances tiny and prevents accidental attribute sets.
    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name', '_undefined_exception')

    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
        # hint: explicit message override; obj/name: what was looked up and
        # on what; exc: exception class raised on invalid use.
        self._undefined_hint = hint
        self._undefined_obj = obj
        self._undefined_name = name
        self._undefined_exception = exc

    def _undefined_message(self):
        """Build the error message describing why the value is undefined."""
        # NOTE(review): upstream Jinja2 defines this as a @property; the
        # decorator appears to have been stripped here, which would make the
        # 'raise' below pass a bound method instead of a string -- confirm.
        if self._undefined_hint:
            return self._undefined_hint
        if (self._undefined_obj is missing):
            return ('%r is undefined' % self._undefined_name)
        if (not isinstance(self._undefined_name, string_types)):
            return ('%s has no element %r' % (object_type_repr(self._undefined_obj), self._undefined_name))
        return ('%r has no attribute %r' % (object_type_repr(self._undefined_obj), self._undefined_name))

    def _fail_with_undefined_error(self, *args, **kwargs):
        """Raise the configured undefined-error exception."""
        raise self._undefined_exception(self._undefined_message)

    def __getattr__(self, name):
        # Dunder lookups must raise AttributeError so protocols work.
        if (name[:2] == '__'):
            raise AttributeError(name)
        return self._fail_with_undefined_error()

    # Every "active" operator funnels into the undefined error.
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = __rsub__ = _fail_with_undefined_error

    def __eq__(self, other):
        # All undefineds of the same class compare equal.
        return (type(self) is type(other))

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __hash__(self):
        # Hash by class so equal undefineds hash equal.
        return id(type(self))

    def __str__(self):
        return u''

    def __len__(self):
        return 0

    def __iter__(self):
        # Empty generator: iterating an undefined yields nothing.
        if 0:
            (yield None)

    def __nonzero__(self):
        return False
    __bool__ = __nonzero__  # Python 3 truthiness

    def __repr__(self):
        return 'Undefined'
class StateD(nn.Module):
    """MLP discriminator over environment states: state_dim -> 2 logits."""

    def __init__(self, opt):
        super(StateD, self).__init__()
        self.opt = opt
        self.state_dim = opt.state_dim
        # Build the Linear/ReLU stack programmatically; the layer sequence
        # (32 -> 128 -> 64 -> 32 -> 2) matches the hand-written version.
        hidden_dims = [32, 128, 64, 32]
        layers = []
        in_dim = self.state_dim
        for out_dim in hidden_dims:
            layers.append(nn.Linear(in_dim, out_dim))
            layers.append(nn.ReLU())
            in_dim = out_dim
        layers.append(nn.Linear(in_dim, 2))
        self.state_fc = nn.Sequential(*layers)

    def forward(self, state):
        """Return the 2-way logits for a batch of states."""
        return self.state_fc(state)
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
    """Estimate one connectivity matrix per subject's timeseries.

    ``kind='lasso'`` fits a sparse inverse covariance per subject; the
    tangent/partial-correlation/correlation kinds delegate to nilearn's
    ConnectivityMeasure. Matrices are optionally saved as .mat files.
    """
    if kind == 'lasso':
        # Graphical-lasso covariance, fitted independently per subject.
        estimator = GraphLassoCV(verbose=1)
        connectivity_matrices = []
        for ts in timeseries:
            estimator.fit(ts)
            cov = estimator.covariance_
            connectivity_matrices.append(cov)
            print('Covariance matrix has shape {0}.'.format(cov.shape))
    elif kind in ['tangent', 'partial correlation', 'correlation']:
        measure = connectome.ConnectivityMeasure(kind=kind)
        connectivity_matrices = measure.fit_transform(timeseries)
    if save:
        for i, subject in enumerate(subject_list):
            fname = '{}_{}_{}.mat'.format(subject, atlas_name, kind.replace(' ', '_'))
            subject_file = os.path.join(save_path, subject, fname)
            sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
            print('Saving connectivity matrix to %s' % subject_file)
    return connectivity_matrices
# NOTE(review): bare call below looks like a decorator that lost its '@'
# (likely '@_converter_regitstry('sAR')', registering this converter under
# the 'sAR' instruction family) -- confirm against the original source.
_converter_regitstry('sAR')
def sAR_t_converter(reg: sAR_reg):
    """Decode an sAR instruction register set into (results, attr, operands).

    Shapes default to the result's (n, c, h, w); a few ``tsk_eu_typ``
    values override individual operand shapes/dtypes below. Only the
    first ``tsk_opd_num`` operands are materialized.
    """
    # Result dimensions, read from the res0_{n,c,h,w} register fields.
    (n, c, h, w) = (reg[f'res0_{d}'] for d in 'nchw')
    # Operand 0: same logical shape as the result, explicit per-dim strides.
    opd0 = dict(address=reg.opd0_addr, dtype=(reg.opd0_prec, reg.opd0_sign), shape=(n, c, h, w), stride=tuple((reg[f'opd0_{d}_str'] for d in 'nchw')), layout=reg.opd0_str, is_const=reg.opd0_const)
    # Result: signed if either input operand is signed.
    res0 = dict(address=reg.res0_addr, dtype=(reg.res0_prec, (reg.opd0_sign or reg.opd1_sign)), shape=(n, c, h, w), stride=tuple((reg[f'res0_{d}_str'] for d in 'nchw')), layout=reg.res0_str)
    opd1 = dict(address=reg.opd1_addr, dtype=(reg.opd1_prec, reg.opd1_sign), shape=(n, c, h, w), stride=tuple((reg[f'opd1_{d}_str'] for d in 'nchw')), layout=reg.opd1_str, is_const=reg.opd1_const)
    # Operand 2: per-channel vector in compact layout.
    opd2 = dict(address=reg.opd2_addr, dtype=(reg.opd2_prec, reg.opd2_sign), shape=(1, c, 1, 1), layout=Layout.compact, is_const=reg.opd2_const)
    # Per-opcode shape/dtype overrides (tsk_eu_typ semantics are
    # hardware-defined; values 17/28/14/12 taken from the register spec).
    if (reg.tsk_eu_typ == 17):
        opd2['shape'] = (1, c, 1, 2)
    elif (reg.tsk_eu_typ == 28):
        opd0['shape'] = (1, c, h, w)
    elif (reg.tsk_eu_typ == 14):
        res0['dtype'] = (reg.res0_prec, reg.opd2_sign)
    # opd2_n_str is overloaded: iteration count for op 12, else round mode.
    if (reg.tsk_eu_typ == 12):
        attr = dict(iter=(reg.opd2_n_str + 1))
    else:
        attr = dict(round_mode=reg.opd2_n_str)
    opd0['shape'] = restore_org_shape(opd0)
    opd1['shape'] = restore_org_shape(opd1)
    opd_num = reg.tsk_opd_num
    operands = [get_value(**x) for x in (opd0, opd1, opd2)[:opd_num]]
    results = [get_value(**res0)]
    return (results, attr, operands)
def index_(tokenized_sentences, vocab_size):
    """Build vocabulary indices from tokenized sentences.

    :param tokenized_sentences: iterable of token lists
    :param vocab_size: number of most-frequent tokens to keep
    :returns: ``(index2word, word2index, freq_dist)`` where index 0 is the
        padding token ``'_'``, index 1 is ``UNK``, and the remaining
        entries are the ``vocab_size`` most common tokens
    """
    # Count token frequencies over the flattened corpus.
    freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences))
    vocab = freq_dist.most_common(vocab_size)
    # Removed the dead freqflag/vocabflag bookkeeping from the original:
    # the flags were assigned and incremented but never read anywhere.
    index2word = ['_'] + [UNK] + [x[0] for x in vocab]
    word2index = dict((w, i) for (i, w) in enumerate(index2word))
    return (index2word, word2index, freq_dist)
class TaskType(str, enum.Enum):
    """Supported task types (str-valued so members serialize as plain strings)."""
    # Sequence-level classification.
    SEQ_CLS = 'SEQ_CLS'
    # Sequence-to-sequence language modeling.
    SEQ_2_SEQ_LM = 'SEQ_2_SEQ_LM'
    # Causal (autoregressive) language modeling.
    CAUSAL_LM = 'CAUSAL_LM'
    # Token-level classification.
    TOKEN_CLS = 'TOKEN_CLS'
    # Question answering.
    QUESTION_ANS = 'QUESTION_ANS'
def test_parameters():
    """almost_equal must honor array parameters unless check_parameters=False."""
    labeled = ak.with_parameter([1, 2, 3], 'name', 'Bob Dylan')
    # A parameterized array differs from the bare list...
    assert not ak.almost_equal(labeled, [1, 2, 3])
    # ...unless parameter comparison is disabled.
    assert ak.almost_equal(labeled, [1, 2, 3], check_parameters=False)
    relabeled = ak.with_parameter(labeled, 'name', 'Emmy Noether')
    # Same data, different parameter value: unequal unless ignored.
    assert not ak.almost_equal(labeled, relabeled)
    assert ak.almost_equal(labeled, relabeled, check_parameters=False)
class OpioidOverdoseLabeler(TimeHorizonEventLabeler):
    """Binary labeler: does an opioid overdose occur within the time horizon
    of an opioid prescription?

    Outcomes are overdose diagnosis events (ICD9/ICD10 code subtrees);
    predictions are made at opioid prescription events (ATC/N02A subtree).
    """

    def __init__(self, ontology: extension_datasets.Ontology, time_horizon: TimeHorizon):
        self.time_horizon: TimeHorizon = time_horizon
        icd9_codes: List[str] = ['E850.0', 'E850.1', 'E850.2', '965.00', '965.01', '965.02', '965.09']
        icd10_codes: List[str] = ['T40.0', 'T40.1', 'T40.2', 'T40.3', 'T40.4']
        # Expand every seed code to its full descendant set in the ontology.
        self.overdose_codes: Set[str] = set()
        for prefix, seed_codes in (('ICD9CM/', icd9_codes), ('ICD10CM/', icd10_codes)):
            for code in seed_codes:
                self.overdose_codes |= _get_all_children(ontology, prefix + code)
        self.opioid_codes = _get_all_children(ontology, 'ATC/N02A')

    def get_outcome_times(self, patient: Patient) -> List[datetime.datetime]:
        """Start times of all overdose diagnosis events for this patient."""
        return [event.start for event in patient.events if event.code in self.overdose_codes]

    def get_prediction_times(self, patient: Patient) -> List[datetime.datetime]:
        """Start times of all opioid prescription events for this patient."""
        return [event.start for event in patient.events if event.code in self.opioid_codes]

    def get_time_horizon(self) -> TimeHorizon:
        return self.time_horizon

    def get_labeler_type(self) -> LabelType:
        return 'boolean'
def freeze_pos_embeddings(student, args):
    """Freeze the student model's positional-embedding weights.

    Supports RoBERTa and GPT-2 student architectures; any other
    ``args.student_type`` is a no-op, matching the original behaviour.
    """
    if args.student_type == 'roberta':
        position_weight = student.roberta.embeddings.position_embeddings.weight
    elif args.student_type == 'gpt2':
        position_weight = student.transformer.wpe.weight
    else:
        return
    position_weight.requires_grad = False
def array2hexstring(array, dtype, pad_to_nbits, prefix='0x', reverse=False):
    """Pack a 1-D numeric array into a single hex string.

    Each element is encoded in ``dtype``'s bit representation; the bit
    fields are concatenated and the result is left-padded with zero bits
    up to ``pad_to_nbits``. BIPOLAR values (-1/+1) are remapped to BINARY
    (0/1); fixed-point values are rescaled to their integer representation
    first. Set ``reverse`` to flip the element order before packing.

    Raises an Exception when the packed bits exceed ``pad_to_nbits``, and
    an AssertionError when the input is not 1-D or a value does not fit
    the chosen dtype.
    """
    # A hex digit needs at least 4 bits.
    if (pad_to_nbits < 4):
        pad_to_nbits = 4
    if ((type(array) != np.ndarray) or (array.dtype != np.float32)):
        array = np.asarray(array, dtype=np.float32)
    assert (array.ndim == 1), 'The given array is not one-dimensional.'
    if (dtype == DataType['BIPOLAR']):
        # Remap {-1, +1} -> {0, 1} and continue as BINARY.
        array = ((array + 1) / 2)
        dtype = DataType['BINARY']
    if reverse:
        array = np.flip(array, (- 1))
    lineval = BitArray(length=0)
    bw = dtype.bitwidth()
    if dtype.is_fixed_point():
        # Divide out the scale factor so values become plain integers
        # of the same bit width.
        sf = dtype.scale_factor()
        array = (array / sf)
        dtype = DataType[('INT' + str(bw))]
    for val in array:
        assert dtype.allowed(val), 'This value is not permitted by chosen dtype.'
        if dtype.is_integer():
            if dtype.signed():
                lineval.append(BitArray(int=int(val), length=bw))
            else:
                lineval.append(BitArray(uint=int(val), length=bw))
        else:
            # Non-integer dtypes are packed as IEEE floats of width bw.
            lineval.append(BitArray(float=val, length=bw))
    if (pad_to_nbits >= lineval.len):
        # Prepend zero bits to reach the requested width.
        lineval.prepend(BitArray(length=(pad_to_nbits - lineval.len)))
    else:
        raise Exception('Number of bits is greater than pad_to_nbits')
    return (prefix + lineval.hex)
class SwinConfig(PretrainedConfig):
    """Configuration class storing the architecture of a Swin Transformer.

    All constructor arguments map one-to-one onto attributes of the same
    name (``patch_norm`` additionally keeps the historical misspelled
    ``path_norm`` alias -- see the note in ``__init__``).
    """

    model_type = 'swin'

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-05, **kwargs):
        """Store hyper-parameters; extra kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        # Fix: store fresh lists (the originals were mutable default-argument
        # lists stored by reference, so mutating config.depths would have
        # corrupted the shared default for every later instance).
        self.depths = list(depths)
        self.num_heads = list(num_heads)
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        # Fix: the original only set the misspelled ``path_norm`` attribute.
        # Expose the correctly-named ``patch_norm`` too, keeping
        # ``path_norm`` for backward compatibility with existing readers.
        self.patch_norm = patch_norm
        self.path_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
def program_to_strs(program, mode):
    """Serialize a CLEVR program into (function string, value-input string).

    Function names are canonicalized in place: equal_* collapses to
    'equal', query_*/same_* move their suffix into value_inputs, and
    filter_* becomes 'filter' (padding with '<NULL>' when no value is
    present). Returns None for mode='chain' when the program is not a
    chain. NOTE: mutates the dicts inside ``program``.
    """
    if mode == 'chain':
        if not programs.is_chain(program):
            return None
    elif mode == 'prefix':
        program = programs.list_to_prefix(program)
    elif mode == 'postfix':
        program = programs.list_to_postfix(program)
    equality_fns = {'equal_shape', 'equal_color', 'equal_size', 'equal_material'}
    for fn in program:
        fn_name = fn['function']
        if fn_name in equality_fns:
            fn['function'] = 'equal'
        elif 'query' in fn_name:
            fn['function'] = 'query'
            fn['value_inputs'].append(fn_name[6:])  # strip the 'query_' prefix
            assert len(fn['value_inputs']) == 1
        elif 'same' in fn_name:
            fn['function'] = 'same'
            fn['value_inputs'].append(fn_name[5:])  # strip the 'same_' prefix
        elif 'filter_' in fn_name:
            fn['function'] = 'filter'
            if not fn['value_inputs']:
                fn['value_inputs'].append('<NULL>')
            assert len(fn['value_inputs']) == 1
    func_str = ' '.join(fn['function'] for fn in program)
    input_str = ' '.join(fn['value_inputs'][0] for fn in program)
    return (func_str, input_str)
def build_dataset(dataset_list, is_train=True, local_rank=0):
    """Create the dataset(s) named in ``dataset_list``.

    Every name is validated, turned into a ``D.COCODataset`` with the
    annotation types enabled by the model config, and multiple datasets
    are wrapped in a ``D.ConcatDataset``.
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError('dataset_list should be a list of strings, got {}'.format(dataset_list))
    for ds_name in dataset_list:
        assert contains(ds_name), 'Unknown dataset name: {}'.format(ds_name)
        assert os.path.exists(get_im_dir(ds_name)), "Im dir '{}' not found".format(get_im_dir(ds_name))
        logging_rank('Creating: {}'.format(ds_name), local_rank=local_rank)
    transforms = build_transforms(is_train)
    datasets = []
    for ds_name in dataset_list:
        # Annotation types grow with the heads enabled in the model config.
        ann_types = ['bbox']
        if cfg.MODEL.MASK_ON:
            ann_types.append('segm')
        if cfg.MODEL.KEYPOINT_ON:
            ann_types.append('keypoints')
        if cfg.MODEL.PARSING_ON:
            ann_types.append('parsing')
        if cfg.MODEL.UV_ON:
            ann_types.append('uv')
        dataset_args = dict(
            root=get_im_dir(ds_name),
            ann_file=get_ann_fn(ds_name),
            remove_images_without_annotations=is_train,
            ann_types=tuple(ann_types),
            transforms=transforms,
        )
        datasets.append(D.COCODataset(**dataset_args))
    if len(datasets) > 1:
        return D.ConcatDataset(datasets)
    return datasets[0]
def advance_past_constituents(gold_sequence, cur_index):
    """Scan forward from ``cur_index`` for the CloseConstituent that closes
    the currently-open constituent.

    Open/Close transitions are tracked as a depth counter; the index of
    the transition that drops the depth below zero is returned, or None
    if the sequence ends first.
    """
    depth = 0
    for idx in range(cur_index, len(gold_sequence)):
        transition = gold_sequence[idx]
        if isinstance(transition, OpenConstituent):
            depth += 1
        elif isinstance(transition, CloseConstituent):
            depth -= 1
            if depth == -1:
                return idx
    return None
def test_option_option_axis1():
    """Concatenating two regular option-type arrays along axis=1 merges the
    inner lists element-wise; null entries combine into an empty list."""
    left = ak.to_regular(ak.from_json('[[0.0, 1.1], null, [2.2, 3.3]]'), axis=1)
    right = ak.to_regular(ak.from_json('[[4.4, 5.5, 6.6], null, [7.7, 8.8, 9.9]]'), axis=1)
    merged = ak.concatenate([left, right], axis=1)
    expected = [[0.0, 1.1, 4.4, 5.5, 6.6], [], [2.2, 3.3, 7.7, 8.8, 9.9]]
    assert merged.to_list() == expected
    # Concatenation along axis=1 produces variable-length (List) contents.
    assert merged.type == ArrayType(ListType(NumpyType('float64')), 3)
class TestCohereWindowService():
    """Tests for the Cohere window service, backed by a pre-seeded cache so
    no live API calls are made."""

    def setup_class(cls):
        # Build a temp dir containing a SQLite cache pre-populated with the
        # canned request/response pairs plus a dummy credentials file, then
        # construct the window service against it.
        cls.path: str = tempfile.mkdtemp()
        cache_path: str = os.path.join(cls.path, 'cache')
        ensure_directory_exists(cache_path)
        with SqliteDict(os.path.join(cache_path, 'cohere.sqlite')) as cache:
            for (request, response) in REQUESTS_TO_RESPONSES.items():
                cache[request] = response
            cache.commit()
        with open(os.path.join(cls.path, 'credentials.conf'), 'w') as f:
            f.write('cohereApiKey: secret')
        service: TokenizerService = get_tokenizer_service(cls.path)
        cls.window_service = WindowServiceFactory.get_window_service('cohere/xlarge-', service)
        cls.prompt: str = TEST_PROMPT
        cls.tokenized_prompt: List[str] = TOKENIZED_PROMPT

    def teardown_class(cls):
        # Remove the temp dir (cache + credentials) created in setup_class.
        shutil.rmtree(cls.path)

    def test_max_request_length(self):
        assert (self.window_service.max_request_length == 2048)

    def test_encode(self):
        assert (self.window_service.encode(self.prompt).token_values == self.tokenized_prompt)

    def test_decode(self):
        # encode/decode must round-trip the prompt exactly.
        assert (self.window_service.decode(self.window_service.encode(self.prompt).tokens) == self.prompt)

    def test_tokenize(self):
        assert (self.window_service.tokenize(self.prompt) == self.tokenized_prompt)

    def test_tokenize_and_count(self):
        # TEST_PROMPT tokenizes to 6 tokens (matches TOKENIZED_PROMPT).
        assert (self.window_service.get_num_tokens(self.prompt) == 6)

    def test_fits_within_context_window(self):
        # Exactly at the limit fits; one token over does not.
        assert self.window_service.fits_within_context_window(self.prompt, (self.window_service.max_request_length - 6))
        assert (not self.window_service.fits_within_context_window(self.prompt, ((self.window_service.max_request_length - 6) + 1)))

    def test_truncate_from_right(self):
        # Build a prompt well past the context window, then verify the
        # truncated version lands exactly on the limit.
        long_prompt: str = (self.prompt * 342)
        assert (not self.window_service.fits_within_context_window(long_prompt))
        truncated_long_prompt: str = self.window_service.truncate_from_right(long_prompt)
        assert (self.window_service.get_num_tokens(truncated_long_prompt) == self.window_service.max_request_length)
        assert self.window_service.fits_within_context_window(truncated_long_prompt)
class CategoricalBoW(dist.Multinomial):
    """Multinomial with a bag-of-words-friendly ``log_prob``.

    The normalization terms of the full multinomial log-probability are
    dropped; only the sum of count-weighted logits is returned.
    """

    def log_prob(self, value):
        """Return sum_k value_k * logit_k over the last dimension.

        Logits equal to -inf where the corresponding count is zero are
        zeroed first so that 0 * -inf contributes 0 instead of NaN.
        """
        if self._validate_args:
            self._validate_sample(value)
        (logits, value) = dist.util.broadcast_all(self.logits, value)
        # Clone before in-place masking so the distribution's own logits
        # are never mutated.
        logits = logits.clone(memory_format=torch.contiguous_format)
        logits[((value == 0) & (logits == (- math.inf)))] = 0
        log_powers = (logits * value).sum((- 1))
        return log_powers
def When(p, t, ctx=None):
    """Return a Z3 tactic applying ``t`` only when probe ``p`` evaluates true.

    ``p`` and ``t`` may be given as names/values coercible by ``_to_probe``
    and ``_to_tactic``.
    """
    probe = _to_probe(p, ctx)
    tactic = _to_tactic(t, ctx)
    context = tactic.ctx
    return Tactic(Z3_tactic_when(context.ref(), probe.probe, tactic.tactic), context)
def plot_model(model, to_file='model.png', show_shapes=False, show_layer_names=True, rankdir='TB'):
    """Render ``model`` as a graph image at ``to_file``.

    The output format is inferred from the file extension, defaulting to
    PNG when the path has none.
    """
    dot = model_to_dot(model, show_shapes, show_layer_names, rankdir)
    ext = os.path.splitext(to_file)[1]
    # Drop the leading dot; fall back to 'png' for extension-less paths.
    fmt = ext[1:] if ext else 'png'
    dot.write(to_file, format=fmt)
# NOTE(review): the bare parenthesized strings/tuples and leading-dot
# '.skip(...)' lines throughout this class look like pytest/mock decorators
# that lost their '@pytest.mark' / '@patch' prefixes (e.g.
# '@pytest.mark.skip(...)' and
# '@patch("helm...TokenizerService.tokenize", ...)').
# As written this region is not valid Python; confirm against upstream HELM.
('The requests and results cannot be unpicked after the modules moved')
class TestAI21WindowService():
    """Tests for the AI21 window service with mocked tokenizer responses."""

    def setup_method(self):
        # Dummy credentials/remote: the tokenize calls are mocked per-test.
        auth = Authentication(api_key='DUMMY_API_KEY')
        service = TokenizerService(RemoteService('DUMMY_URL'), auth)
        self.window_service = WindowServiceFactory.get_window_service('ai21/j1-jumbo', service)

    ('helm.benchmark.tokenizer.ai21_tokenizer.TokenizerService.tokenize', return_value=REQUEST_RESULT)
    .skip('TODO: update the pickle file with the response')
    def test_encode(self, mock_tokenize):
        assert (self.window_service.encode(TEST_PROMPT).tokens == TEST_TOKEN_REPRESENTATIONS)

    .skip('TODO: update the pickle file with the response')
    def test_decode(self):
        # decode must round-trip the prompt, including with trimmed input.
        assert (self.window_service.decode(TEST_TOKEN_REPRESENTATIONS, TEST_PROMPT) == TEST_PROMPT)
        assert (self.window_service.decode(TEST_TOKEN_REPRESENTATIONS, TEST_PROMPT)[:(- 1)] == TEST_PROMPT[:(- 1)])

    ('helm.benchmark.tokenizer.ai21_tokenizer.TokenizerService.tokenize', return_value=REQUEST_RESULT)
    .skip('TODO: update the pickle file with the response')
    def test_tokenize(self, mock_tokenize):
        assert (self.window_service.tokenize(TEST_PROMPT) == TEST_TOKENS)

    ('helm.benchmark.tokenizer.ai21_tokenizer.TokenizerService.tokenize', return_value=REQUEST_RESULT)
    .skip('TODO: update the pickle file with the response')
    def test_fits_within_context_window(self, mock_tokenize):
        # TEST_PROMPT is 36 tokens against a 2047-token window.
        assert self.window_service.fits_within_context_window(TEST_PROMPT, (2047 - 36))
        assert (not self.window_service.fits_within_context_window(TEST_PROMPT, ((2047 - 36) + 1)))

    ('helm.benchmark.tokenizer.ai21_tokenizer.TokenizerService.tokenize', side_effect=[LONG_REQUEST_RESULT, LONG_REQUEST_RESULT, TRUNCATED_REQUEST_RESULT, TRUNCATED_REQUEST_RESULT, TRUNCATED_REQUEST_RESULT])
    .skip('TODO: update the pickle file with the response')
    def test_truncate_from_right(self, mock_tokenize):
        # An over-long prompt must be cut down to exactly the window size.
        long_prompt: str = (TEST_PROMPT * 57)
        assert (not self.window_service.fits_within_context_window(long_prompt))
        truncated_long_prompt: str = self.window_service.truncate_from_right(long_prompt)
        assert (self.window_service.get_num_tokens(truncated_long_prompt) == 2047)
        assert self.window_service.fits_within_context_window(truncated_long_prompt)

    ('helm.benchmark.tokenizer.ai21_tokenizer.TokenizerService.tokenize', return_value=REQUEST_RESULT)
    .skip('TODO: update the pickle file with the response')
    def test_tokenize_and_count(self, mock_tokenize):
        assert (self.window_service.get_num_tokens(TEST_PROMPT) == 36)
# NOTE(review): the leading-dot '.parametrize(...)' line below looks like a
# '@pytest.mark.parametrize(...)' decorator that lost its '@pytest.mark'
# prefix; as written this is not valid Python -- confirm against upstream.
.parametrize('statement_type,value,new_value', [(stmt.IntPrimitiveStatement, 42, 23), (stmt.FloatPrimitiveStatement, 2.1, 1.2), (stmt.StringPrimitiveStatement, 'foo', 'bar'), (stmt.BytesPrimitiveStatement, b'foo', b'bar'), (stmt.BooleanPrimitiveStatement, True, False), (stmt.ComplexPrimitiveStatement, (4 + 1j), (1 + 4j)), (stmt.ClassPrimitiveStatement, 0, 1)])
def test_primitive_statement_set_value(statement_type, default_test_case, value, new_value):
    """Setting .value on any primitive statement type must store the new value."""
    statement = statement_type(default_test_case, value)
    statement.value = new_value
    assert (statement.value == new_value)
def main():
    """CLI entry point: load a TransferNet checkpoint and run validation or
    visualization on the validation split."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', default='./input')
    parser.add_argument('--ckpt', required=True)
    parser.add_argument('--mode', default='val', choices=['val', 'vis', 'test'])
    args = parser.parse_args()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    ent2id, rel2id, triples, train_loader, val_loader = load_data(args.input_dir, 16)
    model = TransferNet(args, ent2id, rel2id, triples)
    # Non-strict load: report (but tolerate) key mismatches.
    missing, unexpected = model.load_state_dict(torch.load(args.ckpt), strict=False)
    for label, keys in (('Missing', missing), ('Unexpected', unexpected)):
        if keys:
            print('{} keys: {}'.format(label, '; '.join(keys)))
    model = model.to(device)
    # These matrices are plain attributes, so .to(device) on the module
    # does not move them -- do it explicitly.
    for attr in ('Msubj', 'Mobj', 'Mrel'):
        setattr(model, attr, getattr(model, attr).to(device))
    if args.mode == 'vis':
        validate(args, model, val_loader, device, True)
    elif args.mode == 'val':
        validate(args, model, val_loader, device, False)
def test_MemoryArray_init():
    """MemoryArray must create exactly num_memories Memory instances."""
    timeline = Timeline()
    array = MemoryArray('ma', timeline, num_memories=10)
    assert len(array.memories) == 10
    # Entries must be exactly Memory (no subclass), hence type() equality.
    assert all(type(m) == Memory for m in array.memories)
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    """Build (or load from cache) a TensorDataset of features for a GLUE task.

    In distributed training only the first process per node creates the
    feature cache; the other processes wait at a barrier and then read it.

    Args:
        args: run configuration (data_dir, model_name_or_path,
            max_seq_length, model_type, local_rank, overwrite_cache, ...).
        task: GLUE task name, key into ``processors``/``output_modes``.
        tokenizer: tokenizer used to convert examples to features.
        evaluate: build the dev split instead of the train split.

    Returns:
        TensorDataset of (input_ids, attention_mask, token_type_ids, labels).
    """
    if ((args.local_rank not in [(- 1), 0]) and (not evaluate)):
        # Non-first processes block here until rank 0 has written the cache.
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    # Cache file name encodes split, model, sequence length and task.
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(('dev' if evaluate else 'train'), list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task)))
    if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info('Creating features from dataset file at %s', args.data_dir)
        label_list = processor.get_labels()
        if ((task in ['mnli', 'mnli-mm']) and (args.model_type in ['roberta', 'xlmroberta'])):
            # RoBERTa-style models use a swapped label order for MNLI.
            (label_list[1], label_list[2]) = (label_list[2], label_list[1])
        examples = (processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir))
        # XLNet pads on the left and uses segment id 4 for padding.
        features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool((args.model_type in ['xlnet'])), pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=(4 if (args.model_type in ['xlnet']) else 0))
        if (args.local_rank in [(- 1), 0]):
            logger.info('Saving features into cached file %s', cached_features_file)
            torch.save(features, cached_features_file)
    if ((args.local_rank == 0) and (not evaluate)):
        # Rank 0 reaches the barrier last, releasing the waiting processes.
        torch.distributed.barrier()
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if (output_mode == 'classification'):
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif (output_mode == 'regression'):
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def inference(image, background_enhance, face_upsample, upscale, codeformer_fidelity):
    """Run CodeFormer face restoration on an image file.

    Args:
        image: path to the input image.
        background_enhance: if truthy, enhance the background with the
            module-level ``upsampler``.
        face_upsample: if truthy, additionally upsample restored faces.
        upscale: requested upscale factor; clamped to at most 4 and
            reduced further for large inputs.
        codeformer_fidelity: CodeFormer ``w`` (quality/fidelity trade-off).

    Returns:
        (restored RGB image, saved PNG path), or (None, None) on failure.
    """
    try:
        has_aligned = False  # input is not a pre-aligned 512x512 face crop
        only_center_face = False
        draw_box = False
        detection_model = 'retinaface_resnet50'
        print('Inp:', image, background_enhance, face_upsample, upscale, codeformer_fidelity)
        img = cv2.imread(str(image), cv2.IMREAD_COLOR)
        print('\timage size:', img.shape)
        upscale = int(upscale)
        # Progressively cap the upscale factor for large images (presumably
        # to bound memory use); very large inputs also disable enhancement.
        if (upscale > 4):
            upscale = 4
        if ((upscale > 2) and (max(img.shape[:2]) > 1000)):
            upscale = 2
        if (max(img.shape[:2]) > 1500):
            upscale = 1
            background_enhance = False
            face_upsample = False
        face_helper = FaceRestoreHelper(upscale, face_size=512, crop_ratio=(1, 1), det_model=detection_model, save_ext='png', use_parse=True, device=device)
        bg_upsampler = (upsampler if background_enhance else None)
        face_upsampler = (upsampler if face_upsample else None)
        if has_aligned:
            # Pre-aligned input: skip detection and treat the whole image
            # as a single face crop.
            img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
            face_helper.is_gray = is_gray(img, threshold=5)
            if face_helper.is_gray:
                print('\tgrayscale input: True')
            face_helper.cropped_faces = [img]
        else:
            face_helper.read_image(img)
            num_det_faces = face_helper.get_face_landmarks_5(only_center_face=only_center_face, resize=640, eye_dist_threshold=5)
            print(f' detect {num_det_faces} faces')
            face_helper.align_warp_face()
        for (idx, cropped_face) in enumerate(face_helper.cropped_faces):
            # Normalize each crop to [-1, 1] and run CodeFormer on it.
            cropped_face_t = img2tensor((cropped_face / 255.0), bgr2rgb=True, float32=True)
            normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
            cropped_face_t = cropped_face_t.unsqueeze(0).to(device)
            try:
                with torch.no_grad():
                    output = codeformer_net(cropped_face_t, w=codeformer_fidelity, adain=True)[0]
                restored_face = tensor2img(output, rgb2bgr=True, min_max=((- 1), 1))
                del output
                torch.cuda.empty_cache()
            except RuntimeError as error:
                # On failure (e.g. CUDA OOM) fall back to the unrestored crop.
                print(f'Failed inference for CodeFormer: {error}')
                restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=((- 1), 1))
            restored_face = restored_face.astype('uint8')
            face_helper.add_restored_face(restored_face)
        if (not has_aligned):
            # Paste the restored faces back onto the (optionally enhanced)
            # full image via the inverse alignment transform.
            if (bg_upsampler is not None):
                bg_img = bg_upsampler.enhance(img, outscale=upscale)[0]
            else:
                bg_img = None
            face_helper.get_inverse_affine(None)
            if (face_upsample and (face_upsampler is not None)):
                restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=draw_box, face_upsampler=face_upsampler)
            else:
                restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=draw_box)
        # NOTE(review): if has_aligned were ever True, restored_img would be
        # unbound here; it is hard-coded False above, so this path is safe.
        save_path = f'output/out.png'
        imwrite(restored_img, str(save_path))
        restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
        return (restored_img, save_path)
    except Exception as error:
        # NOTE(review): this broad catch silently turns any failure into
        # (None, None); consider logging a traceback at minimum.
        print('Global exception', error)
        return (None, None)
# NOTE(review): the bare call below looks like a decorator that lost its '@'
# and name prefix (likely '@test_utils.test(arch=..., default_fp=ti.f64)')
# -- confirm against the upstream taichi test suite.
_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
def test_ad_sum_local_atomic():
    """Autodiff over repeated local accumulation: p[i] = 1 + b[i] * a[i],
    so d p[i] / d a[i] should equal b[i]."""
    N = 10
    a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
    b = ti.ndarray(ti.i32, shape=N)
    p = ti.ndarray(ti.f32, shape=N, needs_grad=True)

    # NOTE(review): the '.grad(...)' call below implies this was decorated
    # (presumably with '@ti.kernel'); the decorator appears stripped -- confirm.
    def compute_sum(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarray()):
        # p[i] = 1 + a[i] added b[i] times (local accumulation).
        for i in range(N):
            ret = 1.0
            for j in range(b[i]):
                ret += a[i]
            p[i] = ret

    for i in range(N):
        a[i] = 3
        b[i] = i
    compute_sum(a, b, p)
    for i in range(N):
        # Forward check, and seed the output gradients for the backward pass.
        assert (p[i] == ((3 * b[i]) + 1))
        p.grad[i] = 1
    compute_sum.grad(a, b, p)
    for i in range(N):
        assert (a.grad[i] == b[i])
def _load_shared_obj(name):
paths = []
try:
paths += [ctu.find_library(name)]
except FileNotFoundError:
pass
try:
paths += [ctu.find_library(('lib' + name))]
except FileNotFoundError:
pass
dll = (ct.windll if (platform.system() == 'Windows') else ct.cdll)
for path in paths:
if path:
lib = dll.LoadLibrary(path)
return lib
raise RuntimeError((('No ' + name) + ' shared libraries found')) |
class EmitGemmGroupedInstance():
    """Emits C++ instantiation code for a CUTLASS grouped-GEMM kernel.

    ``emit`` fills ``gemm_template`` with an operation's tile/layout/dtype
    parameters; ``instance_template`` wraps the result in manifest
    registration code.
    """

    def __init__(self, operation_suffix=''):
        # Suffix appended to the generated struct name (e.g. to
        # disambiguate multiple instantiations of the same operation).
        self.operation_suffix = operation_suffix
        # Headers required by the emitted translation unit.
        self.includes = ['cutlass/cutlass.h', 'cutlass/numeric_types.h', 'cutlass/arch/arch.h', 'cutlass/arch/mma.h', 'cutlass/layout/matrix.h', 'cutlass/gemm/kernel/gemm_grouped.h', 'cutlass/gemm/kernel/default_gemm_grouped.h']
        # C++ template for the kernel type definition; ${...} placeholders
        # are substituted in emit().
        self.gemm_template = '\n// Gemm operator ${operation_name}\nusing ${operation_name}_base =\n  typename cutlass::gemm::kernel::DefaultGemmGrouped<\n    ${element_a}, ${layout_a}, ${transform_a}, ${align_a},\n    ${element_b}, ${layout_b}, ${transform_b}, ${align_b},\n    ${element_c}, ${layout_c},\n    ${element_accumulator},\n    ${opcode_class},\n    ${arch},\n    cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,\n    cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,\n    cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,\n    ${epilogue_functor},\n    ${swizzling_functor},\n    ${stages},\n    ${precompute_mode},\n    ${math_operation}\n>::GemmKernel;\n\n// Define named type\nstruct ${operation_name}${operation_suffix} :\n  public ${operation_name}_base { };\n'

    def instance_template(self):
        """Return the manifest-registration wrapper template."""
        return '\n${compile_guard_start}\n  manifest.append(new ${gemm_kind}<\n    cutlass::gemm::device::GemmGrouped<${operation_name}>\n  >("${operation_name}"));\n${compile_guard_end}\n'

    def emit(self, operation):
        """Render the kernel-definition C++ for ``operation``.

        Warp shape is derived by dividing the threadblock shape by the
        per-dimension warp count.
        """
        threadblock_shape = operation.tile_description.threadblock_shape
        warp_count = operation.tile_description.warp_count
        warp_shape = [(threadblock_shape[idx] // warp_count[idx]) for idx in range(3)]
        (instance_layout_A, instance_layout_B, instance_layout_C) = (operation.A.layout, operation.B.layout, operation.C.layout)
        epilogue_functor = operation.epilogue_functor.emit()
        # Map every template placeholder to its C++ spelling via the
        # project's tag dictionaries.
        values = {'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[instance_layout_A], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': ('cutlass::arch::Sm%d' % operation.arch), 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_functor': epilogue_functor, 'swizzling_functor': operation.swizzling_functor.tag(), 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'precompute_mode': SchedulerModeTag[operation.precompute_mode], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation]}
        return SubstituteTemplate(self.gemm_template, values)
class DistilBertTokenizerFast(BertTokenizerFast):
    """Fast (Rust-backed) DistilBERT tokenizer.

    Adds no behavior of its own over ``BertTokenizerFast``; it only swaps in
    DistilBERT-specific pretrained-resource lookup tables below.
    """
    # Names of the vocabulary files expected on disk.
    vocab_files_names = VOCAB_FILES_NAMES
    # Pretrained model id -> remote vocabulary file URLs.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Pretrained model id -> maximum input length (positional embedding size).
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Pretrained model id -> default tokenizer construction kwargs.
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
def main():
    """CLI entry point: export a Matcha-TTS checkpoint to ONNX.

    Optionally embeds a vocoder in the exported graph (both ``--vocoder-name``
    and ``--vocoder-checkpoint-path`` are then required), and marks batch/time
    dimensions as dynamic so the graph accepts variable-sized inputs.
    """
    parser = argparse.ArgumentParser(description='Export Matcha-TTS to ONNX')
    parser.add_argument('checkpoint_path', type=str, help='Path to the model checkpoint')
    parser.add_argument('output', type=str, help='Path to output `.onnx` file')
    parser.add_argument('--n-timesteps', type=int, default=5, help='Number of steps to use for reverse diffusion in decoder (default 5)')
    parser.add_argument('--vocoder-name', type=str, choices=list(VOCODER_URLS.keys()), default=None, help='Name of the vocoder to embed in the ONNX graph')
    parser.add_argument('--vocoder-checkpoint-path', type=str, default=None, help='Vocoder checkpoint to embed in the ONNX graph for an `e2e` like experience')
    # Fixed: help text previously read '(default 15' with no closing parenthesis.
    parser.add_argument('--opset', type=int, default=DEFAULT_OPSET, help='ONNX opset version to use (default 15)')
    args = parser.parse_args()
    print(f'[] Loading Matcha checkpoint from {args.checkpoint_path}')
    print(f'Setting n_timesteps to {args.n_timesteps}')
    checkpoint_path = Path(args.checkpoint_path)
    matcha = load_matcha(checkpoint_path.stem, checkpoint_path, 'cpu')
    # Embedding a vocoder requires both its name and its checkpoint path.
    if (args.vocoder_name or args.vocoder_checkpoint_path):
        assert (args.vocoder_name and args.vocoder_checkpoint_path), 'Both vocoder_name and vocoder-checkpoint are required when embedding the vocoder in the ONNX graph.'
        (vocoder, _) = load_vocoder(args.vocoder_name, args.vocoder_checkpoint_path, 'cpu')
    else:
        vocoder = None
    is_multi_speaker = (matcha.n_spks > 1)
    (dummy_input, input_names) = get_inputs(is_multi_speaker)
    (model, output_names) = get_exportable_module(matcha, vocoder, args.n_timesteps)
    # Batch and time axes are dynamic in the exported graph.
    dynamic_axes = {'x': {0: 'batch_size', 1: 'time'}, 'x_lengths': {0: 'batch_size'}}
    if (vocoder is None):
        # Without a vocoder the graph outputs mel spectrograms.
        dynamic_axes.update({'mel': {0: 'batch_size', 2: 'time'}, 'mel_lengths': {0: 'batch_size'}})
    else:
        print('Embedding the vocoder in the ONNX graph')
        dynamic_axes.update({'wav': {0: 'batch_size', 1: 'time'}, 'wav_lengths': {0: 'batch_size'}})
    if is_multi_speaker:
        dynamic_axes['spks'] = {0: 'batch_size'}
    Path(args.output).parent.mkdir(parents=True, exist_ok=True)
    model.to_onnx(args.output, dummy_input, input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes, opset_version=args.opset, export_params=True, do_constant_folding=True)
    print(f'[] ONNX model exported to {args.output}')
def test_invalid_pdf_pars():
    """logpdf must raise for a parameter vector with an extra entry."""
    bindata = {'data': [55.0], 'bkg': [50.0], 'bkgerr': [7.0], 'sig': [10.0]}
    source = {'binning': [2, -0.5, 1.5], 'bindata': bindata}
    pdf = pyhf.simplemodels.uncorrelated_background(bindata['sig'], bindata['bkg'], bindata['bkgerr'])
    # One parameter too many relative to the model's expectation.
    pars = pdf.config.suggested_init() + [1.0]
    data = source['bindata']['data'] + pdf.config.auxdata
    with pytest.raises(pyhf.exceptions.InvalidPdfParameters):
        pdf.logpdf(pars, data)
def eval_step(apply_fn, state, batch):
    """Run a single evaluation forward pass and return its metrics."""
    predictions = apply_fn(state.variables, batch['image'], training=False, mutable=False)
    return compute_metrics(predictions, batch['label'])
def is_traceable(data):
    """Return True if ``data`` is of a type the tracer can serialize."""
    traceable_types = (
        type(None), type(Ellipsis),
        list, tuple, dict, set,
        int, bool, str, float, slice,
        torch.device, torch.Size, torch.Tensor, torch.dtype, torch.memory_format,
    )
    return isinstance(data, traceable_types)
class irreducible_character_basis(generic_character):
    """Symmetric-function basis of irreducible symmetric group characters."""
    def __init__(self, Sym, pfix):
        SFA_generic.__init__(self, Sym, basis_name='irreducible symmetric group character', prefix=pfix, graded=False)
        self._other = Sym.Schur()
        self._p = Sym.powersum()
        # Register coercions between this basis and the power-sum/Schur bases.
        self.module_morphism(self._self_to_power_on_basis, codomain=Sym.powersum()).register_as_coercion()
        self.register_coercion(SetMorphism(Hom(self._other, self), self._other_to_self))
    def _b_power_k_r(self, k, r):
        # Power-sum expansion of the element indexed by the pair (k, r):
        # alternating binomial sum over falling products of _b_power_k(k).
        p = self._p
        return p.sum(((((((- 1) ** (r - j)) * (k ** j)) * binomial(r, j)) * p.prod(((self._b_power_k(k) - (i * p.one())) for i in range(j)))) for j in range((r + 1))))
    def _b_power_gamma(self, gamma):
        # Product of _b_power_k_r over (part size, multiplicity) pairs of gamma.
        return self._p.prod((self._b_power_k_r(Integer(k), Integer(r)) for (k, r) in gamma.to_exp_dict().items()))
    def _self_to_power_on_basis(self, lam):
        # Image of the basis element indexed by lam in the power-sum basis,
        # going through the Schur expansion of lam.
        return self._p.sum(((c * self._b_power_gamma(ga)) for (ga, c) in self._p(self._other(lam))))
    # NOTE(review): the bare `_method` below looks like a truncated decorator
    # (probably `@cached_method`) lost in extraction — confirm against the
    # original source; as written it raises NameError at class creation.
    _method
    def _self_to_other_on_basis(self, lam):
        # Convert to the Schur basis via the power-sum basis.
        return self._other(self._self_to_power_on_basis(lam))
def optimize(onnx_model_path: Path) -> Path:
    """Run onnxruntime graph optimizations on a model and return the new path.

    Creating an InferenceSession with ``optimized_model_filepath`` set makes
    onnxruntime write the optimized graph to disk as a side effect.
    """
    from onnxruntime import InferenceSession, SessionOptions
    opt_model_path = generate_identified_filename(onnx_model_path, '-optimized')
    options = SessionOptions()
    options.optimized_model_filepath = opt_model_path.as_posix()
    # The session is only created for its optimization side effect.
    InferenceSession(onnx_model_path.as_posix(), options)
    print(f'Optimized model has been written at {opt_model_path}: ')
    print('/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\')
    return opt_model_path
class Ego4DDataModule(BaseDataModule):
    """Data module that wires the Ego4D dataset into the base pipeline."""
    def __init__(self, *args, **kwargs):
        super(Ego4DDataModule, self).__init__(*args, **kwargs)
    def dataset_cls(self):
        # Dataset class used for regular sampling.
        return Ego4DDataset
    def dataset_cls_no_false(self):
        # The same dataset class handles the no-false-sample variant.
        return Ego4DDataset
    def dataset_name(self):
        return 'ego4d'
class Generator(BaseGenerator):
    """Concrete generator that builds its graph at construction time."""
    def __init__(self, config, mode, X=None):
        super(Generator, self).__init__(config, mode)
        self.build_generator(X=X)
    def generate_random_X(self, shape):
        # Uniform samples shifted into the half-open interval [1.0, 2.0).
        return np.random.rand(*shape) + 1.0
class KitchenTopLeftBurnerV0(KitchenBase):
    """Kitchen environment whose task is the top-left burner."""
    TASK_ELEMENTS = ['top left burner']
    def __init__(self, delta=0, **kwargs):
        super(KitchenTopLeftBurnerV0, self).__init__(**kwargs)
        self.step_to_primitive_name = {0: 'lift', 1: 'angled_x_y_grasp', 2: 'rotate_about_y_axis', 3: 'no_op', 4: 'no_op'}
        # Base __init__ sets use_combined_action_space / control_mode / fixed_schema.
        if (not self.use_combined_action_space) and self.control_mode == 'primitives':
            # Nominal primitive arguments; delta widens the box symmetrically.
            nominal = [0, 0.5, 1, 0.0, 0.0, 0.0, (-np.pi) / 4, 0.6, 0.0, 0, 0, 0, 0.0, 0.0]
            low = np.array(nominal) - delta
            high = np.array(nominal) + delta
            if not self.fixed_schema:
                # Prepend one [0, 1] slot per primitive for primitive selection.
                low = np.concatenate((np.zeros(self.num_primitives), low))
                high = np.concatenate((np.ones(self.num_primitives), high))
            self.action_space = Box(low, high, dtype=np.float32)
def export_gephi():
    """Export each yearly canVote graph to GEXF with party-colored nodes.

    Bug fixed: the original chain of independent ``if`` statements ended with
    ``if Green: ... else: gray``, so every non-Green party's color was
    immediately overwritten with gray.  A lookup table with a gray default
    gives each party its intended color.
    """
    party_colors = {
        'Conservative': {'r': 49, 'g': 130, 'b': 189, 'a': 0},
        'Progressive Conservative': {'r': 49, 'g': 130, 'b': 189, 'a': 0},
        'Reform': {'r': 49, 'g': 130, 'b': 189, 'a': 0},
        'Canadian Alliance': {'r': 49, 'g': 130, 'b': 189, 'a': 0},
        'Liberal': {'r': 227, 'g': 74, 'b': 51, 'a': 0},
        'Bloc': {'r': 136, 'g': 86, 'b': 167, 'a': 0},
        'NDP': {'r': 49, 'g': 163, 'b': 84, 'a': 0},
        'Independent': {'r': 99, 'g': 99, 'b': 99, 'a': 0},
        'Green': {'r': 61, 'g': 155, 'b': 53, 'a': 0},
    }
    default_color = {'r': 99, 'g': 99, 'b': 99, 'a': 0}
    G_times = canVote_loader.load_canVote_temporarl_edgelist('datasets/canVote_processed/canVote_edgelist.txt')
    MP_dict = load_mp()
    labels = list(range(2006, 2020, 1))
    print(len(MP_dict))
    for i in range(len(G_times)):
        G = G_times[i]
        for node in G.nodes:
            if node in MP_dict:
                parties = MP_dict[node]['party']
                # Most recent party when available.
                # NOTE(review): the empty-list fallback indexes parties[0] and
                # would raise IndexError — preserved from the original; confirm
                # MP_dict entries always have at least one party.
                if len(parties) > 0:
                    node_party = parties[-1]
                else:
                    node_party = parties[0]
                # Copy so per-node viz dicts are independent objects.
                color = dict(party_colors.get(node_party, default_color))
                G.nodes[node]['viz'] = {'color': color}
        nx.write_gexf(G, 'gephi_new/' + str(labels[i]) + '.gexf', version='1.2draft')
class dataloader_val(Dataset):
    """Validation dataset built from a text file of `<image> <class>` lines.

    Each line of ``valtxtfile`` names an image (relative to ``ImagePth``) and a
    class key looked up in the module-level ``classes`` mapping.
    Changes: removed the unused ``imgname``/``classId`` locals and hoisted the
    repeated ``line.split()`` call.
    """
    def __init__(self, ImagePth, valtxtfile, transform=None):
        self.ImagePth = ImagePth
        self.valtxtfile = valtxtfile
        self.transform = transform
        imagelist = []
        labelList = []
        with open(self.valtxtfile) as f:
            for line in f:
                fields = line.split()
                imagelist.append(self.ImagePth + '/' + fields[0])
                labelList.append(classes[fields[1]])
        self.files = imagelist
        self.labels = labelList
    def __getitem__(self, index):
        """Return the (possibly transformed) image and its label."""
        image = Image.open(self.files[index])
        img_tmp = numpy.array(image)
        if (img_tmp.ndim == 2):
            # Grayscale image: replicate the single channel into 3 channels.
            img_tmp = img_tmp.reshape((img_tmp.shape[0], img_tmp.shape[1], 1))
            img_tmp = numpy.concatenate([img_tmp, img_tmp, img_tmp], axis=2)
            image = Image.fromarray(img_tmp)
        if self.transform:
            image = self.transform(image)
        label = self.labels[index]
        return (image, label)
    def __len__(self):
        return len(self.files)
class NefPartition(SageObject, Hashable):
    """Nef-partition of the vertices of a reflexive lattice polytope.

    ``data`` maps each vertex index of ``Delta_polar`` to the index of the
    part containing it; the number of parts is ``max(data) + 1``.  With
    ``check=True`` both the reflexivity of ``Delta_polar`` and of the
    associated ``nabla`` polytope are verified.
    """
    def __init__(self, data, Delta_polar, check=True):
        if (check and (not Delta_polar.is_reflexive())):
            # NOTE: 'ony' typo preserved — runtime string, not a comment.
            raise ValueError('nef-partitions can be constructed for reflexive polytopes ony!')
        self._vertex_to_part = tuple((int(el) for el in data))
        self._nparts = (max(self._vertex_to_part) + 1)
        self._Delta_polar = Delta_polar
        if (check and (not self.nabla().is_reflexive())):
            raise ValueError(('%s do not form a nef-partition!' % str(data)))
    def __eq__(self, other):
        # Equal iff same underlying polytope and same vertex-to-part map.
        return (is_NefPartition(other) and (self._Delta_polar == other._Delta_polar) and (self._vertex_to_part == other._vertex_to_part))
    def __hash__(self):
        # Cached lazily; consistent with __eq__ above.
        try:
            return self._hash
        except AttributeError:
            self._hash = (hash(self._vertex_to_part) + hash(self._Delta_polar))
            return self._hash
    def __ne__(self, other):
        return (not (self == other))
    def _latex_(self):
        """LaTeX representation listing the parts as a disjoint union."""
        result = '\\text{Nef-partition } '
        for (i, part) in enumerate(self.parts()):
            if (i != 0):
                result += ' \\sqcup '
            result += (('\\{' + ', '.join((('%d' % v) for v in part))) + '\\}')
        try:
            # Flags may be set externally; absence is the normal case.
            if self._is_product:
                result += ' \\text{ (direct product)}'
            if self._is_projection:
                result += ' \\text{ (projection)}'
        except AttributeError:
            pass
        return result
    def _repr_(self):
        """Plain-text representation mirroring ``_latex_``."""
        result = 'Nef-partition '
        for (i, part) in enumerate(self.parts()):
            if (i != 0):
                result += ' '
            result += (('{' + ', '.join((('%d' % v) for v in part))) + '}')
        try:
            if self._is_product:
                result += ' (direct product)'
            if self._is_projection:
                result += ' (projection)'
        except AttributeError:
            pass
        return result
    def _sage_input_(self, sib, coerced):
        """Sage-input expression reconstructing this nef-partition."""
        vertex_to_part = [ZZ(i) for i in self._vertex_to_part]
        return sib.name('NefPartition')(vertex_to_part, sib(self.Delta_polar()))
    def Delta(self, i=None):
        """Return Delta (polar of Delta_polar), or its i-th piece via the dual."""
        if (i is None):
            return self._Delta_polar.polar()
        else:
            return self.dual().nabla(i)
    def Delta_polar(self):
        return self._Delta_polar
    def Deltas(self):
        # Delta pieces are the nabla pieces of the dual nef-partition.
        return self.dual().nablas()
    # NOTE(review): bare `_method` here (and below) looks like a truncated
    # decorator, probably `@cached_method` — confirm against upstream Sage;
    # as written it raises NameError at class creation.
    _method
    def dual(self):
        """Construct the dual nef-partition on the polar of the Minkowski sum."""
        nabla_polar = LatticePolytope(reduce(minkowski_sum, (nabla.vertices() for nabla in self.nablas())), lattice=self._Delta_polar.lattice()).polar()
        vertex_to_part = []
        nabla_polar_vertices = []
        for i in range(self._nparts):
            # A vertex of nabla_polar belongs to part i when it pairs to -1
            # with some vertex of nabla(i).
            A = (nabla_polar.vertices().matrix() * self.nabla(i).vertices())
            for (j, row) in enumerate(A):
                if (min(row) == (- 1)):
                    vertex_to_part.append(i)
                    nabla_polar_vertices.append(nabla_polar.vertex(j))
        nabla_polar = LatticePolytope(nabla_polar_vertices, compute_vertices=False)
        dual = NefPartition(vertex_to_part, nabla_polar, check=False)
        # Make dual-of-dual return self without recomputation.
        dual.dual.set_cache(self)
        return dual
    def hodge_numbers(self):
        """Hodge numbers of the associated complete intersection (computed lazily)."""
        try:
            return self._hodge_numbers
        except AttributeError:
            self._Delta_polar._compute_hodge_numbers()
            return self._hodge_numbers
    def nabla(self, i=None):
        """Return nabla (Delta of the dual), or the i-th nabla piece."""
        if (i is None):
            return self.dual().Delta()
        else:
            return self.nablas()[i]
    def nabla_polar(self):
        return self.nabla().polar()
    def nablas(self):
        """Tuple of polytopes: convex hull of each part's vertices plus the origin."""
        try:
            return self._nablas
        except AttributeError:
            Delta_polar = self._Delta_polar
            origin = [([0] * Delta_polar.dim())]
            self._nablas = tuple((LatticePolytope(([Delta_polar.vertex(j) for j in part] + origin), lattice=Delta_polar.lattice()) for part in self.parts()))
            return self._nablas
    def nparts(self):
        return self._nparts
    def part(self, i, all_points=False):
        """Return the i-th part (vertex indices, or point indices if all_points)."""
        return self.parts(all_points)[i]
    # NOTE(review): truncated decorator artifact — see note on `dual` above.
    _method
    def parts(self, all_points=False):
        """Return all parts; with all_points, partition all non-origin points."""
        parts = [[] for _ in range(self._nparts)]
        if all_points:
            for point in range(self._Delta_polar.npoints()):
                if (point != self._Delta_polar.origin()):
                    parts[self.part_of_point(point)].append(point)
        else:
            for (vertex, part) in enumerate(self._vertex_to_part):
                parts[part].append(vertex)
        return tuple((tuple(part) for part in parts))
    def part_of(self, i):
        """Return the part index of the i-th vertex."""
        return self._vertex_to_part[i]
    # NOTE(review): truncated decorator artifact — see note on `dual` above.
    _method
    def part_of_point(self, i):
        """Return the part index of the i-th point (vertices via part_of)."""
        if (i < self._Delta_polar.nvertices()):
            return self.part_of(i)
        if (i == self._Delta_polar.origin()):
            raise ValueError('the origin belongs to all parts!')
        # A non-vertex point belongs to the part whose nabla contains it.
        point = self._Delta_polar.point(i)
        for (part, nabla) in enumerate(self.nablas()):
            if (point in nabla):
                return part
# Fixed: the decorator line had been truncated to a bare `.parametrize(...)`
# (a syntax error); restored the full `@pytest.mark.parametrize` form.
@pytest.mark.parametrize('embedding_size,cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(8, 2, (32,), 2, 'vector'), (8, 1, (32,), 2, 'matrix')])
def test_DCN(embedding_size, cross_num, hidden_size, sparse_feature_num, cross_parameterization):
    """Smoke-test DCN construction and fitting for both cross parameterizations."""
    model_name = 'DCN'
    sample_size = SAMPLE_SIZE
    (x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
    model = DCN(linear_feature_columns=feature_columns, dnn_feature_columns=feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5, device=get_device())
    check_model(model, model_name, x, y)
class MultiProcessInitLogger():
    """Picklable callable that (re)initializes logging in worker processes.

    The log name is fixed at construction time as `<app_name>-<timestamp>` so
    every worker logs to the same destination.
    """
    def __init__(self, app_name):
        timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        self.log_name = '{}-{}'.format(app_name, timestamp)
    def __call__(self, *args):
        # Extra args (e.g. a worker index) are accepted and ignored.
        init_logger(self.log_name)
def cyclotomic_to_gamma(cyclo_up, cyclo_down):
    """Convert cyclotomic multiplicities (up minus down) to a gamma vector.

    Uses Moebius inversion over the divisors of each index; zero entries are
    dropped from the returned dict.
    """
    multiplicity = defaultdict(int)
    for d in cyclo_up:
        multiplicity[d] += 1
    for d in cyclo_down:
        multiplicity[d] -= 1
    gamma = defaultdict(int)
    for n, mult in multiplicity.items():
        for d in divisors(n):
            gamma[d] += moebius(n / d) * mult
    return {d: v for (d, v) in gamma.items() if v}
class Region(object):
    """Abstract LoRaWAN regional-parameters descriptor.

    Subclasses provide region-specific data-rate tables, RX1 offset tables,
    TX power encodings, channel plans and beacon properties.

    Fixed: ``beaconProps`` previously defaulted to ``BeaconProperties()``
    evaluated once at definition time, so every Region built with the default
    shared one instance (mutable-default-argument pitfall).  A ``None``
    sentinel now yields a fresh instance per call.
    """
    def __init__(self, drClass, rx1drClass, beaconProps=None):
        if (not issubclass(drClass, DataRate)):
            raise TypeError('Invalid data rate implementation')
        self._drClass = drClass
        if (not issubclass(rx1drClass, Rx1DrOffset)):
            raise TypeError('Invalid rx1 data rate offset implementation')
        self._rx1drClass = rx1drClass
        if beaconProps is None:
            beaconProps = BeaconProperties()
        if (not isinstance(beaconProps, BeaconProperties)):
            raise TypeError('Invalid beacon properties')
        self._beaconProps = beaconProps
    def binToRx1DrOffset(self, binData):
        """Decode a 3-bit RX1 DR offset field; None when out of range."""
        try:
            return self._rx1drClass((binData & 7))
        except ValueError:
            return None
    def rx1DrOffsetToBin(self, offset):
        return (offset.id & 7)
    def binToDataRate(self, binData):
        """Decode a 4-bit data-rate field; None when out of range."""
        try:
            return self._drClass((binData & 15))
        except ValueError:
            return None
    def dataRateToBin(self, dataRate):
        return (dataRate.id & 15)
    def binToTxPower(self, binData):
        # Region-specific; must be overridden.
        raise NotImplementedError()
    def txPowerToBin(self, txPower):
        raise NotImplementedError()
    def cfListSupported(self):
        # Default: region has no CFList support.
        return False
    def parseCFList(self, cfdata):
        return tuple()
    def rx2Channel(self):
        raise NotImplementedError()
    def defaultChannels(self):
        return tuple()
    def joinChannels(self):
        return tuple()
    def beaconProperties(self):
        return self._beaconProps
def test_gmm_wrong_num_modes_format_1():
    """learn_gmm must reject a non-integer ``n_modes`` argument."""
    descriptors = [np.zeros((5, 10)), np.zeros((4, 10))]
    with pytest.raises(FisherVectorException):
        learn_gmm(descriptors, n_modes='not_valid')
def count_permutation_trials(per_doc1, per_doc2, base_diff, n_trials):
    """Count, per metric, how many permutation trials beat the base difference.

    The comparison direction follows the sign of the base difference:
    <= for negative bases, >= otherwise.
    """
    metrics = tuple(base_diff.keys())
    bases = tuple(base_diff.values())
    comparators = [operator.le if base < 0 else operator.ge for base in bases]
    wins = [0] * len(metrics)
    for _ in range(n_trials):
        trial = _permutation_trial(per_doc1, per_doc2)
        for idx, name in enumerate(metrics):
            wins[idx] += comparators[idx](trial[name], bases[idx])
    return dict(zip(metrics, wins))
def view_model_param(net_params):
    """Build a DGNNet from ``net_params`` and return its total parameter count."""
    model = DGNNet(net_params)
    print('MODEL DETAILS:\n')
    total_param = 0
    for weights in model.parameters():
        total_param = total_param + np.prod(list(weights.data.size()))
    print('DGN Total parameters:', total_param)
    return total_param
def get_relations_by_type(data_dir):
    """Split dev-set relations into to-many and to-one relations.

    Reads all triples from ``raw.kb`` and ``train.triples`` (tab-separated
    ``e1 e2 r`` lines), computes for each dev relation the mean number of
    answers per subject, and classifies it as to-many when that mean exceeds
    1.5.  Returns ``(to_M_rels, to_1_rels)``.
    """
    with open(os.path.join(data_dir, 'raw.kb')) as f:
        triples = list(f.readlines())
    with open(os.path.join(data_dir, 'train.triples')) as f:
        triples += list(f.readlines())
    triples = list(set(triples))
    # query_answers[r][e1] = set of answers e2 for query (e1, r, ?).
    query_answers = dict()
    theta_1_to_M = 1.5
    for triple_str in triples:
        (e1, e2, r) = triple_str.strip().split('\t')
        query_answers.setdefault(r, dict()).setdefault(e1, set()).add(e2)
    to_M_rels = set()
    to_1_rels = set()
    dev_rels = set()
    with open(os.path.join(data_dir, 'dev.triples')) as f:
        for line in f:
            (e1, e2, r) = line.strip().split('\t')
            dev_rels.add(r)
    num_rels = len(dev_rels)
    print('{} relations in dev dataset in total'.format(num_rels))
    for r in dev_rels:
        # Mean answer-set size over all subjects of relation r.
        ratio = np.mean([len(x) for x in query_answers[r].values()])
        if (ratio > theta_1_to_M):
            to_M_rels.add(r)
        else:
            to_1_rels.add(r)
    num_to_M = (len(to_M_rels) + 0.0)
    num_to_1 = (len(to_1_rels) + 0.0)
    print('to-M: {}/{} ({})'.format(num_to_M, num_rels, (num_to_M / num_rels)))
    print('to-1: {}/{} ({})'.format(num_to_1, num_rels, (num_to_1 / num_rels)))
    return (to_M_rels, to_1_rels)
def main(argv=None):
    """Entry point: build the selected GAN variant and train or visualize it."""
    gen_dim = FLAGS.gen_dimension
    base_width = 64 * gen_dim
    generator_dims = [base_width, base_width // 2, base_width // 4, base_width // 8, 3]
    discriminator_dims = [3, 64, 64 * 2, 64 * 4, 64 * 8, 1]
    crop_image_size, resized_image_size = map(int, FLAGS.image_size.split(','))
    if FLAGS.model == 0:
        model = GAN(FLAGS.z_dim, crop_image_size, resized_image_size, FLAGS.batch_size, FLAGS.data_dir)
    elif FLAGS.model == 1:
        model = WasserstienGAN(FLAGS.z_dim, crop_image_size, resized_image_size, FLAGS.batch_size, FLAGS.data_dir, clip_values=(-0.01, 0.01), critic_iterations=5)
    else:
        raise ValueError('Unknown model identifier - FLAGS.model=%d' % FLAGS.model)
    model.create_network(generator_dims, discriminator_dims, FLAGS.optimizer, FLAGS.learning_rate, FLAGS.optimizer_param)
    model.initialize_network(FLAGS.logs_dir)
    if FLAGS.mode == 'train':
        model.train_model(int(1 + FLAGS.iterations))
    elif FLAGS.mode == 'visualize':
        model.visualize_model()
def test_get_tasks_for_collaborator(assigner):
    """Collaborator 'one' receives the default three-task schedule."""
    tasks = assigner.get_tasks_for_collaborator('one', 2)
    assert tasks == default_tasks
    assert len(tasks) == 3
    first, second = tasks[0], tasks[1]
    assert isinstance(first, TrainTask)
    assert isinstance(second, ValidateTask)
class FrameSecondMeter(object):
    """Accumulates processed-frame counts and computes frames per second.

    The clock starts at construction; call add_frame_n() as frames are
    processed, then end() to freeze the stop time and compute ``fps``.
    """
    def __init__(self):
        self.st = time.time()
        self.fps = None
        self.ed = None
        self.frame_n = 0
    def add_frame_n(self, frame_n):
        self.frame_n = self.frame_n + frame_n
    def end(self):
        self.ed = time.time()
        elapsed = self.ed - self.st
        self.fps = self.frame_n / elapsed
# NOTE(review): the bare `_method` below looks like a truncated decorator lost
# in extraction (possibly `@cached_method` or similar) — confirm against the
# original source; as written it raises NameError at import time.
_method
def ith_to_zero_rotation_matrix(v, i, ring=None):
    """Return a sparse rotation matrix sending coordinate ``i`` of ``v`` to zero.

    The rotation acts in the plane of coordinates ``i`` and ``i - 1`` (indices
    taken modulo ``len(v)``); all other coordinates are fixed.  If ``v[i]`` is
    already zero the identity is returned.  An optional ``ring`` coerces ``v``
    (and the result) into that base ring.
    """
    if (ring is not None):
        v = vector(ring, v)
    dim = len(v)
    i = (i % dim)
    j = ((i - 1) % dim)
    (a, b) = (v[j], v[i])
    if (b == 0):
        # Nothing to rotate away.
        return identity_matrix(dim, sparse=True)
    from sage.misc.functional import sqrt
    norm = sqrt(((a * a) + (b * b)))
    # Cosine/sine of the rotation angle in the (j, i)-plane.
    aa = (a / norm)
    bb = (b / norm)
    entries = {(k, k): 1 for k in range(dim)}
    entries.update({(j, j): aa, (j, i): bb, (i, j): (- bb), (i, i): aa})
    return matrix(entries, nrows=dim, ring=ring)
def world_gen(coordinate=None, master=None, config_file=None):
    """Expand one coordinate of the parameter grid into a 'world' dict.

    Python 2 code (uses ``dict.iteritems``).  For each axis in ``master``,
    either merges that axis's per-coordinate 'sequence' entries into ``world``
    or evaluates its 'macros'; an axis may additionally dump per-coordinate
    'yamls' parameters to ``config_file``.  Returns the assembled mapping.

    Fixed: mutable default arguments (shared ``{}``) replaced with ``None``
    sentinels, and the config file handle is now closed via a context manager
    (it was previously leaked).
    """
    if coordinate is None:
        coordinate = {}
    if master is None:
        master = {}
    world = {}
    for (axis_name, axis) in master.iteritems():
        if ((axis['sequence'] is not None) and (coordinate[axis_name] in axis['sequence'])):
            # Extend entries already in world, then add the new ones.
            for i in world:
                if (i in axis['sequence'][coordinate[axis_name]]):
                    world[i] += axis['sequence'][coordinate[axis_name]]
            for i in axis['sequence'][coordinate[axis_name]]:
                if (i not in world):
                    if (axis['sequence'][coordinate[axis_name]][i] == [None]):
                        world[i] = [{}]
                    else:
                        world[i] = axis['sequence'][coordinate[axis_name]][i]
        else:
            for (macro_name, macro_calls) in axis['macros'].iteritems():
                if (macro_name not in world):
                    world[macro_name] = []
                for params in macro_calls:
                    if (params is not None):
                        evaluated_params = {}
                        for (param, value) in params.iteritems():
                            if ((value[0] == "'") and (value[(- 1)] == "'")):
                                # Quoted literal: strip the surrounding quotes.
                                evaluated_params[param] = value[1:(- 1)]
                            else:
                                # WARNING: eval() on config-supplied expressions —
                                # only acceptable for fully trusted config files.
                                f = str(value)
                                evaluated_params[param] = (lambda n: eval(f))(coordinate[axis_name])
                        world[macro_name].append(evaluated_params)
                    else:
                        world[macro_name].append({})
        if (('yamls' in axis) and (axis['yamls'] is not None) and (coordinate[axis_name] in axis['yamls'])):
            params = axis['yamls'][coordinate[axis_name]]
            # Previously used py2 file() and never closed the handle.
            with open(config_file, 'w') as config_stream:
                yaml.dump(params, config_stream)
            print(('Generated %s' % config_file))
    return world
class MultiMetricStats():
    """Track several named metrics at once over a stream of scored batches.

    ``metric`` is a callable returning a dict of scores (normalized through
    ``_dictify``); one ``MetricStats`` accumulator is kept per score key.
    """
    def __init__(self, metric, n_jobs=1, batch_eval=False):
        # batch_eval=True: metric consumes whole batches at once; otherwise it
        # is evaluated per item, optionally in parallel across n_jobs workers.
        self.metric = _dictify(metric)
        self.n_jobs = n_jobs
        self.batch_eval = batch_eval
        self.ids = []
        self.metrics = {}
    def append(self, ids, *args, **kwargs):
        """Score one batch and fold the results into the per-key accumulators."""
        self.ids.extend(ids)
        if self.batch_eval:
            scores = self.eval_simple(*args, **kwargs)
        else:
            if (('predict' not in kwargs) or ('target' not in kwargs)):
                raise ValueError("Must pass 'predict' and 'target' as kwargs if batch_eval=False")
            if (self.n_jobs == 1):
                scores_raw = sequence_evaluation(self.metric, **kwargs)
            else:
                scores_raw = multiprocess_evaluation(metric=self.metric, n_jobs=self.n_jobs, **kwargs)
            # Transpose list-of-dicts into a dict of per-key score tensors.
            keys = scores_raw[0].keys()
            scores = {key: torch.tensor([score[key] for score in scores_raw]) for key in keys}
        for (key, metric_scores) in scores.items():
            if (key not in self.metrics):
                # Scores are precomputed, so the inner stat uses an identity metric.
                self.metrics[key] = MetricStats((lambda x: x), batch_eval=True)
            self.metrics[key].append(ids, metric_scores)
    def eval_simple(self, *args, **kwargs):
        """Evaluate the metric on a whole batch, detaching each score tensor."""
        scores = self.metric(*args, **kwargs)
        return {key: score.detach() for (key, score) in scores.items()}
    def summarize(self, field=None, flat=False):
        """Summarize every tracked metric.

        With flat=True the nested result is flattened to '<key>_<field>'
        entries.  # NOTE(review): flat=True appears meaningful only when
        field is None (each summary must itself be a dict) — confirm.
        """
        result = {key: metric.summarize(field) for (key, metric) in self.metrics.items()}
        if flat:
            result = {f'{key}_{field}': value for (key, fields) in result.items() for (field, value) in fields.items()}
        return result
def train_one_epoch(context, args, model, optimizer, scheduler, loader, loss, temp, decoder=None, transform=None):
    """Train for one epoch (at most ``temp`` batches) and return the mean loss.

    Supports gradient accumulation over ``args.accum`` steps, optional output
    decoding via ``decoder`` and post-processing via ``transform``, gradient
    clipping (``args.clip``), and per-iteration or per-epoch LR scheduling
    (``args.lr_sched_iter``).  ``context`` is unused in this body.
    """
    model.train()
    train_loss = 0
    optimizer.zero_grad()
    for (i, data) in enumerate(loader):
        if (transform is not None):
            # With a transform the loader yields an extra tensor z for it.
            (x, y, z) = data
            z = z.to(args.device)
        else:
            (x, y) = data
        (x, y) = (x.to(args.device), y.to(args.device))
        out = model(x)
        if isinstance(out, dict):
            # Some models (e.g. segmentation heads) return a dict of outputs.
            out = out['out']
        if (decoder is not None):
            # Decode both prediction and target, flattening per sample.
            out = decoder.decode(out).view(x.shape[0], (- 1))
            y = decoder.decode(y).view(x.shape[0], (- 1))
        if (transform is not None):
            out = transform(out, z)
            y = transform(y, z)
        if (args.dataset[:4] == 'DRUG'):
            # DRUG-* datasets: drop the singleton channel dim before the loss.
            out = out.squeeze(1)
        l = loss(out, y)
        l.backward()
        if (args.clip > 0):
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        if (((i + 1) % args.accum) == 0):
            # Optimizer step only every args.accum batches (accumulation).
            optimizer.step()
            optimizer.zero_grad()
            if args.lr_sched_iter:
                scheduler.step()
        train_loss += l.item()
        if (i >= (temp - 1)):
            # Cap the epoch at temp batches.
            break
    if (not args.lr_sched_iter):
        # Epoch-level scheduler step when not stepping per iteration.
        scheduler.step()
    return (train_loss / temp)
def collect_occluded_linemod_testlist(rootpath, outname):
    """Write the sorted list of RGB image paths under ``rootpath`` to ``outname``.

    Scans ``<rootpath>RGB-D/rgb_noseg/`` for .jpg/.png files and writes one
    absolute-ish path per line.  Fixed: the output file handle was previously
    opened and never closed; a context manager now guarantees flush/close.
    """
    path = (rootpath + 'RGB-D/rgb_noseg/')
    imgs = sorted(f for f in os.listdir(path) if f.endswith(('.jpg', '.png')))
    with open(outname, 'w') as allf:
        for i in imgs:
            allf.write(path + i + '\n')
def _validate_weights(w, dtype=np.double):
    """Validate a weight vector: must be a valid vector with no negatives."""
    weights = _validate_vector(w, dtype=dtype)
    if (weights < 0).any():
        raise ValueError('Input weights should be all non-negative')
    return weights
def load_imageid(folder):
    """Return the set of integer image ids parsed from jpg filenames in folder.

    The id is the final underscore-separated token of the file stem, e.g.
    'COCO_train2014_000000123456.jpg' -> 123456.
    """
    img_ids = set()
    for img in load_folder(folder, 'jpg'):
        stem = img.split('/')[-1].split('.')[0]
        img_ids.add(int(stem.split('_')[-1]))
    return img_ids
def hsl_to_hsv(color):
    """Convert an (h, s, l) triple to (h, s, v).

    Hue is in degrees and saturation/lightness are on a 0-100 scale; the
    returned saturation/value use the same 0-100 scale.
    """
    hue, sat, light = (float(c) for c in color)
    if light <= 50.0:
        chroma = sat * (light / 100.0)
    else:
        chroma = sat * (1.0 - (light / 100.0))
    value = light + chroma
    # Guard against division by zero for pure black.
    saturation = (200.0 * chroma / value) if value else 0.0
    return (hue, saturation, value)
class DataFrameTracedOps(DFIterDataPipe):
    """Datapipe that replays a traced expression over each upstream item."""
    def __init__(self, source_datapipe, output_var):
        self.source_datapipe = source_datapipe
        self.output_var = output_var
    def __iter__(self):
        # Evaluate the captured expression against every incoming record.
        for record in self.source_datapipe:
            yield self.output_var.calculate_me(record)
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        softplus_x = F.softplus(x)
        return x * torch.tanh(softplus_x)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.