code |
|---|
@test_utils.test(arch=get_host_arch_list())
def test_assign2_static():
a = ti.field(ti.f32, ())
b = ti.field(ti.f32, ())
@ti.kernel
def func():
(c, d) = ti.static(b, a)
(c[None], d[None]) = (2, 3)
func()
assert (a[None] == 3)
assert (b[None] == 2) |
class Messages(BaseMessages):
ChatCompleted = 'Congratulations, you successfully completed the task!'
ChatIncomplete = "Sorry, you weren't able to complete the task."
Redirect = 'Sorry, that chat did not meet our acceptance criteria.' |
def prepare_infer_only_dataloader(full_dataset_class_str, dictionary, im_dir, im_suffix, transforms, batch_size=1, indices=None, handle_transforms_within_dataset=False):
(*dataset_str_parts, dataset_class_str) = full_dataset_class_str.split('.')
dataset_class = getattr(importlib.import_module('.'.join(dataset_str_parts)), dataset_class_str)
dataset = dataset_class('dataset', dictionary, im_dir=im_dir, im_suffix=im_suffix, transforms=transforms, indices=indices)
return MultiDatasetLoader([dataset], transforms=(None if handle_transforms_within_dataset else [transforms]), shuffle=False, batch_size=batch_size, num_workers=0) |
def one_hot_bool(x, num_classes: int):
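# Returns an (N, num_classes) boolean matrix with True at each sample's class index (x has shape (N,)).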
onehot = torch.zeros(x.size(0), num_classes, device=x.device, dtype=torch.bool)
return onehot.scatter_(1, x.unsqueeze(1), 1) |
class LeanPreprocessedJumpToLabelInstruction(LeanPreprocessedCodeElement):
label_name: ScopedName
offset: int
pc_dest: int
condition: Optional[Expression]
def get_exprs(self) -> List[Expression]:
return ([self.condition] if (self.condition is not None) else []) |
def _parse_cmudict(file):
cmudict = {}
for line in file:
if (len(line) and (((line[0] >= 'A') and (line[0] <= 'Z')) or (line[0] == "'"))):
parts = line.split(' ')
word = parts[0]
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if (word in cmudict):
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict |
def cxy_wh_2_rect1(pos, sz):
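# Convert a centre/size box (cx, cy), (w, h) into a 1-indexed [x, y, w, h] rectangle (top-left corner plus size).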
return np.array([((pos[0] - (sz[0] / 2)) + 1), ((pos[1] - (sz[1] / 2)) + 1), sz[0], sz[1]]) |
def test_constructor_mutate_parameter_choose_existing(constructor_mock, default_test_case):
float0 = stmt.FloatPrimitiveStatement(default_test_case, 5.0)
float1 = stmt.FloatPrimitiveStatement(default_test_case, 5.0)
const = stmt.ConstructorStatement(default_test_case, constructor_mock, {'a': float0.ret_val})
default_test_case.add_statement(float0)
default_test_case.add_statement(float1)
default_test_case.add_statement(const)
with mock.patch('pynguin.utils.randomness.choice') as choice_mock:
choice_mock.side_effect = (lambda coll: coll[0])
with mock.patch('pynguin.testcase.testfactory.is_optional_parameter') as optional_mock:
optional_mock.return_value = False
assert const._mutate_parameter('a', MagicMock(parameters={'a': default_test_case.test_cluster.type_system.convert_type_hint(float)})) |
@pytest_utils.in_tempdir
def test_dory_query_by_hashval(location):
testdata = relative_file('data/dory-k31-hashval-queries.txt')
shutil.copyfile(testdata, 'dory-k31-hashval-queries.txt')
copy_dory_catlas()
args = '-k 31 dory_k21/bcalm.unitigs.db dory_k21_r1_mh.pickle'
assert (index_cdbg_by_minhash.main(args.split()) == 0)
args = '-k 31 dory_k21 dory_k21_r1 dory_k21_r1_mh.pickle dory-k31-hashval-queries.txt dory_k21_r1_hashval_k31 --contigs-db dory_k21/bcalm.unitigs.db'
assert (query_by_hashval.main(args.split()) == 0)
assert os.path.exists('dory_k21_r1_hashval_k31/hashval_results.csv') |
def test_integrate_line_coverage_instrumentation(simple_module):
tracer = ExecutionTracer()
function_callable = getattr(simple_module, 'multi_loop')
adapter = LineCoverageInstrumentation(tracer)
transformer = InstrumentationTransformer(tracer, [adapter])
function_callable.__code__ = transformer.instrument_module(function_callable.__code__)
assert tracer.get_subject_properties().existing_lines
assert ({0, 1, 2, 3, 4, 5, 6} == tracer.get_subject_properties().existing_lines.keys()) |
def test_batch_to_cuda(conll2003_demo, device):
if device.type.startswith('cpu'):
pytest.skip('test requires cuda, while current session runs on cpu')
torch.cuda.set_device(device)
config = ExtractorConfig('sequence_tagging', ohots=ConfigDict({f: OneHotConfig(field=f, emb_dim=20) for f in Token._basic_ohot_fields}), mhots=ConfigDict({f: MultiHotConfig(field=f, emb_dim=20) for f in Token._basic_mhot_fields}))
dataset = Dataset(conll2003_demo, config)
dataset.build_vocabs_and_dims()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True, collate_fn=dataset.collate, pin_memory=True)
for batch in dataloader:
break
assert batch.ohots['text'].is_pinned()
assert batch.ohots['en_pattern'].is_pinned()
assert batch.mhots['en_shape_features'].is_pinned()
assert batch.seq_lens.is_pinned()
assert batch.tags_objs[0].tag_ids.is_pinned()
assert batch.ohots['text'].device.type.startswith('cpu')
batch = batch.to(device)
assert batch.ohots['text'].device.type.startswith('cuda')
assert (not batch.ohots['text'].is_pinned()) |
class LPPool2d(_LPPoolNd):
kernel_size: _size_2_t
stride: _size_2_t
def forward(self, input: Tensor) -> Tensor:
return F.lp_pool2d(input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode) |
def register_Ns3VsaManager_methods(root_module, cls):
cls.add_constructor([param('ns3::VsaManager const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('RemoveAll', 'void', [])
cls.add_method('RemoveByChannel', 'void', [param('uint32_t', 'channelNumber')])
cls.add_method('RemoveByOrganizationIdentifier', 'void', [param('ns3::OrganizationIdentifier const &', 'oi')])
cls.add_method('SendVsa', 'void', [param('ns3::VsaInfo const &', 'vsaInfo')])
cls.add_method('SetWaveNetDevice', 'void', [param('ns3::Ptr< ns3::WaveNetDevice >', 'device')])
cls.add_method('SetWaveVsaCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Packet const >, ns3::Address const &, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'vsaCallback')])
cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
return |
def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False):
arch_args = []
for (stack_idx, block_strings) in enumerate(arch_def):
assert isinstance(block_strings, list)
stack_args = []
repeats = []
for block_str in block_strings:
assert isinstance(block_str, str)
(ba, rep) = _decode_block_str(block_str)
if ((ba.get('num_experts', 0) > 0) and (experts_multiplier > 1)):
ba['num_experts'] *= experts_multiplier
stack_args.append(ba)
repeats.append(rep)
if (fix_first_last and ((stack_idx == 0) or (stack_idx == (len(arch_def) - 1)))):
arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc))
else:
arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc))
return arch_args |
def apply_wrapper(wrapper, task_or_dataset=None):
if (task_or_dataset is None):
return wrapper
from torchmeta.utils.data import MetaDataset
if isinstance(task_or_dataset, Task):
return wrapper(task_or_dataset)
elif isinstance(task_or_dataset, MetaDataset):
if (task_or_dataset.dataset_transform is None):
dataset_transform = wrapper
else:
dataset_transform = Compose([task_or_dataset.dataset_transform, wrapper])
task_or_dataset.dataset_transform = dataset_transform
return task_or_dataset
else:
raise NotImplementedError() |
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
img_dir = cfg['img_dir']
ann_dir = cfg.get('ann_dir', None)
split = cfg.get('split', None)
separate_eval = cfg.pop('separate_eval', True)
num_img_dir = (len(img_dir) if isinstance(img_dir, (list, tuple)) else 1)
if (ann_dir is not None):
num_ann_dir = (len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1)
else:
num_ann_dir = 0
if (split is not None):
num_split = (len(split) if isinstance(split, (list, tuple)) else 1)
else:
num_split = 0
if (num_img_dir > 1):
assert ((num_img_dir == num_ann_dir) or (num_ann_dir == 0))
assert ((num_img_dir == num_split) or (num_split == 0))
else:
assert ((num_split == num_ann_dir) or (num_ann_dir <= 1))
num_dset = max(num_split, num_img_dir)
datasets = []
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
if isinstance(img_dir, (list, tuple)):
data_cfg['img_dir'] = img_dir[i]
if isinstance(ann_dir, (list, tuple)):
data_cfg['ann_dir'] = ann_dir[i]
if isinstance(split, (list, tuple)):
data_cfg['split'] = split[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval) |
@callback(Output('add-node-B', 'options'), Input('add-node-B-parent', 'n_clicks'), State('causal-data-state', 'data'))
def update_node_b_dropdown(n_clicks, data_state):
options = []
ctx = dash.callback_context
prop_id = ctx.triggered_id
if (prop_id == 'add-node-B-parent'):
data = (json.loads(data_state) if (data_state is not None) else {})
options += [{'label': col, 'value': col} for col in data.get('columns', [])]
return options |
@ti.kernel
def divergence(vf: ti.types.ndarray(ndim=2), velocity_divs: ti.types.ndarray(ndim=2)):
for (i, j) in vf:
vl = sample(vf, (i - 1), j)
vr = sample(vf, (i + 1), j)
vb = sample(vf, i, (j - 1))
vt = sample(vf, i, (j + 1))
vc = sample(vf, i, j)
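# At the domain boundaries, mirror the centre velocity so the averaged normal component at the wall is zero.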
if (i == 0):
vl.x = (- vc.x)
if (i == (res - 1)):
vr.x = (- vc.x)
if (j == 0):
vb.y = (- vc.y)
if (j == (res - 1)):
vt.y = (- vc.y)
velocity_divs[(i, j)] = ((((vr.x - vl.x) + vt.y) - vb.y) * 0.5) |
def init_weights(modules, initialize):
for module in modules():
if (isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Linear)):
if (initialize == 'ortho'):
init.orthogonal_(module.weight)
if (module.bias is not None):
module.bias.data.fill_(0.0)
elif (initialize == 'N02'):
init.normal_(module.weight, 0, 0.02)
if (module.bias is not None):
module.bias.data.fill_(0.0)
elif (initialize in ['glorot', 'xavier']):
init.xavier_uniform_(module.weight)
if (module.bias is not None):
module.bias.data.fill_(0.0)
else:
print('Init style not recognized...')
elif isinstance(module, nn.Embedding):
if (initialize == 'ortho'):
init.orthogonal_(module.weight)
elif (initialize == 'N02'):
init.normal_(module.weight, 0, 0.02)
elif (initialize in ['glorot', 'xavier']):
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
else:
pass |
def sequence_factory():
return _DummySequenceOutputVariableFactory(RuntimeVariable.CoverageTimeline) |
@dace.program
def tovec_sym(x: dace.float32[N], y: dace.float32[N], z: dace.float32[N]):
@dace.map
def sum(i: _[0:N]):
(xx << x[i])
(yy << y[i])
(zz << z[i])
(out >> z[i])
out = ((xx + yy) + zz) |
def compute_average_flops_cost(self):
batches_count = self.__batch_counter__
flops_sum = 0
for module in self.modules():
if (isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear)):
flops_sum += module.__flops__
return (flops_sum / batches_count) |
@keras.utils.register_keras_serializable()
class FedProxOptimizer(keras.optimizers.Optimizer):
def __init__(self, learning_rate=0.01, mu=0.01, name='FedProxOptimizer', **kwargs):
super().__init__(name=name, **kwargs)
self._set_hyper('learning_rate', learning_rate)
self._set_hyper('mu', mu)
self._lr_t = None
self._mu_t = None
def _prepare(self, var_list):
self._lr_t = tf.convert_to_tensor(self._get_hyper('learning_rate'), name='lr')
self._mu_t = tf.convert_to_tensor(self._get_hyper('mu'), name='mu')
def _create_slots(self, var_list):
for v in var_list:
self.add_slot(v, 'vstar')
def _resource_apply_dense(self, grad, var):
lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
mu_t = tf.cast(self._mu_t, var.dtype.base_dtype)
vstar = self.get_slot(var, 'vstar')
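# FedProx dense update: w <- w - lr * (grad + mu * (w - w_star)), where the global snapshot w_star is held in the 'vstar' slot.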
var_update = var.assign_sub((lr_t * (grad + (mu_t * (var - vstar)))))
return tf.group(*[var_update])
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
mu_t = tf.cast(self._mu_t, var.dtype.base_dtype)
vstar = self.get_slot(var, 'vstar')
v_diff = vstar.assign((mu_t * (var - vstar)), use_locking=self._use_locking)
with tf.control_dependencies([v_diff]):
scaled_grad = scatter_add(vstar, indices, grad)
var_update = var.assign_sub((lr_t * scaled_grad))
return tf.group(*[var_update])
def _resource_apply_sparse(self, grad, var):
return self._apply_sparse_shared(grad.values, var, grad.indices, (lambda x, i, v: standard_ops.scatter_add(x, i, v)))
def get_config(self):
base_config = super(FedProxOptimizer, self).get_config()
return {**base_config, 'lr': self._serialize_hyperparameter('learning_rate'), 'mu': self._serialize_hyperparameter('mu')} |
def set_gamma_ramp(monitor, ramp):
gammaramp = _GLFWgammaramp()
gammaramp.wrap(ramp)
_glfw.glfwSetGammaRamp(monitor, ctypes.pointer(gammaramp)) |
def faiss_clustering(features, ncentroids, kmeans_niters):
import faiss
d = features.shape[1]
if (kmeans_niters is None):
kmeans_niters = 20
kmeans = faiss.Kmeans(d, ncentroids, niter=kmeans_niters, verbose=False)
kmeans.train(features)
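# index.search returns, for each row, the distance to and the id of its nearest centroid; keep only the cluster ids.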
(distances, assignments) = kmeans.index.search(features, 1)
return assignments.squeeze(axis=(- 1)) |
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 128
super(ResNet, self).__init__()
self.conv1 = conv3x3(3, 64, stride=2)
self.bn1 = nn.BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(64, 64)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = conv3x3(64, 128)
self.bn3 = nn.BatchNorm2d(128)
self.relu3 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear((512 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
return x |
def test_branch_subscopes_fission():
sdfg = dace.SDFG('branch_subscope_fission')
sdfg.add_symbol('i', dace.int32)
sdfg.add_array('A', [2], dace.int32)
sdfg.add_array('B', [1], dace.int32, transient=True)
sdfg.add_array('C', [1], dace.int32)
init_state = sdfg.add_state('init')
guard_1 = sdfg.add_state('guard_1')
guard_2 = sdfg.add_state('guard_2')
right1_state = sdfg.add_state('right1')
right2_state = sdfg.add_state('right2')
left2_state = sdfg.add_state('left2')
merge_1 = sdfg.add_state('merge_1')
merge_2 = sdfg.add_state('merge_2')
guard_after = sdfg.add_state('guard_after')
left_after = sdfg.add_state('left_after')
right_after = sdfg.add_state('right_after')
merge_after = sdfg.add_state('merge_after')
first_assign = dace.InterstateEdge(assignments={'i': 'A[0]'})
sdfg.add_edge(init_state, guard_1, first_assign)
combined_assign_cond = dace.InterstateEdge(assignments={'i': 'A[1]'}, condition='i > 0')
sdfg.add_edge(guard_1, guard_2, combined_assign_cond)
right_cond = dace.InterstateEdge(condition='i <= 0')
left_2_cond = dace.InterstateEdge(condition='i <= 0')
right_2_cond = dace.InterstateEdge(condition='i > 0')
sdfg.add_edge(guard_1, right1_state, right_cond)
sdfg.add_edge(guard_2, right2_state, right_2_cond)
sdfg.add_edge(guard_2, left2_state, left_2_cond)
sdfg.add_edge(right1_state, merge_1, dace.InterstateEdge())
sdfg.add_edge(right2_state, merge_2, dace.InterstateEdge())
sdfg.add_edge(left2_state, merge_2, dace.InterstateEdge())
sdfg.add_edge(merge_2, merge_1, dace.InterstateEdge())
sdfg.add_edge(merge_1, guard_after, dace.InterstateEdge())
after_cond_left = dace.InterstateEdge(condition='i <= 0')
after_cond_right = dace.InterstateEdge(condition='i > 0')
sdfg.add_edge(guard_after, left_after, after_cond_left)
sdfg.add_edge(guard_after, right_after, after_cond_right)
sdfg.add_edge(left_after, merge_after, dace.InterstateEdge())
sdfg.add_edge(right_after, merge_after, dace.InterstateEdge())
t1 = guard_1.add_tasklet('t1', {}, {'b'}, 'b = 1')
a1 = guard_1.add_access('B')
guard_1.add_edge(t1, 'b', a1, None, dace.Memlet('B[0]'))
t0 = guard_2.add_tasklet('t1', {}, {'b'}, 'b = 0')
a0 = guard_2.add_access('B')
guard_2.add_edge(t0, 'b', a0, None, dace.Memlet('B[0]'))
a2 = left2_state.add_access('B')
t2 = left2_state.add_tasklet('t2', {'b'}, {'c'}, 'c = b')
a3 = left2_state.add_access('C')
left2_state.add_edge(a2, None, t2, 'b', dace.Memlet('B[0]'))
left2_state.add_edge(t2, 'c', a3, None, dace.Memlet('C[0]'))
a4 = right2_state.add_access('B')
t3 = right2_state.add_tasklet('t3', {'b'}, {'c'}, 'c = b + 1')
a5 = right2_state.add_access('C')
right2_state.add_edge(a4, None, t3, 'b', dace.Memlet('B[0]'))
right2_state.add_edge(t3, 'c', a5, None, dace.Memlet('C[0]'))
a6 = right1_state.add_access('B')
t4 = right1_state.add_tasklet('t4', {'b'}, {'c'}, 'c = b - 1')
a7 = right1_state.add_access('C')
right1_state.add_edge(a6, None, t4, 'b', dace.Memlet('B[0]'))
right1_state.add_edge(t4, 'c', a7, None, dace.Memlet('C[0]'))
t7 = guard_after.add_tasklet('t7', {}, {'b'}, 'b = 5')
a12 = guard_after.add_access('B')
guard_after.add_edge(t7, 'b', a12, None, dace.Memlet('B[0]'))
a8 = left_after.add_access('B')
t5 = left_after.add_tasklet('t5', {'b'}, {'c'}, 'c = b * 2')
a9 = left_after.add_access('C')
left_after.add_edge(a8, None, t5, 'b', dace.Memlet('B[0]'))
left_after.add_edge(t5, 'c', a9, None, dace.Memlet('C[0]'))
a10 = right_after.add_access('B')
t6 = right_after.add_tasklet('t6', {'b'}, {'c'}, 'c = b * 3')
a11 = right_after.add_access('C')
right_after.add_edge(a10, None, t6, 'b', dace.Memlet('B[0]'))
right_after.add_edge(t6, 'c', a11, None, dace.Memlet('C[0]'))
a13 = merge_1.add_access('B')
t8 = merge_1.add_tasklet('t8', {'b'}, {'c'}, 'c = b + 1')
a14 = merge_1.add_access('C')
merge_1.add_edge(a13, None, t8, 'b', dace.Memlet('B[0]'))
merge_1.add_edge(t8, 'c', a14, None, dace.Memlet('C[0]'))
Pipeline([ScalarFission()]).apply_pass(sdfg, {})
assert (set(sdfg.arrays.keys()) == {'A', 'B', 'C', 'B_0', 'B_1'}) |
def drug_is_taken_in_baseline(index_date, dates):
for date in dates:
if ((index_date - date).days > 0):
return True
return False |
@pytest.mark.parametrize('gamma', [0.1, 0.5, 0.9])
def test_valid_gamma(gamma: float) -> None:
check_gamma(gamma) |
def _worker_set_policy_params(g, params, scope=None):
g = _get_scoped_g(g, scope)
g.policy.set_param_values(params) |
def main():
generate_zenodo()
rst_include.include(source='docs/resources/credits_template.rst', target='docs/resources/credits.rst', quiet=False, inplace=False, source_encoding='utf-8', target_encoding='utf-8')
rst_include.include(source='README_TEMPLATE.rst', target='README.rst', quiet=False, inplace=False, source_encoding='utf-8', target_encoding='utf-8') |
class SegHead(nn.Module):
def __init__(self, in_ch, mid_ch, num_classes, upscale_factor=8, is_aux=True) -> None:
super().__init__()
out_ch = ((num_classes * upscale_factor) * upscale_factor)
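# nn.PixelShuffle later rearranges num_classes * upscale_factor^2 channels into num_classes maps upsampled by upscale_factor.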
self.conv_3x3 = ConvModule(in_ch, mid_ch, 3, 1, 1)
self.drop = nn.Dropout(0.1)
if is_aux:
self.conv_out = nn.Sequential(ConvModule(mid_ch, (upscale_factor * upscale_factor), 3, 1, 1), nn.Conv2d((upscale_factor * upscale_factor), out_ch, 1, 1, 0), nn.PixelShuffle(upscale_factor))
else:
self.conv_out = nn.Sequential(nn.Conv2d(mid_ch, out_ch, 1, 1, 0), nn.PixelShuffle(upscale_factor))
def forward(self, x):
out = self.conv_3x3(x)
out = self.drop(out)
return self.conv_out(out) |
class NumericColumnTransformer(BaseColumnTransformer):
def __init__(self, key, shape=(1,), dtype='float32'):
self.key = key
self.shape = shape
self.dtype = dtype
def _set_feature_column_names(self, names):
BaseColumnTransformer._set_feature_column_names(self, names)
self.column_idx = self.names.index(self.key)
def __call__(self, inputs):
return inputs[self.column_idx]
def get_feature_column_names(self):
return [self.key] |
class PriNet2D(nn.Module):
def __init__(self):
super(PriNet2D, self).__init__()
print('PriNet2D...')
self.net = archs.sparse_invar_encoder2D.CustomCNN(18, 1, 3).cuda()
def forward(self, feat_mem, clist_cam, summ_writer, suffix=''):
total_loss = torch.tensor(0.0).cuda()
(B, C, Z, Y, X) = list(feat_mem.shape)
(B2, S, D) = list(clist_cam.shape)
assert (B == B2)
assert (D == 3)
clist_mem = utils_vox.Ref2Mem(clist_cam, Z, Y, X)
feat_ = feat_mem.permute(0, 1, 3, 2, 4).reshape(B, (C * Y), Z, X)
mask_ = (1.0 - (feat_ == 0).all(dim=1, keepdim=True).float().cuda())
grid_ = utils_basic.meshgrid2D(B, Z, X, stack=True, norm=True).permute(0, 3, 1, 2)
halfgrid_ = utils_basic.meshgrid2D(B, int((Z / 2)), int((X / 2)), stack=True, norm=True).permute(0, 3, 1, 2)
feat_ = torch.cat([feat_, grid_], dim=1)
(energy_map, mask) = self.net(feat_, mask_, halfgrid_)
summ_writer.summ_feat('pri/energy_input', feat_)
summ_writer.summ_oned('pri/energy_map', energy_map)
summ_writer.summ_oned('pri/mask', mask, norm=False)
summ_writer.summ_histogram('pri/energy_map_hist', energy_map)
loglike_per_traj = utils_misc.get_traj_loglike((clist_mem * 0.5), energy_map)
ce_loss = ((- 1.0) * torch.mean(loglike_per_traj))
total_loss = utils_misc.add_loss('pri/ce_loss', total_loss, ce_loss, hyp.pri2D_ce_coeff, summ_writer)
reg_loss = torch.sum(torch.abs(energy_map))
total_loss = utils_misc.add_loss('pri/reg_loss', total_loss, reg_loss, hyp.pri2D_reg_coeff, summ_writer)
(dz, dx) = utils_basic.gradient2D(energy_map, absolute=True)
smooth_vox = torch.mean((dz + dx), dim=1, keepdims=True)
summ_writer.summ_oned('pri/smooth_loss', smooth_vox)
smooth_loss = torch.mean(smooth_vox)
total_loss = utils_misc.add_loss('pri/smooth_loss', total_loss, smooth_loss, hyp.pri2D_smooth_coeff, summ_writer)
return (total_loss, energy_map) |
def register_Ns3Dot11sIePeerManagement_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
cls.add_constructor([param('ns3::dot11s::IePeerManagement const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')], is_virtual=True)
cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
cls.add_method('GetLocalLinkId', 'uint16_t', [], is_const=True)
cls.add_method('GetPeerLinkId', 'uint16_t', [], is_const=True)
cls.add_method('GetReasonCode', 'ns3::dot11s::PmpReasonCode', [], is_const=True)
cls.add_method('GetSubtype', 'uint8_t', [], is_const=True)
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'i')], is_const=True, is_virtual=True)
cls.add_method('SetPeerClose', 'void', [param('uint16_t', 'localLinkID'), param('uint16_t', 'peerLinkId'), param('ns3::dot11s::PmpReasonCode', 'reasonCode')])
cls.add_method('SetPeerConfirm', 'void', [param('uint16_t', 'localLinkID'), param('uint16_t', 'peerLinkId')])
cls.add_method('SetPeerOpen', 'void', [param('uint16_t', 'localLinkId')])
cls.add_method('SubtypeIsClose', 'bool', [], is_const=True)
cls.add_method('SubtypeIsConfirm', 'bool', [], is_const=True)
cls.add_method('SubtypeIsOpen', 'bool', [], is_const=True)
return |
def get_run_env_dict():
d = {}
d['timestamp'] = datetime.datetime.now().timestamp()
d['hostname'] = socket.gethostname()
if ('SLURM_JOB_ID' in os.environ):
d['slurm_job_id'] = int(os.environ['SLURM_JOB_ID'])
if ('SLURM_PROCID' in os.environ):
d['slurm_procid'] = int(os.environ['SLURM_PROCID'])
if ('SLURM_RESTART_COUNT' in os.environ):
d['slurm_restart_count'] = int(os.environ['SLURM_RESTART_COUNT'])
(git_root_path, metadata) = get_metadata()
d['git_root_path'] = (git_root_path.decode('utf-8') if (git_root_path is not None) else None)
d['git_commit'] = metadata.get('githash')
d['launcher'] = metadata.get('launcher')
return d |
def _check_img_dtype(img):
assert isinstance(img, np.ndarray), '[Augmentation] Needs a numpy array, but got a {}!'.format(type(img))
assert ((not isinstance(img.dtype, np.integer)) or (img.dtype == np.uint8)), '[Augmentation] Got image of type {}, use uint8 or floating points instead!'.format(img.dtype)
assert (img.ndim in [2, 3]), img.ndim |
class TestEmbeddings(unittest.TestCase):
def setUp(self):
self.methods = [Spectral(), GSVD(), SVD()]
self.bimethods = [GSVD(), SVD()]
def test_undirected(self):
adjacency = test_graph()
n = adjacency.shape[0]
method = Spring()
embedding = method.fit_transform(adjacency)
self.assertEqual(embedding.shape, (n, 2))
pred1 = method.predict(adjacency[0])
pred2 = method.predict(adjacency[0].toarray())
self.assertEqual(pred1.shape, (2,))
self.assertAlmostEqual(np.linalg.norm((pred1 - pred2)), 0)
pred1 = method.predict(adjacency)
pred2 = method.predict(adjacency.toarray())
self.assertTupleEqual(pred1.shape, (n, 2))
self.assertAlmostEqual(np.linalg.norm((pred1 - pred2)), 0)
def test_bimethods(self):
for adjacency in [test_digraph(), test_bigraph()]:
(n_row, n_col) = adjacency.shape
for method in self.bimethods:
method.fit(adjacency)
self.assertEqual(method.embedding_.shape, (n_row, 2))
self.assertEqual(method.embedding_row_.shape, (n_row, 2))
self.assertEqual(method.embedding_col_.shape, (n_col, 2))
ref = method.embedding_[0]
pred1 = method.predict(adjacency[0])
pred2 = method.predict(adjacency[0].toarray())
self.assertEqual(pred1.shape, (2,))
self.assertAlmostEqual(np.linalg.norm((pred1 - pred2)), 0)
self.assertAlmostEqual(np.linalg.norm((pred1 - ref)), 0)
ref = method.embedding_
pred1 = method.predict(adjacency)
pred2 = method.predict(adjacency.toarray())
self.assertTupleEqual(pred1.shape, (n_row, 2))
self.assertAlmostEqual(np.linalg.norm((pred1 - pred2)), 0)
self.assertAlmostEqual(np.linalg.norm((pred1 - ref)), 0)
def test_disconnected(self):
n = 10
adjacency = np.eye(n)
for method in self.methods:
embedding = method.fit_transform(adjacency)
self.assertEqual(embedding.shape, (n, 2))
def test_regularization(self):
adjacency = test_graph()
method = Spectral()
self.assertEqual(method._get_regularization((- 1), adjacency), 0) |
def Norm2d(in_channels):
layer = getattr(cfg.MODEL, 'BNFUNC')
normalizationLayer = layer(in_channels)
return normalizationLayer |
class DistilBertForSequenceClassification():
def __init__(self, *args, **kwargs):
requires_pytorch(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_pytorch(self) |
def _get_all_arcs(graph_parse, is_variable):
assert isinstance(graph_parse, GraphParse)
items = []
for (a_key, b_key) in itertools.combinations(graph_parse.intersection_points, 2):
items.extend(_get_arcs(graph_parse, is_variable, a_key, b_key).iteritems())
return dict(items) |
def main(args):
file_name = f'{args.policy}_{args.env}_{args.seed}'
print('')
print(f'Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}')
print('')
log_path = safe_path(os.path.join(args.log_root, '{}_base'.format(args.env)))
result_path = safe_path(os.path.join(log_path, 'results'))
model_path = safe_path(os.path.join(log_path, 'models'))
env = gym.make(args.env)
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
kwargs = {'state_dim': state_dim, 'action_dim': action_dim, 'max_action': max_action, 'discount': args.discount, 'tau': args.tau}
if (args.policy == 'TD3'):
kwargs['policy_noise'] = (args.policy_noise * max_action)
kwargs['noise_clip'] = (args.noise_clip * max_action)
kwargs['policy_freq'] = args.policy_freq
policy = TD3.TD3(**kwargs)
replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
evaluations = [eval_policy(policy, env, args.seed)]
(state, done) = (env.reset(), False)
episode_reward = 0
episode_timesteps = 0
episode_num = 0
for t in range(int(args.max_timesteps)):
episode_timesteps += 1
if (t < args.start_timesteps):
action = env.action_space.sample()
else:
action = (policy.select_action(np.array(state)) + np.random.normal(0, (max_action * args.expl_noise), size=action_dim)).clip((- max_action), max_action)
(next_state, reward, done, _) = env.step(action)
done_bool = (float(done) if (episode_timesteps < env._max_episode_steps) else 0)
replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
episode_reward += reward
if (t >= args.start_timesteps):
policy.train(replay_buffer, args.batch_size)
if done:
print(f'Total T: {(t + 1)} Episode Num: {(episode_num + 1)} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}')
(state, done) = (env.reset(), False)
episode_reward = 0
episode_timesteps = 0
episode_num += 1
if (((t + 1) % args.eval_freq) == 0):
evaluations.append(eval_policy(policy, env, args.seed))
np.save(os.path.join(result_path, '{}'.format(file_name)), evaluations)
if args.save_model:
policy.save(os.path.join(model_path, '{}'.format(file_name))) |
@registry.make_registry
class BackwardImplementation(abc.ABC):
@staticmethod
def backward_can_be_applied(node: nd.Node, state: SDFGState, sdfg: SDFG) -> bool:
return True
@staticmethod
@abc.abstractmethod
def backward(forward_node: nd.Node, context: BackwardContext, given_gradients: typing.List[typing.Optional[str]], required_gradients: typing.List[typing.Optional[str]]) -> typing.Tuple[(nd.Node, BackwardResult)]:
... |
@dataclass
class ModelArguments():
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to directory to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'}) |
class OpenAIGPTDoubleHeadsModel():
def __init__(self, *args, **kwargs):
requires_pytorch(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_pytorch(self) |
class BinaryEncoder(BaseNEncoder):
encoding_relation = utils.EncodingRelation.ONE_TO_M
__init__ = partialmethod(BaseNEncoder.__init__, base=2) |
def mae(y: pd.Series, yhat: pd.Series, lb: pd.Series, ub: pd.Series):
return np.abs((y - yhat)).mean() |
class ListPromptTemplate():
def __init__(self, template: str, input_variables: List[str]):
self.template = template
self.input_variables = input_variables
def build(self, **kwargs) -> str:
for i in self.input_variables:
if (i not in kwargs):
raise ValueError(f'Missing input variable {i}')
return self.template.format(**kwargs) |
def potsdam_classes():
return ['impervious_surface', 'building', 'low_vegetation', 'tree', 'car', 'clutter'] |
class Function_csc(GinacFunction):
def __init__(self):
GinacFunction.__init__(self, 'csc', latex_name='\\csc')
def _eval_numpy_(self, x):
return (1 / sin(x)) |
class Histogram(GraphicPrimitive):
def __init__(self, datalist, options):
import numpy as np
self.datalist = np.asarray(datalist, dtype=float)
if ('normed' in options):
from sage.misc.superseded import deprecation
deprecation(25260, "the 'normed' option is deprecated. Use 'density' instead.")
if ('linestyle' in options):
from sage.plot.misc import get_matplotlib_linestyle
options['linestyle'] = get_matplotlib_linestyle(options['linestyle'], return_type='long')
if options.get('range', None):
options['range'] = [float(x) for x in options['range']]
GraphicPrimitive.__init__(self, options)
def get_minmax_data(self):
import numpy
options = self.options()
opt = {}
for key in ('range', 'bins', 'normed', 'density', 'weights'):
try:
value = options[key]
except KeyError:
pass
else:
if (value is not None):
opt[key] = value
if (not hasattr(self.datalist[0], '__contains__')):
(ydata, xdata) = numpy.histogram(self.datalist, **opt)
return minmax_data(xdata, ([0] + list(ydata)), dict=True)
else:
m = {'xmax': 0, 'xmin': 0, 'ymax': 0, 'ymin': 0}
if (not options.get('stacked')):
for d in self.datalist:
(ydata, xdata) = numpy.histogram(d, **opt)
m['xmax'] = max(([m['xmax']] + list(xdata)))
m['xmin'] = min(([m['xmin']] + list(xdata)))
m['ymax'] = max(([m['ymax']] + list(ydata)))
return m
else:
for d in self.datalist:
(ydata, xdata) = numpy.histogram(d, **opt)
m['xmax'] = max(([m['xmax']] + list(xdata)))
m['xmin'] = min(([m['xmin']] + list(xdata)))
m['ymax'] = (m['ymax'] + max(list(ydata)))
return m
def _allowed_options(self):
return {'color': 'The color of the face of the bars or list of colors if multiple data sets are given.', 'edgecolor': 'The color of the border of each bar.', 'alpha': 'How transparent the plot is', 'hue': 'The color of the bars given as a hue.', 'fill': '(True or False, default True) Whether to fill the bars', 'hatch': 'What symbol to fill with - one of "/", "\\", "|", "-", "+", "x", "o", "O", ".", "*"', 'linewidth': 'Width of the lines defining the bars', 'linestyle': "One of 'solid' or '-', 'dashed' or '--', 'dotted' or ':', 'dashdot' or '-.'", 'zorder': 'The layer level to draw the histogram', 'bins': 'The number of sections in which to divide the range. Also can be a sequence of points within the range that create the partition.', 'align': 'How the bars align inside of each bin. Acceptable values are "left", "right" or "mid".', 'rwidth': 'The relative width of the bars as a fraction of the bin width', 'cumulative': '(True or False) If True, then a histogram is computed in which each bin gives the counts in that bin plus all bins for smaller values. Negative values give a reversed direction of accumulation.', 'range': 'A list [min, max] which define the range of the histogram. Values outside of this range are treated as outliers and omitted from counts.', 'normed': 'Deprecated. Use density instead.', 'density': '(True or False) If True, the counts are normalized to form a probability density. (n/(len(x)*dbin)', 'weights': 'A sequence of weights the same length as the data list. If supplied, then each value contributes its associated weight to the bin count.', 'stacked': '(True or False) If True, multiple data are stacked on top of each other.', 'label': 'A string label for each data list given.'}
def _repr_(self):
L = len(self.datalist)
if (not hasattr(self.datalist[0], '__contains__')):
return 'Histogram defined by a data list of size {}'.format(L)
else:
return 'Histogram defined by {} data lists'.format(L)
def _render_on_subplot(self, subplot):
options = self.options()
if (not hasattr(self.datalist[0], '__contains__')):
subplot.hist(self.datalist, **options)
else:
subplot.hist(self.datalist.transpose(), **options) |
def reduce_formulas(formulas):
variable_values = {}
for formula in formulas:
if (formula.signature.id != 'Equals'):
continue
(left, right) = formula.children
if left.is_grounded(['What', 'Which']):
(left, right) = (right, left)
if (isinstance(left.signature, VariableSignature) and right.is_grounded(['What', 'Which'])):
variable_values[left.signature] = right
tester = (lambda node: (isinstance(node, FormulaNode) and (node.signature in variable_values)))
getter = (lambda node: variable_values[node.signature])
replaced_formulas = [formula.replace_node(tester, getter) for formula in formulas]
filtered_formulas = [formula for formula in replaced_formulas if (not formula.is_grounded())]
return filtered_formulas |
def main(args):
if ('totaltext' in args.result_path.lower()):
gt_folder = 'evaluation/gt/gt_totaltext'
IS_WORDSPOTTING = True
lexicon_paths = ['', 'evaluation/lexicons/totaltext/weak_voc_new.txt']
pair_paths = ['', 'evaluation/lexicons/totaltext/weak_voc_pair_list.txt']
lexicon_type = 1
elif ('ctw1500' in args.result_path.lower()):
gt_folder = 'evaluation/gt/gt_ctw1500'
IS_WORDSPOTTING = False
lexicon_paths = ['', 'evaluation/lexicons/ctw1500/weak_voc_new.txt']
pair_paths = ['', 'evaluation/lexicons/ctw1500/weak_voc_pair_list.txt']
lexicon_type = 1
elif ('ic13' in args.result_path.lower()):
gt_folder = 'evaluation/gt/gt_ic13'
IS_WORDSPOTTING = False
lexicon_paths = ['evaluation/lexicons/ic13/GenericVocabulary_new.txt', 'evaluation/lexicons/ic13/ch2_test_vocabulary_new.txt', 'evaluation/lexicons/ic13/new_strong_lexicon/new_voc_img_']
pair_paths = ['evaluation/lexicons/ic13/GenericVocabulary_pair_list.txt', 'evaluation/lexicons/ic13/ch2_test_vocabulary_pair_list.txt', 'evaluation/lexicons/ic13/new_strong_lexicon/pair_voc_img_']
lexicon_type = args.lexicon_type
elif ('ic15' in args.result_path.lower()):
gt_folder = 'evaluation/gt/gt_ic15'
IS_WORDSPOTTING = False
lexicon_paths = ['evaluation/lexicons/ic15/GenericVocabulary_new.txt', 'evaluation/lexicons/ic15/ch4_test_vocabulary_new.txt', 'evaluation/lexicons/ic15/new_strong_lexicon/new_voc_img_']
pair_paths = ['evaluation/lexicons/ic15/GenericVocabulary_pair_list.txt', 'evaluation/lexicons/ic15/ch4_test_vocabulary_pair_list.txt', 'evaluation/lexicons/ic15/new_strong_lexicon/pair_voc_img_']
lexicon_type = args.lexicon_type
else:
raise ValueError('Cannot determine target dataset')
if args.with_lexicon:
lexicon_path = lexicon_paths[lexicon_type]
pair_path = pair_paths[lexicon_type]
lexicons = read_lexicon(lexicon_path)
pairs = read_pair(pair_path)
else:
lexicons = None
pairs = None
print('Reading GT')
gts = read_gt(gt_folder, IS_WORDSPOTTING)
print('Reading and Processing Results')
results = read_result(args.result_path, lexicons, pairs, 0.4, gt_folder, lexicon_type)
print('Evaluating')
conf_thres_list = np.arange(0.8, 0.95, 0.01)
hmeans = []
recalls = []
precisions = []
for conf_thres in conf_thres_list:
(precision, recall, hmean, pgt, ngt, ndet) = evaluate(results=results, gts=gts, conf_thres=conf_thres)
hmeans.append(hmean)
recalls.append(recall)
precisions.append(precision)
max_hmean = max(hmeans)
max_hmean_index = ((len(hmeans) - hmeans[::(- 1)].index(max_hmean)) - 1)
precision = precisions[max_hmean_index]
recall = recalls[max_hmean_index]
conf_thres = conf_thres_list[max_hmean_index]
print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Hmean: {max_hmean:.4f}, Conf Thres: {conf_thres:.4f}') |
def lie_console():
from sage.repl.rich_output.display_manager import get_display_manager
if (not get_display_manager().is_in_terminal()):
raise RuntimeError('Can use the console only in the terminal. Try %%lie magics instead.')
os.system('bash `which lie`') |
class Ackley01(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 35.0)] * self.N), ([35.0] * self.N)))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
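# Ackley function: f(x) = -20*exp(-0.2*sqrt(mean(x^2))) - exp(mean(cos(2*pi*x))) + 20 + e.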
u = sum((x ** 2))
v = sum(cos(((2 * pi) * x)))
return (((((- 20.0) * exp(((- 0.2) * sqrt((u / self.N))))) - exp((v / self.N))) + 20.0) + exp(1.0)) |
def get_writer(uri, format=None, mode='?', **kwargs):
if ((uri == RETURN_BYTES) and isinstance(format, str)):
uri = ((RETURN_BYTES + '.') + format.strip('. '))
request = Request(uri, ('w' + mode), **kwargs)
if (format is not None):
format = formats[format]
else:
format = formats.search_write_format(request)
if (format is None):
modename = MODENAMES.get(mode, mode)
raise ValueError(('Could not find a format to write the specified file in %s mode' % modename))
return format.get_writer(request) |
def test_jottings():
fname = os.path.join(test_data_path, 'parabola.mat')
read_workspace_vars(fname) |
def register_Ns3AddressValue_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
cls.add_constructor([param('ns3::Address const &', 'value')])
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
cls.add_method('Get', 'ns3::Address', [], is_const=True)
cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')])
return |
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if (_PIL_VER >= (5, 2)):
return img.rotate(degrees, **kwargs)
elif (_PIL_VER >= (5, 0)):
(w, h) = img.size
post_trans = (0, 0)
rotn_center = ((w / 2.0), (h / 2.0))
angle = (- math.radians(degrees))
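# Build the 2x3 affine matrix [a, b, c, d, e, f] for a rotation about the image centre; c and f are filled in below from the centre translation.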
matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round((- math.sin(angle)), 15), round(math.cos(angle), 15), 0.0]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return ((((a * x) + (b * y)) + c), (((d * x) + (e * y)) + f))
(matrix[2], matrix[5]) = transform(((- rotn_center[0]) - post_trans[0]), ((- rotn_center[1]) - post_trans[1]), matrix)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs['resample']) |
class ENet(nn.Module):
def __init__(self, dictionary=None):
super(ENet, self).__init__()
self.dictionary = dictionary
self.dummy_input = torch.zeros(1, 3, 480, 360)
self.num_classes = len(self.dictionary)
self.category = [v for d in self.dictionary for v in d.keys()]
self.weight = [d[v] for d in self.dictionary for v in d.keys() if (v in self.category)]
self.initialBlock = InitialBlock(3, 16)
self.stage1_1 = DownBottleneck(16, 64, 2, p=0.01)
self.stage1_2 = nn.Sequential(RegularBottleneck(64, 64, 1, p=0.01), RegularBottleneck(64, 64, 1, p=0.01), RegularBottleneck(64, 64, 1, p=0.01), RegularBottleneck(64, 64, 1, p=0.01))
self.stage2_1 = DownBottleneck(64, 128, 2, p=0.1)
self.stage2_2 = nn.Sequential(RegularBottleneck(128, 128, 1, p=0.1), RegularBottleneck(128, 128, 1, dilation=2, p=0.1), RegularBottleneck(128, 128, 1, asymmetric=True, p=0.1), RegularBottleneck(128, 128, 1, dilation=4, p=0.1), RegularBottleneck(128, 128, 1), RegularBottleneck(128, 128, 1, dilation=8, p=0.1), RegularBottleneck(128, 128, 1, asymmetric=True, p=0.1), RegularBottleneck(128, 128, 1, dilation=16, p=0.1))
self.stage3 = nn.Sequential(RegularBottleneck(128, 128, 1), RegularBottleneck(128, 128, 1, dilation=2, p=0.1), RegularBottleneck(128, 128, 1, asymmetric=True, p=0.1), RegularBottleneck(128, 128, 1, dilation=4, p=0.1), RegularBottleneck(128, 128, 1), RegularBottleneck(128, 128, 1, dilation=8, p=0.1), RegularBottleneck(128, 128, 1, asymmetric=True, p=0.1), RegularBottleneck(128, 128, 1, dilation=16, p=0.1))
self.stage4_1 = UpBottleneck(128, 64, 2, is_relu=True, p=0.1)
self.stage4_2 = nn.Sequential(RegularBottleneck(64, 64, 1, is_relu=True, p=0.1), RegularBottleneck(64, 64, 1, is_relu=True, p=0.1))
self.stage5_1 = UpBottleneck(64, 16, 2, is_relu=True, p=0.1)
self.stage5_2 = RegularBottleneck(16, 16, 1, is_relu=True, p=0.1)
self.final_conv = nn.ConvTranspose2d(in_channels=16, out_channels=self.num_classes, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.ce_criterion = CrossEntropyLoss2d(torch.from_numpy(np.array(self.weight)).float()).cuda()
self.ohem_ce_criterion = OhemCrossEntropyLoss2d(torch.from_numpy(np.array(self.weight)).float()).cuda()
self.focal_criterion = FocalLoss(weight=torch.from_numpy(np.array(self.weight)).float()).cuda()
self.lovasz_criterion = LovaszSoftmax().cuda()
self.bce_criterion = BCEWithLogitsLoss2d(weight=torch.from_numpy(np.array(self.weight)).float()).cuda()
self.dice_criterion = DiceLoss().cuda()
self.ce_dice_criterion = CE_DiceLoss(weight=torch.from_numpy(np.array(self.weight)).float()).cuda()
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight)
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.Linear)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, imgs, targets=None, mode='infer', **kwargs):
x = self.initialBlock(imgs)
(x, indices1) = self.stage1_1(x)
x = self.stage1_2(x)
(x, indices2) = self.stage2_1(x)
x = self.stage2_2(x)
x = self.stage3(x)
x = self.stage4_1(x, indices2)
x = self.stage4_2(x)
x = self.stage5_1(x, indices1)
x = self.stage5_2(x)
outputs = self.final_conv(x)
if (mode == 'infer'):
pass
else:
losses = {}
losses['ce_loss'] = self.ce_criterion(outputs, targets)
losses['ohem_ce_loss'] = self.ohem_ce_criterion(outputs, targets)
losses['focal_loss'] = self.focal_criterion(outputs, targets)
losses['lovasz_loss'] = self.lovasz_criterion(outputs, targets)
losses['bce_loss'] = self.bce_criterion(outputs, targets)
losses['dice_loss'] = self.dice_criterion(outputs, targets)
losses['ce_dice_loss'] = self.ce_dice_criterion(outputs, targets)
losses['loss'] = losses['ce_loss']
if (mode == 'val'):
return (losses, torch.argmax(outputs, dim=1))
else:
return losses |
def parse_requirements() -> Tuple[(PackagesType, PackagesType, Set[str])]:
essential_packages: PackagesType = {}
other_packages: PackagesType = {}
duplicates: Set[str] = set()
with open('requirements.txt', 'r') as req_file:
section: str = ''
for line in req_file:
line = line.strip()
if line.startswith('####'):
section = parse_section_name(line)
continue
if ((not line) or line.startswith('#')):
continue
(module, version) = parse_package(line)
if ((module in essential_packages) or (module in other_packages)):
duplicates.add(module)
if section.startswith('ESSENTIAL'):
essential_packages[module] = version
else:
other_packages[module] = version
return (essential_packages, other_packages, duplicates) |
def draw_cdf_ci(axis, dataset, confidence=0.95, yscale=None, **kwargs):
if (yscale is None):
yscale = axis.gca().get_yscale()
y = __calc_cdf_bins(yscale, axis.gca().get_yaxis())
quantile_buckets = {q: [] for q in y}
total_num_items = 0
for data in dataset:
num_items = len(data)
total_num_items += num_items
if (num_items == 0):
continue
data.sort(key=getfirstorself)
for q in quantile_buckets:
val_at_q = data[int(((num_items - 1) * q))]
quantile_buckets[q].append(val_at_q)
if (total_num_items == 0):
(x, y, x_min, x_max) = ([], [], [], [])
else:
bucket_list = [quantile_buckets[q] for (_, q) in enumerate(y)]
(x, x_min, x_max) = __compute_sample_mean_and_error(bucket_list, confidence)
plot_line = axis.plot(x, y, **kwargs)
kwargs['alpha'] = 0.5
kwargs['linestyle'] = '-'
axis.fill_betweenx(y, x_min, x_max, **kwargs)
fill_line = axis.fill(0, 0, color=kwargs['color'], alpha=kwargs['alpha'])
return (plot_line[0], fill_line[0]) |
class Config():
def __init__(self):
self.verbose = True
self.network = 'resnet50'
self.use_horizontal_flips = False
self.use_vertical_flips = False
self.rot_90 = False
self.anchor_box_scales = [128, 256, 512]
self.anchor_box_ratios = [[1, 1], [1, 2], [2, 1]]
self.im_size = 600
self.img_channel_mean = [103.939, 116.779, 123.68]
self.img_scaling_factor = 1.0
self.num_rois = 4
self.rpn_stride = 16
self.balanced_classes = False
self.std_scaling = 4.0
self.classifier_regr_std = [8.0, 8.0, 4.0, 4.0]
self.rpn_min_overlap = 0.3
self.rpn_max_overlap = 0.7
self.classifier_min_overlap = 0.1
self.classifier_max_overlap = 0.5
self.class_mapping = None
self.model_path = 'model_frcnn.hdf5'
self.wordvectorfile = 'Matlab/word_w2v.txt' |
@torch.no_grad()
def test(model, loader, evaluator, device):
model.eval()
(y_true, y_pred) = ([], [])
for (xs, y) in loader:
xs = [x.to(device) for x in xs]
y_true.append(y.to(torch.long))
y_pred.append(model(xs).argmax(dim=(- 1)).cpu())
return evaluator.eval({'y_true': torch.cat(y_true, dim=0), 'y_pred': torch.cat(y_pred, dim=0)})['acc'] |
def weights_init(module):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')
if (module.bias is not None):
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(module.weight)
if (module.bias is not None):
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')
if (module.bias is not None):
nn.init.zeros_(module.bias) |
def CReFF():
args = args_parser()
print('imb_factor:{ib}, non_iid:{non_iid}\nlr_net:{lr_net}, lr_feature:{lr_feature}, num_of_feature:{num_of_feature}\n match_epoch:{match_epoch}, re_training_epoch:{crt_epoch}\n'.format(ib=args.imb_factor, non_iid=args.non_iid_alpha, lr_net=args.lr_net, lr_feature=args.lr_feature, num_of_feature=args.num_of_feature, match_epoch=args.match_epoch, crt_epoch=args.crt_epoch))
random_state = np.random.RandomState(args.seed)
transform_all = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
data_local_training = datasets.CIFAR10(args.path_cifar10, train=True, download=True, transform=transform_all)
data_global_test = datasets.CIFAR10(args.path_cifar10, train=False, transform=transform_all)
list_label2indices = classify_label(data_local_training, args.num_classes)
(_, list_label2indices_train_new) = train_long_tail(copy.deepcopy(list_label2indices), args.num_classes, args.imb_factor, args.imb_type)
list_client2indices = clients_indices(copy.deepcopy(list_label2indices_train_new), args.num_classes, args.num_clients, args.non_iid_alpha, args.seed)
original_dict_per_client = show_clients_data_distribution(data_local_training, list_client2indices, args.num_classes)
global_model = Global(num_classes=args.num_classes, device=args.device, args=args, num_of_feature=args.num_of_feature)
total_clients = list(range(args.num_clients))
indices2data = Indices2Dataset(data_local_training)
re_trained_acc = []
temp_model = nn.Linear(256, 10).to(args.device)
syn_params = temp_model.state_dict()
for r in tqdm(range(1, (args.num_rounds + 1)), desc='server-training'):
global_params = global_model.download_params()
syn_feature_params = copy.deepcopy(global_params)
for name_param in reversed(syn_feature_params):
if (name_param == 'classifier.bias'):
syn_feature_params[name_param] = syn_params['bias']
if (name_param == 'classifier.weight'):
syn_feature_params[name_param] = syn_params['weight']
break
online_clients = random_state.choice(total_clients, args.num_online_clients, replace=False)
list_clients_gradient = []
list_dicts_local_params = []
list_nums_local_data = []
for client in online_clients:
indices2data.load(list_client2indices[client])
data_client = indices2data
list_nums_local_data.append(len(data_client))
local_model = Local(data_client=data_client, class_list=original_dict_per_client[client])
truth_gradient = local_model.compute_gradient(copy.deepcopy(syn_feature_params), args)
list_clients_gradient.append(copy.deepcopy(truth_gradient))
local_params = local_model.local_train(args, copy.deepcopy(global_params))
list_dicts_local_params.append(copy.deepcopy(local_params))
fedavg_params = global_model.initialize_for_model_fusion(list_dicts_local_params, list_nums_local_data)
global_model.update_feature_syn(args, copy.deepcopy(syn_feature_params), list_clients_gradient)
(syn_params, ft_params) = global_model.feature_re_train(copy.deepcopy(fedavg_params), args.batch_size_local_training)
one_re_train_acc = global_model.global_eval(ft_params, data_global_test, args.batch_size_test)
re_trained_acc.append(one_re_train_acc)
global_model.syn_model.load_state_dict(copy.deepcopy(fedavg_params))
if ((r % 10) == 0):
print(re_trained_acc)
print(re_trained_acc) |
def _all_filename(distribution):
if (not distribution):
return 'all.py'
return f"all__{distribution.replace('-', '_')}.py" |
def convert_example_to_features(example: ContractNLIExample, max_seq_length: int, doc_stride: int, max_query_length: int, padding_strategy, labels_available: bool, symbol_based_hypothesis: bool) -> List[IdentificationClassificationFeatures]:
features = []
(all_doc_tokens, orig_to_tok_index, tok_to_orig_index, span_to_orig_index) = tokenize(tokenizer, example.tokens, example.splits)
if symbol_based_hypothesis:
truncated_query = [example.hypothesis_symbol]
else:
truncated_query = tokenize(tokenizer, example.hypothesis_tokens, [])[0][:max_query_length]
tokenizer_type = type(tokenizer).__name__.replace('Tokenizer', '').lower()
sequence_added_tokens = (((tokenizer.model_max_length - tokenizer.max_len_single_sentence) + 1) if (tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET) else (tokenizer.model_max_length - tokenizer.max_len_single_sentence))
sequence_pair_added_tokens = (tokenizer.model_max_length - tokenizer.max_len_sentences_pair)
query_with_special_tokens_length = (len(truncated_query) + sequence_added_tokens)
max_context_length = ((max_seq_length - sequence_pair_added_tokens) - len(truncated_query))
spans = []
start = 0
covered_splits = set()
all_splits = set(span_to_orig_index.keys())
while (len((all_splits - covered_splits)) > 0):
upcoming_splits = [i for i in span_to_orig_index.keys() if ((i >= start) and (i not in covered_splits))]
assert (len(upcoming_splits) > 0)
second_split = (upcoming_splits[1] if (len(upcoming_splits) > 1) else len(all_doc_tokens))
if ((second_split - upcoming_splits[0]) > max_context_length):
start = upcoming_splits[0]
last_span_idx = second_split
covered_splits.add(upcoming_splits[0])
elif ((second_split - start) > max_context_length):
start += (second_split - max_context_length)
last_span_idx = second_split
covered_splits.add(upcoming_splits[0])
else:
last_span_idx = None
for i in range(start, (min((start + max_context_length), len(all_doc_tokens)) + 1)):
if ((i == len(all_doc_tokens)) or (i in span_to_orig_index)):
if (last_span_idx is not None):
covered_splits.add(last_span_idx)
last_span_idx = i
assert (last_span_idx is not None)
split_tokens = all_doc_tokens[start:min((start + max_context_length), len(all_doc_tokens))]
if (tokenizer.padding_side == 'right'):
texts = truncated_query
pairs = split_tokens
else:
texts = split_tokens
pairs = truncated_query
encoded_dict = tokenizer.encode_plus(texts, pairs, truncation=False, padding=padding_strategy, max_length=max_seq_length, return_overflowing_tokens=False, return_token_type_ids=True)
assert (len(encoded_dict['input_ids']) <= max_seq_length)
paragraph_len = len(split_tokens)
if (tokenizer.pad_token_id in encoded_dict['input_ids']):
if (tokenizer.padding_side == 'right'):
non_padded_ids = encoded_dict['input_ids'][:encoded_dict['input_ids'].index(tokenizer.pad_token_id)]
else:
last_padding_id_position = ((len(encoded_dict['input_ids']) - 1) - encoded_dict['input_ids'][::(- 1)].index(tokenizer.pad_token_id))
non_padded_ids = encoded_dict['input_ids'][(last_padding_id_position + 1):]
else:
non_padded_ids = encoded_dict['input_ids']
tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
token_to_orig_map = {}
span_to_orig_map = {}
for i in range(paragraph_len):
index = ((query_with_special_tokens_length + i) if (tokenizer.padding_side == 'right') else i)
if (tok_to_orig_index[(start + i)] != (- 1)):
token_to_orig_map[index] = tok_to_orig_index[(start + i)]
assert ((start + i) not in span_to_orig_index)
else:
assert ((start + i) in span_to_orig_index)
span_to_orig_map[index] = span_to_orig_index[(start + i)]
encoded_dict['paragraph_len'] = paragraph_len
encoded_dict['tokens'] = tokens
encoded_dict['token_to_orig_map'] = token_to_orig_map
encoded_dict['span_to_orig_map'] = span_to_orig_map
encoded_dict['truncated_query_with_special_tokens_length'] = query_with_special_tokens_length
encoded_dict['token_is_max_context'] = {}
encoded_dict['start'] = start
spans.append(encoded_dict)
start = (last_span_idx - doc_stride)
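# Start the next window doc_stride tokens before the end of the current one so consecutive spans overlap.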
for doc_span_index in range(len(spans)):
for j in range(spans[doc_span_index]['paragraph_len']):
is_max_context = _new_check_is_max_context(spans, doc_span_index, (spans[doc_span_index]['start'] + j))
index = (j if (tokenizer.padding_side == 'left') else (query_with_special_tokens_length + j))
spans[doc_span_index]['token_is_max_context'][index] = is_max_context
span_token_id = tokenizer.additional_special_tokens_ids[tokenizer.additional_special_tokens.index(SPAN_TOKEN)]
for span in spans:
cls_index = span['input_ids'].index(tokenizer.cls_token_id)
p_mask = np.logical_not(np.isin(np.array(span['input_ids']), [span_token_id, tokenizer.cls_token_id])).astype(np.int32)
valid_span_missing_in_context = False
span_labels = np.zeros_like(span['input_ids'])
if labels_available:
if (example.label != NLILabel.NOT_MENTIONED):
doc_start = span['start']
doc_end = (span['start'] + span['paragraph_len'])
annotated_spans = set(example.annotated_spans)
_span_labels = np.array([any(((s in annotated_spans) for s in span_to_orig_index.get(i, []))) for i in range(doc_start, doc_end)]).astype(int)
if (not np.any(_span_labels)):
valid_span_missing_in_context = True
tok_start = query_with_special_tokens_length
tok_end = (tok_start + span['paragraph_len'])
if (tokenizer.padding_side == 'right'):
span_labels[tok_start:tok_end] = _span_labels
else:
span_labels[(- tok_end):(- tok_start)] = _span_labels
class_label = example.label.value
else:
class_label = (- 1)
assert (not np.any(np.logical_and(p_mask, span_labels)))
features.append(IdentificationClassificationFeatures(span['input_ids'], span['attention_mask'], span['token_type_ids'], cls_index, p_mask, example_index=0, unique_id=0, paragraph_len=span['paragraph_len'], token_is_max_context=span['token_is_max_context'], tokens=span['tokens'], token_to_orig_map=span['token_to_orig_map'], span_to_orig_map=span['span_to_orig_map'], class_label=class_label, span_labels=span_labels, valid_span_missing_in_context=valid_span_missing_in_context, data_id=example.data_id))
return features |
_metric('accuracy')
def accuracy(target: Union[(Sequence[int], Sequence[Sequence[int]])], prediction: Union[(Sequence[float], Sequence[Sequence[float]])]) -> float:
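# Two input layouts are supported: a flat list of class ids scored against the argmax
# of per-example logits, or a list of per-token label sequences where positions
# labelled -1 are ignored when computing token-level accuracy.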
if isinstance(target[0], int):
return np.mean((np.asarray(target) == np.asarray(prediction).argmax((- 1))))
else:
correct = 0
total = 0
for (label, score) in zip(target, prediction):
label_array = np.asarray(label)
pred_array = np.asarray(score).argmax((- 1))
mask = (label_array != (- 1))
is_correct = (label_array[mask] == pred_array[mask])
correct += is_correct.sum()
total += is_correct.size
return (correct / total) |
def main(config):
(svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader) = get_loader(config)
solver = Solver(config, svhn_loader, mnist_loader)
cudnn.benchmark = True
if (not os.path.exists(config.model_path)):
os.makedirs(config.model_path)
if (not os.path.exists(config.sample_path)):
os.makedirs(config.sample_path)
if (config.mode == 'train'):
solver.train() |
_toolkit()
class Twilio(FunctionToolkit):
name_for_human = 'Twilio'
description_for_human = 'Toolkit for Twilio services.'
name_for_model = 'Twilio'
description_for_model = 'A toolkit for Twilio services, enabling users to send SMS messages, retrieve communication history, manage scheduled actions, retrieve information about specific phone numbers, and retrieve the history of received SMS messages.'
tool_classes = [TwilioSendSms, TwilioGetSmsHistory, TwilioGetReceivedSmsMessages, TwilioGetSmsPricing, TwilioGetPhoneNumberInfo, TwilioGetScheduledActions, TwilioCancelScheduledAction] |
class PointnetSAModuleVotes(nn.Module):
def __init__(self, *, mlp: List[int], radius: float=None, nsample: int=None, bn: bool=True, use_xyz: bool=True, normalize_xyz: bool=False, sample_uniformly: bool=False, sample_method='fps'):
super().__init__()
self.radius = radius
self.nsample = nsample
self.mlp_module = None
self.use_xyz = use_xyz
self.normalize_xyz = normalize_xyz
self.grouper = pointnet2_utils.QueryAndGroup(self.radius, nsample, use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz, sample_uniformly=sample_uniformly, ret_unique_cnt=False)
mlp_spec = mlp
if (use_xyz and (len(mlp_spec) > 0)):
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
self.sample_method = sample_method
def forward(self, xyz: torch.Tensor, features: torch.Tensor, npoint: int, inds: torch.Tensor=None):
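# Seed-point selection: 'fps' runs farthest point sampling on xyz, 'ffps' runs FPS on
# pairwise distances in combined (xyz + feature) space, while 'rs' and 'sequence' both
# take the first npoint indices here (even though 'rs' presumably stands for random
# sampling). The selected points are then grouped, passed through the shared MLP and
# max-pooled over the neighbourhood.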
xyz_flipped = xyz.transpose(1, 2).contiguous()
if (inds is None):
if (self.sample_method == 'ffps'):
features_for_fps = torch.cat([xyz_flipped, features], dim=1).transpose(1, 2).contiguous()
features_for_fps_distance = square_distance(features_for_fps, features_for_fps)
inds = pointnet2_utils.furthest_point_sampling_with_dist(features_for_fps_distance, npoint)
elif (self.sample_method == 'rs'):
inds = torch.arange(npoint).repeat(xyz.size(0), 1).int().cuda()
elif (self.sample_method == 'sequence'):
inds = torch.arange(npoint).repeat(xyz.size(0), 1).int().cuda()
elif (self.sample_method == 'fps'):
inds = pointnet2_utils.furthest_point_sample(xyz, npoint)
else:
raise NotImplementedError
else:
assert (inds.shape[1] == npoint)
new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, inds).transpose(1, 2).contiguous() if (npoint is not None) else None)
(grouped_features, grouped_xyz) = self.grouper(xyz, new_xyz, features)
new_features_ = self.mlp_module(grouped_features)
new_features_ = F.max_pool2d(new_features_, kernel_size=[1, new_features_.size(3)])
new_features_ = new_features_.squeeze((- 1))
return (new_xyz, new_features_, inds.to(torch.int64)) |
_module()
class EmptyCacheHook(Hook):
def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self, runner):
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner):
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner):
if self._after_epoch:
torch.cuda.empty_cache() |
class TestDummies(TestCore):
def test_get_dummy(self):
Dummy('dummy')
def test_create_dummy(self):
d = Dummy.create(0.01)
self.assertIsInstance(d, Dummy) |
def calculate_fid_given_paths(paths, batch_size, device, dims):
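# FID pipeline: activations from the chosen InceptionV3 block are summarised as a mean
# vector and covariance matrix per image folder, and the Frechet distance between the
# two resulting Gaussians is returned.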
for p in paths:
if (not os.path.exists(p)):
raise RuntimeError(('Invalid path: %s' % p))
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx]).to(device)
(m1, s1) = compute_statistics_of_path(paths[0], model, batch_size, dims, device)
(m2, s2) = compute_statistics_of_path(paths[1], model, batch_size, dims, device)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value |
class Module(abc.ABC):
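# Base TF module: trainable variables are looked up through the variable scope, cached
# on first access, and can be flattened to / restored from a single parameter vector
# for snapshotting (__getstate__ drops the cached parameter list).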
def __init__(self, name):
self._name = name
self._variable_scope = None
self._cached_params = None
self._cached_param_shapes = None
def name(self):
return self._name
def vectorized(self):
pass
def reset(self, do_resets=None):
pass
def state_info_specs(self):
return list()
def state_info_keys(self):
return [k for (k, _) in self.state_info_specs()]
def terminate(self):
pass
def get_trainable_vars(self):
return self._variable_scope.trainable_variables()
def get_global_vars(self):
return self._variable_scope.global_variables()
def get_params(self):
if (self._cached_params is None):
self._cached_params = self.get_trainable_vars()
return self._cached_params
def get_param_shapes(self):
if (self._cached_param_shapes is None):
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
self._cached_param_shapes = [val.shape for val in param_values]
return self._cached_param_shapes
def get_param_values(self):
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
return flatten_tensors(param_values)
def set_param_values(self, param_values):
param_values = unflatten_tensors(param_values, self.get_param_shapes())
for (param, value) in zip(self.get_params(), param_values):
param.load(value)
def flat_to_params(self, flattened_params):
return unflatten_tensors(flattened_params, self.get_param_shapes())
def __getstate__(self):
new_dict = self.__dict__.copy()
del new_dict['_cached_params']
return new_dict
def __setstate__(self, state):
self._cached_params = None
self.__dict__.update(state) |
class PairwiseDistance(Module):
__constants__ = ['norm', 'eps', 'keepdim']
norm: float
eps: float
keepdim: bool
def __init__(self, p: float=2.0, eps: float=1e-06, keepdim: bool=False) -> None:
super(PairwiseDistance, self).__init__()
self.norm = p
self.eps = eps
self.keepdim = keepdim
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim) |
.skipif((not has_pytorch()), reason='Pytorch not installed.')
_utils.test(arch=ti.cuda)
def test_torch_cuda_context():
device = torch.device('cuda:0')
x = torch.tensor([2.0], requires_grad=True, device=device)
assert torch._C._cuda_hasPrimaryContext(0)
loss = (x ** 2)
loss.backward() |
class _DummyEnvPoolTest(absltest.TestCase):
def test_config(self) -> None:
ref_config_keys = ['num_envs', 'batch_size', 'num_threads', 'max_num_players', 'thread_affinity_offset', 'base_path', 'seed', 'gym_reset_return_info', 'state_num', 'action_num', 'max_episode_steps']
default_conf = _DummyEnvSpec._default_config_values
self.assertTrue(isinstance(default_conf, tuple))
config_keys = _DummyEnvSpec._config_keys
self.assertTrue(isinstance(config_keys, list))
self.assertEqual(len(default_conf), len(config_keys))
self.assertEqual(sorted(config_keys), sorted(ref_config_keys))
def test_spec(self) -> None:
conf = _DummyEnvSpec._default_config_values
env_spec = _DummyEnvSpec(conf)
state_spec = env_spec._state_spec
action_spec = env_spec._action_spec
state_keys = env_spec._state_keys
action_keys = env_spec._action_keys
self.assertTrue(isinstance(state_spec, tuple))
self.assertTrue(isinstance(action_spec, tuple))
state_spec = dict(zip(state_keys, state_spec))
action_spec = dict(zip(action_keys, action_spec))
self.assertEqual(state_spec['obs:raw'][1][(- 1)], 10)
self.assertEqual(state_spec['obs:dyn'][1][1][(- 1)], 10)
conf = dict(zip(_DummyEnvSpec._config_keys, conf))
conf['state_num'] = 666
env_spec = _DummyEnvSpec(tuple(conf.values()))
state_spec = dict(zip(state_keys, env_spec._state_spec))
self.assertEqual(state_spec['obs:raw'][1][(- 1)], 666)
def test_envpool(self) -> None:
conf = dict(zip(_DummyEnvSpec._config_keys, _DummyEnvSpec._default_config_values))
conf['num_envs'] = num_envs = 100
conf['batch_size'] = batch = 31
conf['num_threads'] = os.cpu_count()
env_spec = _DummyEnvSpec(tuple(conf.values()))
env = _DummyEnvPool(env_spec)
state_keys = env._state_keys
total = 100000
env._reset(np.arange(num_envs, dtype=np.int32))
t = time.time()
for _ in range(total):
state = dict(zip(state_keys, env._recv()))
action = {'env_id': state['info:env_id'], 'players.env_id': state['info:players.env_id'], 'list_action': np.zeros((batch, 6), dtype=np.float64), 'players.id': state['info:players.id'], 'players.action': state['info:players.id']}
env._send(tuple(action.values()))
duration = (time.time() - t)
fps = ((total * batch) / duration)
logging.info(f'FPS = {fps:.6f}')
def test_xla(self) -> None:
conf = dict(zip(_DummyEnvSpec._config_keys, _DummyEnvSpec._default_config_values))
conf['num_envs'] = 100
conf['batch_size'] = 31
conf['num_threads'] = os.cpu_count()
env_spec = _DummyEnvSpec(tuple(conf.values()))
env = _DummyEnvPool(env_spec)
xla_failed = False
try:
_ = env._xla()
except RuntimeError:
logging.info('XLA on Dummy failed because dummy has Container typed state.')
xla_failed = True
self.assertTrue(xla_failed) |
def CyclicPresentation(n):
n = Integer(n)
if (n < 1):
raise ValueError('finitely presented group order must be positive')
F = FreeGroup('a')
rls = ((F([1]) ** n),)
return FinitelyPresentedGroup(F, rls) |
(name='generate-cert-request')
('-n', '--collaborator_name', required=True, help='The certified common name of the collaborator')
('-s', '--silent', help='Do not prompt', is_flag=True)
('-x', '--skip-package', help='Do not package the certificate signing request for export', is_flag=True)
def generate_cert_request_(collaborator_name, silent, skip_package):
generate_cert_request(collaborator_name, silent, skip_package) |
def make_batch_char_elem_into_tensor(batch, entry, pad, maxl=None, minl=None):
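# Builds a padded character tensor of shape (batch, max_char_length, max_tokens),
# clipping the character length to maxl and padding it up to minl when those bounds
# are given; all remaining cells keep the pad value.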
max_char_length = max((len(chars) for elem in batch for chars in elem[entry]))
if (maxl is not None):
max_char_length = min(max_char_length, maxl)
if (minl is not None):
max_char_length = max(max_char_length, minl)
torch_batch = np.full((len(batch), max_char_length, max((len(elem[entry]) for elem in batch))), pad)
for i in range(0, len(batch)):
for j in range(0, len(batch[i][entry])):
for k in range(0, min(max_char_length, len(batch[i][entry][j]))):
torch_batch[i][k][j] = batch[i][entry][j][k]
return torch.LongTensor(torch_batch) |
class BasicTokenizer(object):
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
if (never_split is None):
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
def tokenize(self, text, never_split=None):
never_split = (self.never_split.union(set(never_split)) if never_split else self.never_split)
text = self._clean_text(text)
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if (token not in never_split):
if self.do_lower_case:
token = token.lower()
if (self.strip_accents is not False):
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(' '.join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if (cat == 'Mn'):
continue
output.append(char)
return ''.join(output)
def _run_split_on_punc(self, text, never_split=None):
if ((never_split is not None) and (text in never_split)):
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while (i < len(chars)):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[(- 1)].append(char)
i += 1
return [''.join(x) for x in output]
def _tokenize_chinese_chars(self, text):
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output)
def _is_chinese_char(self, cp):
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False
def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if ((cp == 0) or (cp == 65533) or _is_control(char)):
continue
if _is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output) |
def _set_playable_dice(dice: Array) -> Array:
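# Backgammon dice: doubles yield four copies of the rolled value, otherwise the two
# distinct dice are kept and the remaining slots are filled with -1.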
return (((dice[0] == dice[1]) * jnp.array(([dice[0]] * 4), dtype=jnp.int32)) + ((dice[0] != dice[1]) * jnp.array([dice[0], dice[1], (- 1), (- 1)], dtype=jnp.int32))) |
class MFModel(object):
def __init__(self, F, data, lr, reg, random_seed, *args):
np.random.seed(random_seed)
self._factors = F
self._users = data.users
self._items = data.items
self._private_users = data.private_users
self._public_users = data.public_users
self._private_items = data.private_items
self._public_items = data.public_items
self._lr = lr
self._reg = reg
self.initialize(*args)
def initialize(self, loc: float=0, scale: float=0.1):
self._global_bias = 0
self._user_bias = np.zeros(len(self._users))
self._item_bias = np.zeros(len(self._items))
self._user_factors = np.random.normal(loc=loc, scale=scale, size=(len(self._users), self._factors))
self._item_factors = np.random.normal(loc=loc, scale=scale, size=(len(self._items), self._factors))
def name(self):
return 'MF2020'
def indexed_predict(self, user, item):
return (((self._global_bias + self._user_bias[user]) + self._item_bias[item]) + (self._user_factors[user] @ self._item_factors[item]))
def get_user_predictions(self, user_id, mask, top_k=10):
user_id = self._public_users.get(user_id)
b = self._preds[user_id]
a = mask[user_id]
b[(~ a)] = (- np.inf)
(indices, values) = zip(*[(self._private_items.get(u_list[0]), u_list[1]) for u_list in enumerate(b.data)])
indices = np.array(indices)
values = np.array(values)
local_k = min(top_k, len(values))
partially_ordered_preds_indices = np.argpartition(values, (- local_k))[(- local_k):]
real_values = values[partially_ordered_preds_indices]
real_indices = indices[partially_ordered_preds_indices]
local_top_k = real_values.argsort()[::(- 1)]
return [(real_indices[item], real_values[item]) for item in local_top_k]
def train_step(self, batch, **kwargs):
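# One SGD pass of logistic matrix factorisation: for each (user, item, rating) the
# prediction is the bias terms plus a factor dot product, the loss is the numerically
# stable log-sigmoid cross entropy, and factors/biases are updated with gradient
# (rating - sigmoid) and L2 regularisation.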
sum_of_loss = 0
lr = self._lr
reg = self._reg
for (user, item, rating) in batch:
gb_ = self._global_bias
uf_ = self._user_factors[user]
if_ = self._item_factors[item]
ub_ = self._user_bias[user]
ib_ = self._item_bias[item]
prediction = (((gb_ + ub_) + ib_) + np.dot(uf_, if_))
if (prediction > 0):
one_plus_exp_minus_pred = (1.0 + np.exp((- prediction)))
sigmoid = (1.0 / one_plus_exp_minus_pred)
this_loss = (np.log(one_plus_exp_minus_pred) + ((1.0 - rating) * prediction))
else:
exp_pred = np.exp(prediction)
sigmoid = (exp_pred / (1.0 + exp_pred))
this_loss = (((- rating) * prediction) + np.log((1.0 + exp_pred)))
grad = (rating - sigmoid)
self._user_factors[user] += (lr * ((grad * if_) - (reg * uf_)))
self._item_factors[item] += (lr * ((grad * uf_) - (reg * if_)))
self._user_bias[user] += (lr * (grad - (reg * ub_)))
self._item_bias[item] += (lr * (grad - (reg * ib_)))
self._global_bias += (lr * (grad - (reg * gb_)))
sum_of_loss += this_loss
return sum_of_loss
def prepare_predictions(self):
self._preds = (np.expand_dims(self._user_bias, axis=1) + ((self._global_bias + self._item_bias) + (self._user_factors @ self._item_factors.T)))
def update_factors(self, user: int, item: int, rating: float):
uf_ = self._user_factors[user]
if_ = self._item_factors[item]
ub_ = self._user_bias[user]
ib_ = self._item_bias[item]
gb_ = self._global_bias
lr = self._lr
reg = self._reg
prediction = (((gb_ + ub_) + ib_) + np.dot(uf_, if_))
if (prediction > 0):
one_plus_exp_minus_pred = (1.0 + np.exp((- prediction)))
sigmoid = (1.0 / one_plus_exp_minus_pred)
this_loss = (np.log(one_plus_exp_minus_pred) + ((1.0 - rating) * prediction))
else:
exp_pred = np.exp(prediction)
sigmoid = (exp_pred / (1.0 + exp_pred))
this_loss = (((- rating) * prediction) + np.log((1.0 + exp_pred)))
grad = (rating - sigmoid)
self._user_factors[user] += (lr * ((grad * if_) - (reg * uf_)))
self._item_factors[item] += (lr * ((grad * uf_) - (reg * if_)))
self._user_bias[user] += (lr * (grad - (reg * ub_)))
self._item_bias[item] += (lr * (grad - (reg * ib_)))
self._global_bias += (lr * (grad - (reg * gb_)))
return this_loss
def get_model_state(self):
saving_dict = {}
saving_dict['_global_bias'] = self._global_bias
saving_dict['_user_bias'] = self._user_bias
saving_dict['_item_bias'] = self._item_bias
saving_dict['_user_factors'] = self._user_factors
saving_dict['_item_factors'] = self._item_factors
return saving_dict
def set_model_state(self, saving_dict):
self._global_bias = saving_dict['_global_bias']
self._user_bias = saving_dict['_user_bias']
self._item_bias = saving_dict['_item_bias']
self._user_factors = saving_dict['_user_factors']
self._item_factors = saving_dict['_item_factors']
def load_weights(self, path):
with open(path, 'rb') as f:
self.set_model_state(pickle.load(f))
def save_weights(self, path):
with open(path, 'wb') as f:
pickle.dump(self.get_model_state(), f) |
('/_connect/', methods=['GET'])
def connect():
backend = get_backend()
backend.connect(userid())
return jsonify(success=True) |
def dataio_prepare(hparams):
data_folder = hparams['data_folder']
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=hparams['train_csv'], replacements={'data_root': data_folder})
if (hparams['sorting'] == 'ascending'):
train_data = train_data.filtered_sorted(sort_key='duration')
hparams['train_dataloader_opts']['shuffle'] = False
elif (hparams['sorting'] == 'descending'):
train_data = train_data.filtered_sorted(sort_key='duration', reverse=True)
hparams['train_dataloader_opts']['shuffle'] = False
elif (hparams['sorting'] == 'random'):
pass
else:
raise NotImplementedError('sorting must be random, ascending or descending')
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=hparams['valid_csv'], replacements={'data_root': data_folder})
valid_data = valid_data.filtered_sorted(sort_key='duration')
test_datasets = {}
for csv_file in hparams['test_csv']:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=csv_file, replacements={'data_root': data_folder})
test_datasets[name] = test_datasets[name].filtered_sorted(sort_key='duration')
datasets = ([train_data, valid_data] + [i for (k, i) in test_datasets.items()])
valtest_datasets = ([valid_data] + [i for (k, i) in test_datasets.items()])
tokenizer = hparams['tokenizer']
.data_pipeline.takes('wav')
.data_pipeline.provides('sig')
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(valtest_datasets, audio_pipeline)
.data_pipeline.takes('wav')
.data_pipeline.provides('sig')
def audio_pipeline_train(wav):
if ('speed_perturb' in hparams):
sig = sb.dataio.dataio.read_audio(wav)
sig = hparams['speed_perturb'](sig.unsqueeze(0)).squeeze(0)
else:
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline_train)
.data_pipeline.takes('wrd')
.data_pipeline.provides('wrd', 'tokens_list', 'tokens_bos', 'tokens_eos', 'tokens')
def text_pipeline(wrd):
(yield wrd)
tokens_list = tokenizer.encode_as_ids(wrd)
(yield tokens_list)
tokens_bos = torch.LongTensor(([hparams['bos_index']] + tokens_list))
(yield tokens_bos)
tokens_eos = torch.LongTensor((tokens_list + [hparams['eos_index']]))
(yield tokens_eos)
tokens = torch.LongTensor(tokens_list)
(yield tokens)
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
sb.dataio.dataset.set_output_keys(datasets, ['id', 'sig', 'wrd', 'tokens_bos', 'tokens_eos', 'tokens'])
train_batch_sampler = None
valid_batch_sampler = None
if hparams['dynamic_batching']:
from speechbrain.dataio.sampler import DynamicBatchSampler
dynamic_hparams = hparams['dynamic_batch_sampler']
num_buckets = dynamic_hparams['num_buckets']
train_batch_sampler = DynamicBatchSampler(train_data, dynamic_hparams['max_batch_len'], num_buckets=num_buckets, length_func=(lambda x: x['duration']), shuffle=dynamic_hparams['shuffle_ex'], batch_ordering=dynamic_hparams['batch_ordering'], max_batch_ex=dynamic_hparams['max_batch_ex'])
valid_batch_sampler = DynamicBatchSampler(valid_data, dynamic_hparams['max_batch_len_val'], num_buckets=num_buckets, length_func=(lambda x: x['duration']), shuffle=dynamic_hparams['shuffle_ex'], batch_ordering=dynamic_hparams['batch_ordering'])
return (train_data, valid_data, test_datasets, tokenizer, train_batch_sampler, valid_batch_sampler) |
def otsu_threshold(image, nbins=256):
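# Otsu's method: from the image histogram, compute cumulative class weights and means
# for every candidate split and return the bin centre that maximises the between-class
# variance weight1 * weight2 * (mean1 - mean2)**2.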
(hist, bin_centers) = histogram(image.ravel(), nbins)
hist = hist.astype(float)
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::(- 1)])[::(- 1)]
mean1 = (np.cumsum((hist * bin_centers)) / weight1)
mean2 = (np.cumsum((hist * bin_centers)[::(- 1)]) / weight2[::(- 1)])[::(- 1)]
variance12 = ((weight1[:(- 1)] * weight2[1:]) * ((mean1[:(- 1)] - mean2[1:]) ** 2))
idx = np.argmax(variance12)
threshold = bin_centers[:(- 1)][idx]
return threshold |
def to_binary(bars, threshold=0.0):
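# A cell in `bars` is kept only if it is both the maximum along the last axis and
# above `threshold`; note that keep_dims is the TF1 spelling of keepdims.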
track_is_max = tf.equal(bars, tf.reduce_max(bars, axis=(- 1), keep_dims=True))
track_pass_threshold = (bars > threshold)
out_track = tf.logical_and(track_is_max, track_pass_threshold)
return out_track |
def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False, events=None, vectorized=False, args=None, **options):
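# Flow: validate the chosen method and t_eval, optionally wrap fun/jac to pass extra
# args, then repeatedly call solver.step(), recording solution points (either every
# step or interpolated at t_eval), handling event roots, and finally assembling an
# OdeResult with optional dense output.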
if ((method not in METHODS) and (not (inspect.isclass(method) and issubclass(method, OdeSolver)))):
raise ValueError('`method` must be one of {} or OdeSolver class.'.format(METHODS))
(t0, tf) = (float(t_span[0]), float(t_span[1]))
if (args is not None):
fun = (lambda t, x, fun=fun: fun(t, x, *args))
jac = options.get('jac')
if callable(jac):
options['jac'] = (lambda t, x: jac(t, x, *args))
if (t_eval is not None):
t_eval = np.asarray(t_eval)
if (t_eval.ndim != 1):
raise ValueError('`t_eval` must be 1-dimensional.')
if (np.any((t_eval < min(t0, tf))) or np.any((t_eval > max(t0, tf)))):
raise ValueError('Values in `t_eval` are not within `t_span`.')
d = np.diff(t_eval)
if (((tf > t0) and np.any((d <= 0))) or ((tf < t0) and np.any((d >= 0)))):
raise ValueError('Values in `t_eval` are not properly sorted.')
if (tf > t0):
t_eval_i = 0
else:
t_eval = t_eval[::(- 1)]
t_eval_i = t_eval.shape[0]
if (method in METHODS):
method = METHODS[method]
solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
if (t_eval is None):
ts = [t0]
ys = [y0]
elif ((t_eval is not None) and dense_output):
ts = []
ti = [t0]
ys = []
else:
ts = []
ys = []
interpolants = []
(events, is_terminal, event_dir) = prepare_events(events)
if (events is not None):
if (args is not None):
events = [(lambda t, x, event=event: event(t, x, *args)) for event in events]
g = [event(t0, y0) for event in events]
t_events = [[] for _ in range(len(events))]
y_events = [[] for _ in range(len(events))]
else:
t_events = None
y_events = None
status = None
while (status is None):
message = solver.step()
if (solver.status == 'finished'):
status = 0
elif (solver.status == 'failed'):
status = (- 1)
break
t_old = solver.t_old
t = solver.t
y = solver.y
if dense_output:
sol = solver.dense_output()
interpolants.append(sol)
else:
sol = None
if (events is not None):
g_new = [event(t, y) for event in events]
active_events = find_active_events(g, g_new, event_dir)
if (active_events.size > 0):
if (sol is None):
sol = solver.dense_output()
(root_indices, roots, terminate) = handle_events(sol, events, active_events, is_terminal, t_old, t)
for (e, te) in zip(root_indices, roots):
t_events[e].append(te)
y_events[e].append(sol(te))
if terminate:
status = 1
t = roots[(- 1)]
y = sol(t)
g = g_new
if (t_eval is None):
ts.append(t)
ys.append(y)
else:
if (solver.direction > 0):
t_eval_i_new = np.searchsorted(t_eval, t, side='right')
t_eval_step = t_eval[t_eval_i:t_eval_i_new]
else:
t_eval_i_new = np.searchsorted(t_eval, t, side='left')
t_eval_step = t_eval[t_eval_i_new:t_eval_i][::(- 1)]
if (t_eval_step.size > 0):
if (sol is None):
sol = solver.dense_output()
ts.append(t_eval_step)
ys.append(sol(t_eval_step))
t_eval_i = t_eval_i_new
if ((t_eval is not None) and dense_output):
ti.append(t)
message = MESSAGES.get(status, message)
if (t_events is not None):
t_events = [np.asarray(te) for te in t_events]
y_events = [np.asarray(ye) for ye in y_events]
if (t_eval is None):
ts = np.array(ts)
ys = np.vstack(ys).T
else:
ts = np.hstack(ts)
ys = np.hstack(ys)
if dense_output:
if (t_eval is None):
sol = OdeSolution(ts, interpolants)
else:
sol = OdeSolution(ti, interpolants)
else:
sol = None
return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events, nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu, status=status, message=message, success=(status >= 0)) |
def eval_parsing_ap(all_parsings, all_scores, score_thresh, im_dir, ann_fn, num_parsing):
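# Human-parsing evaluation: predictions are sorted by confidence, each predicted
# parsing is matched to the ground-truth annotation with the highest mean IoU, and AP
# (via voc_ap) plus PCP are accumulated at IoU thresholds 0.1-0.9.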
nb_class = num_parsing
ovthresh_seg = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
confidence = []
image_ids = []
Local_segs_ptr = []
for (img_index, parsings) in enumerate(all_parsings):
for (idx, rect) in enumerate(parsings):
score = all_scores[img_index][idx]
image_ids.append(img_index)
confidence.append(score)
Local_segs_ptr.append(idx)
confidence = np.array(confidence)
Local_segs_ptr = np.array(Local_segs_ptr)
sorted_ind = np.argsort((- confidence))
sorted_scores = confidence[sorted_ind]
Local_segs_ptr = Local_segs_ptr[sorted_ind]
image_ids = [image_ids[x] for x in sorted_ind]
(class_recs_temp, npos) = get_gt(im_dir, ann_fn)
class_recs = [copy.deepcopy(class_recs_temp) for _ in range(len(ovthresh_seg))]
nd = len(image_ids)
tp_seg = [np.zeros(nd) for _ in range(len(ovthresh_seg))]
fp_seg = [np.zeros(nd) for _ in range(len(ovthresh_seg))]
pcp_list = [[] for _ in range(len(ovthresh_seg))]
for d in trange(nd, desc='Calculating AP and PCP ..'):
if (sorted_scores[d] < score_thresh):
continue
R = []
for j in range(len(ovthresh_seg)):
R.append(class_recs[j][image_ids[d]])
ovmax = (- np.inf)
jmax = (- 1)
parsings = all_parsings[image_ids[d]]
mask0 = parsings[Local_segs_ptr[d]]
mask_pred = mask0.astype(int)
for i in range(len(R[0]['anno_adds'])):
mask_gt = cv2.imread(R[0]['anno_adds'][i], 0)
seg_iou = cal_one_mean_iou(mask_pred.astype(np.uint8), mask_gt, nb_class)
mean_seg_iou = np.nanmean(seg_iou)
if (mean_seg_iou > ovmax):
ovmax = mean_seg_iou
seg_iou_max = seg_iou
jmax = i
mask_gt_u = np.unique(mask_gt)
for j in range(len(ovthresh_seg)):
if (ovmax > ovthresh_seg[j]):
if (not R[j]['det'][jmax]):
tp_seg[j][d] = 1.0
R[j]['det'][jmax] = 1
pcp_d = len(mask_gt_u[np.logical_and((mask_gt_u > 0), (mask_gt_u < nb_class))])
pcp_n = float(np.sum((seg_iou_max[1:] > ovthresh_seg[j])))
if (pcp_d > 0):
pcp_list[j].append((pcp_n / pcp_d))
else:
pcp_list[j].append(0.0)
else:
fp_seg[j][d] = 1.0
else:
fp_seg[j][d] = 1.0
all_ap_seg = []
all_pcp = []
for j in range(len(ovthresh_seg)):
fp_seg[j] = np.cumsum(fp_seg[j])
tp_seg[j] = np.cumsum(tp_seg[j])
rec_seg = (tp_seg[j] / float(npos))
prec_seg = (tp_seg[j] / np.maximum((tp_seg[j] + fp_seg[j]), np.finfo(np.float64).eps))
ap_seg = voc_ap(rec_seg, prec_seg)
all_ap_seg.append(ap_seg)
assert (np.max(tp_seg[j]) == len(pcp_list[j])), ('%d vs %d' % (np.max(tp_seg[j]), len(pcp_list[j])))
pcp_list[j].extend(([0.0] * (npos - len(pcp_list[j]))))
pcp = np.mean(pcp_list[j])
all_pcp.append(pcp)
return (all_ap_seg, all_pcp) |
def get_model_4(params):
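# Keras 1 style text CNN: parallel Convolution1D branches (one per filter size) over
# the embedded sequence are global-max-pooled and concatenated, then fed through
# dropout and dense layers; the embedding layer is skipped for the 'CNN-static'
# variation and an L2-normalising Lambda is appended when the final activation is
# linear.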
embedding_weights = pickle.load(open((common.TRAINDATA_DIR + ('/embedding_weights_w2v_%s.pk' % params['embeddings_suffix'])), 'rb'))
graph_in = Input(shape=(params['sequence_length'], params['embedding_dim']))
convs = []
for fsz in params['filter_sizes']:
conv = Convolution1D(nb_filter=params['num_filters'], filter_length=fsz, border_mode='valid', activation='relu', subsample_length=1)
x = conv(graph_in)
logging.debug(('Filter size: %s' % fsz))
logging.debug(('Output CNN: %s' % str(conv.output_shape)))
pool = GlobalMaxPooling1D()
x = pool(x)
logging.debug(('Output Pooling: %s' % str(pool.output_shape)))
convs.append(x)
if (len(params['filter_sizes']) > 1):
merge = Merge(mode='concat')
out = merge(convs)
logging.debug(('Merge: %s' % str(merge.output_shape)))
else:
out = convs[0]
graph = Model(input=graph_in, output=out)
model = Sequential()
if (not (params['model_variation'] == 'CNN-static')):
model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'], input_length=params['sequence_length'], weights=embedding_weights))
model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
model.add(graph)
model.add(Dense(params['n_dense']))
model.add(Dropout(params['dropout_prob'][1]))
model.add(Activation('relu'))
model.add(Dense(output_dim=params['n_out'], init='uniform'))
model.add(Activation(params['final_activation']))
logging.debug(('Output CNN: %s' % str(model.output_shape)))
if (params['final_activation'] == 'linear'):
model.add(Lambda((lambda x: K.l2_normalize(x, axis=1))))
return model |
class PNEANetV(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
__swig_destroy__ = _snap.delete_PNEANetV
def __init__(self, *args):
_snap.PNEANetV_swiginit(self, _snap.new_PNEANetV(*args))
def Load(self, SIn):
return _snap.PNEANetV_Load(self, SIn)
def Save(self, SOut):
return _snap.PNEANetV_Save(self, SOut)
def __add__(self, Val):
return _snap.PNEANetV___add__(self, Val)
def GetMemUsed(self):
return _snap.PNEANetV_GetMemUsed(self)
def GetMemSize(self):
return _snap.PNEANetV_GetMemSize(self)
def Gen(self, *args):
return _snap.PNEANetV_Gen(self, *args)
def GenExt(self, _ValT, _Vals):
return _snap.PNEANetV_GenExt(self, _ValT, _Vals)
def IsExt(self):
return _snap.PNEANetV_IsExt(self)
def Reserve(self, *args):
return _snap.PNEANetV_Reserve(self, *args)
def Clr(self, DoDel=True, NoDelLim=(- 1)):
return _snap.PNEANetV_Clr(self, DoDel, NoDelLim)
def Trunc(self, _Vals=(- 1)):
return _snap.PNEANetV_Trunc(self, _Vals)
def Reduce(self, _Vals=(- 1)):
return _snap.PNEANetV_Reduce(self, _Vals)
def Pack(self):
return _snap.PNEANetV_Pack(self)
def MoveFrom(self, Vec):
return _snap.PNEANetV_MoveFrom(self, Vec)
def Empty(self):
return _snap.PNEANetV_Empty(self)
def Len(self):
return _snap.PNEANetV_Len(self)
def Reserved(self):
return _snap.PNEANetV_Reserved(self)
def Last(self, *args):
return _snap.PNEANetV_Last(self, *args)
def LastValN(self):
return _snap.PNEANetV_LastValN(self)
def LastLast(self, *args):
return _snap.PNEANetV_LastLast(self, *args)
def GetRndVal(self, *args):
return _snap.PNEANetV_GetRndVal(self, *args)
def BegI(self):
return _snap.PNEANetV_BegI(self)
def EndI(self):
return _snap.PNEANetV_EndI(self)
def GetI(self, ValN):
return _snap.PNEANetV_GetI(self, ValN)
def Add(self, *args):
return _snap.PNEANetV_Add(self, *args)
def AddMP(self, Val):
return _snap.PNEANetV_AddMP(self, Val)
def MoveLastMP(self, Val, Inc):
return _snap.PNEANetV_MoveLastMP(self, Val, Inc)
def AddV(self, ValV):
return _snap.PNEANetV_AddV(self, ValV)
def GetVal(self, *args):
return _snap.PNEANetV_GetVal(self, *args)
def SetVal(self, ValN, Val):
return _snap.PNEANetV_SetVal(self, ValN, Val)
def GetSubValV(self, BValN, EValN, ValV):
return _snap.PNEANetV_GetSubValV(self, BValN, EValN, ValV)
def Ins(self, ValN, Val):
return _snap.PNEANetV_Ins(self, ValN, Val)
def Del(self, *args):
return _snap.PNEANetV_Del(self, *args)
def DelLast(self):
return _snap.PNEANetV_DelLast(self)
def PutAll(self, Val):
return _snap.PNEANetV_PutAll(self, Val)
def Swap(self, *args):
return _snap.PNEANetV_Swap(self, *args)
def SwapI(LVal, RVal):
return _snap.PNEANetV_SwapI(LVal, RVal)
SwapI = staticmethod(SwapI)
def Shuffle(self, Rnd):
return _snap.PNEANetV_Shuffle(self, Rnd)
def Reverse(self, *args):
return _snap.PNEANetV_Reverse(self, *args)
def GetV(*args):
return _snap.PNEANetV_GetV(*args)
GetV = staticmethod(GetV) |
class HTMLTokenizer(object):
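# html5lib-style tokenizer: `state` holds a bound method implementing the current
# HTML5 tokenization state; __iter__ keeps calling it, draining stream errors and
# queued tokens, until a state handler returns False at end of input.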
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
self.tokenQueue = deque([])
while self.state():
while self.stream.errors:
(yield {'type': tokenTypes['ParseError'], 'data': self.stream.errors.pop(0)})
while self.tokenQueue:
(yield self.tokenQueue.popleft())
def consumeNumberEntity(self, isHex):
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
c = self.stream.char()
while ((c in allowed) and (c is not EOF)):
charStack.append(c)
c = self.stream.char()
charAsInt = int(''.join(charStack), radix)
if (charAsInt in replacementCharacters):
char = replacementCharacters[charAsInt]
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'illegal-codepoint-for-numeric-entity', 'datavars': {'charAsInt': charAsInt}})
elif ((55296 <= charAsInt <= 57343) or (charAsInt > 1114111)):
char = '\ufffd'
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'illegal-codepoint-for-numeric-entity', 'datavars': {'charAsInt': charAsInt}})
else:
if ((1 <= charAsInt <= 8) or (14 <= charAsInt <= 31) or (127 <= charAsInt <= 159) or (64976 <= charAsInt <= 65007) or (charAsInt in frozenset([11, 65534, 65535, 131070, 131071, 196606, 196607, 262142, 262143, 327678, 327679, 393214, 393215, 458750, 458751, 524286, 524287, 589822, 589823, 655358, 655359, 720894, 720895, 786430, 786431, 851966, 851967, 917502, 917503, 983038, 983039, 1048574, 1048575, 1114110, 1114111]))):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'illegal-codepoint-for-numeric-entity', 'datavars': {'charAsInt': charAsInt}})
try:
char = chr(charAsInt)
except ValueError:
v = (charAsInt - 65536)
char = (chr((55296 | (v >> 10))) + chr((56320 | (v & 1023))))
if (c != ';'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'numeric-entity-without-semicolon'})
self.stream.unget(c)
return char
def consumeEntity(self, allowedChar=None, fromAttribute=False):
output = '&'
charStack = [self.stream.char()]
if ((charStack[0] in spaceCharacters) or (charStack[0] in (EOF, '<', '&')) or ((allowedChar is not None) and (allowedChar == charStack[0]))):
self.stream.unget(charStack[0])
elif (charStack[0] == '#'):
hex = False
charStack.append(self.stream.char())
if (charStack[(- 1)] in ('x', 'X')):
hex = True
charStack.append(self.stream.char())
if ((hex and (charStack[(- 1)] in hexDigits)) or ((not hex) and (charStack[(- 1)] in digits))):
self.stream.unget(charStack[(- 1)])
output = self.consumeNumberEntity(hex)
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-numeric-entity'})
self.stream.unget(charStack.pop())
output = ('&' + ''.join(charStack))
else:
while (charStack[(- 1)] is not EOF):
if (not entitiesTrie.has_keys_with_prefix(''.join(charStack))):
break
charStack.append(self.stream.char())
try:
entityName = entitiesTrie.longest_prefix(''.join(charStack[:(- 1)]))
entityLength = len(entityName)
except KeyError:
entityName = None
if (entityName is not None):
if (entityName[(- 1)] != ';'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'named-entity-without-semicolon'})
if ((entityName[(- 1)] != ';') and fromAttribute and ((charStack[entityLength] in asciiLetters) or (charStack[entityLength] in digits) or (charStack[entityLength] == '='))):
self.stream.unget(charStack.pop())
output = ('&' + ''.join(charStack))
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += ''.join(charStack[entityLength:])
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-named-entity'})
self.stream.unget(charStack.pop())
output = ('&' + ''.join(charStack))
if fromAttribute:
self.currentToken['data'][(- 1)][1] += output
else:
if (output in spaceCharacters):
tokenType = 'SpaceCharacters'
else:
tokenType = 'Characters'
self.tokenQueue.append({'type': tokenTypes[tokenType], 'data': output})
def processEntityInAttribute(self, allowedChar):
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
token = self.currentToken
if (token['type'] in tagTokenTypes):
token['name'] = token['name'].translate(asciiUpper2Lower)
if (token['type'] == tokenTypes['EndTag']):
if token['data']:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'attributes-in-end-tag'})
if token['selfClosing']:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'self-closing-flag-on-end-tag'})
self.tokenQueue.append(token)
self.state = self.dataState
def dataState(self):
data = self.stream.char()
if (data == '&'):
self.state = self.entityDataState
elif (data == '<'):
self.state = self.tagOpenState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\x00'})
elif (data is EOF):
return False
elif (data in spaceCharacters):
self.tokenQueue.append({'type': tokenTypes['SpaceCharacters'], 'data': (data + self.stream.charsUntil(spaceCharacters, True))})
else:
chars = self.stream.charsUntil(('&', '<', '\x00'))
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': (data + chars)})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if (data == '&'):
self.state = self.characterReferenceInRcdata
elif (data == '<'):
self.state = self.rcdataLessThanSignState
elif (data == EOF):
return False
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
elif (data in spaceCharacters):
self.tokenQueue.append({'type': tokenTypes['SpaceCharacters'], 'data': (data + self.stream.charsUntil(spaceCharacters, True))})
else:
chars = self.stream.charsUntil(('&', '<', '\x00'))
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': (data + chars)})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if (data == '<'):
self.state = self.rawtextLessThanSignState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
elif (data == EOF):
return False
else:
chars = self.stream.charsUntil(('<', '\x00'))
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': (data + chars)})
return True
def scriptDataState(self):
data = self.stream.char()
if (data == '<'):
self.state = self.scriptDataLessThanSignState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
elif (data == EOF):
return False
else:
chars = self.stream.charsUntil(('<', '\x00'))
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': (data + chars)})
return True
def plaintextState(self):
data = self.stream.char()
if (data == EOF):
return False
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': (data + self.stream.charsUntil('\x00'))})
return True
def tagOpenState(self):
data = self.stream.char()
if (data == '!'):
self.state = self.markupDeclarationOpenState
elif (data == '/'):
self.state = self.closeTagOpenState
elif (data in asciiLetters):
self.currentToken = {'type': tokenTypes['StartTag'], 'name': data, 'data': [], 'selfClosing': False, 'selfClosingAcknowledged': False}
self.state = self.tagNameState
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-tag-name-but-got-right-bracket'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<>'})
self.state = self.dataState
elif (data == '?'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-tag-name-but-got-question-mark'})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-tag-name'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if (data in asciiLetters):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': data, 'data': [], 'selfClosing': False}
self.state = self.tagNameState
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-closing-tag-but-got-right-bracket'})
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-closing-tag-but-got-eof'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '</'})
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-closing-tag-but-got-char', 'datavars': {'data': data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.state = self.beforeAttributeNameState
elif (data == '>'):
self.emitCurrentToken()
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-tag-name'})
self.state = self.dataState
elif (data == '/'):
self.state = self.selfClosingStartTagState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['name'] += '\ufffd'
else:
self.currentToken['name'] += data
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if (data == '/'):
self.temporaryBuffer = ''
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if (data in asciiLetters):
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '</'})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = (self.currentToken and (self.currentToken['name'].lower() == self.temporaryBuffer.lower()))
data = self.stream.char()
if ((data in spaceCharacters) and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.beforeAttributeNameState
elif ((data == '/') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.selfClosingStartTagState
elif ((data == '>') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.emitCurrentToken()
self.state = self.dataState
elif (data in asciiLetters):
self.temporaryBuffer += data
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': ('</' + self.temporaryBuffer)})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if (data == '/'):
self.temporaryBuffer = ''
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if (data in asciiLetters):
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '</'})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = (self.currentToken and (self.currentToken['name'].lower() == self.temporaryBuffer.lower()))
data = self.stream.char()
if ((data in spaceCharacters) and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.beforeAttributeNameState
elif ((data == '/') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.selfClosingStartTagState
elif ((data == '>') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.emitCurrentToken()
self.state = self.dataState
elif (data in asciiLetters):
self.temporaryBuffer += data
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': ('</' + self.temporaryBuffer)})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if (data == '/'):
self.temporaryBuffer = ''
self.state = self.scriptDataEndTagOpenState
elif (data == '!'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<!'})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if (data in asciiLetters):
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '</'})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = (self.currentToken and (self.currentToken['name'].lower() == self.temporaryBuffer.lower()))
data = self.stream.char()
if ((data in spaceCharacters) and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.beforeAttributeNameState
elif ((data == '/') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.selfClosingStartTagState
elif ((data == '>') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.emitCurrentToken()
self.state = self.dataState
elif (data in asciiLetters):
self.temporaryBuffer += data
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': ('</' + self.temporaryBuffer)})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
self.state = self.scriptDataEscapedDashState
elif (data == '<'):
self.state = self.scriptDataEscapedLessThanSignState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
elif (data == EOF):
self.state = self.dataState
else:
chars = self.stream.charsUntil(('<', '-', '\x00'))
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': (data + chars)})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
self.state = self.scriptDataEscapedDashDashState
elif (data == '<'):
self.state = self.scriptDataEscapedLessThanSignState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
self.state = self.scriptDataEscapedState
elif (data == EOF):
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
elif (data == '<'):
self.state = self.scriptDataEscapedLessThanSignState
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '>'})
self.state = self.scriptDataState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
self.state = self.scriptDataEscapedState
elif (data == EOF):
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if (data == '/'):
self.temporaryBuffer = ''
self.state = self.scriptDataEscapedEndTagOpenState
elif (data in asciiLetters):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': ('<' + data)})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if (data in asciiLetters):
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '</'})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = (self.currentToken and (self.currentToken['name'].lower() == self.temporaryBuffer.lower()))
data = self.stream.char()
if ((data in spaceCharacters) and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.beforeAttributeNameState
elif ((data == '/') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.state = self.selfClosingStartTagState
elif ((data == '>') and appropriate):
self.currentToken = {'type': tokenTypes['EndTag'], 'name': self.temporaryBuffer, 'data': [], 'selfClosing': False}
self.emitCurrentToken()
self.state = self.dataState
elif (data in asciiLetters):
self.temporaryBuffer += data
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': ('</' + self.temporaryBuffer)})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if (data in (spaceCharacters | frozenset(('/', '>')))):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
if (self.temporaryBuffer.lower() == 'script'):
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif (data in asciiLetters):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
self.state = self.scriptDataDoubleEscapedDashState
elif (data == '<'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
elif (data == EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-script-in-script'})
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
self.state = self.scriptDataDoubleEscapedDashDashState
elif (data == '<'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
self.state = self.scriptDataDoubleEscapedState
elif (data == EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-script-in-script'})
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if (data == '-'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '-'})
elif (data == '<'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '<'})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '>'})
self.state = self.scriptDataState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '\ufffd'})
self.state = self.scriptDataDoubleEscapedState
elif (data == EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-script-in-script'})
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if (data == '/'):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': '/'})
self.temporaryBuffer = ''
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if (data in (spaceCharacters | frozenset(('/', '>')))):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
if (self.temporaryBuffer.lower() == 'script'):
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif (data in asciiLetters):
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
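# Note on the attribute states below: each attribute is built as a [name, value]
# pair appended to currentToken['data']; when attributeNameState is left, the
# name is lower-cased via asciiUpper2Lower and compared against earlier pairs,
# so a repeated attribute name emits a 'duplicate-attribute' parse error.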
def beforeAttributeNameState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.stream.charsUntil(spaceCharacters, True)
elif (data in asciiLetters):
self.currentToken['data'].append([data, ''])
self.state = self.attributeNameState
elif (data == '>'):
self.emitCurrentToken()
elif (data == '/'):
self.state = self.selfClosingStartTagState
elif (data in ("'", '"', '=', '<')):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-character-in-attribute-name'})
self.currentToken['data'].append([data, ''])
self.state = self.attributeNameState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'].append(['\ufffd', ''])
self.state = self.attributeNameState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-attribute-name-but-got-eof'})
self.state = self.dataState
else:
self.currentToken['data'].append([data, ''])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if (data == '='):
self.state = self.beforeAttributeValueState
elif (data in asciiLetters):
self.currentToken['data'][(- 1)][0] += (data + self.stream.charsUntil(asciiLetters, True))
leavingThisState = False
elif (data == '>'):
emitToken = True
elif (data in spaceCharacters):
self.state = self.afterAttributeNameState
elif (data == '/'):
self.state = self.selfClosingStartTagState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'][(- 1)][0] += '\ufffd'
leavingThisState = False
elif (data in ("'", '"', '<')):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-character-in-attribute-name'})
self.currentToken['data'][(- 1)][0] += data
leavingThisState = False
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-attribute-name'})
self.state = self.dataState
else:
self.currentToken['data'][(- 1)][0] += data
leavingThisState = False
if leavingThisState:
self.currentToken['data'][(- 1)][0] = self.currentToken['data'][(- 1)][0].translate(asciiUpper2Lower)
for (name, _) in self.currentToken['data'][:(- 1)]:
if (self.currentToken['data'][(- 1)][0] == name):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'duplicate-attribute'})
break
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.stream.charsUntil(spaceCharacters, True)
elif (data == '='):
self.state = self.beforeAttributeValueState
elif (data == '>'):
self.emitCurrentToken()
elif (data in asciiLetters):
self.currentToken['data'].append([data, ''])
self.state = self.attributeNameState
elif (data == '/'):
self.state = self.selfClosingStartTagState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'].append(['\ufffd', ''])
self.state = self.attributeNameState
elif (data in ("'", '"', '<')):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-character-after-attribute-name'})
self.currentToken['data'].append([data, ''])
self.state = self.attributeNameState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-end-of-tag-but-got-eof'})
self.state = self.dataState
else:
self.currentToken['data'].append([data, ''])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.stream.charsUntil(spaceCharacters, True)
elif (data == '"'):
self.state = self.attributeValueDoubleQuotedState
elif (data == '&'):
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif (data == "'"):
self.state = self.attributeValueSingleQuotedState
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-attribute-value-but-got-right-bracket'})
self.emitCurrentToken()
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'][(- 1)][1] += '\ufffd'
self.state = self.attributeValueUnQuotedState
elif (data in ('=', '<', '`')):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'equals-in-unquoted-attribute-value'})
self.currentToken['data'][(- 1)][1] += data
self.state = self.attributeValueUnQuotedState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-attribute-value-but-got-eof'})
self.state = self.dataState
else:
self.currentToken['data'][(- 1)][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if (data == '"'):
self.state = self.afterAttributeValueState
elif (data == '&'):
self.processEntityInAttribute('"')
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'][(- 1)][1] += '\ufffd'
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-attribute-value-double-quote'})
self.state = self.dataState
else:
self.currentToken['data'][(- 1)][1] += (data + self.stream.charsUntil(('"', '&', '\x00')))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if (data == "'"):
self.state = self.afterAttributeValueState
elif (data == '&'):
self.processEntityInAttribute("'")
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'][(- 1)][1] += '\ufffd'
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-attribute-value-single-quote'})
self.state = self.dataState
else:
self.currentToken['data'][(- 1)][1] += (data + self.stream.charsUntil(("'", '&', '\x00')))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.state = self.beforeAttributeNameState
elif (data == '&'):
self.processEntityInAttribute('>')
elif (data == '>'):
self.emitCurrentToken()
elif (data in ('"', "'", '=', '<', '`')):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-character-in-unquoted-attribute-value'})
self.currentToken['data'][(- 1)][1] += data
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'][(- 1)][1] += '\ufffd'
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-attribute-value-no-quotes'})
self.state = self.dataState
else:
self.currentToken['data'][(- 1)][1] += (data + self.stream.charsUntil((frozenset(('&', '>', '"', "'", '=', '<', '`', '\x00')) | spaceCharacters)))
return True
def afterAttributeValueState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.state = self.beforeAttributeNameState
elif (data == '>'):
self.emitCurrentToken()
elif (data == '/'):
self.state = self.selfClosingStartTagState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-EOF-after-attribute-value'})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-character-after-attribute-value'})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if (data == '>'):
self.currentToken['selfClosing'] = True
self.emitCurrentToken()
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-EOF-after-solidus-in-tag'})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-character-after-solidus-in-tag'})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
data = self.stream.charsUntil('>')
data = data.replace('\x00', '')
self.tokenQueue.append({'type': tokenTypes['Comment'], 'data': data})
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if (charStack[(- 1)] == '-'):
charStack.append(self.stream.char())
if (charStack[(- 1)] == '-'):
self.currentToken = {'type': tokenTypes['Comment'], 'data': ''}
self.state = self.commentStartState
return True
elif (charStack[(- 1)] in ('d', 'D')):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), ('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if (charStack[(- 1)] not in expected):
matched = False
break
if matched:
self.currentToken = {'type': tokenTypes['Doctype'], 'name': '', 'publicId': None, 'systemId': None, 'correct': True}
self.state = self.doctypeState
return True
elif ((charStack[(- 1)] == '[') and (self.parser is not None) and self.parser.tree.openElements and (self.parser.tree.openElements[(- 1)].namespace != self.parser.tree.defaultNamespace)):
matched = True
for expected in ['C', 'D', 'A', 'T', 'A', '[']:
charStack.append(self.stream.char())
if (charStack[(- 1)] != expected):
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-dashes-or-doctype'})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if (data == '-'):
self.state = self.commentStartDashState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'] += '\ufffd'
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'incorrect-comment'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-comment'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['data'] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if (data == '-'):
self.state = self.commentEndState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'] += '-\ufffd'
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'incorrect-comment'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-comment'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['data'] += ('-' + data)
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if (data == '-'):
self.state = self.commentEndDashState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'] += '\ufffd'
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-comment'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['data'] += (data + self.stream.charsUntil(('-', '\x00')))
return True
def commentEndDashState(self):
data = self.stream.char()
if (data == '-'):
self.state = self.commentEndState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'] += '-\ufffd'
self.state = self.commentState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-comment-end-dash'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['data'] += ('-' + data)
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if (data == '>'):
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'] += '--\ufffd'
self.state = self.commentState
elif (data == '!'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-bang-after-double-dash-in-comment'})
self.state = self.commentEndBangState
elif (data == '-'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-dash-after-double-dash-in-comment'})
self.currentToken['data'] += data
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-comment-double-dash'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-comment'})
self.currentToken['data'] += ('--' + data)
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if (data == '>'):
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data == '-'):
self.currentToken['data'] += '--!'
self.state = self.commentEndDashState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['data'] += '--!\ufffd'
self.state = self.commentState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-comment-end-bang-state'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['data'] += ('--!' + data)
self.state = self.commentState
return True
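# Note on the DOCTYPE states below: they fill in the currentToken fields
# ('name', 'publicId', 'systemId') and flip 'correct' to False whenever the
# input is malformed, before the token is appended to the queue.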
def doctypeState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.state = self.beforeDoctypeNameState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-doctype-name-but-got-eof'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'need-space-after-doctype'})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if (data in spaceCharacters):
pass
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-doctype-name-but-got-right-bracket'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['name'] = '\ufffd'
self.state = self.doctypeNameState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-doctype-name-but-got-eof'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['name'] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.currentToken['name'] = self.currentToken['name'].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif (data == '>'):
self.currentToken['name'] = self.currentToken['name'].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['name'] += '\ufffd'
self.state = self.doctypeNameState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype-name'})
self.currentToken['correct'] = False
self.currentToken['name'] = self.currentToken['name'].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['name'] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if (data in spaceCharacters):
pass
elif (data == '>'):
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.currentToken['correct'] = False
self.stream.unget(data)
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if (data in ('p', 'P')):
matched = True
for expected in (('u', 'U'), ('b', 'B'), ('l', 'L'), ('i', 'I'), ('c', 'C')):
data = self.stream.char()
if (data not in expected):
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif (data in ('s', 'S')):
matched = True
for expected in (('y', 'Y'), ('s', 'S'), ('t', 'T'), ('e', 'E'), ('m', 'M')):
data = self.stream.char()
if (data not in expected):
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
self.stream.unget(data)
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'expected-space-or-right-bracket-in-doctype', 'datavars': {'data': data}})
self.currentToken['correct'] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.state = self.beforeDoctypePublicIdentifierState
elif (data in ("'", '"')):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if (data in spaceCharacters):
pass
elif (data == '"'):
self.currentToken['publicId'] = ''
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif (data == "'"):
self.currentToken['publicId'] = ''
self.state = self.doctypePublicIdentifierSingleQuotedState
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-end-of-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.currentToken['correct'] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if (data == '"'):
self.state = self.afterDoctypePublicIdentifierState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['publicId'] += '\ufffd'
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-end-of-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['publicId'] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if (data == "'"):
self.state = self.afterDoctypePublicIdentifierState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['publicId'] += '\ufffd'
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-end-of-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['publicId'] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif (data == '>'):
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data == '"'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.currentToken['systemId'] = ''
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif (data == "'"):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.currentToken['systemId'] = ''
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.currentToken['correct'] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if (data in spaceCharacters):
pass
elif (data == '>'):
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data == '"'):
self.currentToken['systemId'] = ''
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif (data == "'"):
self.currentToken['systemId'] = ''
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif (data == EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.currentToken['correct'] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if (data in spaceCharacters):
self.state = self.beforeDoctypeSystemIdentifierState
elif (data in ("'", '"')):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if (data in spaceCharacters):
pass
elif (data == '"'):
self.currentToken['systemId'] = ''
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif (data == "'"):
self.currentToken['systemId'] = ''
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.currentToken['correct'] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if (data == '"'):
self.state = self.afterDoctypeSystemIdentifierState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['systemId'] += '\ufffd'
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-end-of-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['systemId'] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if (data == "'"):
self.state = self.afterDoctypeSystemIdentifierState
elif (data == '\x00'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
self.currentToken['systemId'] += '\ufffd'
elif (data == '>'):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-end-of-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken['systemId'] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if (data in spaceCharacters):
pass
elif (data == '>'):
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'eof-in-doctype'})
self.currentToken['correct'] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'unexpected-char-in-doctype'})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if (data == '>'):
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif (data is EOF):
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil(']'))
data.append(self.stream.charsUntil('>'))
char = self.stream.char()
if (char == EOF):
break
else:
assert (char == '>')
if (data[(- 1)][(- 2):] == ']]'):
data[(- 1)] = data[(- 1)][:(- 2)]
break
else:
data.append(char)
data = ''.join(data)
nullCount = data.count('\x00')
if (nullCount > 0):
for _ in range(nullCount):
self.tokenQueue.append({'type': tokenTypes['ParseError'], 'data': 'invalid-codepoint'})
data = data.replace('\x00', '')
if data:
self.tokenQueue.append({'type': tokenTypes['Characters'], 'data': data})
self.state = self.dataState
return True |
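# Minimal usage sketch (not part of the original snippet): the state methods
# above belong to html5lib's HTMLTokenizer, which is iterable and yields token
# dicts. This assumes a standard html5lib install where the tokenizer module
# is html5lib._tokenizer (older releases expose it as html5lib.tokenizer).
from html5lib._tokenizer import HTMLTokenizer

def dump_tokens(markup):
    # Start tags carry their attributes as the [name, value] pairs built by the
    # attribute states above; a repeated attribute also yields a ParseError token.
    for token in HTMLTokenizer(markup):
        print(token)

dump_tokens('<p class="a" class="b">hi</p>')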
def lift(x):
try:
return x.lift()
except AttributeError:
raise ArithmeticError('no lift defined.') |
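# Hypothetical usage sketch for lift() above: any object exposing a .lift()
# method is delegated to, everything else raises ArithmeticError.
class _Liftable:
    def lift(self):
        return 42

print(lift(_Liftable()))  # -> 42
# lift(3) raises ArithmeticError('no lift defined.') because int has no .lift()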
def mask_rcnn_fcn_head_v1up(model, blob_in, dim_in, spatial_scale):
return mask_rcnn_fcn_head_v1upXconvs(model, blob_in, dim_in, spatial_scale, 2) |
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
if (sys.version_info[0] < 3):
sys.stdout.writelines((l.encode('utf-8') for l in diff_lines))
else:
sys.stdout.writelines(diff_lines) |
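# Usage sketch (assumed context): diff_lines is typically a difflib generator,
# and colorize is the helper referenced above that wraps +/- lines in ANSI codes;
# sys is assumed to be imported in the module that defines print_diff.
import difflib

old = ['a\n', 'b\n']
new = ['a\n', 'c\n']
lines = difflib.unified_diff(old, new, fromfile='old', tofile='new')
print_diff(lines, use_color=False)  # use_color=True requires the colorize() helper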
def main():
dataset = 'fb15k-237'
model_names = ['conve', 'distmult', 'complex']
compare_models(dataset, model_names) |
@htrack('Fixing Together cache')
def fix(mongo_uri: str):
source_name: str = 'together'
target_name: str = 'together_rewritten'
source_config = MongoCacheConfig(mongo_uri, collection_name=source_name)
target_config = MongoCacheConfig(mongo_uri, collection_name=target_name)
source_store = create_key_value_store(source_config)
target_store = create_key_value_store(target_config)
db: MongoClient = MongoClient(mongo_uri)
if ('request_1' not in db['crfm-models'][target_name].index_information()):
db['crfm-models'][target_name].create_index('request', name='request_1', unique=True)
for (i, (request, response)) in enumerate(source_store.get_all()):
request['request_type'] = 'language-model-inference'
request['model'] = request['engine']
del request['engine']
target_store.put(request, response)
if (((i + 1) % 10000) == 0):
hlog(f'Processed {(i + 1)} entries.')
with htrack_block(f'Dropping {source_name} collection and renaming {target_name} to {source_name}'):
source_collection = db['crfm-models'][source_name]
source_collection.drop()
target_collection = db['crfm-models'][target_name]
target_collection.rename(source_name) |
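# Minimal sketch of the per-entry rewrite performed in fix() above (request
# keys here are assumed shapes; the real entries come from the Mongo-backed
# key-value store): the 'engine' field is renamed to 'model' and a request_type
# is added before the entry is written to the rewritten collection.
request = {'engine': 'together/gpt-j-6b', 'prompt': 'Hello', 'max_tokens': 5}
request['request_type'] = 'language-model-inference'
request['model'] = request.pop('engine')
# -> {'prompt': 'Hello', 'max_tokens': 5,
#     'request_type': 'language-model-inference', 'model': 'together/gpt-j-6b'}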
class SawyerSoccerEnvV2(SawyerXYZEnv):
def __init__(self):
goal_low = ((- 0.1), 0.8, 0.0)
goal_high = (0.1, 0.9, 0.0)
hand_low = ((- 0.5), 0.4, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = ((- 0.1), 0.6, 0.03)
obj_high = (0.1, 0.7, 0.03)
super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
self.init_config = {'obj_init_pos': np.array([0, 0.6, 0.03]), 'obj_init_angle': 0.3, 'hand_init_pos': np.array([0.0, 0.6, 0.2])}
self.goal = np.array([0.0, 0.9, 0.03])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self.max_path_length = 150
self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_soccer.xml')
@_assert_task_is_set
def step(self, action):
ob = super().step(action)
(reward, reachDist, pushDist) = self.compute_reward(action, ob)
self.curr_path_length += 1
info = {'reachDist': reachDist, 'goalDist': pushDist, 'epRew': reward, 'pickRew': None, 'success': float((pushDist <= 0.07))}
return (ob, reward, False, info)
def _get_pos_objects(self):
return self.get_body_com('soccer_ball')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_angle = self.init_config['obj_init_angle']
self.objHeight = self.get_body_com('soccer_ball')[2]
if self.random_init:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
while (np.linalg.norm((goal_pos[:2] - self._target_pos[:2])) < 0.15):
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[(- 1)]]))
self.sim.model.body_pos[self.model.body_name2id('goal_whole')] = self._target_pos
self._set_obj_xyz(self.obj_init_pos)
self.maxPushDist = np.linalg.norm((self.obj_init_pos[:2] - np.array(self._target_pos)[:2]))
return self._get_obs()
def _reset_hand(self):
super()._reset_hand()
self.reachCompleted = False
def compute_reward(self, actions, obs):
del actions
objPos = obs[3:6]
(rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
fingerCOM = ((rightFinger + leftFinger) / 2)
goal = self._target_pos
c1 = 1000
c2 = 0.01
c3 = 0.001
assert np.all((goal == self._get_site_pos('goal')))
reachDist = np.linalg.norm((fingerCOM - objPos))
pushDist = np.linalg.norm((objPos[:2] - goal[:2]))
reachRew = (- reachDist)
def reachCompleted():
return (reachDist < 0.05)
self.reachCompleted = reachCompleted()
if self.reachCompleted:
pushRew = ((1000 * (self.maxPushDist - pushDist)) + (c1 * (np.exp(((- (pushDist ** 2)) / c2)) + np.exp(((- (pushDist ** 2)) / c3)))))
pushRew = max(pushRew, 0)
else:
pushRew = 0
reward = (reachRew + pushRew)
return [reward, reachDist, pushDist] |
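# Standalone sketch of the shaped reward in compute_reward() above, using the
# same constants (c1, c2, c3) and the reach gate; the sample distances passed
# at the bottom are illustrative, not taken from the environment.
import numpy as np

def shaped_reward(reach_dist, push_dist, max_push_dist, c1=1000, c2=0.01, c3=0.001):
    reach_rew = -reach_dist
    if reach_dist < 0.05:  # push reward only applies once the reach is completed
        push_rew = 1000 * (max_push_dist - push_dist) + c1 * (
            np.exp(-(push_dist ** 2) / c2) + np.exp(-(push_dist ** 2) / c3))
        push_rew = max(push_rew, 0)
    else:
        push_rew = 0
    return reach_rew + push_rew

print(shaped_reward(reach_dist=0.03, push_dist=0.10, max_push_dist=0.25))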