code stringlengths 281 23.7M |
|---|
class ConvertCommand(BaseTransformersCLICommand):
    """CLI command converting original author checkpoints (mostly TensorFlow)
    into Transformers PyTorch checkpoints."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `convert` subcommand and its CLI arguments on *parser*."""
        train_parser = parser.add_parser('convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.')
        train_parser.add_argument('--model_type', type=str, required=True, help="Model's type.")
        train_parser.add_argument('--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
        train_parser.add_argument('--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
        train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
        train_parser.add_argument('--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.')
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        """Store conversion parameters; extra positional args are accepted and ignored."""
        self._logger = getLogger('transformers-cli/converting')
        self._logger.info('Loading model {}'.format(model_type))
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the model-specific conversion script.

        Raises:
            ImportError: if the converter needs TensorFlow and it is not installed.
            ValueError: if --model_type is not one of the supported models.
        """
        # All TF-based converters share the same missing-TensorFlow message.
        msg = 'transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see for installation instructions.'
        if self._model_type == 'bert':
            try:
                from transformers.convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(msg)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == 'gpt':
            from transformers.convert_openai_original_tf_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == 'transfo_xl':
            try:
                from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(msg)
            # transfo_xl takes either a TF checkpoint or a dataset file; decide by name.
            if 'ckpt' in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ''
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ''
            convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE)
        elif self._model_type == 'gpt2':
            try:
                from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(msg)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == 'xlnet':
            try:
                from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(msg)
            convert_xlnet_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name)
        elif self._model_type == 'xlm':
            # XLM checkpoints are already PyTorch; no TensorFlow required.
            from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import convert_xlm_checkpoint_to_pytorch
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        else:
            raise ValueError('--model_type should be selected in the list [bert, gpt, gpt2, transfo_xl, xlnet, xlm]')
def test_has_keywords():
    """Spot-check has_keywords across lambdas and builtins."""
    # Callables with neither defaults nor **kwargs expose no keywords.
    for fn in ((lambda: None), (lambda x: None)):
        assert has_keywords(fn) is False
    # A default value or a **kwargs catch-all counts as keyword support;
    # several builtins advertise keyword arguments too.
    for fn in ((lambda x=1: None), (lambda **kwargs: None), int, sorted, max):
        assert has_keywords(fn)
    assert has_keywords(map) is False
    # bytearray's signature cannot be determined at all.
    assert has_keywords(bytearray) is None
class ScalarQuantizationConfig(BaseModel, extra='forbid'):
    """Scalar quantization settings for stored vectors; unknown fields are rejected."""
    # Scalar data type used for the quantized representation.
    type: 'ScalarType' = Field(..., description='')
    quantile: Optional[float] = Field(default=None, description='Quantile for quantization. Expected value range in [0.5, 1.0]. If not set - use the whole range of values')
    always_ram: Optional[bool] = Field(default=None, description='If true - quantized vectors always will be stored in RAM, ignoring the config of main storage')
def get_kindle_kfx_metadata(filepath):
    """Return the metadata extracted from the KFX container at *filepath*.

    DRM-encrypted books (DRMION magic) keep a plain metadata copy in the
    sibling ``<book>.sdr/assets/metadata.kfx`` file, which is read instead.
    """
    raw = read_file(filepath)
    if raw[0:8] == DRMION_MAGIC:
        # Main file is encrypted; fall back to the sidecar metadata copy.
        sdr_dir = os.path.splitext(filepath)[0] + '.sdr'
        raw = read_file(os.path.join(sdr_dir, 'assets', 'metadata.kfx'))
    if raw[0:4] != CONTAINER_MAGIC:
        raise Exception('%s is not a KFX container' % filepath)
    container = KFXContainer(raw)
    return extract_metadata(container.decode(metadata_only=True))
.wrap
def get_weights_list(cat_seq: torch.Tensor, features: KeyedJaggedTensor, position_weights: Dict[(str, nn.Parameter)]) -> Optional[torch.Tensor]:
    """Collect per-position weights for every key of *features*.

    Keys present in *position_weights* use the learned table gathered at the
    positions in ``cat_seq``; other keys fall back to a weight of 1.0.
    """
    per_key_seqs = torch.split(cat_seq, features.length_per_key())
    pieces = []
    for key, seq in zip(features.keys(), per_key_seqs):
        if key in position_weights.keys():
            pieces.append(torch.gather(position_weights[key], dim=0, index=seq))
        else:
            pieces.append(torch.ones(seq.shape[0], device=features.values().device))
    if pieces:
        return torch.cat(pieces)
    return features.weights_or_none()
('make-struct-field-accessor', [values_struct.W_StructAccessor, values.W_Fixnum, default(values.W_Object, values.w_false)])
def do_make_struct_field_accessor(accessor, field, field_name):
    """Build a W_StructFieldAccessor; *field_name* must be a symbol or #f."""
    if field_name is values.w_false:
        # #f means an anonymous (unnamed) field accessor.
        name = None
    elif isinstance(field_name, values.W_Symbol):
        name = field_name
    else:
        raise SchemeException('make-struct-field-accessor: expected symbol or #f as argument 2')
    return values_struct.W_StructFieldAccessor(accessor, field.value, name)
class MiniImageNet(Dataset):
    """Mini-ImageNet split backed by a ``<setname>.csv`` index of image,wnid rows.

    Labels are dense integer ids assigned in order of first appearance of
    each WordNet id in the csv.
    """

    def __init__(self, setname):
        csv_path = osp.join(ROOT_PATH, (setname + '.csv'))
        # Use a context manager so the handle is always closed (the original
        # left the csv file open). The first line is a header and is skipped.
        with open(csv_path, 'r') as csv_file:
            lines = [x.strip() for x in csv_file.readlines()][1:]
        data = []
        label = []
        lb = -1
        self.wnids = []
        for l in lines:
            name, wnid = l.split(',')
            path = osp.join(ROOT_PATH, 'images', name)
            if wnid not in self.wnids:
                # First occurrence of this class: allocate the next label id.
                self.wnids.append(wnid)
                lb += 1
            data.append(path)
            label.append(lb)
        self.data = data
        self.label = label
        # 84x84 center crop followed by standard ImageNet normalisation.
        self.transform = transforms.Compose([transforms.Resize(84), transforms.CenterCrop(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        path, label = self.data[i], self.label[i]
        image = self.transform(Image.open(path).convert('RGB'))
        return (image, label)
def _main(args):
    """Convert a Darknet .cfg/.weights model pair into a Keras .h5 model.

    Config sections are walked in order while the matching weight bytes are
    consumed sequentially from the .weights file, so statement order is
    load-bearing throughout this function.
    """
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(config_path)
    assert weights_path.endswith('.weights'), '{} is not a .weights file'.format(weights_path)
    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith('.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    # Darknet weight files begin with a (major, minor, revision) version header.
    (major, minor, revision) = np.ndarray(shape=(3,), dtype='int32', buffer=weights_file.read(12))
    # From format version 0.2 on, the 'images seen' counter is stored as int64.
    if ((((major * 10) + minor) >= 2) and (major < 1000) and (minor < 1000)):
        seen = np.ndarray(shape=(1,), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1,), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)
    print('Parsing Darknet config.')
    # Section names are made unique (e.g. 'net_0') so configparser accepts duplicates.
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)
    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3))
    prev_layer = input_layer
    all_layers = []
    weight_decay = (float(cfg_parser['net_0']['decay']) if ('net_0' in cfg_parser.sections()) else 0.0005)
    count = 0  # number of weight floats consumed so far (for the final accounting)
    out_index = []  # indices of layers that become model outputs ([yolo] sections)
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = ('batch_normalize' in cfg_parser[section])
            padding = ('same' if ((pad == 1) and (stride == 1)) else 'valid')
            prev_layer_shape = K.int_shape(prev_layer)
            weights_shape = (size, size, prev_layer_shape[(- 1)], filters)
            # Darknet serializes conv kernels as (out_dim, in_dim, height, width).
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)
            print('conv2d', ('bn' if batch_normalize else ' '), activation, weights_shape)
            # Per-layer weight layout: biases (or BN betas) first, then BN
            # parameters if present, then the kernel weights.
            conv_bias = np.ndarray(shape=(filters,), dtype='float32', buffer=weights_file.read((filters * 4)))
            count += filters
            if batch_normalize:
                # presumably rows are [gamma, running mean, running variance] — TODO confirm
                bn_weights = np.ndarray(shape=(3, filters), dtype='float32', buffer=weights_file.read((filters * 12)))
                count += (3 * filters)
                bn_weight_list = [bn_weights[0], conv_bias, bn_weights[1], bn_weights[2]]
            conv_weights = np.ndarray(shape=darknet_w_shape, dtype='float32', buffer=weights_file.read((weights_size * 4)))
            count += weights_size
            # Reorder to the Keras kernel layout (height, width, in_dim, out_dim).
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = ([conv_weights] if batch_normalize else [conv_weights, conv_bias])
            act_fn = None
            if (activation == 'leaky'):
                pass  # leaky ReLU is attached as a separate layer below
            elif (activation != 'linear'):
                raise ValueError('Unknown activation function `{}` in section {}'.format(activation, section))
            if (stride > 1):
                # Strided convolutions use Darknet-style left/top zero padding.
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)
            conv_layer = Conv2D(filters, (size, size), strides=(stride, stride), kernel_regularizer=l2(weight_decay), use_bias=(not batch_normalize), weights=conv_weights, activation=act_fn, padding=padding)(prev_layer)
            if batch_normalize:
                conv_layer = BatchNormalization(weights=bn_weight_list)(conv_layer)
            prev_layer = conv_layer
            if (activation == 'linear'):
                all_layers.append(prev_layer)
            elif (activation == 'leaky'):
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)
        elif section.startswith('route'):
            # A route either concatenates several earlier layers or forwards one.
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if (len(layers) > 1):
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]
                all_layers.append(skip_layer)
                prev_layer = skip_layer
        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(MaxPooling2D(pool_size=(size, size), strides=(stride, stride), padding='same')(prev_layer))
            prev_layer = all_layers[(- 1)]
        elif section.startswith('shortcut'):
            # Residual connection: element-wise add of an earlier layer.
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert (activation == 'linear'), 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[(- 1)]
        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert (stride == 2), 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[(- 1)]
        elif section.startswith('yolo'):
            # Mark the previous layer as a model output; the [yolo] section
            # itself produces no Keras layer (placeholder None is appended).
            out_index.append((len(all_layers) - 1))
            all_layers.append(None)
            prev_layer = all_layers[(- 1)]
        elif section.startswith('net'):
            pass  # global training settings; only 'decay' is used (above)
        else:
            raise ValueError('Unsupported section header type: {}'.format(section))
    if (len(out_index) == 0):
        # No [yolo] section: default to the last layer as the sole output.
        out_index.append((len(all_layers) - 1))
    model = Model(inputs=input_layer, outputs=[all_layers[i] for i in out_index])
    print(model.summary())
    if args.weights_only:
        model.save_weights('{}'.format(output_path))
        print('Saved Keras weights to {}'.format(output_path))
    else:
        model.save('{}'.format(output_path))
        print('Saved Keras model to {}'.format(output_path))
    # Sanity check: any bytes left in the file are weights we never consumed.
    remaining_weights = (len(weights_file.read()) / 4)
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(count, (count + remaining_weights)))
    if (remaining_weights > 0):
        print('Warning: {} unused weights'.format(remaining_weights))
    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
class Bookkeeper(Feature):
    """Feature that replays import/prune events for every node in the graph."""

    def on_attach(self, fgraph):
        # Announce each pre-existing node as if it were newly imported.
        for apply_node in io_toposort(fgraph.inputs, fgraph.outputs):
            self.on_import(fgraph, apply_node, 'on_attach')

    def on_detach(self, fgraph):
        # Mirror image of on_attach: prune every node in topological order.
        for apply_node in io_toposort(fgraph.inputs, fgraph.outputs):
            self.on_prune(fgraph, apply_node, 'Bookkeeper.detach')
.parametrize('qc_options, scan_range, compatible', [pytest.param(QCOptions(program='rdkit', method='uff', basis=None, td_settings=None), ((- 165), 180), True, id='Compatible'), pytest.param(QCOptions(program='xtb', method='gfn2xtb', basis=None, td_settings=None), ((- 165), 180), False, id='Wrong program'), pytest.param(QCOptions(program='rdkit', method='uff', basis=None, td_settings=TDSettings(n_states=3)), ((- 165), 180), False, id='TD settings'), pytest.param(QCOptions(program='rdkit', method='uff', basis=None), (0, 180), False, id='Wrong torsion range')])
def test_load_old_state(tmpdir, ethane_state, qc_options, scan_range, compatible):
    """An old torsiondrive state is reused only when the QC spec and scan range match."""
    with tmpdir.as_cwd():
        td_api.current_state_json_dump(current_state=ethane_state, jsonfilename='torsiondrive_state.json')
        driver = TorsionDriver()
        scan = TorsionScan(ethane_state['dihedrals'][0], scan_range=scan_range)
        state = driver._load_state(qc_spec=qc_options, torsion_scan=scan)
        if compatible:
            assert state is not None
        else:
            assert state is None
def run(*args, cwd=None, input=None, capture_stdout=False, capture_stderr=False, shell=False, env=None, check=True, quiet=False):
    """Thin wrapper around ``subprocess.run`` with per-stream capture/quiet control."""
    cmd = list(args)
    _log.info("Running subprocess in '{0}'\n{1}".format(cwd or os.getcwd(), cmd))

    def stream_target(captured):
        # PIPE when captured, DEVNULL when quiet, otherwise inherit the parent's stream.
        if captured:
            return subprocess.PIPE
        return subprocess.DEVNULL if quiet else None

    proc = subprocess.run(cmd, cwd=cwd, check=check, input=input, stdout=stream_target(capture_stdout), stderr=stream_target(capture_stderr), env=env, shell=shell)
    _log.debug('Subprocess completed. Return code: {}'.format(proc.returncode))
    return proc
.parametrize('target_chunks', [(10, 10), (50, 50), (100, 50), (50, 100)])
def test_rechunk_bgen__target_chunks(shared_datadir, tmp_path, target_chunks):
    """Rechunking must produce exactly the requested (length, width) chunking."""
    chunk_length, chunk_width = target_chunks
    _, dsr, store = _rechunk_bgen(shared_datadir, tmp_path, chunk_length=chunk_length, chunk_width=chunk_width, pack=False)
    for var in GT_DATA_VARS:
        assert dsr[var].data.chunksize[:2] == target_chunks
def build_argparser(parser=None):
    """Return *parser* (or a fresh ``ArgumentParser``) extended with the
    First TextWorld Competition game settings argument group.

    Args:
        parser: existing parser to extend; a new one is created if None.

    Returns:
        The parser with the game-settings group added.
    """
    parser = parser or argparse.ArgumentParser()
    group = parser.add_argument_group('First TextWorld Competition game settings')
    group.add_argument('--recipe', type=int, default=1, metavar='INT', help='Number of ingredients in the recipe. Default: %(default)s')
    group.add_argument('--take', type=int, default=0, metavar='INT', help='Number of ingredients to find. It must be less or equal to the value of `--recipe`. Default: %(default)s')
    group.add_argument('--nb-rooms', type=int, default=1, help='Number of locations in the game. Default: %(default)s')
    group.add_argument('--nb-entities', type=int, default=1, help='Number of entities (rooms + fixed in place + portable). Default: %(default)s')
    group.add_argument('--seed-map', type=int, help='Fixing the seed for the map generation. Default: random')
    # Typo fix: 'as need' -> 'as needed'; stray leading space removed.
    group.add_argument('--with-placeholders', action='store_true', help='Add as many placeholders as needed to cover all possible attributes.')
    return parser
.parametrize('source, expected', [("html.div(key='test')", "html.div({'key': 'test'})"), ("html.div('something', key='test')", "html.div({'key': 'test'}, 'something')"), ("html.div({'some_attr': 1}, child_1, child_2, key='test')", "html.div({'some_attr': 1, 'key': 'test'}, child_1, child_2)"), ("vdom('div', key='test')", "vdom('div', {'key': 'test'})"), ("vdom('div', 'something', key='test')", "vdom('div', {'key': 'test'}, 'something')"), ("vdom('div', {'some_attr': 1}, child_1, child_2, key='test')", "vdom('div', {'some_attr': 1, 'key': 'test'}, child_1, child_2)"), ("html.div(dict(some_attr=1), child_1, child_2, key='test')", "html.div(dict(some_attr=1, key='test'), child_1, child_2)"), ("vdom('div', dict(some_attr=1), child_1, child_2, key='test')", "vdom('div', dict(some_attr=1, key='test'), child_1, child_2)"), ("\n def my_function():\n x = 1 # some comment\n return html.div(key='test')\n ", "\n def my_function():\n x = 1 # some comment\n return html.div({'key': 'test'})\n "), ("\n if condition:\n # some comment\n dom = html.div(key='test')\n ", "\n if condition:\n # some comment\n dom = html.div({'key': 'test'})\n "), ("\n [\n html.div(key='test'),\n html.div(key='test'),\n ]\n ", "\n [\n html.div({'key': 'test'}),\n html.div({'key': 'test'}),\n ]\n "), ("\n (\n html.div(key='test'),\n html.div(key='test'),\n )\n def func():\n # comment\n x = [\n 1\n ]\n ", "\n (\n html.div({'key': 'test'}),\n html.div({'key': 'test'}),\n )\n def func():\n # comment\n x = [\n 1\n ]\n "), ("\n (html.div(key='test'), html.div(key='test'))\n def func():\n # comment\n x = [\n 1\n ]\n ", "\n (html.div({'key': 'test'}), html.div({'key': 'test'}))\n def func():\n # comment\n x = [\n 1\n ]\n "), ("\n (\n result\n if condition\n else html.div(key='test')\n )\n ", "\n (\n result\n if condition\n else html.div({'key': 'test'})\n )\n "), ('\n x = 1\n html.div(\n "hello",\n # comment 1\n html.div(key=\'test\'),\n # comment 2\n key=\'test\',\n )\n ', "\n x = 1\n # comment 1\n # comment 2\n 
html.div({'key': 'test'}, 'hello', html.div({'key': 'test'}))\n "), ("html.no_an_element(key='test')", None), ("not_html.div(key='test')", None), ('html.div()', None), ("html.div(not_key='something')", None), ('vdom()', None), ("(some + expr)(key='test')", None), ('html.div()', None), ("html.div(child_1, child_2, key='test')", None), ("vdom('div', child_1, child_2, key='test')", None)], ids=(lambda item: (' '.join(map(str.strip, item.split())) if isinstance(item, str) else item)))
def test_generate_rewrite(source, expected):
    """generate_rewrite must transform *source* into *expected* (None = no rewrite)."""
    rewritten = generate_rewrite(Path('test.py'), dedent(source).strip())
    expected_text = dedent(expected).strip() if isinstance(expected, str) else expected
    assert rewritten == expected_text
class NetworkInNetwork(Module):
    """1x1 'network in network' layer: a per-site linear map from nIn to nOut features."""

    def __init__(self, nIn, nOut, bias):
        Module.__init__(self)
        self.nIn = nIn
        self.nOut = nOut
        # He-style initialisation: std = sqrt(2 / fan_in).
        std = (2.0 / nIn) ** 0.5
        self.weight = Parameter(torch.Tensor(nIn, nOut).normal_(0, std))
        if bias:
            self.bias = Parameter(torch.Tensor(nOut).zero_())

    def forward(self, input):
        # Empty feature tensors are allowed; otherwise the channel count must match.
        assert (input.features.nelement() == 0) or (input.features.size(1) == self.nIn), (self.nIn, input.features.shape)
        output = SparseConvNetTensor()
        output.metadata = input.metadata
        output.spatial_size = input.spatial_size
        output.features = NetworkInNetworkFunction.apply(input.features, self.weight, optionalTensor(self, 'bias'))
        return output

    def __repr__(self):
        return 'NetworkInNetwork' + str(self.nIn) + '->' + str(self.nOut)

    def input_spatial_size(self, out_size):
        # A 1x1 map leaves the spatial extent unchanged.
        return out_size
class Effect11944(BaseEffect):
    """Passive effect: boosts 'falloff' of modules requiring Capital Projectile
    Turret, scaled by the ship's shipBonusTitanG2 attribute."""
    type = 'passive'
    # NOTE(review): the handler takes the fit as first argument instead of
    # `self` — appears to follow the surrounding effect-class convention;
    # confirm against BaseEffect's dispatch.
    def handler(fit, src, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Capital Projectile Turret')), 'falloff', src.getModifiedItemAttr('shipBonusTitanG2'), skill='Gallente Dreadnought', **kwargs)
.slow
.pydicom
def test_dose(pinn):
    """Export RTDOSE for each Pinnacle plan and compare it against the
    corresponding reference DICOM dose."""
    for p in pinn:
        export_path = os.path.join(working_path, 'output', p.patient_info['MedicalRecordNumber'], 'RTDOSE')
        os.makedirs(export_path)
        export_plan = p.plans[0]
        p.export_dose(export_plan, export_path)
        # Exported dose files follow the DICOM 'RD*' naming convention;
        # validate the first one found.
        for f in os.listdir(export_path):
            if f.startswith('RD'):
                exported_dose = pydicom.read_file(os.path.join(export_path, f))
                assert (exported_dose.Modality == 'RTDOSE')
                break
        # NOTE(review): if no 'RD*' file was exported, `exported_dose` is
        # unbound here and the next line raises — confirm this is intended.
        pinn_dose = find_corresponding_dicom(exported_dose)
        assert (pinn_dose is not None)
        assert_same_dose(pinn_dose, exported_dose)
class RestoreCheckpointConfig():
    """Configuration for restoring a model checkpoint.

    NOTE(review): the annotated fields and ``__post_init__`` suggest this is a
    dataclass whose decorator lies outside this view — confirm.
    """
    # Path(s) to the checkpoint(s) or checkpoint directories to restore from.
    path: Union[(str, Sequence[str])]
    # Which checkpoint(s) to restore; validated in __post_init__.
    mode: str = 'latest'
    # (pattern, replacement-or-None) pairs forwarded to
    # state_utils.apply_assignment_map (see __post_init__).
    assignment_map: Optional[Sequence[Tuple[(str, Optional[str])]]] = None
    strict: bool = True
    fallback_to_scratch: bool = False
    # Optional cast dtype; restricted to None/'float32'/'bfloat16'/'float16'.
    dtype: Optional[str] = None
    restore_dataset: bool = False
    checkpointer_cls: checkpoints.CheckpointerConstructor = checkpoints.Checkpointer
    state_transformation_fns: Sequence[checkpoints.RestoreStateTransformationFn] = ()
    use_gda: bool = True
    checkpoint_manager_cls: checkpoints.CheckpointManagerConstructor = checkpoints.CheckpointManager
    def __post_init__(self):
        """Validate `mode` and `dtype`, and prepend the assignment-map transform."""
        if (self.mode not in ('specific', 'latest', 'all')):
            raise ValueError(f"`RestoreCheckpointConfig.mode` must be one of 'specific', 'latest', or 'all'. Got {self.mode}.")
        if (self.dtype not in (None, 'float32', 'bfloat16', 'float16')):
            raise ValueError(f"`RestoreCheckpointConfig.dtype` must be one of `None`, 'float32', 'float16' or 'bfloat16'. Got {self.dtype}.")
        if (self.assignment_map is not None):
            # Run the assignment map before any user-supplied transformations.
            assignment_map_fn = functools.partial(state_utils.apply_assignment_map, assignment_map=self.assignment_map)
            self.state_transformation_fns = (assignment_map_fn, *self.state_transformation_fns)
def sample_dataset(data, num_train, num_test):
    """Truncate *data* in place to the first ``num_train`` train and
    ``num_test`` test examples, and return it."""
    for split, limit in (('train', num_train), ('test', num_test)):
        for column in ('text', 'label'):
            data[split][column] = data[split][column][:limit]
    return data
_fixtures(WebFixture)
def test_navs(web_fixture):
    """A Nav renders as ul.nav containing li.nav-item items wrapping a.nav-link anchors."""
    bookmarks = [Bookmark('', '/one', 'One'), Bookmark('', '/two', 'Two')]
    menu = Nav(web_fixture.view).with_bookmarks(bookmarks)
    rendered = menu.html_representation
    assert rendered.tag_name == 'ul'
    assert 'nav' in rendered.get_attribute('class')
    [one, two] = rendered.children
    for item, href, description in [(one, '/one', 'One'), (two, '/two', 'Two')]:
        assert item.tag_name == 'li'
        assert item.get_attribute('class') == 'nav-item'
        [anchor] = item.children
        assert anchor.get_attribute('href') == href
        assert anchor.children[0].value == description
        assert anchor.get_attribute('class') == 'nav-link'
_model
def caformer_s18_384_in21ft1k(pretrained=False, **kwargs):
    """CAFormer-S18 @ 384, pretrained on ImageNet-21k and finetuned on 1k."""
    cfg = default_cfgs['caformer_s18_384_in21ft1k']
    model = MetaFormer(depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        # Weights are downloaded (and hash-checked) from the config URL.
        state_dict = torch.hub.load_state_dict_from_url(url=cfg['url'], map_location='cpu', check_hash=True)
        model.load_state_dict(state_dict)
    return model
class AttentionModule_stage2(nn.Module):
    """Residual-attention module (stage 2): a trunk branch of residual blocks
    modulated by a two-level soft-mask branch with one skip connection.

    size1/size2 are the spatial sizes restored by the two bilinear upsamplings.
    """
    def __init__(self, in_channels, out_channels, size1=(28, 28), size2=(14, 14)):
        super(AttentionModule_stage2, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)
        # Trunk branch: plain residual processing, later reweighted by the mask.
        self.trunk_branches = nn.Sequential(ResidualBlock(in_channels, out_channels), ResidualBlock(in_channels, out_channels))
        # Soft-mask branch: two downsampling levels with a skip connection.
        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax1_blocks = ResidualBlock(in_channels, out_channels)
        self.skip1_connection_residual_block = ResidualBlock(in_channels, out_channels)
        self.mpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax2_blocks = nn.Sequential(ResidualBlock(in_channels, out_channels), ResidualBlock(in_channels, out_channels))
        self.interpolation2 = nn.UpsamplingBilinear2d(size=size2)
        self.softmax3_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation1 = nn.UpsamplingBilinear2d(size=size1)
        # 1x1 conv head squashed to (0, 1) to form the attention mask.
        self.softmax4_blocks = nn.Sequential(nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False), nn.Sigmoid())
        self.last_blocks = ResidualBlock(in_channels, out_channels)
    def forward(self, x):
        x = self.first_residual_blocks(x)
        out_trunk = self.trunk_branches(x)
        # Mask branch, level 1 (downsample).
        out_mpool1 = self.mpool1(x)
        out_softmax1 = self.softmax1_blocks(out_mpool1)
        out_skip1_connection = self.skip1_connection_residual_block(out_softmax1)
        # Mask branch, level 2 (downsample again, then upsample back).
        out_mpool2 = self.mpool2(out_softmax1)
        out_softmax2 = self.softmax2_blocks(out_mpool2)
        out_interp2 = (self.interpolation2(out_softmax2) + out_softmax1)
        out = (out_interp2 + out_skip1_connection)
        out_softmax3 = self.softmax3_blocks(out)
        out_interp1 = (self.interpolation1(out_softmax3) + out_trunk)
        out_softmax4 = self.softmax4_blocks(out_interp1)
        # Residual attention: (1 + mask) * trunk, so the mask can only amplify.
        out = ((1 + out_softmax4) * out_trunk)
        out_last = self.last_blocks(out)
        return out_last
def compare_collections(client_1, client_2, num_vectors, attrs=('vectors_count', 'indexed_vectors_count', 'points_count'), collection_name: str=COLLECTION_NAME):
    """Assert both clients report identical collection stats and scroll results."""
    info_1 = client_1.get_collection(collection_name)
    info_2 = client_2.get_collection(collection_name)
    for attr in attrs:
        lhs = getattr(info_1, attr)
        rhs = getattr(info_2, attr)
        assert lhs == rhs, f'client_1.{attr} = {lhs}, client_2.{attr} = {rhs}'
    # Also verify the actual points (with vectors) line up between the clients.
    compare_client_results(client_1, client_2, (lambda client: client.scroll(collection_name, with_vectors=True, limit=(num_vectors * 2))))
def convert_to_train_id(trans_idx, train_mask_folder, train_label_dir, filename):
    """Remap the mask *filename* through the trans_idx lookup table and write
    the result into *train_label_dir*; non-.png files are ignored."""
    if not filename.endswith('.png'):
        return
    maskpath = os.path.join(train_mask_folder, filename)
    if not os.path.isfile(maskpath):
        print('cannot find the mask:', maskpath)
        return
    mask = np.asarray(Image.open(maskpath))
    remapped = trans_idx[mask]
    cv2.imwrite(os.path.join(train_label_dir, filename), remapped.astype(np.uint8))
def test_fixedwindow():
    """FixedWindow groups the stream into fixed-size tuples, padding the tail with None."""
    cases = [
        (2, 10, [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]),
        (2, 9, [(0, 1), (2, 3), (4, 5), (6, 7), (8, None)]),
        (1, 3, [(0,), (1,), (2,)]),
    ]
    for window_size, count, expected in cases:
        with BufferingNodeExecutionContext(bonobo.FixedWindow(window_size)) as context:
            context.write_sync(*range(count))
        assert context.get_buffer() == expected
def parse_jpn_number(num):
    """Render *num* as a Japanese-style reading, grouping digits in blocks of four.

    Numbers are split at the 10**4 and 10**8 boundaries and each block of up
    to four digits is read via ``parse_small_jpn_number``.

    NOTE(review): the `+ ''` suffixes below look like stripped unit
    characters (the man/oku kanji) lost in an encoding pass — confirm
    against the original source before relying on the rendered text.
    """
    num_size = len(str(num))
    if num_size <= 4:
        return parse_small_jpn_number(num)
    if num_size <= 8:
        low_digit = num % 10000
        high_digit = num // 10000
        high_read = parse_small_jpn_number(high_digit) + ''
        low_read = parse_small_jpn_number(low_digit) if low_digit != 0 else ''
        return high_read + low_read
    low_digit = num % 10000
    mid_digit = (num // 10000) % 10000
    # BUG FIX: the divisor was missing entirely (`num // `); the highest
    # block is obtained by dividing by 10**8.
    high_digit = num // 100000000
    high_read = parse_small_jpn_number(high_digit) + ''
    mid_read = (parse_small_jpn_number(mid_digit) + '') if mid_digit != 0 else ''
    low_read = parse_small_jpn_number(low_digit) if low_digit != 0 else ''
    return high_read + mid_read + low_read
def get_all_status(sess, bn_mean_tf_var_list, bn_variance_tf_var_list, bn_momentum_tf_var_list, bn_training_tf_var_list):
    """Fetch the current values of the four batch-norm variable groups.

    Args:
        sess: TF session used to evaluate the variables.
        bn_*_tf_var_list: lists of TF variables for each batch-norm quantity.

    Returns:
        A 4-tuple of ``{tf_variable: value}`` dicts in the order
        (mean, variance, momentum, training).
    """
    def fetch(var_list):
        # One sess.run per group; the original's `[v for v in lst]` was a
        # redundant copy of the list.
        return dict(zip(var_list, sess.run(list(var_list))))

    with sess.graph.as_default():
        return (fetch(bn_mean_tf_var_list), fetch(bn_variance_tf_var_list), fetch(bn_momentum_tf_var_list), fetch(bn_training_tf_var_list))
_criterion('ctc', dataclass=CtcCriterionConfig)
class CtcCriterion(FairseqCriterion):
def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask):
super().__init__(task)
self.blank_idx = task.target_dictionary.index(task.blank_symbol)
self.pad_idx = task.target_dictionary.pad()
self.eos_idx = task.target_dictionary.eos()
self.post_process = cfg.post_process
if (cfg.wer_args is not None):
(cfg.wer_kenlm_model, cfg.wer_lexicon, cfg.wer_lm_weight, cfg.wer_word_score) = eval(cfg.wer_args)
if (cfg.wer_kenlm_model is not None):
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
dec_args = Namespace()
dec_args.nbest = 1
dec_args.criterion = 'ctc'
dec_args.kenlm_model = cfg.wer_kenlm_model
dec_args.lexicon = cfg.wer_lexicon
dec_args.beam = 50
dec_args.beam_size_token = min(50, len(task.target_dictionary))
dec_args.beam_threshold = min(50, len(task.target_dictionary))
dec_args.lm_weight = cfg.wer_lm_weight
dec_args.word_score = cfg.wer_word_score
dec_args.unk_weight = (- math.inf)
dec_args.sil_weight = 0
self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
else:
self.w2l_decoder = None
self.zero_infinity = cfg.zero_infinity
self.sentence_avg = cfg.sentence_avg
def forward(self, model, sample, reduce=True):
net_output = model(**sample['net_input'])
lprobs = model.get_normalized_probs(net_output, log_probs=True).contiguous()
if ('src_lengths' in sample['net_input']):
input_lengths = sample['net_input']['src_lengths']
else:
non_padding_mask = (~ net_output['padding_mask'])
input_lengths = non_padding_mask.long().sum((- 1))
pad_mask = ((sample['target'] != self.pad_idx) & (sample['target'] != self.eos_idx))
targets_flat = sample['target'].masked_select(pad_mask)
if ('target_lengths' in sample):
target_lengths = sample['target_lengths']
else:
target_lengths = pad_mask.sum((- 1))
with torch.backends.cudnn.flags(enabled=False):
loss = F.ctc_loss(lprobs, targets_flat, input_lengths, target_lengths, blank=self.blank_idx, reduction='sum', zero_infinity=self.zero_infinity)
ntokens = (sample['ntokens'] if ('ntokens' in sample) else target_lengths.sum().item())
sample_size = (sample['target'].size(0) if self.sentence_avg else ntokens)
logging_output = {'loss': utils.item(loss.data), 'ntokens': ntokens, 'nsentences': sample['id'].numel(), 'sample_size': sample_size}
if (not model.training):
import editdistance
with torch.no_grad():
lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
c_err = 0
c_len = 0
w_errs = 0
w_len = 0
wv_errs = 0
for (lp, t, inp_l) in zip(lprobs_t, (sample['target_label'] if ('target_label' in sample) else sample['target']), input_lengths):
lp = lp[:inp_l].unsqueeze(0)
decoded = None
if (self.w2l_decoder is not None):
decoded = self.w2l_decoder.decode(lp)
if (len(decoded) < 1):
decoded = None
else:
decoded = decoded[0]
if (len(decoded) < 1):
decoded = None
else:
decoded = decoded[0]
p = ((t != self.task.target_dictionary.pad()) & (t != self.task.target_dictionary.eos()))
targ = t[p]
targ_units = self.task.target_dictionary.string(targ)
targ_units_arr = targ.tolist()
toks = lp.argmax(dim=(- 1)).unique_consecutive()
pred_units_arr = toks[(toks != self.blank_idx)].tolist()
c_err += editdistance.eval(pred_units_arr, targ_units_arr)
c_len += len(targ_units_arr)
targ_words = post_process(targ_units, self.post_process).split()
pred_units = self.task.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units, self.post_process).split()
if ((decoded is not None) and ('words' in decoded)):
pred_words = decoded['words']
w_errs += editdistance.eval(pred_words, targ_words)
wv_errs += editdistance.eval(pred_words_raw, targ_words)
else:
dist = editdistance.eval(pred_words_raw, targ_words)
w_errs += dist
wv_errs += dist
w_len += len(targ_words)
logging_output['wv_errors'] = wv_errs
logging_output['w_errors'] = w_errs
logging_output['w_total'] = w_len
logging_output['c_errors'] = c_err
logging_output['c_total'] = c_len
return (loss, sample_size, logging_output)
def reduce_metrics(logging_outputs) -> None:
    """Aggregate per-worker logging outputs into scalar training metrics.

    Reports loss/nll_loss in base-2 (bits), plus character- and word-level
    error counts used to derive UER/WER/raw-WER percentages.
    """

    def _total(key):
        # Sum one field across every worker's logging output.
        return sum(log.get(key, 0) for log in logging_outputs)

    loss_sum = utils.item(_total('loss'))
    ntokens = utils.item(_total('ntokens'))
    nsentences = utils.item(_total('nsentences'))
    sample_size = utils.item(_total('sample_size'))

    metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
    metrics.log_scalar('ntokens', ntokens)
    metrics.log_scalar('nsentences', nsentences)
    if sample_size != ntokens:
        # Per-token loss differs from per-sample loss only when not token-averaged.
        metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)

    c_errors = _total('c_errors')
    metrics.log_scalar('_c_errors', c_errors)
    c_total = _total('c_total')
    metrics.log_scalar('_c_total', c_total)
    w_errors = _total('w_errors')
    metrics.log_scalar('_w_errors', w_errors)
    wv_errors = _total('wv_errors')
    metrics.log_scalar('_wv_errors', wv_errors)
    w_total = _total('w_total')
    metrics.log_scalar('_w_total', w_total)

    def _percentage(err_key, total_key):
        # Derived metric: 100 * errors / total, NaN when the denominator is zero.
        def compute(meters):
            denom = meters[total_key].sum
            if denom > 0:
                return safe_round(meters[err_key].sum * 100.0 / denom, 3)
            return float('nan')
        return compute

    if c_total > 0:
        metrics.log_derived('uer', _percentage('_c_errors', '_c_total'))
    if w_total > 0:
        metrics.log_derived('wer', _percentage('_w_errors', '_w_total'))
        metrics.log_derived('raw_wer', _percentage('_wv_errors', '_w_total'))
def logging_outputs_can_be_summed() -> bool:
    """Whether logging outputs from `forward` can be summed across workers
    before calling `reduce_metrics` (saves cross-worker communication)."""
    return True
def test_context_clear_pass():
    """contextClear removes the listed keys, including 'contextClear' itself."""
    context = Context({'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': 'value4', 'contextClear': ['key2', 'key4', 'contextClear']})
    pypyr.steps.contextclear.run_step(context)
    # Only key1 and key3 survive.
    assert (len(context) == 2)
    assert (context['key1'] == 'value1')
    assert (context['key3'] == 'value3')
    for removed_key in ('key2', 'key4', 'contextClear'):
        assert (removed_key not in context)
class HtmlToNodesParser(HTMLParser):
    """Parse an HTML fragment into a nested node structure.

    Nodes are either plain strings (text) or dicts of the form
    {'tag': ..., 'attrs': {...}, 'children': [...]} (attrs/children optional).
    Only tags in ALLOWED_TAGS are accepted; runs of whitespace are collapsed
    to a single space except inside <pre> blocks.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.nodes = []  # top-level result node list
        self.current_nodes = self.nodes  # node list currently being appended to
        self.parent_nodes = []  # stack of parent node lists for open non-void tags
        self.last_text_node = None  # last emitted text, used for whitespace collapsing
        self.tags_path = []  # stack of currently open tag names

    def add_str_node(self, s):
        """Append text `s`, collapsing whitespace unless inside a <pre> block."""
        if (not s):
            return
        if ('pre' not in self.tags_path):
            # Collapse runs of whitespace to a single space.
            s = RE_WHITESPACE.sub(' ', s)
            # Avoid doubled spaces: strip a leading space when the previous
            # text already ended with one (or there was no previous text).
            if ((self.last_text_node is None) or self.last_text_node.endswith(' ')):
                s = s.lstrip(' ')
            if (not s):
                self.last_text_node = None
                return
            self.last_text_node = s
        if (self.current_nodes and isinstance(self.current_nodes[(- 1)], str)):
            # Merge consecutive text into a single string node.
            self.current_nodes[(- 1)] += s
        else:
            self.current_nodes.append(s)

    def handle_starttag(self, tag, attrs_list):
        """Open `tag`, validating it and descending into its children list."""
        if (tag not in ALLOWED_TAGS):
            raise NotAllowedTag(f'{tag!r} tag is not allowed')
        if (tag in BLOCK_ELEMENTS):
            # Reset collapsing so text after a block start gets its leading
            # space stripped.
            self.last_text_node = None
        node = {'tag': tag}
        self.tags_path.append(tag)
        self.current_nodes.append(node)
        if attrs_list:
            attrs = {}
            node['attrs'] = attrs
            for (attr, value) in attrs_list:
                attrs[attr] = value
        if (tag not in VOID_ELEMENTS):
            # Non-void tags get a children list; subsequent nodes go there.
            self.parent_nodes.append(self.current_nodes)
            self.current_nodes = node['children'] = []

    def handle_endtag(self, tag):
        """Close `tag`, popping back to its parent node list."""
        if (tag in VOID_ELEMENTS):
            return
        if (not len(self.parent_nodes)):
            raise InvalidHTML(f'{tag!r} missing start tag')
        self.current_nodes = self.parent_nodes.pop()
        last_node = self.current_nodes[(- 1)]
        if (last_node['tag'] != tag):
            raise InvalidHTML(f"{tag!r} tag closed instead of {last_node['tag']!r}")
        self.tags_path.pop()
        if (not last_node['children']):
            # Drop empty children lists to keep the output compact.
            last_node.pop('children')

    def handle_data(self, data):
        self.add_str_node(data)

    def handle_entityref(self, name):
        # Named entity (e.g. &amp;) -> its character.
        self.add_str_node(chr(name2codepoint[name]))

    def handle_charref(self, name):
        # Numeric character reference: hex (&#x..;) or decimal (&#..;).
        if name.startswith('x'):
            c = chr(int(name[1:], 16))
        else:
            c = chr(int(name))
        self.add_str_node(c)

    def get_nodes(self):
        """Return the parsed nodes, raising InvalidHTML if any tag is unclosed."""
        if self.parent_nodes:
            not_closed_tag = self.parent_nodes[(- 1)][(- 1)]['tag']
            raise InvalidHTML(f'{not_closed_tag!r} tag is not closed')
        return self.nodes
class LassoCssLexer(DelegatingLexer):
    """Pygments lexer for CSS containing embedded Lasso code.

    Delegates data not handled by `LassoLexer` to `CssLexer`.
    """
    name = 'CSS+Lasso'
    aliases = ['css+lasso']
    version_added = '1.6'
    alias_filenames = ['*.css']
    mimetypes = ['text/css+lasso']
    # NOTE(review): the URL literal below looks truncated in this copy —
    # restore the full value from upstream.
    url = '

    def __init__(self, **options):
        # Lasso delimiters are mandatory when embedded inside CSS.
        options['requiredelimiters'] = True
        super().__init__(CssLexer, LassoLexer, **options)

    def analyse_text(text):
        """Heuristic detection score: Lasso score minus a small bias, plus
        bonuses for CSS-looking 'property: value;' patterns."""
        rv = (LassoLexer.analyse_text(text) - 0.05)
        if re.search('\\w+:[^;]+;', text):
            rv += 0.1
        if ('padding:' in text):
            rv += 0.1
        return rv
def thread_profiler(frame, event, arg):
    """Profiler callback (sys.setprofile style) tracking per-greenlet call timing.

    Counts greenlet context switches and forwards call/return events to the
    current greenlet's state.  Returns itself so profiling stays installed.
    """
    global _state
    assert _state, "Global variable '_state' not set"
    timestamp = clock()
    state = ensure_thread_state(greenlet.getcurrent(), frame)
    # A different greenlet than last time means a context switch happened.
    if (_state.last != state):
        state.context_switch += 1
        _state.last = state
    if (event in ('c_call', 'c_return', 'c_exception')):
        # C-level events carry the callable in `arg`; synthesize trace info
        # since there is no Python frame for it.
        call = {
            'function': arg.__name__,
            'module': (arg.__module__ or '__builtin__'),
            'lineno': '',
            'abs_path': '',
            'filename': '',
            'runtime_id': id(arg),
        }
    else:
        call = get_trace_info(frame)
    if (event in ('call', 'c_call')):
        state.call_enter(call, timestamp)
    elif (event in ('return', 'c_return', 'c_exception')):
        state.call_exit(call, timestamp)
    return thread_profiler
class MODEL(nn.Module):
    """Super-resolution network: a weight-normalized convolutional body of
    residual blocks plus a skip branch, followed by pixel-shuffle upsampling.
    Temporal inputs are folded into the channel dimension."""

    def __init__(self, params):
        super(MODEL, self).__init__()
        self.temporal_size = params.temporal_size
        self.image_mean = params.image_mean
        kernel_size = 3
        skip_kernel_size = 5
        weight_norm = torch.nn.utils.weight_norm
        num_inputs = params.num_channels
        if self.temporal_size:
            # Temporal frames are merged into channels in forward().
            num_inputs *= self.temporal_size
        num_outputs = params.scale * params.scale * params.num_channels

        def _wn_conv(in_channels, out_channels, ksize):
            # Weight-normalized conv with weight_g=1 and zero bias.
            conv = weight_norm(nn.Conv2d(in_channels, out_channels, ksize, padding=(ksize // 2)))
            init.ones_(conv.weight_g)
            init.zeros_(conv.bias)
            return conv

        # Body: entry conv -> residual blocks -> exit conv.
        body = [_wn_conv(num_inputs, params.num_residual_units, kernel_size)]
        residual_scale = 1 / math.sqrt(params.num_blocks)
        body.extend(
            Block(params.num_residual_units, kernel_size, params.width_multiplier,
                  weight_norm=weight_norm, res_scale=residual_scale)
            for _ in range(params.num_blocks)
        )
        body.append(_wn_conv(params.num_residual_units, num_outputs, kernel_size))
        self.body = nn.Sequential(*body)

        # Skip branch: a single wide conv only when channel counts differ.
        skip = []
        if (num_inputs != num_outputs):
            skip.append(_wn_conv(num_inputs, num_outputs, skip_kernel_size))
        self.skip = nn.Sequential(*skip)

        # Upsampling via pixel shuffle when scale > 1.
        shuf = []
        if (params.scale > 1):
            shuf.append(nn.PixelShuffle(params.scale))
        self.shuf = nn.Sequential(*shuf)

    def forward(self, x):
        if self.temporal_size:
            # (N, T, C, H, W) -> (N, T*C, H, W): fold time into channels.
            x = x.view([x.shape[0], (- 1), x.shape[3], x.shape[4]])
        x -= self.image_mean
        x = self.body(x) + self.skip(x)
        x = self.shuf(x)
        x += self.image_mean
        if self.temporal_size:
            # Restore a singleton temporal axis: (N, T', 1, H, W).
            x = x.view([x.shape[0], (- 1), 1, x.shape[2], x.shape[3]])
        return x
def test_default_filesystem_completion(qtmodeltester, config_stub, info, web_history_populated, quickmarks, bookmarks, local_files_path):
    """With only the filesystem category open, favorite paths are offered."""
    config_stub.val.completion.open_categories = ['filesystem']
    config_stub.val.completion.favorite_paths = [str(local_files_path)]
    completion_model = urlmodel.url(info=info)
    completion_model.set_pattern('')
    qtmodeltester.check(completion_model)
    expected = {'Filesystem': [(str(local_files_path), None, None)]}
    _check_completions(completion_model, expected)
def test_pytest_fail_notrace_collection(pytester: Pytester) -> None:
    """`pytest.fail(pytrace=False)` raised during collection prints only the
    message, without the internal traceback."""
    pytester.makepyfile('\n    import pytest\n    def some_internal_function():\n        pytest.fail("hello", pytrace=False)\n    some_internal_function()\n    ')
    result = pytester.runpytest()
    # The failure message is shown...
    result.stdout.fnmatch_lines(['hello'])
    # ...but the failing frame's source is suppressed.
    result.stdout.no_fnmatch_line('*def some_internal_function()*')
def main():
    """Webcam demo: read frames, run detector inference, display results.

    Exits when Esc/q/Q is pressed or when the camera stops delivering frames.
    """
    args = parse_args()
    device = torch.device(args.device)
    model = init_detector(args.config, args.checkpoint, device=device)
    camera = cv2.VideoCapture(args.camera_id)
    print('Press "Esc", "q" or "Q" to exit.')
    try:
        while True:
            (ret_val, img) = camera.read()
            if not ret_val:
                # BUG FIX: the original ignored ret_val, so a failed read
                # passed img=None into inference and crashed.
                break
            result = model_inference(model, img)
            ch = cv2.waitKey(1)
            if ((ch == 27) or (ch == ord('q')) or (ch == ord('Q'))):
                break
            model.show_result(img, result, score_thr=args.score_thr, wait_time=1, show=True)
    finally:
        # Release the capture device (leaked in the original).
        camera.release()
class EvenniaLogFile(logfile.LogFile):
    """Twisted LogFile that re-appends the tail of the old log after rotation,
    so recent channel history remains available in the new file."""
    # Class-body code: resolve the tail-line count from Django settings once,
    # at class definition time (cached in the module-level global).
    global _CHANNEL_LOG_NUM_TAIL_LINES
    if (_CHANNEL_LOG_NUM_TAIL_LINES is None):
        from django.conf import settings
        _CHANNEL_LOG_NUM_TAIL_LINES = settings.CHANNEL_LOG_NUM_TAIL_LINES
    # Number of lines from the rotated-out log to copy into the fresh log.
    num_lines_to_append = _CHANNEL_LOG_NUM_TAIL_LINES

    def rotate(self):
        """Rotate the log file, then re-append the last lines of the old log."""
        append_tail = (self.num_lines_to_append > 0)
        if (not append_tail):
            logfile.LogFile.rotate(self)
            return
        # Capture the tail before rotation swaps files out from under us.
        lines = tail_log_file(self.path, 0, self.num_lines_to_append)
        logfile.LogFile.rotate(self)
        for line in lines:
            self.write(line)

    def seek(self, *args, **kwargs):
        """Delegate seeking to the underlying file object."""
        return self._file.seek(*args, **kwargs)

    def readlines(self, *args, **kwargs):
        """Read lines from the underlying (binary) file, decoded as UTF-8."""
        return [line.decode('utf-8') for line in self._file.readlines(*args, **kwargs)]
def write_to_table(data: Union[(LocalTable, LocalDataset, DistributedDataset)], table: str, namespace: Optional[str]=None, mode: TableWriteMode=TableWriteMode.AUTO, content_type: ContentType=ContentType.PARQUET, *args, **kwargs) -> None:
    """Write the given dataset to a catalog table.

    Not yet implemented for this catalog backend; always raises
    NotImplementedError.
    """
    raise NotImplementedError('write_to_table not implemented')
('expressions_fulfilled')
def expressions_fulfilled(stage, depspec, stagespec):
    """Return True when every expression in the dependency spec evaluates
    truthy against the stage's view; stops at the first failing one."""
    log.debug('checking jsonpath ready predicate\n%s', depspec)
    # Dispatch each expression to its registered handler; all must pass.
    return all(
        exprhandlers[expression['expression_type']](stage.view, expression)
        for expression in depspec['expressions']
    )
class BTOOLS_OT_add_custom(bpy.types.Operator):
    """Blender operator that adds custom geometry to the edited mesh."""
    bl_idname = 'btools.add_custom'
    bl_label = 'Add Custom Geometry'
    bl_options = {'REGISTER', 'UNDO', 'PRESET'}

    # Operator properties describing the custom object to add.
    props: PointerProperty(type=CustomObjectProperty)

    def poll(cls, context):
        # NOTE(review): takes `cls` — presumably decorated with @classmethod
        # upstream; confirm.  Available only with an object in mesh edit mode.
        return ((context.object is not None) and (context.mode == 'EDIT_MESH'))

    def execute(self, context):
        """Ensure the custom material group exists, then run the add logic."""
        add_material_group([MaterialGroup.CUSTOM])
        return add_custom_execute(self, context)

    def draw(self, context):
        # Delegate operator UI drawing to the property group.
        self.props.draw(context, self.layout)
def get_checkpoint_path(model_path, logger):
    """Normalize a TensorFlow checkpoint path.

    Anchors a bare filename to the current directory, resolves a 'checkpoint'
    file to the latest checkpoint in its directory, and strips
    '.data-00000-of-00001' / '.index' suffixes down to the checkpoint prefix.
    `logger` is a callable used to report auto-corrections.
    """
    if os.path.basename(model_path) == model_path:
        # Bare filename: make it an explicit relative path.
        model_path = os.path.join('.', model_path)
    if os.path.basename(model_path) == 'checkpoint':
        assert tf.gfile.Exists(model_path), model_path
        model_path = tf.train.latest_checkpoint(os.path.dirname(model_path))
    # Strip shard/index suffixes so only the checkpoint prefix remains.
    if '00000-of-00001' in model_path:
        corrected = model_path.split('.data')[0]
    elif model_path.endswith('.index'):
        corrected = model_path.split('.index')[0]
    else:
        corrected = model_path
    if corrected != model_path:
        logger('Checkpoint path {} is auto-corrected to {}.'.format(model_path, corrected))
        model_path = corrected
    assert (tf.gfile.Exists(model_path) or tf.gfile.Exists(model_path + '.index')), model_path
    return model_path
def spvcnn_test(weight_path, configs):
    """Build an SPVCNN model and load pretrained weights from `weight_path`.

    Places the model on the local-rank GPU when CUDA is available, else CPU.
    """
    device = (('cuda:%d' % dist.local_rank()) if torch.cuda.is_available() else 'cpu')
    model = SPVCNN(
        num_classes=configs.data.num_classes,
        cr=configs.model.cr,
        pres=configs.dataset.voxel_size,
        vres=configs.dataset.voxel_size,
    ).to(device)
    assert os.path.exists(weight_path)
    logger.info('Load weight from {}\n'.format(weight_path))
    # Checkpoint stores the weights under the 'model' key.
    state_dict = torch.load(weight_path, map_location=device)['model']
    model.load_state_dict(state_dict)
    return model
class Migration(migrations.Migration):
    """Add auto-generated uri/uri_prefix fields to View and refresh the model
    options and help texts for existing fields."""
    dependencies = [('views', '0003_refactoring')]
    operations = [migrations.AlterModelOptions(name='view', options={'ordering': ('uri',), 'verbose_name': 'View', 'verbose_name_plural': 'Views'}), migrations.AddField(model_name='view', name='uri', field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this catalog (auto-generated).', max_length=640, null=True, verbose_name='URI')), migrations.AddField(model_name='view', name='uri_prefix', field=models.URLField(blank=True, help_text='The prefix for the URI of this catalog.', max_length=256, null=True, verbose_name='URI Prefix')), migrations.AlterField(model_name='view', name='comment', field=models.TextField(blank=True, help_text='Additional information about this catalog.', null=True, verbose_name='Comment')), migrations.AlterField(model_name='view', name='key', field=models.SlugField(blank=True, help_text='The internal identifier of this catalog. The URI will be generated from this key.', max_length=128, null=True, verbose_name='Key')), migrations.AlterField(model_name='view', name='template', field=models.TextField(blank=True, help_text='The template for this view, written in Django template language.', null=True, verbose_name='Template'))]
_model
def convformer_m36(pretrained=False, **kwargs):
    """ConvFormer-M36: MetaFormer with SepConv token mixers,
    depths (3, 12, 18, 3) and dims (96, 192, 384, 576).

    When `pretrained` is True, downloads and loads the weights from the URL
    in the model's default config.
    """
    model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
    model.default_cfg = default_cfgs['convformer_m36']
    if pretrained:
        # check_hash guards against corrupted/partial downloads.
        state_dict = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
        model.load_state_dict(state_dict)
    return model
_torch
_vision
class CLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for CLIPFeatureExtractor: expected attributes and output shapes
    for PIL, numpy and torch inputs (single image and batch)."""
    # None when vision deps are missing so the mixin skips gracefully.
    feature_extraction_class = (CLIPFeatureExtractor if is_vision_available() else None)

    def setUp(self):
        self.feature_extract_tester = CLIPFeatureExtractionTester(self)

    def feat_extract_dict(self):
        # NOTE(review): accessed as `self.feat_extract_dict` below —
        # presumably decorated with @property upstream; confirm.
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        """The extractor exposes all expected configuration attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'do_resize'))
        self.assertTrue(hasattr(feature_extractor, 'size'))
        self.assertTrue(hasattr(feature_extractor, 'do_center_crop'))
        self.assertTrue(hasattr(feature_extractor, 'center_crop'))
        self.assertTrue(hasattr(feature_extractor, 'do_normalize'))
        self.assertTrue(hasattr(feature_extractor, 'image_mean'))
        self.assertTrue(hasattr(feature_extractor, 'image_std'))

    def test_batch_feature(self):
        # Covered by the mixin for other extractors; nothing CLIP-specific.
        pass

    def test_call_pil(self):
        """PIL inputs produce (N, C, crop, crop) pixel_values tensors."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Single image -> batch dimension of 1.
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        # Batched input -> full batch size.
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))

    def test_call_numpy(self):
        """numpy array inputs produce the same shapes as PIL inputs."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))

    def test_call_pytorch(self):
        """torch tensor inputs produce the same shapes as PIL inputs."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
def _WindowTruncInfo(win):
    """Collect (title, rect, font, DT_SINGLELINE) tuples for each text item
    of `win`.

    Falls back to the first client rect when an item has no rect of its own,
    and to the window's default font when no per-item font exists.
    """
    # Hoist the accessor calls out of the loop: the original re-called
    # win.client_rects()/win.fonts() up to twice per iteration.
    client_rects = win.client_rects()
    fonts = win.fonts()
    matchedItems = []
    for (i, title) in enumerate(win.texts()):
        if (i < len(client_rects)):
            rect = client_rects[i]
        else:
            rect = client_rects[0]
        if ((len(fonts) - 1) < i):
            font = win.font()
        else:
            font = fonts[i]
        matchedItems.append((title, rect, font, win32defines.DT_SINGLELINE))
    return matchedItems
(auto_attribs=True, frozen=True, kw_only=True)
class FaultElement_CodeRange(FaultElement):
    """Fault element identified by a source-code range."""
    # The code range this fault element points at.
    codeRange: CodeRange

    def toSpecifierStr(self) -> str:
        """Render as a 'LOC:<line>[,<col>]-<line>[,<col>][((weight))]' specifier.

        Columns are appended as ',<col>' only when present; the optional
        weight is wrapped in double parentheses.
        """
        return 'LOC:{start_line}{start_column}-{end_line}{end_column}{str_weigh}'.format(start_line=self.codeRange.start.line, start_column=(f',{self.codeRange.start.column}' if (self.codeRange.start.column is not None) else ''), end_line=self.codeRange.end.line, end_column=(f',{self.codeRange.end.column}' if (self.codeRange.end.column is not None) else ''), str_weigh=(f'(({self.weigh}))' if (self.weigh is not None) else ''))
def print_stats_metrics(y_test, y_pred):
    """Print accuracy, confusion matrix and weighted precision/recall/F1 for
    one evaluation, appending accuracy and precision to the module-level
    score lists.
    """
    # Compute each metric once; the original recomputed accuracy_score and
    # precision_score twice each (once to print, once to append).
    accuracy = accuracy_score(y_test, y_pred)
    print(('Accuracy: %.2f' % accuracy))
    accuracy_scores_list.append(accuracy)
    confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
    print('confusion matrix')
    print(confmat)
    # Cross-tab view with row/column margins for readability.
    print(pd.crosstab(y_test, y_pred, rownames=['True'], colnames=['Predicted'], margins=True))
    precision = precision_score(y_true=y_test, y_pred=y_pred, average='weighted')
    precision_scores_list.append(precision)
    print(('Precision: %.3f' % precision))
    print(('Recall: %.3f' % recall_score(y_true=y_test, y_pred=y_pred, average='weighted')))
    print(('F1-measure: %.3f' % f1_score(y_true=y_test, y_pred=y_pred, average='weighted')))
def verify_all_same_orientation(folder):
    """Check whether every .nii.gz file in `folder` shares one axis orientation.

    Returns (all_same, unique_orientations) where orientations are the axis
    codes derived from each image's affine.
    """
    nii_files = subfiles(folder, suffix='.nii.gz', join=True)
    # Axis codes (e.g. ('R', 'A', 'S')) per file, stacked into an array.
    orientations = np.array([nib.aff2axcodes(nib.load(path).affine) for path in nii_files])
    unique_orientations = np.unique(orientations, axis=0)
    all_same = (len(unique_orientations) == 1)
    return (all_same, unique_orientations)
_module()
class AudioDataset(BaseDataset):
    """Dataset built from raw audio files.

    The annotation file is either JSON (handled by the base class) or plain
    text with one clip per line: <filename> <total_frames> <label> [...].
    """

    def __init__(self, ann_file, pipeline, suffix='.wav', **kwargs):
        # Extension appended to bare filenames from the annotation file.
        self.suffix = suffix
        super().__init__(ann_file, pipeline, modality='Audio', **kwargs)

    def load_annotations(self):
        """Parse the annotation file into a list of per-clip info dicts with
        'audio_path', 'total_frames' and 'label' keys."""
        if self.ann_file.endswith('.json'):
            return self.load_json_annotations()
        video_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                line_split = line.strip().split()
                video_info = {}
                idx = 0
                # Field 0: audio file path (possibly without extension).
                filename = line_split[idx]
                if (self.data_prefix is not None):
                    if (not filename.endswith(self.suffix)):
                        filename = osp.join(self.data_prefix, (filename + self.suffix))
                    else:
                        filename = osp.join(self.data_prefix, filename)
                video_info['audio_path'] = filename
                idx += 1
                # Field 1: number of frames in the clip.
                video_info['total_frames'] = int(line_split[idx])
                idx += 1
                # Remaining fields: one or more integer class labels.
                label = [int(x) for x in line_split[idx:]]
                assert label, f'missing label in line: {line}'
                if self.multi_class:
                    assert (self.num_classes is not None)
                    # Multi-label: one-hot encode over num_classes.
                    onehot = torch.zeros(self.num_classes)
                    onehot[label] = 1.0
                    video_info['label'] = onehot
                else:
                    # Single-label clips must carry exactly one label.
                    assert (len(label) == 1)
                    video_info['label'] = label[0]
                video_infos.append(video_info)
        return video_infos
class ReadInputApp(cmd2.Cmd):
    """cmd2 demo app showcasing the different `read_input` completion modes.

    NOTE(review): the bare `.with_category(EXAMPLE_COMMANDS)` lines below look
    like mangled `@cmd2.with_category(...)` decorators — confirm against the
    upstream cmd2 examples.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Blank line before the prompt for readability.
        self.prompt = ('\n' + self.prompt)
        # History list shared by the commands that use custom history.
        self.custom_history = ['history 1', 'history 2']

    .with_category(EXAMPLE_COMMANDS)
    def do_basic(self, _) -> None:
        """Read input with no completion and no history."""
        self.poutput('Tab completion and up-arrow history is off')
        try:
            self.read_input('> ')
        except EOFError:
            pass

    .with_category(EXAMPLE_COMMANDS)
    def do_basic_with_history(self, _) -> None:
        """Read input with custom history but no tab completion."""
        self.poutput('Tab completion is off but using custom history')
        try:
            input_str = self.read_input('> ', history=self.custom_history)
        except EOFError:
            pass
        else:
            self.custom_history.append(input_str)

    .with_category(EXAMPLE_COMMANDS)
    def do_commands(self, _) -> None:
        """Read input completing against the app's own commands."""
        self.poutput('Tab completing and up-arrow history configured for commands')
        try:
            self.read_input('> ', completion_mode=cmd2.CompletionMode.COMMANDS)
        except EOFError:
            pass

    .with_category(EXAMPLE_COMMANDS)
    def do_custom_choices(self, _) -> None:
        """Read input completing against a static choices list."""
        self.poutput('Tab completing with static choices list and using custom history')
        try:
            input_str = self.read_input('> ', history=self.custom_history, completion_mode=cmd2.CompletionMode.CUSTOM, choices=['choice_1', 'choice_2', 'choice_3'])
        except EOFError:
            pass
        else:
            self.custom_history.append(input_str)

    def choices_provider(self) -> List[str]:
        """Return the choices offered by do_custom_choices_provider."""
        return ['from_provider_1', 'from_provider_2', 'from_provider_3']

    .with_category(EXAMPLE_COMMANDS)
    def do_custom_choices_provider(self, _) -> None:
        """Read input completing against choices from a provider function."""
        self.poutput('Tab completing with choices from provider function and using custom history')
        try:
            input_str = self.read_input('> ', history=self.custom_history, completion_mode=cmd2.CompletionMode.CUSTOM, choices_provider=ReadInputApp.choices_provider)
        except EOFError:
            pass
        else:
            self.custom_history.append(input_str)

    .with_category(EXAMPLE_COMMANDS)
    def do_custom_completer(self, _) -> None:
        """Read input completing filesystem paths."""
        self.poutput('Tab completing paths and using custom history')
        try:
            input_str = self.read_input('> ', history=self.custom_history, completion_mode=cmd2.CompletionMode.CUSTOM, completer=cmd2.Cmd.path_complete)
            self.custom_history.append(input_str)
        except EOFError:
            pass

    .with_category(EXAMPLE_COMMANDS)
    def do_custom_parser(self, _) -> None:
        """Read input completing according to an argparse parser."""
        parser = cmd2.Cmd2ArgumentParser(prog='', description='An example parser')
        parser.add_argument('-o', '--option', help='an optional arg')
        parser.add_argument('arg_1', help='a choice for this arg', metavar='arg_1', choices=['my_choice', 'your_choice'])
        parser.add_argument('arg_2', help='path of something', completer=cmd2.Cmd.path_complete)
        self.poutput('Tab completing with argument parser and using custom history')
        self.poutput(parser.format_usage())
        try:
            input_str = self.read_input('> ', history=self.custom_history, completion_mode=cmd2.CompletionMode.CUSTOM, parser=parser)
        except EOFError:
            pass
        else:
            self.custom_history.append(input_str)
class AllowSame(discord.ui.Button):
    """Button that toggles the parent view's record.allow_same flag."""

    def __init__(self):
        # kd(7) — presumably resolves an emoji for the digit 7; confirm
        # against the kd helper's definition.
        super().__init__(emoji=kd(7))

    async def callback(self, interaction: discord.Interaction):
        """Flip allow_same on the view's record and redraw the view."""
        (await interaction.response.defer())
        self.view.record.allow_same = (not self.view.record.allow_same)
        (await self.view.refresh_view())
class _Highlighter(QSyntaxHighlighter):
    """Highlights case-insensitive occurrences of the words in `pattern`
    inside a text document, colored with `color`."""

    def __init__(self, doc, pattern, color):
        super().__init__(doc)
        self._format = QTextCharFormat()
        self._format.setForeground(color)
        # Longest words first so a longer match wins over its own prefix.
        tokens = sorted(pattern.split(), key=len, reverse=True)
        regex = '|'.join(re.escape(token) for token in tokens)
        self._expression = QRegularExpression(regex, QRegularExpression.PatternOption.CaseInsensitiveOption)
        qtutils.ensure_valid(self._expression)

    def highlightBlock(self, text):
        """Apply the highlight format to every match in the given block."""
        matches = self._expression.globalMatch(text)
        while matches.hasNext():
            m = matches.next()
            self.setFormat(m.capturedStart(), m.capturedLength(), self._format)
def test_create_speaker_vouchers_on_pretix(rf, conference_factory, mocker, speaker_voucher_factory):
    """The admin action creates one pretix voucher per pending speaker voucher
    (co-speakers get a 25% discount instead of a free ticket) and stores the
    returned pretix IDs."""
    mock_create_voucher = mocker.patch('conferences.admin.create_voucher', side_effect=[{'id': 1}, {'id': 2}, {'id': 3}])
    mocker.patch('conferences.admin.messages')
    conference = conference_factory(pretix_speaker_voucher_quota_id=123)
    # Two regular speaker vouchers and one co-speaker voucher, none synced yet.
    voucher_1 = speaker_voucher_factory(conference=conference, voucher_code='SPEAKER-123', pretix_voucher_id=None)
    voucher_2 = speaker_voucher_factory(conference=conference, voucher_code='SPEAKER-456', pretix_voucher_id=None)
    voucher_3 = speaker_voucher_factory(conference=conference, voucher_code='SPEAKER-999', pretix_voucher_id=None, voucher_type=SpeakerVoucher.VoucherType.CO_SPEAKER)
    create_speaker_vouchers_on_pretix(None, request=rf.get('/'), queryset=SpeakerVoucher.objects.filter(conference=conference))
    # Regular speakers get price 0.00; the co-speaker gets 25% off.
    mock_create_voucher.assert_has_calls([call(conference=conference, code='SPEAKER-123', comment=f'Voucher for user_id={voucher_1.user_id}', tag='speakers', quota_id=123, price_mode='set', value='0.00'), call(conference=conference, code='SPEAKER-456', comment=f'Voucher for user_id={voucher_2.user_id}', tag='speakers', quota_id=123, price_mode='set', value='0.00'), call(conference=conference, code='SPEAKER-999', comment=f'Voucher for user_id={voucher_3.user_id}', tag='speakers', quota_id=123, price_mode='percent', value='25.00')], any_order=True)
    voucher_1.refresh_from_db()
    voucher_2.refresh_from_db()
    voucher_3.refresh_from_db()
    # The pretix voucher IDs returned by the API are persisted.
    assert (voucher_1.pretix_voucher_id == 1)
    assert (voucher_2.pretix_voucher_id == 2)
    assert (voucher_3.pretix_voucher_id == 3)
def walk_git_project(*args, exlude_git_dir=True, **kwargs):
    """os.walk wrapper that skips gitignored entries (and optionally '.git').

    Yields (root, dirs, files) like os.walk; `dirs` is pruned in place so
    ignored directories are never descended into.
    """
    ignore_parser = igittigitt.IgnoreParser()
    ignore_parser.parse_rule_files(PROJECT_ROOT)
    is_ignored = ignore_parser.match
    for (root, dirs, files) in os.walk(*args, **kwargs):
        if is_ignored(root):
            continue
        # Prune ignored (and optionally .git) directories in place so
        # os.walk does not descend into them.
        to_remove = [d for d in dirs if (is_ignored(d) or (exlude_git_dir and (d == '.git')))]
        for d in to_remove:
            dirs.remove(d)
        kept_files = [f for f in files if (not is_ignored(f))]
        (yield (root, dirs, kept_files))
_torch
_vision
class EfficientNetImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for EfficientNetImageProcessor: expected attributes, size
    overriding via from_dict, and output shapes for PIL/numpy/torch inputs."""
    # None when vision deps are missing so the mixin skips gracefully.
    image_processing_class = (EfficientNetImageProcessor if is_vision_available() else None)

    def setUp(self):
        self.image_processor_tester = EfficientNetImageProcessorTester(self)

    def image_processor_dict(self):
        # NOTE(review): accessed as `self.image_processor_dict` below —
        # presumably decorated with @property upstream; confirm.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all expected configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        """from_dict honors the stored size and allows overriding it."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        # A plain int override expands to a square size dict.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_call_pil(self):
        """PIL inputs produce (N, C, H, W) pixel_values tensors."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Single image -> batch dimension of 1.
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
        # Batched input -> full batch size.
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))

    def test_call_numpy(self):
        """numpy array inputs produce the same shapes as PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))

    def test_call_pytorch(self):
        """torch tensor inputs produce the same shapes as PIL inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
.grid
def test_transform_wgs84_to_alaska():
    """Transform a WGS84 lon/lat point into Alaska Albers (EPSG:2964).

    Expected coordinates depend on whether the NOAA Alaska datum grid is
    installed.  Uses the deprecated init=/transform pyproj API, hence the
    FutureWarning expectations.
    """
    with pytest.warns(FutureWarning):
        lat_lon_proj = pyproj.Proj(init='epsg:4326', preserve_units=False)
        alaska_aea_proj = pyproj.Proj(init='epsg:2964', preserve_units=False)
    # (longitude, latitude) near the Aleutians, crossing the antimeridian.
    test = ((- 179.72638), 49.752533)
    with pytest.warns(FutureWarning):
        (xx, yy) = pyproj.transform(lat_lon_proj, alaska_aea_proj, *test)
    if grids_available('us_noaa_alaska.tif'):
        assert (f'{xx:.3f} {yy:.3f}' == '-1824924.495 330822.800')
    else:
        # Without the grid, the fallback transform gives slightly different values.
        assert (f'{xx:.3f} {yy:.3f}' == '-1825155.697 330730.391')
def test_DecisionMatrix_to_dict(data_values):
    """to_dict() must round-trip matrix, objectives, weights, labels, dtypes."""
    (mtx, objectives, weights, alternatives, criteria) = data_values(seed=42)
    dm = data.mkdm(matrix=mtx, objectives=objectives, weights=weights, alternatives=alternatives, criteria=criteria)
    expected = {'matrix': mtx, 'objectives': construct_iobjectives(objectives), 'weights': weights, 'alternatives': np.asarray(alternatives), 'criteria': np.asarray(criteria), 'dtypes': np.full(len(weights), float)}
    result = dm.to_dict()
    cmp = {k: np.all((result[k] == expected[k])) for k in result.keys()}
    # BUG FIX: `np.all(cmp.values())` wrapped the dict_values view in a 0-d
    # object array, which is always truthy — the assertion could never fail.
    # Use the builtin all() over the per-key comparison results instead.
    assert all(cmp.values())
_tf
class TFCTRLModelLanguageGenerationTest(unittest.TestCase):
    """Integration test pinning greedy generation output of the CTRL model."""

    def test_lm_generate_ctrl(self):
        """Greedy (do_sample=False) generation from a fixed prompt must
        reproduce the exact reference token ids."""
        model = TFCTRLLMHeadModel.from_pretrained('ctrl')
        # Fixed prompt token ids.
        input_ids = tf.convert_to_tensor([[11859, 0, 1611, 8]], dtype=tf.int32)
        # Reference continuation produced by the released checkpoint.
        expected_output_ids = [11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5]
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
def test_DecisionMatrix_mtx_ndim1(data_values):
    """mkdm must reject a 1-D matrix: the decision matrix has to be 2-D."""
    (mtx, objectives, weights, alternatives, criteria) = data_values(seed=42)
    # Degrade the matrix to 1-D to trigger the validation error.
    mtx = mtx.flatten()
    with pytest.raises(ValueError):
        data.mkdm(matrix=mtx, objectives=objectives, weights=weights, alternatives=alternatives, criteria=criteria)
class CodeEditor(HighlightCurrentLine, HighlightMatchingOccurrences, HighlightMatchingBracket, FullUnderlines, IndentationGuides, CodeFolding, LongLineIndicator, ShowWhitespace, ShowLineEndings, Wrap, BreakPoints, LineNumbers, AutoCompletion, Calltip, Indentation, MoveLinesUpDown, ScrollWithUpDownKeys, HomeKey, EndKey, AutoIndent, PythonAutoIndent, AutoCloseQuotesAndBrackets, SyntaxHighlighting, SmartCopyAndPaste, CodeEditorBase):
    """Full-featured code editor composed entirely of mixin extensions.

    All behavior comes from the listed extension classes (MRO order matters:
    earlier mixins take precedence); no additional logic is needed here.
    """
    pass
def test_allow_gc_cvm():
    """Toggling vm.allow_gc controls whether intermediate storage is freed.

    With allow_gc on, intermediate outputs are garbage-collected after a call
    (storage is None); with it off, they are kept alive.
    """
    mode = config.mode
    if (mode in ['DEBUG_MODE', 'DebugMode']):
        # DebugMode does not use the C VM; force a mode that does.
        mode = 'FAST_RUN'
    v = vector()
    f = function([v], (v + 1), mode=mode)
    f([1])
    # Grab an intermediate/output storage cell to observe GC behavior.
    n = list(f.maker.fgraph.apply_nodes)[0].outputs[0]
    assert (f.vm.storage_map[n][0] is None)
    assert (f.vm.allow_gc is True)
    f.vm.allow_gc = False
    assert (f.vm.allow_gc is False)
    f([1])
    # GC disabled: the storage is retained after the call.
    assert (f.vm.storage_map[n][0] is not None)
    f.vm.allow_gc = True
    assert (f.vm.allow_gc is True)
    f([1])
    # GC re-enabled: the storage is freed again.
    assert (f.vm.storage_map[n][0] is None)
def test_large_union_optimization() -> None:
    """Assignability checks on a very large union stay exact.

    The union holds 10000 literal ints plus TypedValue(str); membership must
    be decided per value rather than via an over-broad approximation.
    """
    val = MultiValuedValue([*[KnownValue(i) for i in range(10000)], TypedValue(str)])
    assert_can_assign(val, KnownValue(1))
    # Int outside the literal range is rejected.
    assert_cannot_assign(val, KnownValue(234234))
    # KnownValue(True) is distinct from KnownValue(1) despite True == 1.
    assert_cannot_assign(val, KnownValue(True))
    # Any str matches the TypedValue(str) member.
    assert_can_assign(val, KnownValue(''))
def test_update_mixin_no_id(gl):
    """UpdateMixin.update() without an object id PUTs to the collection URL
    and returns the server's JSON payload as a dict."""
    class M(UpdateMixin, FakeManager):
        _create_attrs = gl_types.RequiredOptional(required=('foo',), optional=('bar', 'baz'))
        _update_attrs = gl_types.RequiredOptional(required=('foo',), optional=('bam',))
    # NOTE(review): the URL literal below looks truncated in this copy —
    # restore the full value from upstream.
    url = '
    responses.add(method=responses.PUT, url=url, json={'foo': 'baz'}, status=200, match=[responses.matchers.query_param_matcher({})])
    mgr = M(gl)
    server_data = mgr.update(new_data={'foo': 'baz'})
    assert isinstance(server_data, dict)
    assert (server_data['foo'] == 'baz')
    # Exactly one PUT request must have been issued.
    assert (responses.assert_call_count(url, 1) is True)
def weighted_quantile(values, q=0.5, axis=0, sample_weight=None, **kws):
    """Quantile of ``values`` along ``axis``.

    Without ``sample_weight`` this is a plain ``np.quantile``; otherwise
    ``weighted_quantile_1d`` is applied along the requested axis.  A 0-d
    result (1-D input) is unwrapped to a plain float.
    """
    if sample_weight is None:
        return np.quantile(a=values, q=q, axis=axis)
    result = np.apply_along_axis(func1d=weighted_quantile_1d, arr=values, axis=axis, q=q, sample_weight=sample_weight, **kws)
    return float(result) if result.ndim == 0 else result
def check_weights(W, X=None):
    """Validate a spatial weights matrix ``W`` (optionally against data ``X``)
    and return it unchanged.

    Emits a warning when the affinity graph has more than one connected
    component, since a disconnected graph affects downstream solutions.
    """
    if X is not None:
        assert W.shape[0] == X.shape[0], 'W does not have the same number of samples as X'
    affinity = sp.csc_matrix(W)
    affinity.eliminate_zeros()
    n_components, _ = csg.connected_components(affinity)
    if n_components > 1:
        warn(
            f'Spatial affinity matrix is disconnected, and has {n_components} subcomponents.'
            ' This will certainly affect the solution output.',
            stacklevel=3,
        )
    return W
def most_common_viz(output_dir: str, ints: collections.Counter) -> None:
    """Write integer frequencies into ``output_dir`` as both an HTML table
    (index.html) and a tab-separated file (index.tsv)."""
    table = pd.DataFrame(ints.most_common(), columns=['Integer', 'Frequency'])
    html_parts = [
        '<html><body>\n',
        '<h3>Most common integers:</h3>\n',
        table.to_html(index=False),
        '</body></html>',
    ]
    with open(os.path.join(output_dir, 'index.html'), 'w') as fh:
        fh.write(''.join(html_parts))
    with open(os.path.join(output_dir, 'index.tsv'), 'w') as fh:
        fh.write(table.to_csv(sep='\t', index=False))
class GuiChangeLocalFighterMetasCommand(wx.Command):
    """Undoable GUI command that swaps the fighters at the given fit positions
    for a different item id, implemented as remove+add batches so Undo can
    replay them in reverse."""

    def __init__(self, fitID, positions, newItemID):
        wx.Command.__init__(self, True, 'Change Local Fighter Metas')
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        self.positions = positions
        self.newItemID = newItemID

    def Do(self):
        """Replace each selected fighter with the new item.

        Returns True if at least one swap batch succeeded."""
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        results = []
        # Process positions in descending order so removals do not shift the
        # indices of fighters still to be handled.
        for position in sorted(self.positions, reverse=True):
            fighter = fit.fighters[position]
            if (fighter.itemID == self.newItemID):
                continue  # already the requested item; nothing to change
            info = FighterInfo.fromFighter(fighter)
            info.itemID = self.newItemID
            cmdRemove = CalcRemoveLocalFighterCommand(fitID=self.fitID, position=position)
            cmdAdd = CalcAddLocalFighterCommand(fitID=self.fitID, fighterInfo=info, ignoreRestrictions=True)
            results.append(self.internalHistory.submitBatch(cmdRemove, cmdAdd))
        success = any(results)
        # Persist, recalculate the fit and notify the GUI regardless of outcome.
        eos.db.flush()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
        return success

    def Undo(self):
        """Roll back every batch submitted in Do() and re-sync the fit."""
        success = self.internalHistory.undoAll()
        eos.db.flush()
        sFit = Fit.getInstance()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
        return success
def _filter_out_dangerous_actions(resource_nodes: Iterator[ResourceNodeT], game: GameDescription, context: NodeContext) -> Iterator[ResourceNodeT]:
for resource_node in resource_nodes:
if all(((resource not in game.dangerous_resources) for (resource, _) in resource_node.resource_gain_on_collect(context))):
(yield resource_node) |
def main(args):
    """Generate contact labels for every frame in [args.start, batch_end) and
    save them next to the object-fit mesh as ``*_contact.npz``."""
    reader = FrameDataReader(args.seq_folder)
    batch_end = reader.cvt_end(args.end)
    generator = ContactLabelGenerator()
    smpl_fit_name, obj_fit_name = 'fit02', 'fit01'
    for idx in range(args.start, batch_end):
        outfile = reader.objfit_meshfile(idx, obj_fit_name).replace('.ply', '_contact.npz')
        if isfile(outfile) and not args.redo:
            print(outfile, 'done, skipped')
            continue
        smpl = reader.get_smplfit(idx, smpl_fit_name)
        obj = reader.get_objfit(idx, obj_fit_name)
        samples, contacts, vertices = generator.get_contact_labels(generator.to_trimesh(smpl), generator.to_trimesh(obj), args.num_samples)
        # BUG FIX: the original passed a dict positionally to np.savez, which
        # pickles the whole dict into a single unnamed array 'arr_0'.  Pass the
        # arrays as keywords so each one is stored under its intended key.
        np.savez(outfile, object_points=samples, contact_label=contacts, contact_vertices=vertices)
    print('all done')
def _test():
    """Smoke test for the resdrop-ResNet20 variants: checks parameter counts
    and output shapes, and that backprop runs."""
    import torch
    pretrained = False
    # (constructor, number of classes, expected parameter count)
    configs = [
        (resdropresnet20_cifar10, 10, 272474),
        (resdropresnet20_cifar100, 100, 278324),
        (resdropresnet20_svhn, 10, 272474),
    ]
    for model, num_classes, expected_params in configs:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        assert weight_count == expected_params
        x = torch.randn(14, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (14, num_classes)
def construct_indicator(y_score, y):
    """Build a 0/1 prediction matrix from real-valued scores.

    For each sample (row), the top-k scored labels are set to 1, where k is
    that sample's number of true labels in ``y``.

    Parameters
    ----------
    y_score : (n_samples, n_labels) array of scores.
    y : (n_samples, n_labels) binary ground-truth matrix.

    Returns
    -------
    (n_samples, n_labels) int array with exactly ``y[i].sum()`` ones per row.
    """
    # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    num_label = np.sum(y, axis=1, dtype=int)
    # Column indices per row, sorted by descending score.
    y_sort = np.fliplr(np.argsort(y_score, axis=1))
    y_pred = np.zeros_like(y, dtype=int)
    for i in range(y.shape[0]):
        for j in range(num_label[i]):
            y_pred[i, y_sort[i, j]] = 1
    return y_pred
class WriteFileRecordResponse(ModbusResponse):
    """Modbus 'Write File Record' (function code 0x15 / 21) response PDU.

    The normal response echoes the request: one byte count, then for each
    record a '>BHHH' sub-header (reference type 6, file number, record
    number, record length in 16-bit words) followed by the record data.
    """
    function_code = 21
    _rtu_byte_count_pos = 2

    def __init__(self, records=None, **kwargs):
        # records: list of FileRecord objects to echo back (defaults to empty).
        ModbusResponse.__init__(self, **kwargs)
        self.records = (records or [])

    def encode(self):
        """Serialize the response: total byte count then each sub-record."""
        # Each sub-record is a 7-byte header plus record_length words (2 bytes each).
        total_length = sum((((record.record_length * 2) + 7) for record in self.records))
        packet = struct.pack('B', total_length)
        for record in self.records:
            packet += struct.pack('>BHHH', 6, record.file_number, record.record_number, record.record_length)
            packet += record.record_data
        return packet

    def decode(self, data):
        """Parse wire bytes back into ``self.records``.

        ``data[0]`` is the response byte count; ``count`` walks the buffer one
        sub-record (7-byte header + payload) at a time.
        """
        (count, self.records) = (1, [])
        byte_count = int(data[0])
        while (count < byte_count):
            decoded = struct.unpack('>BHHH', data[count:(count + 7)])
            response_length = (decoded[3] * 2)  # record length is in 16-bit words
            count += (response_length + 7)
            record = FileRecord(record_length=decoded[3], file_number=decoded[1], record_number=decoded[2], record_data=data[(count - response_length):count])
            if (decoded[0] == 6):  # only reference type 6 is valid; others are dropped
                self.records.append(record)
class QueueModel(PlaylistModel):
    """Play-queue model: after the contents are replaced via set(), the next
    navigation starts from scratch instead of continuing from the previous
    current song."""

    # Set by set(), cleared by the first next/previous/go_to afterwards.
    # Name-mangled (_QueueModel__reset) on purpose: private to this class.
    __reset = False

    def set(self, songs: Sequence[Any]):
        # Replacing the queue invalidates the remembered position.
        self.__reset = True
        super().set(songs)

    def next(self):
        print_d(f'Using {self.order}.next_explicit() to get next song')
        # After a reset, pass iter None so the order restarts from the top.
        iter_ = (None if self.__reset else self.current_iter)
        self.__reset = False
        self.current_iter = self.order.next_explicit(self, iter_)

    def previous(self):
        iter_ = (None if self.__reset else self.current_iter)
        self.__reset = False
        self.current_iter = self.order.previous_explicit(self, iter_)

    def go_to(self, song_or_iter, explicit=False, source=None):
        # An explicit jump cancels any pending reset.
        self.__reset = False
        return super().go_to(song_or_iter, explicit, source)
class ComputeDescription(Description):
    """Description specialized for compute resources: forces ``rtype`` to
    ``c.COMPUTE`` and rejects input dicts that declare a different type."""
    # NOTE(review): the two bare expressions below look like the arguments of
    # stripped decorators (presumably @rus.takes / @rus.returns on __init__) —
    # as written they are no-op statements; confirm against the original source.
    ('ComputeDescription', rus.optional(dict))
    (rus.nothing)
    def __init__(self, d=None):
        # d: optional dict of initial attributes; may carry an RTYPE entry.
        if d:
            if ((c.RTYPE in d) and (d[c.RTYPE] != c.COMPUTE)):
                raise se.BadParameter(("Cannot create ComputeResource type '%s'" % d[c.RTYPE]))
        self._descr = super(ComputeDescription, self)
        self._descr.__init__(d)
        self.rtype = c.COMPUTE
class TwoLayerCNN(nn.Module):
    """1-D conv encoder: Conv1d (width 3, same padding, no bias) -> ReLU ->
    max-pool(2) -> Linear projection to ``embedding`` dimensions."""

    def __init__(self, C, M, embedding, channel, mtc_input):
        super(TwoLayerCNN, self).__init__()
        self.C = C
        self.M = M
        self.embedding = embedding
        # Multi-channel mode keeps the C input channels separate; otherwise
        # everything is processed through a single channel.
        self.mtc_input = C if mtc_input else 1
        self.conv1 = nn.Conv1d(self.mtc_input, channel, 3, 1, padding=1, bias=False)
        self.flat_size = M // 2 * C // self.mtc_input * channel
        self.fc1 = nn.Linear(self.flat_size, embedding)

    def forward(self, x):
        batch = len(x)
        out = x.view(-1, self.mtc_input, self.M)
        out = F.relu(self.conv1(out))
        out = F.max_pool1d(out, 2)
        out = out.view(batch, self.flat_size)
        return self.fc1(out)
def build_composite_bloq(self, bb: 'BloqBuilder', exponent: 'SoquetT') -> Dict[(str, 'SoquetT')]:
    """Modular exponentiation by repeated squaring.

    The register ``x`` starts in the integer state 1; for each exponent bit
    (index exp_bitsize-1 down to 0) a controlled modular multiplication by
    the current power of the base is applied, and the multiplier is squared
    mod ``self.mod`` after every step.
    """
    x = bb.add(IntState(val=1, bitsize=self.x_bitsize))
    exponent = bb.split(exponent)
    base = self.base
    # Controlled multiply on each bit; k grows as base**(2**step) mod N.
    for j in range((self.exp_bitsize - 1), (0 - 1), (- 1)):
        (exponent[j], x) = bb.add(self._CtrlModMul(k=base), ctrl=exponent[j], x=x)
        base = ((base * base) % self.mod)
    return {'exponent': bb.join(exponent), 'x': x}
_grad()
def _hungarian_match(flat_preds, flat_targets, preds_k, targets_k):
num_samples = flat_targets.shape[0]
assert (preds_k == targets_k)
num_k = preds_k
num_correct = np.zeros((num_k, num_k))
for c1 in range(num_k):
for c2 in range(num_k):
votes = int(((flat_preds == c1) * (flat_targets == c2)).sum())
num_correct[(c1, c2)] = votes
match = linear_sum_assignment((num_samples - num_correct))
match = np.array(list(zip(*match)))
res = []
for (out_c, gt_c) in match:
res.append((out_c, gt_c))
return res |
class PenaltyController():
    """Accessor object handed to penalty/constraint functions: wraps the ocp
    and one NonLinearProgram and exposes the decision variables (t, x, u, p,
    a — scaled and unscaled) at a given node index.

    NOTE(review): the zero-argument accessors below (ocp, t_span, states,
    controls, ...) read like @property methods whose decorators were stripped
    during extraction — e.g. t_span() uses ``self.phases_time_cx`` and
    ``self.time_cx`` as plain attributes, which only works if those are
    properties.  Confirm against the original source.
    """

    def __init__(self, ocp, nlp: NonLinearProgram, t: list, x: list, u: list, x_scaled: list, u_scaled: list, p: ((MX | SX) | list), a: list, a_scaled: list, node_index: int=None):
        self._ocp: Any = ocp
        self._nlp: NonLinearProgram = nlp
        self.t = t
        self.x = x
        self.u = u
        self.x_scaled = x_scaled
        self.u_scaled = u_scaled
        self.a = a
        self.a_scaled = a_scaled
        # Parameters are stacked into one casadi column vector when provided.
        self.p = (vertcat(p) if (p is not None) else p)
        self.node_index = node_index
        # Selects which of the repeated cx nodes the accessors below return.
        self.cx_index_to_get = 0

    def __len__(self):
        # Number of time nodes this controller covers.
        return len(self.t)

    def ocp(self):
        return self._ocp

    def get_nlp(self):
        return self._nlp

    def t_span(self) -> list:
        # Integration span of the current node: [t_node, t_node + dt].
        dt = self.phases_time_cx[self.phase_idx]
        return (vertcat(self.time_cx, (self.time_cx + dt)) + (self.node_index * dt))

    def phases_time_cx(self) -> list:
        return self.ocp.dt_parameter.cx

    def time_cx(self) -> ((MX | SX) | Callable):
        return self._nlp.time_cx

    def cx(self) -> ((MX | SX) | Callable):
        return self._nlp.cx

    def to_casadi_func(self) -> Callable:
        return self._nlp.to_casadi_func

    def control_type(self) -> ControlType:
        return self._nlp.control_type

    def ode_solver(self) -> OdeSolver:
        return self._nlp.ode_solver

    def phase_idx(self) -> int:
        return self._nlp.phase_idx

    def ns(self) -> int:
        return self._nlp.ns

    def mx_to_cx(self):
        return self._nlp.mx_to_cx

    def model(self):
        return self._nlp.model

    def dt(self) -> (MX | SX):
        return self._nlp.dt

    def tf(self) -> (MX | SX):
        return self._nlp.tf

    def time(self) -> OptimizationVariable:
        # Wraps the phase's time variable in a one-element variable list.
        tp = OptimizationVariableList(self._nlp.cx, (self._nlp.phase_dynamics == PhaseDynamics.SHARED_DURING_THE_PHASE))
        tp.append('time', mx=self._nlp.time_mx, cx=[self._nlp.time_cx, self._nlp.time_cx, self._nlp.time_cx], bimapping=BiMapping(to_second=[0], to_first=[0]))
        return tp['time']

    # The following accessors all synchronize the nlp's node_index first, then
    # return the (un)scaled variable list pinned to cx_index_to_get.
    def states(self) -> OptimizationVariableList:
        self._nlp.states.node_index = self.node_index
        out = self._nlp.states.unscaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def controls(self) -> OptimizationVariableList:
        self._nlp.controls.node_index = self.node_index
        out = self._nlp.controls.unscaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def states_dot(self) -> OptimizationVariableList:
        self._nlp.states_dot.node_index = self.node_index
        out = self._nlp.states_dot.unscaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def algebraic_states(self) -> OptimizationVariableList:
        self._nlp.algebraic_states.node_index = self.node_index
        out = self._nlp.algebraic_states.unscaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def integrated_values(self) -> OptimizationVariableList:
        self._nlp.integrated_values.node_index = self.node_index
        out = self._nlp.integrated_values
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def integrate(self):
        # Integrator function of the current node.
        return self._nlp.dynamics[self.node_index]

    def integrate_extra_dynamics(self, dynamics_index):
        return self._nlp.extra_dynamics[dynamics_index][self.node_index]

    def dynamics(self):
        # Main dynamics function (index 0); extra dynamics start at index 1.
        return self._nlp.dynamics_func[0]

    def extra_dynamics(self, dynamics_index):
        return self._nlp.dynamics_func[(dynamics_index + 1)]

    def states_scaled(self) -> OptimizationVariableList:
        self._nlp.states.node_index = self.node_index
        out = self._nlp.states.scaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def controls_scaled(self) -> OptimizationVariableList:
        self._nlp.controls.node_index = self.node_index
        out = self._nlp.controls.scaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def states_dot_scaled(self) -> OptimizationVariableList:
        self._nlp.states_dot.node_index = self.node_index
        out = self._nlp.states_dot.scaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def algebraic_states_scaled(self) -> OptimizationVariableList:
        self._nlp.algebraic_states.node_index = self.node_index
        out = self._nlp.algebraic_states.scaled
        out.current_cx_to_get = self.cx_index_to_get
        return out

    def parameters(self) -> OptimizationVariableList:
        return self._nlp.parameters

    def parameters_scaled(self) -> OptimizationVariableList:
        return self._nlp.parameters.scaled

    def copy(self):
        # Shallow copy sharing the same ocp/nlp references.
        return PenaltyController(self.ocp, self._nlp, self.t, self.x, self.u, self.x_scaled, self.u_scaled, self.p, self.a, self.a_scaled, self.node_index)
def parse_args():
    """Parse CLI options: -i/--input-file (GML source, default graph.gml) and
    -o/--output-file (.ev target, default None)."""
    parser = argparse.ArgumentParser(description='Basic CLI for generating Evennia worlds from GML files.')
    options = (
        ('-i', '--input-file', 'graph.gml', 'Input file *.gml to read in'),
        ('-o', '--output-file', None, 'Output file *.ev to write to'),
    )
    for short, long_name, default, help_text in options:
        parser.add_argument(short, long_name, type=str, default=default, help=help_text)
    return parser.parse_args()
def polynomial_mmd_averages(codes_r, codes_g, n_subsets=100, subset_size=1000, ret_var=True, replace=False, **kernel_args):
    """Estimate the polynomial-kernel MMD over random subsets of two code sets.

    Returns the array of per-subset MMD values, plus the per-subset variance
    estimates when ``ret_var`` is True.
    """
    var_at_m = min(codes_r.shape[0], codes_g.shape[0])
    mmds = np.zeros(n_subsets)
    variances = np.zeros(n_subsets) if ret_var else None
    choice = np.random.choice
    for i in range(n_subsets):
        r = codes_r[choice(len(codes_r), subset_size, replace=replace)]
        g = codes_g[choice(len(codes_g), subset_size, replace=replace)]
        o = polynomial_mmd(r, g, **kernel_args, var_at_m=var_at_m, ret_var=ret_var)
        if ret_var:
            mmds[i], variances[i] = o
        else:
            mmds[i] = o
    return (mmds, variances) if ret_var else mmds
def extract(archives, version, flags):
    """For each platform archive: unpack the pdfium library into the platform
    data dir, record version metadata, then delete the archive."""
    for pl_name, arc_path in archives.items():
        pl_dir = DataDir / pl_name
        system = plat_to_system(pl_name)
        libname = LibnameForSystem[system]
        # Windows ships the library under bin/, all other systems under lib/.
        tar_libdir = 'bin' if system == SysNames.windows else 'lib'
        with tarfile.open(arc_path) as tar:
            tar_extract_file(tar, f'{tar_libdir}/{libname}', pl_dir / libname)
        write_pdfium_info(pl_dir, version, origin='pdfium-binaries', flags=flags)
        arc_path.unlink()
def graph_construction(args, triples, num_entities, num_relations):
    """Build a graph from (h, r, t) triples, with or without self-loops, and
    move node/edge features to the GPU when args.cuda is set."""
    triple_array = np.array(triples, dtype=np.int64).transpose()
    builder = build_graph_from_triples if args.self_loop == 1 else build_graph_from_triples_without_loop
    graph = builder(num_nodes=num_entities, num_relations=num_relations, triples=triple_array)
    logging.info('Graph information (nodes = {}, edges={})'.format(graph.number_of_nodes(), graph.number_of_edges()))
    if args.cuda:
        for key, value in graph.ndata.items():
            graph.ndata[key] = value.cuda()
        for key, value in graph.edata.items():
            graph.edata[key] = value.cuda()
    return graph
def load_plugins():
    """Discover sibling ``*.py`` plugin modules, import each one, and register
    it in ``monitor_description`` keyed by the module's self-reported job name.

    Fixes: removed the dead ``modules = []`` accumulator and the confusing
    reuse of one variable for both the file path and the imported module.
    """
    plugin_path = os.path.dirname(__file__)
    sys.path.append(plugin_path)
    for module_file in glob.glob(os.path.join(plugin_path, '*.py')):
        if '__init__' in module_file:
            continue  # the package init is not a plugin
        name = os.path.basename(module_file).replace('.py', '')
        module = import_by_name(name)
        if not module:
            continue  # import failed; skip silently, as before
        monitor_description[module._whats_your_job()] = module
def parse_global_quanta(df, mol, verbose=True, dataframe_type='pandas'):
    """Dispatch to the HITRAN global-quanta parser matching molecule ``mol``.

    Raises ValueError for molecules that belong to none of classes 1-10.
    """
    # (molecule group, parser, whether the parser accepts dataframe_type)
    dispatch = (
        (HITRAN_CLASS1, _parse_HITRAN_class1, True),
        (HITRAN_CLASS2, _parse_HITRAN_class2, False),
        (HITRAN_CLASS3, _parse_HITRAN_class3, False),
        (HITRAN_CLASS4, _parse_HITRAN_class4, True),
        (HITRAN_CLASS5, _parse_HITRAN_class5, True),
        (HITRAN_CLASS6, _parse_HITRAN_class6, True),
        (HITRAN_CLASS7, _parse_HITRAN_class7, False),
        (HITRAN_CLASS8, _parse_HITRAN_class8, False),
        (HITRAN_CLASS9, _parse_HITRAN_class9, False),
        (HITRAN_CLASS10, _parse_HITRAN_class10, False),
    )
    for group, parser, accepts_type in dispatch:
        if mol in group:
            if accepts_type:
                return parser(df, verbose=verbose, dataframe_type=dataframe_type)
            return parser(df, verbose=verbose)
    raise ValueError('Unknown class for molecule {0}. Cant parse global quanta'.format(mol))
class SubscriberRTD(RTD):
    """RTD whose value mirrors a pubsub topic; shows 'Waiting...' until the
    first message arrives."""

    def __init__(self, topic):
        super().__init__(value='Waiting...')
        self.__topic = topic

    def connect(self):
        # Start receiving updates for this topic.
        pubsub.subscribe(self.__topic, self.__on_message)

    def disconnect(self):
        pubsub.unsubscribe(self.__topic, self.__on_message)

    def __on_message(self, value):
        self.value = value
def summary_trace(pkts, res):
    """Print a host-pair packet-count table and optionally append it to the
    file named by ``res.OUTPUT``.

    NOTE(review): ``packet_counts``, ``s``, ``red``, ``host_formatter`` and
    ``SingleTable`` are not defined in this function — presumably module-level
    globals/imports filled elsewhere; confirm.
    """
    table_data = [['HOST 1', 'HOST 2', 'COUNT']]
    for key, count in packet_counts.items():
        table_data.append([host_formatter(key[0], pkts), host_formatter(key[1], pkts), count])
    # BUG FIX: the original tested `table_data == 1`, comparing the list itself
    # to an int (always False). The intent is "no rows besides the header".
    if len(table_data) == 1:
        table = SingleTable([[red('No packets captured')]])
    else:
        table = SingleTable(table_data)
    print(f'{s} Packet trace {s}')
    print(table.table)
    if res.OUTPUT:
        with open(res.OUTPUT, 'a') as out_file:
            out_file.write(f'''
{table.table}
''')
    print('')
# FIX: the original decorator line was the syntactically invalid remnant
# `.xfail(raises=NotImplementedError)`; restored the pytest.mark prefix.
@pytest.mark.xfail(raises=NotImplementedError)
def test_scan_mit_mot():
    """Gradient through a mit-mot scan (two taps) — expected to fail until the
    backend implements it."""
    xs = pt.vector('xs', shape=(10,))
    ys, _ = scan(lambda xtm2, xtm1: xtm2 + xtm1, outputs_info=[{'initial': xs, 'taps': [-2, -1]}], n_steps=10)
    grads_wrt_xs = pt.grad(ys.sum(), wrt=xs)
    fg = FunctionGraph([xs], [grads_wrt_xs])
    compare_jax_and_py(fg, [np.arange(10)])
class BookmarkNotify(EventPlugin, PluginConfigMixin):
    """Shows desktop notifications for a song's bookmarks/comments as playback
    reaches their timestamps.

    NOTE(review): ``bookmarks`` is defined twice below (a lazy getter and what
    reads like a setter taking ``bms``), and the code both assigns
    ``self.bookmarks = []`` and calls ``self.bookmarks.popleft()`` — this only
    works if @property / @bookmarks.setter decorators were stripped during
    extraction.  Confirm against the original source.
    """
    PLUGIN_ID = 'BookmarkNotify'
    PLUGIN_NAME = _('Bookmark Notifications')
    PLUGIN_DESC = _('Uses notifications to display bookmarks / comments in real-time. Works well for the Soundcloud browser.')
    PLUGIN_ICON = Icons.DIALOG_INFORMATION

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._timer = None  # GLib timeout source id while the plugin is enabled
        self.song: (AudioFile | None) = None
        self._bookmarks = []  # pending (time, text) entries, earliest first
        self._loaded = False  # True once the current song's bookmarks were read

    def enabled(self) -> None:
        # Poll the playback position once per second.
        self._timer = GLib.timeout_add(1000, self._sync)
        if app.player:
            self._player: BasePlayer = app.player
        else:
            raise TypeError(f'App {app} has no player set up')

    def disabled(self) -> None:
        GLib.source_remove(self._timer)

    def _sync(self) -> bool:
        """Timer callback: drop every bookmark whose time has passed, showing a
        notification only for the first one. Returns True to keep the timer."""
        if (not self._player.paused):
            shown = 0
            while self.bookmarks:
                (t, text) = self.bookmarks[0]
                if ((t * 1000) < self._player.get_position()):
                    if (not shown):
                        self._show(t, text)
                    shown += 1
                    self.bookmarks.popleft()
                else:
                    return True
        return True

    def bookmarks(self):
        # Lazily load the current song's bookmarks into a deque.
        if ((not self._bookmarks) and (not self._loaded) and self.song):
            self._bookmarks = deque(self.song.bookmarks)
            if self._bookmarks:
                print_d(f'Loaded {len(self._bookmarks)} bookmarks')
            self._loaded = True
        return self._bookmarks

    def bookmarks(self, bms):
        # Setter counterpart (see the class NOTE about stripped decorators).
        self._bookmarks = deque(bms)

    def _show(self, ts: int, line: str) -> None:
        """Pop a desktop notification for one bookmark line."""
        if (not self.song):
            return
        msg = f' {format_time(ts)}: <b>{line.strip()}</b> '
        print_d(msg)
        line = GLib.markup_escape_text(line)
        notif = Notify.Notification.new(f"Quodlibet {self.song('title')}", f'<b>{line}</b> {format_time(ts)}', 'user-idle')
        notif.show()

    def plugin_on_song_started(self, song) -> None:
        # New song: reset state so bookmarks reload lazily.
        self.song = song
        if self.song:
            Notify.init(f"Quodlibet - {self.song('title')}")
        self.bookmarks = []
        self._loaded = False

    def plugin_on_seek(self, song: AudioFile, msec: int) -> None:
        self.song = song
        self._reset_to(msec)

    def plugin_on_changed(self, songs):
        # Song metadata changed: force a reload of its bookmarks.
        if (self.song in songs):
            print_d('Song has been changed, reloading')
            self._loaded = False
            self.bookmarks = []

    def _reset_to(self, msec: int) -> None:
        """Reload the bookmark queue and discard entries before ``msec``."""
        print_d(f'Resetting to {format_time((msec / 1000))}')
        self.bookmarks = deque(([] if (self.song is None) else self.song.bookmarks))
        while self.bookmarks:
            (t, text) = self.bookmarks[0]
            if ((t * 1000) < msec):
                self.bookmarks.popleft()
            else:
                time_str = format_time((msec / 1000))
                print_d(f'Next bookmark at {format_time(t)}s (at {time_str}) - {len(self.bookmarks)} left')
                return
        print_d('Finished bookmarks')
class TesttestGeoBUGSTextIO():
    """Tests for the GeoBUGS text weights reader/writer using the bundled
    'scot' and 'columbus' example datasets."""

    def setup_method(self):
        self.test_file_scot = test_file_scot = pysal_examples.get_path('geobugs_scot')
        self.test_file_col = test_file_col = pysal_examples.get_path('spdep_listw2WB_columbus')
        self.obj_scot = GeoBUGSTextIO(test_file_scot, 'r')
        self.obj_col = GeoBUGSTextIO(test_file_col, 'r')

    def test_close(self):
        # Reading after close() must raise.
        for obj in [self.obj_scot, self.obj_col]:
            f = obj
            f.close()
            pytest.raises(ValueError, f.read)

    def test_read(self):
        # Known properties of the two example weight matrices.
        w_scot = self.obj_scot.read()
        assert (w_scot.n == 56)
        assert (w_scot.mean_neighbors == 4.)
        assert ([1.0, 1.0, 1.0] == list(w_scot[1].values()))
        w_col = self.obj_col.read()
        assert (w_col.n == 49)
        assert (w_col.mean_neighbors == 4.)
        assert ([0.5, 0.5] == list(w_col[1].values()))

    def test_seek(self):
        # A second read() raises StopIteration until seek(0) rewinds the file.
        self.test_read()
        pytest.raises(StopIteration, self.obj_scot.read)
        pytest.raises(StopIteration, self.obj_col.read)
        self.obj_scot.seek(0)
        self.obj_col.seek(0)
        self.test_read()

    def test_write(self):
        # Round-trip each matrix through a temp file and compare density.
        for obj in [self.obj_scot, self.obj_col]:
            w = obj.read()
            f = tempfile.NamedTemporaryFile(suffix='')
            fname = f.name
            f.close()
            o = FileIO(fname, 'w', 'geobugs_text')
            o.write(w)
            o.close()
            wnew = FileIO(fname, 'r', 'geobugs_text').read()
            assert (wnew.pct_nonzero == w.pct_nonzero)
            os.remove(fname)
class NNBase(nn.Module):
    """Base policy network with an optional GRU for recurrence.

    ``_forward_gru`` supports both single-step acting (one state batch) and
    multi-step training (a T*N flattened sequence), resetting the hidden
    state wherever ``masks`` is zero (episode boundaries).
    """

    def __init__(self, recurrent, recurrent_input_size, hidden_size):
        super(NNBase, self).__init__()
        self._hidden_size = hidden_size
        self._recurrent = recurrent
        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Common RL initialization: orthogonal weights, zero biases.
            for (name, param) in self.gru.named_parameters():
                if ('bias' in name):
                    nn.init.constant_(param, 0)
                elif ('weight' in name):
                    nn.init.orthogonal_(param)

    def is_recurrent(self):
        return self._recurrent

    def recurrent_hidden_state_size(self):
        # Non-recurrent policies still carry a size-1 dummy hidden state.
        if self._recurrent:
            return self._hidden_size
        return 1

    def output_size(self):
        return self._hidden_size

    def _forward_gru(self, x, hxs, masks):
        """Run the GRU over ``x`` with episode-boundary resets.

        Two cases:
        * x.size(0) == hxs.size(0): a single timestep per environment (acting).
        * otherwise: x is a (T*N, ...) training batch, reshaped to (T, N, ...)
          and processed in chunks split at timesteps where any mask is zero,
          so the hidden state is zeroed at episode starts.
        """
        if (x.size(0) == hxs.size(0)):
            (x, hxs) = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            N = hxs.size(0)
            T = int((x.size(0) / N))
            x = x.view(T, N, x.size(1))
            masks = masks.view(T, N)
            # Timesteps (past the first) where any environment restarts.
            has_zeros = (masks[1:] == 0.0).any(dim=(- 1)).nonzero().squeeze().cpu()
            if (has_zeros.dim() == 0):
                has_zeros = [(has_zeros.item() + 1)]  # 0-d tensor -> singleton list
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # Chunk boundaries: sequence start, every reset point, sequence end.
            has_zeros = (([0] + has_zeros) + [T])
            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range((len(has_zeros) - 1)):
                start_idx = has_zeros[i]
                end_idx = has_zeros[(i + 1)]
                # Zero the hidden state of environments whose episode restarted
                # at start_idx, then run the GRU over the whole chunk at once.
                (rnn_scores, hxs) = self.gru(x[start_idx:end_idx], (hxs * masks[start_idx].view(1, (- 1), 1)))
                outputs.append(rnn_scores)
            x = torch.cat(outputs, dim=0)
            x = x.view((T * N), (- 1))
            hxs = hxs.squeeze(0)
        return (x, hxs)
def split_input(model_input, total_pixels, n_pixels=10000):
    """Split a rendering-input dict into chunks of at most ``n_pixels`` pixels,
    indexing 'uv' (and 'object_mask' when present) along dim 1."""
    pixel_ids = torch.arange(total_pixels).cuda()
    chunks = []
    for indx in torch.split(pixel_ids, n_pixels, dim=0):
        piece = model_input.copy()
        piece['uv'] = torch.index_select(model_input['uv'], 1, indx)
        if 'object_mask' in piece:
            piece['object_mask'] = torch.index_select(model_input['object_mask'], 1, indx)
        chunks.append(piece)
    return chunks
def bench_json_loads(objs):
    """Benchmark body: parse every JSON document in ``objs`` 20 times.

    The original unrolled the 20 calls by hand; a loop is equivalent here and
    the repetition still amplifies parse cost relative to loop overhead.
    """
    for obj in objs:
        for _ in range(20):
            json.loads(obj)
class TestTPBGroupedWeightedPauliOperator(QiskitAquaTestCase):
    """Tests for tensor-product-basis (TPB) grouping of weighted Pauli
    operators: grouping preserves the Pauli terms, reduces the number of
    measurement bases, survives chopping, and evaluates consistently."""

    def setUp(self):
        super().setUp()
        seed = 1
        aqua_globals.random_seed = seed
        # Random 3-qubit operator over all I/X/Y/Z Pauli strings.
        self.num_qubits = 3
        paulis = [Pauli(''.join(pauli_label)) for pauli_label in itertools.product('IXYZ', repeat=self.num_qubits)]
        weights = aqua_globals.random.random(len(paulis))
        self.qubit_op = WeightedPauliOperator.from_list(paulis, weights)
        self.var_form = EfficientSU2(self.qubit_op.num_qubits, reps=1)
        qasm_simulator = BasicAer.get_backend('qasm_simulator')
        self.quantum_instance_qasm = QuantumInstance(qasm_simulator, shots=65536, seed_simulator=seed, seed_transpiler=seed)
        statevector_simulator = BasicAer.get_backend('statevector_simulator')
        self.quantum_instance_statevector = QuantumInstance(statevector_simulator, shots=1, seed_simulator=seed, seed_transpiler=seed)

    def test_sorted_grouping(self):
        """Sorted grouping keeps every Pauli term and does not grow the basis."""
        num_qubits = 2
        paulis = [Pauli(''.join(pauli_label)) for pauli_label in itertools.product('IXYZ', repeat=num_qubits)]
        weights = aqua_globals.random.random(len(paulis))
        op = WeightedPauliOperator.from_list(paulis, weights)
        grouped_op = op_converter.to_tpb_grouped_weighted_pauli_operator(op, TPBGroupedWeightedPauliOperator.sorted_grouping)
        # Every grouped Pauli must exist in the original op with equal weight.
        for g_p in grouped_op.paulis:
            passed = False
            for pauli in op.paulis:
                if (pauli[1] == g_p[1]):
                    passed = (pauli[0] == g_p[0])
                    break
            self.assertTrue(passed, 'non-existed paulis in grouped_paulis: {}'.format(g_p[1].to_label()))
        self.assertGreaterEqual(len(op.basis), len(grouped_op.basis))

    def test_unsorted_grouping(self):
        """Same invariants as sorted grouping, for the unsorted strategy."""
        num_qubits = 4
        paulis = [Pauli(''.join(pauli_label)) for pauli_label in itertools.product('IXYZ', repeat=num_qubits)]
        weights = aqua_globals.random.random(len(paulis))
        op = WeightedPauliOperator.from_list(paulis, weights)
        grouped_op = op_converter.to_tpb_grouped_weighted_pauli_operator(op, TPBGroupedWeightedPauliOperator.unsorted_grouping)
        for g_p in grouped_op.paulis:
            passed = False
            for pauli in op.paulis:
                if (pauli[1] == g_p[1]):
                    passed = (pauli[0] == g_p[0])
                    break
            self.assertTrue(passed, 'non-existed paulis in grouped_paulis: {}'.format(g_p[1].to_label()))
        self.assertGreaterEqual(len(op.basis), len(grouped_op.basis))

    def test_chop(self):
        """chop() must drop the bases whose every coefficient is below threshold."""
        paulis = [Pauli(x) for x in ['IIXX', 'ZZXX', 'ZZZZ', 'XXZZ', 'XXXX', 'IXXX']]
        coeffs = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
        op = WeightedPauliOperator.from_list(paulis, coeffs)
        grouped_op = op_converter.to_tpb_grouped_weighted_pauli_operator(op, TPBGroupedWeightedPauliOperator.sorted_grouping)
        original_num_basis = len(grouped_op.basis)
        # Threshold 0.35 removes the 0.2/0.3 terms (the ZZXX basis).
        chopped_grouped_op = grouped_op.chop(0.35, copy=True)
        self.assertLessEqual(len(chopped_grouped_op.basis), 3)
        self.assertLessEqual(len(chopped_grouped_op.basis), original_num_basis)
        for (b, _) in chopped_grouped_op.basis:
            self.assertFalse((b.to_label() == 'ZZXX'))
        # Threshold 0.55 also removes ZZZZ and XXZZ.
        chopped_grouped_op = grouped_op.chop(0.55, copy=True)
        self.assertLessEqual(len(chopped_grouped_op.basis), 1)
        self.assertLessEqual(len(chopped_grouped_op.basis), original_num_basis)
        for (b, _) in chopped_grouped_op.basis:
            self.assertFalse((b.to_label() == 'ZZXX'))
            self.assertFalse((b.to_label() == 'ZZZZ'))
            self.assertFalse((b.to_label() == 'XXZZ'))

    def test_evaluate_qasm_mode(self):
        """Grouped evaluation must agree with the statevector reference within
        3 sigma and have no larger statistical error than ungrouped."""
        wave_function = self.var_form.assign_parameters(np.array(aqua_globals.random.standard_normal(self.var_form.num_parameters)))
        wave_fn_statevector = self.quantum_instance_statevector.execute(wave_function).get_statevector(wave_function)
        reference = self.qubit_op.copy().evaluate_with_statevector(wave_fn_statevector)
        # Spread the shot budget over the per-Pauli circuits.
        shots = (65536 // len(self.qubit_op.paulis))
        self.quantum_instance_qasm.set_config(shots=shots)
        circuits = self.qubit_op.construct_evaluation_circuit(wave_function=wave_function, statevector_mode=False)
        result = self.quantum_instance_qasm.execute(circuits)
        pauli_value = self.qubit_op.evaluate_with_result(result=result, statevector_mode=False)
        grouped_op = op_converter.to_tpb_grouped_weighted_pauli_operator(self.qubit_op, TPBGroupedWeightedPauliOperator.sorted_grouping)
        # Grouping needs fewer circuits, so each group gets more shots.
        shots = (65536 // grouped_op.num_groups)
        self.quantum_instance_qasm.set_config(shots=shots)
        circuits = grouped_op.construct_evaluation_circuit(wave_function=wave_function, statevector_mode=False)
        grouped_pauli_value = grouped_op.evaluate_with_result(result=self.quantum_instance_qasm.execute(circuits), statevector_mode=False)
        self.assertGreaterEqual(reference[0].real, (grouped_pauli_value[0].real - (3 * grouped_pauli_value[1].real)))
        self.assertLessEqual(reference[0].real, (grouped_pauli_value[0].real + (3 * grouped_pauli_value[1].real)))
        self.assertLessEqual(grouped_pauli_value[1].real, pauli_value[1].real)

    def test_equal(self):
        """Operators grouped by different strategies still compare equal."""
        gop_1 = op_converter.to_tpb_grouped_weighted_pauli_operator(self.qubit_op, TPBGroupedWeightedPauliOperator.sorted_grouping)
        gop_2 = op_converter.to_tpb_grouped_weighted_pauli_operator(self.qubit_op, TPBGroupedWeightedPauliOperator.unsorted_grouping)
        self.assertEqual(gop_1, gop_2)
@torch.no_grad()  # NOTE(review): restored from the garbled bare remnant `_grad()`; inference needs no gradients
def generate_prediction_scores(model, test_dataloader, test_dataset, args):
    """Run ``model.prediction`` over the test loader and collect score frames.

    Returns (scores, errors): ``scores`` is the concatenation of per-batch
    DataFrames that could be aligned with ``test_dataset.index``; frames whose
    alignment failed are returned separately in ``errors``.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    model.to(device)
    model.eval()
    test_loss = 0  # NOTE(review): never accumulated; kept for parity with the original
    ls = []
    err = []
    with tqdm(total=((len(test_dataloader) - args.seq_length) + 1)) as pbar:
        for i, (char, _) in enumerate(test_dataloader):
            char = char.to(device)
            if char.shape[1] != args.seq_length:
                continue  # skip ragged batches shorter than the sequence length
            predictions = model.prediction(char.float())
            df = pd.DataFrame(predictions.cpu().numpy(), columns=['score'])
            try:
                # Align this batch's rows with the dataset's multi-index slice.
                index = test_dataset.index[(((args.seq_length + i) - 1) * args.batch_size):((args.seq_length + i) * args.batch_size)]
                df.index = index
                df.drop('empty', level='instrument', inplace=True)
                ls.append(df)
            except Exception:  # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt
                err.append(df)
            pbar.update(1)
    return (pd.concat(ls), err)
@keras_test  # NOTE(review): restored from the bare `_test` remnant; confirm the decorator name against the original
def test_merge_sum(in_tmpdir):
    """Merge(mode='sum') model: fit/evaluate/predict, a weight save/load
    round-trip that preserves the loss, and config/JSON/YAML serialization."""
    ((x_train, y_train), (x_test, y_test)) = _get_test_data()
    left = Sequential()
    left.add(Dense(num_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(num_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Exercise the various fit configurations.
    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0, validation_data=([x_test, x_test], y_test))
    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0, validation_split=0.1)
    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0, shuffle=False)
    loss = model.evaluate([x_test, x_test], y_test, verbose=0)
    model.predict([x_test, x_test], verbose=0)
    model.predict_classes([x_test, x_test], verbose=0)
    model.predict_proba([x_test, x_test], verbose=0)
    # Save weights, rebuild an identical architecture, and reload them.
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(num_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(num_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    nloss = model.evaluate([x_test, x_test], y_test, verbose=0)
    # Reloaded weights must reproduce the exact loss.
    assert (loss == nloss)
    # Serialization round-trips.
    config = model.get_config()
    Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)
    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
def _detectDefaultFoamDir():
    """Best-effort detection of the default OpenFOAM installation directory.

    Returns the installation path, or None when nothing is found (printing a
    notice on unsupported platforms).
    """
    if platform.system() == 'Linux':
        found = _searchSubfolder('/opt', 'openfoam')  # hoisted: searched once, not twice
        if found:
            return found
        found = _searchSubfolder(os.path.expanduser('~'), 'OpenFOAM')
        if found:
            # BUG FIX: the original computed this result but fell through
            # without returning it.
            return found
    elif platform.system() == 'Windows':
        blueCFD_dir = _searchSubfolder('C:\\Program Files', 'blueCFD')
        if blueCFD_dir:
            return _searchSubfolder(blueCFD_dir, 'OpenFOAM')
    else:
        print('Default OpenFOAM installation check is not supported, please export WM_PROJECT_DIR on ', platform.system())
# FIX: the original decorator line was the syntactically invalid remnant
# `.parametrize(...)`; restored the pytest.mark prefix.
@pytest.mark.parametrize(
    ('expr', 'expected_passed'),
    [
        ('xyz', ['test_one']),
        ('((( xyz)) )', ['test_one']),
        ('not not xyz', ['test_one']),
        ('xyz and xyz2', []),
        ('xyz2', ['test_two']),
        ('xyz or xyz2', ['test_one', 'test_two']),
    ],
)
def test_mark_option(expr: str, expected_passed: List[Optional[str]], pytester: Pytester) -> None:
    """Running ``pytest -m <expr>`` selects exactly the tests whose marks match."""
    # NOTE(review): the generated module's source was whitespace-mangled in the
    # original ('.xyz' remnants, flat indentation); reconstructed here with
    # proper @pytest.mark decorators — confirm against upstream.
    pytester.makepyfile(
        """
        import pytest
        @pytest.mark.xyz
        def test_one():
            pass
        @pytest.mark.xyz2
        def test_two():
            pass
        """
    )
    rec = pytester.inline_run('-m', expr)
    passed, skipped, fail = rec.listoutcomes()
    passed_str = [x.nodeid.split('::')[-1] for x in passed]
    assert passed_str == expected_passed
class BooleanArrayMask(MaskBase):
    """A mask defined by a boolean array tied to a WCS.

    The array may mark either the *included* or the *excluded* voxels,
    controlled by ``include``; ``_include``/``_exclude`` invert as needed.
    The array may also be smaller than the target ``shape`` as long as it is
    broadcastable to it, in which case a zero-stride view is built.
    """

    def __init__(self, mask, wcs, shape=None, include=True):
        # Record whether `mask` marks included or excluded voxels.
        self._mask_type = ('include' if include else 'exclude')
        self._wcs = wcs
        # WCS objects already verified equal to self._wcs; skips re-checking.
        self._wcs_whitelist = set()
        if ((shape is not None) and (not is_broadcastable_and_smaller(mask.shape, shape))):
            raise ValueError('Mask cannot be broadcast to the specified shape.')
        self._shape = (shape or mask.shape)
        self._mask = mask
        if (shape is not None):
            # Broadcast the mask up to the requested shape with a zero-stride
            # view (no copy): leading dims are prepended, and any existing
            # size-1 dims that must expand get stride 0.
            n_empty_dims = (len(self._shape) - mask.ndim)
            extra_dims = [ii for (ii, (sh1, sh2)) in enumerate(zip((((0,) * n_empty_dims) + mask.shape), shape)) if ((sh1 == 1) and (sh1 != sh2))]
            n_extra_dims = (n_empty_dims + len(extra_dims))
            if (n_extra_dims > 0):
                strides = (((0,) * n_empty_dims) + mask.strides)
                for ed in extra_dims:
                    assert (strides[ed] == 0), 'Stride shape failure'
                self._mask = as_strided(mask, shape=self.shape, strides=strides)
        assert (self._mask.shape == self.shape), 'Shape initialization failure'

    def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
        """Check that new data/WCS are compatible with this mask.

        Raises ValueError on shape or WCS mismatch; a WCS that passes the
        equality check is whitelisted so it is not re-verified later.
        """
        if ((new_data is not None) and (not is_broadcastable_and_smaller(self._mask.shape, new_data.shape))):
            raise ValueError('data shape cannot be broadcast to match mask shape')
        if (new_wcs is not None):
            if (new_wcs not in self._wcs_whitelist):
                try:
                    if (not wcs_utils.check_equality(new_wcs, self._wcs, warn_missing=True, **kwargs)):
                        raise ValueError('WCS does not match mask WCS')
                    else:
                        self._wcs_whitelist.add(new_wcs)
                except InconsistentAxisTypesError:
                    # The WCS itself is malformed; warn and stop checking it.
                    warnings.warn('Inconsistent axis type encountered; WCS is invalid and therefore will not be checked against other WCSes.', WCSWarning)
                    self._wcs_whitelist.add(new_wcs)

    def _include(self, data=None, wcs=None, view=()):
        # Boolean array of voxels to KEEP for the given view.
        result_mask = self._mask[view]
        return (result_mask if (self._mask_type == 'include') else np.logical_not(result_mask))

    def _exclude(self, data=None, wcs=None, view=()):
        # Boolean array of voxels to DROP for the given view.
        result_mask = self._mask[view]
        return (result_mask if (self._mask_type == 'exclude') else np.logical_not(result_mask))

    @property
    def shape(self):
        # BUG FIX: restored the stripped @property decorator — __init__
        # (`shape=self.shape`, the shape assertion) and __getitem__ all read
        # this as an attribute, which would otherwise yield a bound method.
        return self._shape

    def __getitem__(self, view):
        """Return a new mask for `view`, with a correspondingly sliced WCS."""
        return BooleanArrayMask(self._mask[view], wcs_utils.slice_wcs(self._wcs, view, shape=self.shape, drop_degenerate=True), shape=self._mask[view].shape)

    def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
        """Return a copy of this mask with its spectral axis converted to `unit`."""
        # NOTE(review): this docstring also makes the class-level
        # `__doc__ +=` below valid (it would raise TypeError on None).
        newwcs = self._get_new_wcs(unit, velocity_convention, rest_value)
        newmask = BooleanArrayMask(self._mask, newwcs, include=(self._mask_type == 'include'))
        return newmask
    with_spectral_unit.__doc__ += with_spectral_unit_docs
def build_nuscenes_dataloader(config, args, val=False, pinet=False):
    """Build train and validation DataLoaders over the nuScenes datasets.

    Returns:
        Tuple of ``(train_loader, train_data, val_loader, val_data)``.
    """
    train_data, val_data = build_nuscenes_datasets(config, args, val=val, pinet=pinet)
    # Training samples are drawn with replacement (RandomSampler(..., True)).
    train_loader = DataLoader(
        train_data,
        config.batch_size,
        sampler=RandomSampler(train_data, True),
        collate_fn=my_collate,
        num_workers=1,
    )
    # Validation iterates sequentially, one sample per batch.
    val_loader = DataLoader(val_data, 1, collate_fn=my_collate, num_workers=1)
    return (train_loader, train_data, val_loader, val_data)
# NOTE(review): the three lines below are scraped dataset-viewer UI text, not
# code; commented out so the module remains syntactically valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.