code stringlengths 281 23.7M |
|---|
class ConvNormal(nn.Module):
    """Single-scale conv unit that concatenates its input with the conv output."""

    def __init__(self, nIn, nOut, bottleneck, bnWidth):
        super(ConvNormal, self).__init__()
        # 'normal' mode: ConvBN keeps the spatial resolution unchanged.
        self.conv_normal = ConvBN(nIn, nOut, 'normal', bottleneck, bnWidth)

    def forward(self, x):
        # Accept either a single tensor or a list of tensors; only the first
        # entry is consumed here.
        inputs = x if isinstance(x, list) else [x]
        # Dense-style connectivity: the input feature maps are kept alongside
        # the newly computed ones.
        new_features = self.conv_normal(inputs[0])
        return torch.cat([inputs[0], new_features], dim=1)
def test_cswap_unitary():
    """CSwap with control=0 is the identity; with control=1 it swaps x with y."""
    cswap = CSwap(bitsize=4)
    # Control off: both 4-qubit target registers must be left untouched.
    identity = np.eye(2 ** (4 * 2))
    np.testing.assert_array_equal(identity, _set_ctrl_swap(0, cswap).tensor_contract())
    # Control on: equivalent to swapping each qubit of x with its partner in y.
    qubits = cirq.LineQubit.range(8)
    q_x, q_y = qubits[:4], qubits[4:]
    swap_circuit = cirq.Circuit(cirq.SWAP(x, y) for x, y in zip(q_x, q_y))
    expected = cirq.unitary(swap_circuit)
    np.testing.assert_array_equal(expected, _set_ctrl_swap(1, cswap).tensor_contract())
class Converter():
    """Converts a block-based project archive (zip with project.json) into a
    Python project directory, extracting image/sound assets along the way."""

    def __init__(self, path: str, dest: str, language: str='en', show_err: bool=False):
        self.path = path          # source archive path
        self.dest = dest          # destination root directory
        self.language = language  # language used by the code generator
        self.show_err = show_err  # re-raise conversion errors when True

    def delete_directory(self, path: Path):
        """Recursively remove *path* and everything below it (no-op if absent)."""
        if not path.exists():
            return
        for p in path.iterdir():
            if p.is_dir():
                self.delete_directory(p)
            else:
                p.unlink()
        path.rmdir()

    def write_json(self, path: Path, data):
        """Dump *data* as pretty-printed UTF-8 JSON at *path*."""
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=4, ensure_ascii=False)

    def convert(self):
        """Run the conversion, reporting (and optionally re-raising) failures."""
        try:
            self.__convert()
        except Exception as e:
            print(colored.stylize(f'Failed to convert {self.path}', colored.fg('red')))
            print(colored.stylize(e, colored.fg('red')))
            if self.show_err:
                # Bare raise preserves the original traceback.
                raise

    @staticmethod  # uses no instance state; decorator was missing (def had no self)
    def get_py_code(path: str, language: str='en'):
        """Return the generated Python source for the archive at *path*."""
        # Context manager guarantees the archive handle is closed
        # (previously the ZipFile was never closed).
        with zipfile.ZipFile(path, 'r') as archive:
            with archive.open('project.json') as f:
                project_name = to_filename(Path(path).stem)
                data = json.loads(f.read())
        project = get_intermediate(data, project_name)
        return get_python(project, language=language)

    def __convert(self):
        # Keep the archive open for the whole export: assets are read below.
        with zipfile.ZipFile(self.path, 'r') as archive:
            with archive.open('project.json') as f:
                project_name = to_filename(Path(self.path).stem)
                data = json.loads(f.read())
            dp = (Path(self.dest) / project_name)
            print(f'Exporting to: {dp}')
            # Start from a clean output directory.
            self.delete_directory(dp)
            dp.mkdir(parents=True)
            self.write_json((dp / 'project.json'), data)
            project = get_intermediate(data, project_name)
            self.write_json((dp / 'project_intermediate.json'), project)
            (dp / 'images').mkdir(parents=True)
            (dp / 'sounds').mkdir(parents=True)
            # Copy every costume/sound asset out of the archive under its
            # global name.
            for key in project['costumes']:
                c = project['costumes'][key]
                with archive.open(f"{key}.{c['extension']}") as infile:
                    with open(((dp / 'images') / f"{c['global_name']}.{c['extension']}"), 'wb') as outfile:
                        outfile.write(infile.read())
            for key in project['sounds']:
                s = project['sounds'][key]
                with archive.open(f"{key}.{s['extension']}") as infile:
                    with open(((dp / 'sounds') / f"{s['global_name']}.{s['extension']}"), 'wb') as outfile:
                        outfile.write(infile.read())
            with open((dp / f'{project_name}.py'), 'w', encoding='utf-8') as pyfile:
                pyfile.write(get_python(project, language=self.language))
def policy_nn(state, state_dim, action_dim, initializer):
    """Three-layer MLP policy head mapping a state to softmax action probabilities.

    Layer sizes: state_dim -> 512 -> 1024 -> action_dim.  Weight matrices get
    the caller-supplied initializer plus L2 regularization; biases start at 0.
    """
    l2 = tf.contrib.layers.l2_regularizer(0.01)
    zeros = tf.constant_initializer(0.0)
    # Layer 1 (note the historical capital-W variable name 'W1').
    w1 = tf.get_variable('W1', [state_dim, 512], initializer=initializer, regularizer=l2)
    b1 = tf.get_variable('b1', [512], initializer=zeros)
    hidden1 = tf.nn.relu(tf.matmul(state, w1) + b1)
    # Layer 2.
    w2 = tf.get_variable('w2', [512, 1024], initializer=initializer, regularizer=l2)
    b2 = tf.get_variable('b2', [1024], initializer=zeros)
    hidden2 = tf.nn.relu(tf.matmul(hidden1, w2) + b2)
    # Output layer, normalized to a probability distribution over actions.
    w3 = tf.get_variable('w3', [1024, action_dim], initializer=initializer, regularizer=l2)
    b3 = tf.get_variable('b3', [action_dim], initializer=zeros)
    return tf.nn.softmax(tf.matmul(hidden2, w3) + b3)
# The decorator was truncated to a bare ".parametrize(...)" (a syntax error);
# restored as the standard pytest indirect-parametrization marker.
@pytest.mark.parametrize('parser', [('scenario-outline',)], indirect=['parser'])
def test_parse_feature_with_scenario_outline(parser):
    """A Scenario Outline with two example rows expands into two concrete scenarios."""
    feature = parser.parse()
    assert len(feature.scenarios) == 1
    assert isinstance(feature.scenarios[0], ScenarioOutline)
    assert len(feature.scenarios[0].scenarios) == 2
    # First example row (numbers 1 and 2).
    assert feature.scenarios[0].scenarios[0].steps[0].sentence == 'Given I have the number 1'
    assert feature.scenarios[0].scenarios[0].steps[1].sentence == 'And I have the number 2'
    assert feature.scenarios[0].scenarios[0].steps[2].sentence == 'When I add them up'
    assert feature.scenarios[0].scenarios[0].steps[3].sentence == 'Then I expect the sum to be 3'
    # Second example row (numbers 4 and 5).
    assert feature.scenarios[0].scenarios[1].steps[0].sentence == 'Given I have the number 4'
    assert feature.scenarios[0].scenarios[1].steps[1].sentence == 'And I have the number 5'
    assert feature.scenarios[0].scenarios[1].steps[2].sentence == 'When I add them up'
    assert feature.scenarios[0].scenarios[1].steps[3].sentence == 'Then I expect the sum to be 9'
def get_backbone(p):
    """Build a ResNet-50 backbone according to p['model_kwargs']['pretraining'].

    Supported values: 'imagenet_supervised', 'random', and 'moco' (loads a
    checkpoint from p['model_kwargs']['pretraining_path']).  Raises
    NotImplementedError for anything else.
    """
    if (p['model_kwargs']['pretraining'] == 'imagenet_supervised'):
        print('Loaded model with ImageNet supervised initialization.')
        return resnet50(pretrained=True)
    elif (p['model_kwargs']['pretraining'] == 'random'):
        print('Loaded model with random initialization.')
        return resnet50(pretrained=False)
    elif (p['model_kwargs']['pretraining'] == 'moco'):
        print('Loading model with MoCo initialization')
        print('State dict found at {}'.format(p['model_kwargs']['pretraining_path']))
        model = resnet50(pretrained=False)
        checkpoint = torch.load(p['model_kwargs']['pretraining_path'], map_location='cpu')
        state_dict = checkpoint['state_dict']
        # Rename encoder weights to plain ResNet names; iterate over a
        # snapshot of the keys because the dict is mutated in the loop.
        for k in list(state_dict.keys()):
            if (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.fc'))):
                # Keys under module.encoder_q.* (presumably MoCo v1/v2
                # checkpoints — confirm against the checkpoint format).
                state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
            elif (k.startswith('module.base_encoder') and (not k.startswith('module.base_encoder.fc'))):
                # Keys under module.base_encoder.* (presumably MoCo v3).
                state_dict[k[len('module.base_encoder.'):]] = state_dict[k]
            # Every original (prefixed) key is deleted unconditionally, so
            # only the renamed copies survive.
            del state_dict[k]
        msg = model.load_state_dict(state_dict, strict=False)
        # Only the (randomly initialized) classifier head may be missing.
        assert (set(msg.missing_keys) == {'fc.weight', 'fc.bias'})
        return model
    else:
        raise NotImplementedError('Model with pretraining {} not implemented.'.format(p['model_kwargs']['pretraining']))
class KSMCollector(diamond.collector.Collector):
    """Collects Kernel Samepage Merging (KSM) statistics from sysfs."""

    def get_default_config_help(self):
        """Describe the collector-specific config keys."""
        config_help = super(KSMCollector, self).get_default_config_help()
        config_help.update({'ksm_path': 'location where KSM kernel data can be found'})
        return config_help

    def get_default_config(self):
        """Default config: publish under 'ksm', read from /sys/kernel/mm/ksm."""
        config = super(KSMCollector, self).get_default_config()
        config.update({'path': 'ksm', 'ksm_path': '/sys/kernel/mm/ksm'})
        return config

    def collect(self):
        """Publish one metric per readable file under ksm_path."""
        for item in glob.glob(os.path.join(self.config['ksm_path'], '*')):
            if os.access(item, os.R_OK):
                # Context manager closes the handle even if readline() raises
                # something unexpected (the old explicit close() leaked the
                # file descriptor on any exception other than ValueError).
                with open(item) as filehandle:
                    try:
                        self.publish(os.path.basename(item),
                                     float(filehandle.readline().rstrip()))
                    except ValueError:
                        # Non-numeric files under ksm_path are skipped.
                        pass
def _retrieve_checkpoint_dirpaths(dirpath: str) -> List[str]:
    """Return checkpoint directories under *dirpath*, sorted by (epoch, step).

    Only directories whose name matches ``epoch_<E>_step_<S>`` are returned.
    """

    def _epoch_step(path: str) -> Tuple[(int, int)]:
        # Basenames look like "epoch_<E>_step_<S>"; sort numerically.
        parts = os.path.basename(path).split('_')
        return (int(parts[1]), int(parts[3]))

    fs = get_filesystem(dirpath)
    listing = fs.ls(dirpath, detail=True)
    dirs = [entry['name'] for entry in listing if entry['type'] == 'directory']
    pattern = re.compile(r'epoch_(\d+)_step_(\d+)')
    ckpt_dirpaths = [d for d in dirs if pattern.search(d)]
    return sorted(ckpt_dirpaths, key=_epoch_step)
def run(args: Union[(str, List[str])], *, log_run_to_stderr: bool=True, abbreviate_non_option_arguments: bool=False, check: bool=True, text: bool=True, **subprocess_run_kwargs) -> subprocess.CompletedProcess:
    """Wrapper around subprocess.run that logs the command to stderr first.

    check/text default to True (unlike subprocess.run); any extra keyword
    arguments are forwarded unchanged.
    """
    subprocess_run_kwargs['check'] = check
    subprocess_run_kwargs['text'] = text
    if log_run_to_stderr:
        # Normalize to a tuple purely for display.
        if isinstance(args, str):
            cmd_desc: Tuple[(str, ...)] = (args,)
        else:
            cmd_desc = tuple(args)
        if abbreviate_non_option_arguments:
            cmd_desc = abbreviate_command_arguments_after_switches(cmd_desc)
        print('run:', cmd_desc, file=sys.stderr)
    return subprocess.run(args, **subprocess_run_kwargs)
class VariableDeviceChooser(object):
    """Chooses a device for variables, round-robining over `num_tasks` PS tasks.

    With num_tasks == 0 no job/task is assigned and only the device type,
    index, and replica are used.
    """

    def __init__(self, num_tasks=0, job_name='ps', device_type='CPU', device_index=0, replica=None):
        self._job_name = job_name
        self._device_type = device_type
        self._device_index = device_index
        self._replica = replica
        self._num_tasks = num_tasks
        # Next task in the round-robin cycle.
        self._next_task_id = 0

    def __call__(self, op):
        spec = tf_device.DeviceSpec(
            replica=self._replica,
            device_type=self._device_type,
            device_index=self._device_index,
        )
        if self._num_tasks > 0:
            # Pin this variable to the current task, then advance the cycle.
            spec.job = self._job_name
            spec.task = self._next_task_id
            self._next_task_id = (self._next_task_id + 1) % self._num_tasks
        return spec.to_string()
# The decorator was truncated to a bare ".parametrize(...)" (a syntax error);
# restored as the standard pytest parametrize marker.
@pytest.mark.parametrize('metadata_version', [None, '0.1', '0.2'])
def test_uninstall_with_missing_interpreter(pipx_temp_env, metadata_version):
    """Uninstall must still remove the app binary after the venv's interpreter is deleted."""
    executable_path = (constants.LOCAL_BIN_DIR / app_name('pycowsay'))
    # run_pipx_cli returns a falsy value on success.
    assert not run_pipx_cli(['install', 'pycowsay'])
    assert executable_path.exists()
    mock_legacy_venv('pycowsay', metadata_version=metadata_version)
    remove_venv_interpreter('pycowsay')
    assert not run_pipx_cli(['uninstall', 'pycowsay'])
    # On Windows with no metadata the binary may legitimately remain
    # (presumably pipx cannot resolve it without metadata — see pipx tests).
    if not (sys.platform.startswith('win') and (metadata_version is None)):
        assert not file_or_symlink(executable_path)
class TestMetadataConstructionAndProperties(unittest.TestCase):
    """Tests for Metadata construction and its basic properties
    (id_count, column_count, id_header, ids, columns)."""

    def assertEqualColumns(self, obs_columns, exp):
        # Helper: compare observed columns against a list of (name, type) pairs.
        obs = [(name, props.type) for (name, props) in obs_columns.items()]
        self.assertEqual(obs, exp)

    def test_minimal(self):
        # Smallest valid metadata: one ID, zero columns.
        md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
        self.assertEqual(md.id_count, 1)
        self.assertEqual(md.column_count, 0)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('a',))
        self.assertEqualColumns(md.columns, [])

    def test_single_id(self):
        # One ID with a mix of numeric and categorical columns.
        index = pd.Index(['id1'], name='id')
        df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']}, index=index)
        md = Metadata(df)
        self.assertEqual(md.id_count, 1)
        self.assertEqual(md.column_count, 3)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('id1',))
        self.assertEqualColumns(md.columns, [('col1', 'numeric'), ('col2', 'categorical'), ('col3', 'categorical')])

    def test_no_columns(self):
        # Multiple IDs, no columns at all.
        index = pd.Index(['id1', 'id2', 'foo'], name='id')
        df = pd.DataFrame({}, index=index)
        md = Metadata(df)
        self.assertEqual(md.id_count, 3)
        self.assertEqual(md.column_count, 0)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
        self.assertEqualColumns(md.columns, [])

    def test_single_column(self):
        index = pd.Index(['id1', 'a', 'my-id'], name='id')
        df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
        md = Metadata(df)
        self.assertEqual(md.id_count, 3)
        self.assertEqual(md.column_count, 1)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
        self.assertEqualColumns(md.columns, [('column', 'categorical')])

    def test_retains_column_order(self):
        # Column order must follow the DataFrame, not alphabetical order.
        index = pd.Index(['id1', 'id2', 'id3'], name='id')
        columns = ['z', 'a', 'ch']
        data = [[1.0, 'a', 'foo'], [2.0, 'b', 'bar'], [3.0, 'c', '42']]
        df = pd.DataFrame(data, index=index, columns=columns)
        md = Metadata(df)
        self.assertEqual(md.id_count, 3)
        self.assertEqual(md.column_count, 3)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
        self.assertEqualColumns(md.columns, [('z', 'numeric'), ('a', 'categorical'), ('ch', 'categorical')])

    def test_supported_id_headers(self):
        # Some header names match case-insensitively, others only exactly.
        case_insensitive = {'id', 'sampleid', 'sample id', 'sample-id', 'featureid', 'feature id', 'feature-id'}
        exact_match = {'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'}
        headers = set()
        for header in case_insensitive:
            headers.add(header)
            headers.add(header.upper())
            headers.add(header.title())
        for header in exact_match:
            headers.add(header)
        count = 0
        for header in headers:
            index = pd.Index(['id1', 'id2'], name=header)
            df = pd.DataFrame({'column': ['foo', 'bar']}, index=index)
            md = Metadata(df)
            self.assertEqual(md.id_header, header)
            count += 1
        # Sanity check that the generated header set has the expected size.
        self.assertEqual(count, 26)

    def test_recommended_ids(self):
        index = pd.Index(['c6ca034a-223f-40b4-a0e0-a5ea', 'My.ID'], name='id')
        df = pd.DataFrame({'col1': ['foo', 'bar']}, index=index)
        md = Metadata(df)
        self.assertEqual(md.id_count, 2)
        self.assertEqual(md.column_count, 1)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('c6ca034a-223f-40b4-a0e0-a5ea', 'My.ID'))
        self.assertEqualColumns(md.columns, [('col1', 'categorical')])

    def test_non_standard_characters(self):
        # IDs/columns containing quotes, hashes, brackets, and whitespace
        # control characters must be preserved verbatim.
        # NOTE(review): the last data row has only 3 values while the others
        # have 5 — confirm this is intentional for the DataFrame constructor.
        index = pd.Index(['id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'], name='id')
        columns = ['', 'col(#2)', "#col'3", '"<col_4>"', 'col\t \r\n5']
        data = [['oo', '(foo)', '#f o #o', 'fo\ro', np.nan], ["''2''", 'b#r', 'ba\nr', np.nan, np.nan], ['b"ar', 'c\td', '4\r\n2', np.nan, np.nan], ['b__a_z', '<42>', '>42', np.nan, np.nan], ['baz', np.nan, '42']]
        df = pd.DataFrame(data, index=index, columns=columns)
        md = Metadata(df)
        self.assertEqual(md.id_count, 5)
        self.assertEqual(md.column_count, 5)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
        self.assertEqualColumns(md.columns, [('', 'categorical'), ('col(#2)', 'categorical'), ("#col'3", 'categorical'), ('"<col_4>"', 'categorical'), ('col\t \r\n5', 'numeric')])

    def test_missing_data(self):
        # NaN-only object columns stay categorical; NaN-only float columns are
        # numeric.  ID/column names that look like missing markers are kept.
        index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
        df = pd.DataFrame(collections.OrderedDict([('col1', [1.0, np.nan, np.nan, np.nan]), ('NA', [np.nan, np.nan, np.nan, np.nan]), ('col3', ['null', 'N/A', np.nan, 'NA']), ('col4', np.array([np.nan, np.nan, np.nan, np.nan], dtype=object))]), index=index)
        md = Metadata(df)
        self.assertEqual(md.id_count, 4)
        self.assertEqual(md.column_count, 4)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
        self.assertEqualColumns(md.columns, [('col1', 'numeric'), ('NA', 'numeric'), ('col3', 'categorical'), ('col4', 'categorical')])

    def test_missing_data_insdc(self):
        # default_missing_scheme='INSDC:missing' converts INSDC missing terms
        # (e.g. 'missing', 'not collected', 'restricted access') to NaN.
        index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
        df = pd.DataFrame(collections.OrderedDict([('col1', [1.0, np.nan, 'missing', np.nan]), ('col3', ['null', 'N/A', 'not collected', 'NA']), ('col4', np.array([np.nan, np.nan, 'restricted access', np.nan], dtype=object))]), index=index)
        md = Metadata(df, default_missing_scheme='INSDC:missing')
        self.assertEqual(md.id_count, 4)
        self.assertEqual(md.column_count, 3)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
        self.assertEqualColumns(md.columns, [('col1', 'numeric'), ('col3', 'categorical'), ('col4', 'categorical')])
        pd.testing.assert_frame_equal(md.to_dataframe(), pd.DataFrame({'col1': [1.0, np.nan, np.nan, np.nan], 'col3': ['null', 'N/A', np.nan, 'NA'], 'col4': np.array([np.nan, np.nan, np.nan, np.nan], dtype=object)}, index=index))

    def test_missing_data_insdc_column_missing(self):
        # Same conversion, but requested per-column instead of via the default.
        index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
        df = pd.DataFrame(collections.OrderedDict([('col1', [1.0, np.nan, 'missing', np.nan]), ('col3', ['null', 'N/A', 'not collected', 'NA']), ('col4', np.array([np.nan, np.nan, 'restricted access', np.nan], dtype=object))]), index=index)
        md = Metadata(df, column_missing_schemes={'col1': 'INSDC:missing', 'col3': 'INSDC:missing', 'col4': 'INSDC:missing'})
        self.assertEqual(md.id_count, 4)
        self.assertEqual(md.column_count, 3)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
        self.assertEqualColumns(md.columns, [('col1', 'numeric'), ('col3', 'categorical'), ('col4', 'categorical')])
        pd.testing.assert_frame_equal(md.to_dataframe(), pd.DataFrame({'col1': [1.0, np.nan, np.nan, np.nan], 'col3': ['null', 'N/A', np.nan, 'NA'], 'col4': np.array([np.nan, np.nan, np.nan, np.nan], dtype=object)}, index=index))

    def test_missing_data_default_override(self):
        # Per-column schemes take precedence over the default scheme.
        index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
        df = pd.DataFrame(collections.OrderedDict([('col1', [1.0, np.nan, 'missing', np.nan]), ('col3', ['null', 'N/A', 'not collected', 'NA']), ('col4', np.array([np.nan, np.nan, 'restricted access', np.nan], dtype=object))]), index=index)
        md = Metadata(df, column_missing_schemes={'col1': 'INSDC:missing', 'col3': 'INSDC:missing', 'col4': 'INSDC:missing'}, default_missing_scheme='no-missing')
        self.assertEqual(md.id_count, 4)
        self.assertEqual(md.column_count, 3)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
        self.assertEqualColumns(md.columns, [('col1', 'numeric'), ('col3', 'categorical'), ('col4', 'categorical')])
        pd.testing.assert_frame_equal(md.to_dataframe(), pd.DataFrame({'col1': [1.0, np.nan, np.nan, np.nan], 'col3': ['null', 'N/A', np.nan, 'NA'], 'col4': np.array([np.nan, np.nan, np.nan, np.nan], dtype=object)}, index=index))

    def test_does_not_cast_ids_or_column_names(self):
        # Numeric-looking IDs and column names must remain strings.
        index = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object, name='id')
        columns = ['42.0', '1000', '-4.2']
        data = [[2.0, 'b', 2.5], [1.0, 'b', 4.2], [3.0, 'c', (- 9.999)]]
        df = pd.DataFrame(data, index=index, columns=columns)
        md = Metadata(df)
        self.assertEqual(md.id_count, 3)
        self.assertEqual(md.column_count, 3)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('0.000001', '0.004000', '0.000000'))
        self.assertEqualColumns(md.columns, [('42.0', 'numeric'), ('1000', 'categorical'), ('-4.2', 'numeric')])

    def test_mixed_column_types(self):
        # int and mixed int/float columns are numeric; numeric-looking
        # strings stay categorical.
        md = Metadata(pd.DataFrame({'col0': [1.0, 2.0, 3.0], 'col1': ['a', 'b', 'c'], 'col2': ['foo', 'bar', '42'], 'col3': ['1.0', '2.5', '-4.002'], 'col4': [1, 2, 3], 'col5': [1, 2, 3.5], 'col6': [0.0001, (- 0.0002), np.nan], 'col7': ['cat', np.nan, 'dog'], 'col8': ['a', 'a', 'a'], 'col9': [0, 0, 0]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        self.assertEqual(md.id_count, 3)
        self.assertEqual(md.column_count, 10)
        self.assertEqual(md.id_header, 'id')
        self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
        self.assertEqualColumns(md.columns, [('col0', 'numeric'), ('col1', 'categorical'), ('col2', 'categorical'), ('col3', 'categorical'), ('col4', 'numeric'), ('col5', 'numeric'), ('col6', 'numeric'), ('col7', 'categorical'), ('col8', 'categorical'), ('col9', 'numeric')])

    def test_case_insensitive_duplicate_ids(self):
        # IDs differing only in case are both kept as-is.
        index = pd.Index(['a', 'b', 'A'], name='id')
        df = pd.DataFrame({'column': ['1', '2', '3']}, index=index)
        metadata = Metadata(df)
        self.assertEqual(metadata.ids, ('a', 'b', 'A'))

    def test_case_insensitive_duplicate_column_names(self):
        # Column names differing only in case are both kept as-is.
        index = pd.Index(['a', 'b', 'c'], name='id')
        df = pd.DataFrame({'column': ['1', '2', '3'], 'Column': ['4', '5', '6']}, index=index)
        metadata = Metadata(df)
        self.assertEqual(set(metadata.columns), {'column', 'Column'})

    def test_categorical_column_leading_trailing_whitespace_value(self):
        # Values are whitespace-stripped, so these two frames compare equal.
        md1 = Metadata(pd.DataFrame({'col1': [1, 2, 3], 'col2': ['foo', ' bar ', 'baz']}, index=pd.Index(['a', 'b', 'c'], name='id')))
        md2 = Metadata(pd.DataFrame({'col1': [1, 2, 3], 'col2': ['foo', 'bar', 'baz']}, index=pd.Index(['a', 'b', 'c'], name='id')))
        self.assertEqual(md1, md2)

    def test_leading_trailing_whitespace_id(self):
        # IDs are whitespace-stripped as well.
        md1 = Metadata(pd.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, index=pd.Index(['a', ' b ', 'c'], name='id')))
        md2 = Metadata(pd.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
        self.assertEqual(md1, md2)

    def test_leading_trailing_whitespace_column_name(self):
        # ... and so are column names.
        md1 = Metadata(pd.DataFrame({'col1': [1, 2, 3], ' col2 ': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
        md2 = Metadata(pd.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
        self.assertEqual(md1, md2)
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for a batch of questions.

    Returns one string per question: the retrieved document titles (with any
    surrounding double quotes removed) joined by tabs.
    """

    def strip_title(title):
        # Drop a surrounding pair of double quotes, if present.
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    tokenizer = rag_model.retriever.question_encoder_tokenizer
    encoded = tokenizer.batch_encode_plus(questions, return_tensors='pt', padding=True, truncation=True)
    retriever_input_ids = encoded['input_ids'].to(args.device)
    # Encode the questions, then query the retriever with CPU float32
    # embeddings (the index expects numpy input).
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    pooled = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids,
        pooled.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors='pt',
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    return ['\t'.join(strip_title(title) for title in docs['title']) for docs in all_docs]
class Keithley2260B(Instrument):
    """Keithley 2260B DC power supply, controlled over SCPI via *adapter*."""

    def __init__(self, adapter, name='Keithley 2260B DC Power Supply', read_termination='\n', **kwargs):
        super().__init__(adapter, name, read_termination=read_termination, **kwargs)

    output_enabled = Instrument.control('OUTPut?', 'OUTPut %d', 'A boolean property that controls whether the source is enabled, takes\n values True or False.', validator=strict_discrete_set, values={True: 1, False: 0}, map_values=True)
    current_limit = Instrument.control(':SOUR:CURR?', ':SOUR:CURR %g', 'A floating point property that controls the source current\n in amps. This is not checked against the allowed range. Depending on\n whether the instrument is in constant current or constant voltage mode,\n this might differ from the actual current achieved.')
    voltage_setpoint = Instrument.control(':SOUR:VOLT?', ':SOUR:VOLT %g', 'A floating point property that controls the source voltage\n in volts. This is not checked against the allowed range. Depending on\n whether the instrument is in constant current or constant voltage mode,\n this might differ from the actual voltage achieved.')
    power = Instrument.measurement(':MEAS:POW?', 'Reads the power (in Watt) the dc power supply is putting out.\n ')
    voltage = Instrument.measurement(':MEAS:VOLT?', 'Reads the voltage (in Volt) the dc power supply is putting out.\n ')
    current = Instrument.measurement(':MEAS:CURR?', 'Reads the current (in Ampere) the dc power supply is putting out.\n ')
    applied = Instrument.control(':APPly?', ':APPly %g,%g', 'Simultaneous control of voltage (volts) and current (amps).\n Values need to be supplied as tuple of (voltage, current). Depending on\n whether the instrument is in constant current or constant voltage mode,\n the values achieved by the instrument will differ from the ones set.\n ')

    # Decorators restored: the two bare `def enabled` definitions silently
    # overwrote each other, and `check_errors` reads `self.error` as an
    # attribute, which only works if `error` is a property.
    @property
    def enabled(self):
        """Deprecated alias for :attr:`output_enabled`."""
        log.warning('Deprecated property name "enabled", use the identical "output_enabled", instead.', FutureWarning)
        return self.output_enabled

    @enabled.setter
    def enabled(self, value):
        log.warning('Deprecated property name "enabled", use the identical "output_enabled", instead.', FutureWarning)
        self.output_enabled = value

    @property
    def error(self):
        """Return the next (code, message) pair from the instrument error queue."""
        err = self.values(':system:error?')
        if (len(err) < 2):
            err = self.read()
        code = err[0]
        message = err[1].replace('"', '')
        return (code, message)

    def check_errors(self):
        """Log all queued instrument errors until the queue reports code 0."""
        (code, message) = self.error
        while (code != 0):
            # NOTE(review): t is reset every iteration, so this timeout only
            # covers a single error read — confirm intended behavior.
            t = time.time()
            log.info(('Keithley 2260B reported error: %d, %s' % (code, message)))
            (code, message) = self.error
            if ((time.time() - t) > 10):
                log.warning('Timed out for Keithley 2260B error retrieval.')

    def shutdown(self):
        """Disable the output before delegating to the base-class shutdown."""
        self.output_enabled = False
        super().shutdown()
_unraisablehook()
def test_async_function_implemented_in_C() -> None:
    """The C-implemented `__anext__` coroutine must not be KI-protected.

    Exercised two ways: run directly as the main task via _core.run, and
    spawned into a nursery via start_soon.
    """
    async def agen_fn(record: list[str]) -> AsyncIterator[None]:
        # The generator body itself must execute without KI protection.
        assert (not _core.currently_ki_protected())
        record.append('the generator ran')
        (yield)

    run_record: list[str] = []
    agen = agen_fn(run_record)
    # __anext__ is implemented in C; run it as the main task.
    _core.run(agen.__anext__)
    assert (run_record == ['the generator ran'])

    async def main() -> None:
        start_soon_record: list[str] = []
        agen = agen_fn(start_soon_record)
        async with _core.open_nursery() as nursery:
            # Same check when the C coroutine is started via the nursery.
            nursery.start_soon(agen.__anext__)
        assert (start_soon_record == ['the generator ran'])

    _core.run(main)
class WebAppData(TelegramObject):
    """Data sent from a Web App back to the bot.

    Two instances are considered equal when their ``data`` and
    ``button_text`` are equal (via ``_id_attrs``).
    """
    __slots__ = ('data', 'button_text')

    def __init__(self, data: str, button_text: str, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        # Raw string payload sent by the Web App.
        self.data: str = data
        # Presumably the text of the button the Web App was opened from —
        # confirm against the Bot API docs.
        self.button_text: str = button_text
        # _id_attrs drives TelegramObject equality/hashing.
        self._id_attrs = (self.data, self.button_text)
        # Freeze last: the instance becomes immutable after this call.
        self._freeze()
def _build_proj_equation(free_dims, bound_dims, output_dims):
    """Build an einsum equation for a dense projection.

    Returns (equation, bias_axes, output_rank).  The equation contracts the
    bound dims of the input against the kernel, preserving the free dims:
    ``free+bound , bound+new -> free+new``.
    """
    offset = 0
    # Free dimensions appear in both the input and the output.
    free = ''.join(_CHR_IDX[i + offset] for i in range(free_dims))
    offset += free_dims
    # Bound (contracted) dimensions appear in the input and the kernel.
    bound = ''.join(_CHR_IDX[i + offset] for i in range(bound_dims))
    offset += bound_dims
    # New output dimensions appear in the kernel, the output, and the bias.
    new = ''.join(_CHR_IDX[i + offset] for i in range(output_dims))
    input_str = free + bound
    kernel_str = bound + new
    output_str = free + new
    bias_axes = new
    equation = '%s,%s->%s' % (input_str, kernel_str, output_str)
    return (equation, bias_axes, len(output_str))
def process_one(data):
    """Compute cosine-similarity weights for one (utterances, reps, summary) sample.

    For each representation from the last down to the second, compares it
    against all earlier representations; returns the inputs with the
    resulting weight matrix in between.
    """
    utterances = data[0]
    reps = data[1]
    summary = data[2]
    # Query index runs len(reps)-1, len(reps)-2, ..., 1; keys are all
    # representations strictly before the query.
    weight_matrix = [cosine_sim(reps[idx], reps[:idx])
                     for idx in range(len(reps) - 1, 0, -1)]
    return (utterances, weight_matrix, summary)
class TestSQLiteTLE(unittest.TestCase):
    """Tests for the SQLiteTLE database wrapper: schema creation, record
    updates, and TLE text-file export.

    Relies on module-level ``line1``/``line2`` TLE strings (defined elsewhere
    in the file).
    """

    def setUp(self):
        # Fresh temp directory + database per test; one known platform (ISS).
        from pyorbital.tlefile import SQLiteTLE
        from pyorbital.tlefile import Tle
        from tempfile import TemporaryDirectory
        self.temp_dir = TemporaryDirectory()
        self.db_fname = os.path.join(self.temp_dir.name, 'tle.db')
        self.platforms = {25544: 'ISS'}
        self.writer_config = {'output_dir': os.path.join(self.temp_dir.name, 'tle_dir'), 'filename_pattern': 'tle_%Y%m%d_%H%M%S.%f.txt', 'write_name': True, 'write_always': False}
        self.db = SQLiteTLE(self.db_fname, self.platforms, self.writer_config)
        self.tle = Tle('ISS', line1=line1, line2=line2)

    def tearDown(self):
        # Cleanup can fail on some platforms (e.g. Windows file locking).
        with suppress(PermissionError, NotADirectoryError):
            self.temp_dir.cleanup()

    def test_init(self):
        """Database creation makes the platform_names table with all schema columns."""
        from pyorbital.tlefile import table_exists, PLATFORM_NAMES_TABLE
        # Parse the expected column names out of the schema string.
        columns = [col.strip() for col in PLATFORM_NAMES_TABLE.strip('()').split(',')]
        num_columns = len(columns)
        self.assertTrue(os.path.exists(self.db_fname))
        self.assertTrue(table_exists(self.db.db, 'platform_names'))
        res = self.db.db.execute('select * from platform_names')
        names = [description[0] for description in res.description]
        self.assertEqual(len(names), num_columns)
        for col in columns:
            self.assertTrue((col.split(' ')[0] in names))

    def test_update_db(self):
        """update_db only writes known platforms, and does not duplicate entries."""
        from pyorbital.tlefile import table_exists, SATID_TABLE, ISO_TIME_FORMAT
        columns = [col.strip() for col in SATID_TABLE.replace("'{}' (", '').strip(')').split(',')]
        satid = str(list(self.platforms.keys())[0])
        # Unknown platform: nothing is written.
        self.db.platforms = {}
        self.db.update_db(self.tle, 'foo')
        self.assertFalse(table_exists(self.db.db, satid))
        self.assertFalse(self.db.updated)
        # Known platform: a per-satellite table is created and populated.
        self.db.platforms = self.platforms
        self.db.update_db(self.tle, 'foo')
        self.assertTrue(table_exists(self.db.db, satid))
        self.assertTrue(self.db.updated)
        res = self.db.db.execute(("select * from '%s'" % satid))
        names = [description[0] for description in res.description]
        for col in columns:
            self.assertTrue((col.split(' ')[0] in names))
        data = res.fetchall()
        self.assertEqual(len(data), 1)
        # Row layout: epoch, TLE text, insertion time, source tag.
        self.assertEqual(data[0][0], '2008-09-20T12:25:40.104192')
        self.assertEqual(data[0][1], '\n'.join((line1, line2)))
        date_added = datetime.datetime.strptime(data[0][2], ISO_TIME_FORMAT)
        now = datetime.datetime.utcnow()
        self.assertTrue(((now - date_added).total_seconds() < 1.0))
        self.assertTrue((data[0][3] == 'foo'))
        # Re-adding the same TLE (different source) must not change the row.
        self.db.update_db(self.tle, 'bar')
        res = self.db.db.execute(("select * from '%s'" % satid))
        data = res.fetchall()
        self.assertEqual(len(data), 1)
        date_added2 = datetime.datetime.strptime(data[0][2], ISO_TIME_FORMAT)
        self.assertEqual(date_added, date_added2)
        self.assertTrue((data[0][3] == 'foo'))

    def test_write_tle_txt(self):
        """TLE text files are written only when updated (or write_always is set)."""
        import glob
        tle_dir = self.writer_config['output_dir']
        self.db.update_db(self.tle, 'foo')
        # Not marked updated: no output directory/file is created.
        self.db.updated = False
        self.db.write_tle_txt()
        self.assertFalse(os.path.exists(tle_dir))
        # Marked updated: exactly one file, timestamp pattern fully expanded.
        self.db.updated = True
        self.db.write_tle_txt()
        self.assertTrue(os.path.exists(tle_dir))
        files = glob.glob(os.path.join(tle_dir, 'tle_*txt'))
        self.assertEqual(len(files), 1)
        self.assertTrue(('%' not in files[0]))
        # write_name=True: platform name header plus the two TLE lines.
        with open(files[0], 'r') as fid:
            data = fid.read().split('\n')
        self.assertEqual(len(data), 3)
        self.assertTrue(('ISS' in data[0]))
        self.assertEqual(data[1], line1)
        self.assertEqual(data[2], line2)
        # Not updated again: no additional file.
        self.db.updated = False
        self.db.write_tle_txt()
        files = glob.glob(os.path.join(tle_dir, 'tle_*txt'))
        self.assertEqual(len(files), 1)
        # write_always=True forces a write even without updates; sleep ensures
        # a distinct timestamped filename.
        self.db.writer_config['write_always'] = True
        self.db.writer_config['write_name'] = False
        time.sleep(2)
        self.db.write_tle_txt()
        files = sorted(glob.glob(os.path.join(tle_dir, 'tle_*txt')))
        self.assertEqual(len(files), 2)
        # write_name=False: only the two TLE lines, no name header.
        with open(files[1], 'r') as fid:
            data = fid.read().split('\n')
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0], line1)
        self.assertEqual(data[1], line2)
class UnZip(BaseExtractor):
    """ZIP extractor plugin built on the stdlib zipfile module."""
    __name__ = 'UnZip'
    __type__ = 'extractor'
    __version__ = '1.28'
    __status__ = 'stable'
    __description__ = 'ZIP extractor plugin'
    __license__ = 'GPLv3'
    __authors__ = [('Walter Purcaro', '')]

    VERSION = '{}.{}.{}'.format(sys.version_info[0], sys.version_info[1], sys.version_info[2])

    # @classmethod decorators restored: these take `cls` and are meant to be
    # callable on the class itself (they had lost their decorators).
    @classmethod
    def archivetype(cls, filename):
        """Return 'zip' if *filename* is a readable ZIP archive, else None."""
        try:
            return ('zip' if cls.isarchive(filename) else None)
        except IOError:
            return None

    @classmethod
    def isarchive(cls, filename):
        """Check extension, then the PK local-file-header magic, then zipfile."""
        if (os.path.splitext(filename)[1] != '.zip'):
            return False
        try:
            with open(filename, 'rb') as f:
                data = f.read(4)
                if (data != b'PK\x03\x04'):
                    return False
                else:
                    return zipfile.is_zipfile(f)
        except IOError:
            return False

    @classmethod
    def find(cls):
        """Available on Python >= 2.6 (zipfile context-manager support)."""
        return (sys.version_info[:2] >= (2, 6))

    def list(self, password=None):
        """Record and return destination paths of all non-directory members."""
        with zipfile.ZipFile(self.filename, 'r') as z:
            z.setpassword(password)
            self.files = [os.path.join(self.dest, _f) for _f in z.namelist() if (_f[(- 1)] != os.path.sep)]
        return self.files

    def verify(self, password=None):
        """Raise CRCError/PasswordError/ArchiveError if the archive is unusable."""
        try:
            with zipfile.ZipFile(self.filename, 'r') as z:
                z.setpassword(password)
                badfile = z.testzip()
                if (badfile is not None):
                    raise CRCError(badfile)
        except (zipfile.BadZipfile, zipfile.LargeZipFile) as exc:
            raise ArchiveError(exc)
        except RuntimeError as exc:
            if (('encrypted' in exc.args[0]) or ('Bad password' in exc.args[0])):
                raise PasswordError(exc)
            else:
                raise CRCError(exc)

    def extract(self, password=None):
        """Extract all non-excluded members and return their destination paths."""
        self.verify(password)
        try:
            with zipfile.ZipFile(self.filename, 'r') as z:
                z.setpassword(password)
                # Materialize the member list: the previous generator was
                # exhausted by extractall(), so the later `_f in members`
                # membership test was always False and self.files came back
                # empty.
                members = [member for member in z.namelist()
                           if not any(fnmatch.fnmatch(member, exclusion)
                                      for exclusion in self.excludefiles)]
                z.extractall(self.dest, members=members)
                extracted = set(members)
                self.files = [os.path.join(self.dest, _f) for _f in z.namelist()
                              if ((_f[(- 1)] != os.path.sep) and (_f in extracted))]
                return self.files
        except RuntimeError as exc:
            raise ArchiveError(exc)
class AutoapiClassDocumenter(AutoapiDocumenter, autodoc.ClassDocumenter, _AutoapiDocstringSignatureMixin):
    """Autodoc documenter for AutoAPI PythonClass objects."""
    objtype = 'apiclass'
    directivetype = 'class'
    doc_as_attr = False
    # Outranks the stock ClassDocumenter so AutoAPI objects pick this one.
    priority = ((autodoc.ClassDocumenter.priority * 100) + 100)

    # Restored @classmethod: sphinx autodoc invokes can_document_member on
    # the documenter class itself, not on an instance.
    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        return isinstance(member, PythonClass)

    def format_args(self, **kwargs):
        """Render the signature from the stored argument string."""
        return (('(' + self.object.args) + ')')

    def add_directive_header(self, sig):
        """Emit the directive header plus an inheritance note when requested."""
        autodoc.Documenter.add_directive_header(self, sig)
        if self.options.show_inheritance:
            sourcename = self.get_sourcename()
            self.add_line('', sourcename)
            if self.object.bases:
                bases = ', '.join((f':class:`{base}`' for base in self.object.bases))
                self.add_line(f' Bases: {bases}', sourcename)
class SegmentationDataGenerator():
    """Keras-style batch generator for image/mask segmentation pairs.

    NOTE(review): all file locations (images_dir, masks_dir, test_folder,
    pseudolabels_dir, resize_size) come from a module-level ``args`` object —
    confirm it is defined before any generator method is called.
    """

    def __init__(self, input_shape=(128, 128), batch_size=32, preprocess=None, augs=None):
        self.input_shape = input_shape  # (min_height, min_width) after padding
        self.batch_size = batch_size
        self.preprocess = preprocess    # per-image normalization callable
        self.augs = augs                # optional albumentations-style pipeline

    def _read_image_train(self, id):
        """Load, augment, resize, pad, and normalize one training pair.

        Falls back to the test folder + pseudo-labels when the id is not in
        the labeled training set.
        """
        if os.path.isfile(os.path.join(args.images_dir, '{}.png'.format(id))):
            img = cv2.imread(os.path.join(args.images_dir, '{}.png'.format(id)), cv2.IMREAD_COLOR)
            mask = cv2.imread(os.path.join(args.masks_dir, '{}.png'.format(id)), cv2.IMREAD_GRAYSCALE)
        else:
            # Pseudo-labeled sample: image from test folder, mask generated.
            img = cv2.imread(os.path.join(args.test_folder, '{}.png'.format(id)), cv2.IMREAD_COLOR)
            mask = cv2.imread(os.path.join(args.pseudolabels_dir, '{}.png'.format(id)), cv2.IMREAD_GRAYSCALE)
        img = np.array(img, np.float32)
        if self.augs:
            data = {'image': img, 'mask': mask}
            augmented = self.augs(**data)
            (img, mask) = (augmented['image'], augmented['mask'])
        img = cv2.resize(img, (args.resize_size, args.resize_size))
        mask = cv2.resize(mask, (args.resize_size, args.resize_size))
        # Pad (reflect; border_mode=4) up to the model's expected input shape.
        augmentation = PadIfNeeded(min_height=self.input_shape[0], min_width=self.input_shape[1], p=1.0, border_mode=4)
        data = {'image': img, 'mask': mask}
        augmented = augmentation(**data)
        (img, mask) = (augmented['image'], augmented['mask'])
        img = np.array(img, np.float32)
        img = self.preprocess(img)
        # Masks are 8-bit images; scale to [0, 1] floats.
        mask = np.array((mask / 255.0), np.float32)
        if (len(mask.shape) < 3):
            # Ensure a trailing channel axis for the loss function.
            mask = np.expand_dims(mask, axis=2)
        return (img, mask)

    def _read_image_valid(self, id):
        """Load, resize, pad, and normalize one validation pair (no augmentation)."""
        img = cv2.imread(os.path.join(args.images_dir, '{}.png'.format(id)), cv2.IMREAD_COLOR)
        mask = cv2.imread(os.path.join(args.masks_dir, '{}.png'.format(id)), cv2.IMREAD_GRAYSCALE)
        img = np.array(img, np.float32)
        img = cv2.resize(img, (args.resize_size, args.resize_size))
        mask = cv2.resize(mask, (args.resize_size, args.resize_size))
        augmentation = PadIfNeeded(min_height=self.input_shape[0], min_width=self.input_shape[1], p=1.0, border_mode=4)
        data = {'image': img, 'mask': mask}
        augmented = augmentation(**data)
        (img, mask) = (augmented['image'], augmented['mask'])
        img = np.array(img, np.float32)
        img = self.preprocess(img)
        mask = np.array((mask / 255.0), np.float32)
        if (len(mask.shape) < 3):
            mask = np.expand_dims(mask, axis=2)
        return (img, mask)

    def train_batch_generator(self, ids):
        """Yield endless randomly-sampled (X, y) training batches."""
        num_images = ids.shape[0]
        while True:
            # Sampling with replacement; batches are independent draws.
            idx_batch = np.random.randint(low=0, high=num_images, size=self.batch_size)
            image_masks = [self._read_image_train(x) for x in ids[idx_batch]]
            X = np.array([x[0] for x in image_masks])
            y = np.array([x[1] for x in image_masks])
            (yield (X, y))

    def evaluation_batch_generator(self, ids):
        """Yield (X, y) batches covering *ids* sequentially, looping forever."""
        num_images = ids.shape[0]
        while True:
            for start in range(0, num_images, self.batch_size):
                # Final batch may be smaller than batch_size.
                end = min((start + self.batch_size), num_images)
                image_masks = [self._read_image_valid(x) for x in ids[start:end]]
                X = np.array([x[0] for x in image_masks])
                y = np.array([x[1] for x in image_masks])
                (yield (X, y))
def chunks(iterator: Iterator[T], n: int) -> Iterator[Iterator[T]]:
    """Lazily split *iterator* into sub-iterators of at most *n* items.

    Each yielded chunk must be fully consumed before requesting the next,
    since all chunks draw from the same underlying iterator.  An empty
    input yields a single empty iterator.
    """
    saw_item = False
    while True:
        try:
            head = next(iterator)
        except StopIteration:
            break
        saw_item = True
        yield itertools.chain([head], itertools.islice(iterator, 0, (n - 1)))
    if not saw_item:
        yield iter([])
def process_account(account, i):
    """Run the read/report/withdraw flow for a single account.

    NOTE(review): several string literals in this function (request URLs and
    most log messages) appear redacted/truncated in this copy of the source
    (e.g. the bare ``url = '`` lines), which leaves the file syntactically
    invalid as-is -- restore them from the original before running.

    Relies on module-level ``lock``, ``key`` and ``money_Withdrawal`` --
    presumably a threading lock, a webhook key and a withdraw switch; TODO
    confirm against the rest of the script.

    Args:
        account: raw account record; the cookie is its first field.
        i: account index, used only for log output.
    """
    # NOTE(review): str.split('') raises ValueError in Python; the real
    # separator string was likely redacted with the other literals.
    values = account.split('')
    cookie = values[0]
    print(f'''
======={i}=======''')
    # Request signature: sha256 over a fixed key plus the current unix time.
    current_time = str(int(time.time()))
    sign_str = f'key=4fck9x4dqa6linkman3ho9b1quarto49x0yp706qi5185o&time={current_time}'
    sha256_hash = hashlib.sha256(sign_str.encode())
    sign = sha256_hash.hexdigest()
    url = '
    headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 9; V1923A Build/PQ3B.190801.; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/91.0.4472.114 Safari/537.36 MMWEBID/5635 MicroMessenger/8.0.40.2420(0x) WeChat/arm64 Weixin Android Tablet NetType/WIFI Language/zh_CN ABI/arm64', 'Cookie': cookie}
    data = {'time': current_time, 'sign': sign}
    # Serialize the initial share-link request across worker threads.
    with lock:
        response = requests.get(url, headers=headers, json=data).json()
    share_link = response['data']['share_link'][0]
    # Account identifier embedded in the share link's first query parameter.
    p_value = share_link.split('=')[1].split('&')[0]
    url = '
    try:
        response = requests.get(url, headers=headers, json=data, timeout=7).json()
    except requests.Timeout:
        print(',...')
        # Single retry on timeout.
        response = requests.get(url, headers=headers, json=data, timeout=7).json()
    except Exception as e:
        print('')
        print(e)
    if (response['code'] == 0):
        remain = response['data']['remain']
        read = response['data']['read']
        print(f'''ID:{p_value}-----:{remain}
::{read}
:{share_link}''')
    else:
        print(response['message'])
    print('')
    # Article-reading loop: at most 30 articles per run.
    for j in range(30):
        # Watched publisher ("__biz") ids that trigger the webhook alert below.
        biz_list = ['MzkyMzI5NjgxMA==', 'MzkzMzI5NjQ3MA==', 'Mzg5NTU4MzEyNQ==', 'Mzg3NzY5Nzg0NQ==', 'MzU5OTgxNjg1Mg==', 'Mzg4OTY5Njg4Mw==', 'MzI1ODcwNTgzNA==', 'Mzg2NDY5NzU0Mw==']
        # NOTE(review): the signature is recomputed from the *original*
        # current_time, so it never changes between loop iterations.
        sign_str = f'key=4fck9x4dqa6linkman3ho9b1quarto49x0yp706qi5185o&time={current_time}'
        sha256_hash = hashlib.sha256(sign_str.encode())
        sign = sha256_hash.hexdigest()
        url = '
        try:
            response = requests.get(url, headers=headers, json=data, timeout=7).json()
        except requests.Timeout:
            print(',...')
            response = requests.get(url, headers=headers, json=data, timeout=7).json()
        except Exception as e:
            print(e)
            print(',...')
            response = requests.get(url, headers=headers, json=data, timeout=7).json()
        if (response['code'] == 1):
            print(response['message'])
            break
        else:
            try:
                # Article id and publisher id parsed out of the returned link.
                mid = response['data']['link'].split('&mid=')[1].split('&')[0]
                biz = response['data']['link'].split('__biz=')[1].split('&')[0]
                print(f'[{p_value}]---{mid} [{biz}]')
                if (biz in biz_list):
                    # Watched publisher: push the link to the webhook, then wait.
                    print(f'[{biz}] !!!')
                    link = response['data']['link']
                    url = (' + key)
                    messages = [f'''!!!
{link}
60s''']
                    for message in messages:
                        data = {'msgtype': 'text', 'text': {'content': message}}
                        headers = {'Content-Type': 'application/json'}
                        response = requests.post(url, headers=headers, data=json.dumps(data))
                    print('60s--60s')
                    time.sleep(60)
                    url = '
                    headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 9; V1923A Build/PQ3B.190801.; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/91.0.4472.114 Safari/537.36 MMWEBID/5635 MicroMessenger/8.0.40.2420(0x) WeChat/arm64 Weixin Android Tablet NetType/WIFI Language/zh_CN ABI/arm64', 'Cookie': cookie}
                    data = {'time': current_time, 'sign': sign}
                    try:
                        response = requests.get(url, headers=headers, data=data, timeout=7).json()
                    except requests.Timeout:
                        print(',...')
                        response = requests.get(url, headers=headers, data=data, timeout=7).json()
                    except Exception as e:
                        print('')
                        print(e)
                    if (response['code'] == 0):
                        gain = response['data']['gain']
                        print(f'{(j + 1)}---[{gain}]')
                        print(f'')
                    else:
                        print(f',')
                        break
                else:
                    # Normal article: random human-like delay before reporting.
                    sleep = random.randint(8, 11)
                    print(f'{sleep}')
                    time.sleep(sleep)
                    url = '
                    headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 9; V1923A Build/PQ3B.190801.; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/91.0.4472.114 Safari/537.36 MMWEBID/5635 MicroMessenger/8.0.40.2420(0x) WeChat/arm64 Weixin Android Tablet NetType/WIFI Language/zh_CN ABI/arm64', 'Cookie': cookie}
                    data = {'time': current_time, 'sign': sign}
                    try:
                        response = requests.get(url, headers=headers, data=data, timeout=7).json()
                    except requests.Timeout:
                        print(',...')
                        response = requests.get(url, headers=headers, data=data, timeout=7).json()
                    except Exception as e:
                        print('')
                        print(e)
                    if (response['code'] == 0):
                        gain = response['data']['gain']
                        print(f'{(j + 1)}---[{gain}]')
                        print(f'')
                    else:
                        print(f'{response}')
            except KeyError:
                print(f',{response}')
                break
    # Optional withdraw step, controlled by a module-level switch.
    if (money_Withdrawal == 1):
        print(f'')
        url = '
        response = requests.get(url, headers=headers, json=data).json()
        if (response['code'] == 0):
            print(response['message'])
        elif (response['code'] == 1):
            print(response['message'])
        else:
            print(f'{response}')
    elif (money_Withdrawal == 0):
        print(f'''{('-' * 30)}
''')
def cross_layer_equalization_auto_stepwise():
    """Run AIMET cross-layer equalization on ResNet50 step by step.

    Steps: replace ReLU6 with ReLU, fold batch norms, apply cross-layer
    scaling, then fold high biases using the folded BN parameters.
    """
    model = tf.keras.applications.resnet50.ResNet50(weights=None, classes=10)
    (model_for_cle, _) = replace_relu6_with_relu(model)
    # NOTE(review): the model returned by fold_all_batch_norms is bound to
    # ``model`` but never used afterwards -- scaling and the return value
    # both use ``model_for_cle``; confirm which model the fold mutates.
    (folded_pairs, model) = fold_all_batch_norms(model_for_cle)
    bn_dict = {}
    # Map each conv/linear layer to the batch norm that was folded into it,
    # as required by HighBiasFold below.
    for (conv_or_linear, bn) in folded_pairs:
        bn_dict[conv_or_linear] = bn
    cls_set_info_list = CrossLayerScaling.scale_model(model_for_cle)
    HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
    return model_for_cle
class bertLSTMCRF(object):
    """BERT encoder -> BiLSTM -> dense projection -> CRF sequence tagger."""

    def __init__(self, params, bert_config):
        # Hyper-parameters arrive in a plain dict so the model can be built
        # straight from a config file.
        self.dropout_rate = params['dropout_prob']
        self.num_labels = params['num_labels']
        self.rnn_size = params['rnn_size']
        self.num_layers = params['num_layers']
        self.hidden_units = params['hidden_units']
        self.bert_config = bert_config

    def __call__(self, input_ids, labels, text_length_list, is_training, is_testing=False):
        """Build the graph.

        Returns (loss, pred_ids, mask) for train/eval, or just pred_ids
        when ``is_testing`` is set.
        """
        encoder = modeling.BertModel(config=self.bert_config, is_training=is_training, input_ids=input_ids, text_length=text_length_list, use_one_hot_embeddings=False)
        token_embeddings = encoder.get_sequence_output()
        bilstm = BLSTM(None, self.rnn_size, self.num_layers, (1.0 - self.dropout_rate), lengths=text_length_list, is_training=is_training)
        lstm_features = bilstm.blstm_layer(token_embeddings)
        # Project BiLSTM features to per-label emission scores for the CRF.
        emissions = tf.layers.dense(lstm_features, self.num_labels)
        crf = CRF(self.num_labels, labels, text_length_list)
        (loss, transitions) = crf.crf_layer(emissions)
        pred_ids = crf.crf_decoding(emissions, transitions)
        # Sequence mask weights padding positions out of downstream metrics.
        weight = tf.sequence_mask(text_length_list, dtype=tf.float32, name='mask')
        if is_testing:
            return pred_ids
        return (loss, pred_ids, weight)
class FcBlockWOutput(nn.Module):
    """Fully-connected block (Linear -> ReLU -> Dropout) with an optional
    internal-classifier head.

    ``forward`` returns ``(features, has_output_flag, logits_or_None)`` so
    early-exit outputs can be collected uniformly across blocks.
    """

    def __init__(self, fc_params, output_params, flatten=False):
        super(FcBlockWOutput, self).__init__()
        in_features, out_features = fc_params[0], fc_params[1]
        add_output, num_classes = output_params[0], output_params[1]
        self.output_id = output_params[2]
        self.depth = 1
        modules = []
        if flatten:
            # Collapse (N, C, H, W) activations before the linear layer.
            modules.append(af.Flatten())
        modules.append(nn.Linear(in_features, out_features))
        modules.append(nn.ReLU())
        modules.append(nn.Dropout(0.5))
        self.layers = nn.Sequential(*modules)
        if add_output:
            self.output = nn.Linear(out_features, num_classes)
            self.no_output = False
        else:
            # No head on this block: swap in the output-less forward.
            self.output = nn.Sequential()
            self.forward = self.only_forward
            self.no_output = True

    def forward(self, x):
        features = self.layers(x)
        return (features, 1, self.output(features))

    def only_output(self, x):
        return self.output(self.layers(x))

    def only_forward(self, x):
        return (self.layers(x), 0, None)
def parallel_data_prefetch(func: callable, data, n_proc, target_data_type='ndarray', cpu_intensive=True, use_worker_id=False):
    """Apply *func* to *data* split across *n_proc* workers and gather results.

    Workers run ``_do_parallel_data_prefetch`` (defined elsewhere in this
    file) and push ``(worker_idx, result)`` tuples followed by a ``'Done'``
    sentinel onto a shared queue.

    Args:
        func: callable each worker applies to its slice of the data.
        data: np.ndarray or any iterable (dict values are used, keys dropped).
        n_proc: number of worker processes/threads.
        target_data_type: 'ndarray' or 'list' -- controls splitting and how
            the gathered results are merged.
        cpu_intensive: True -> multiprocessing; False -> threads.
        use_worker_id: forwarded to the worker entry point.
    """
    if (isinstance(data, np.ndarray) and (target_data_type == 'list')):
        raise ValueError('list expected but function got ndarray.')
    elif isinstance(data, abc.Iterable):
        if isinstance(data, dict):
            print(f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.')
            data = list(data.values())
        if (target_data_type == 'ndarray'):
            data = np.asarray(data)
        else:
            data = list(data)
    else:
        raise TypeError(f'The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}.')
    # Processes for CPU-bound work, threads for I/O-bound work.
    if cpu_intensive:
        Q = mp.Queue(1000)
        proc = mp.Process
    else:
        Q = Queue(1000)
        proc = Thread
    # Split the data into n_proc roughly equal parts.
    if (target_data_type == 'ndarray'):
        arguments = [[func, Q, part, i, use_worker_id] for (i, part) in enumerate(np.array_split(data, n_proc))]
    else:
        step = (int(((len(data) / n_proc) + 1)) if ((len(data) % n_proc) != 0) else int((len(data) / n_proc)))
        arguments = [[func, Q, part, i, use_worker_id] for (i, part) in enumerate([data[i:(i + step)] for i in range(0, len(data), step)])]
    processes = []
    for i in range(n_proc):
        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    print(f'Start prefetching...')
    import time
    start = time.time()
    # One result slot per worker, filled in worker order.
    gather_res = [[] for _ in range(n_proc)]
    try:
        for p in processes:
            p.start()
        k = 0
        # Drain the queue until every worker has sent its 'Done' sentinel.
        while (k < n_proc):
            res = Q.get()
            if (res == 'Done'):
                k += 1
            else:
                gather_res[res[0]] = res[1]
    except Exception as e:
        print('Exception: ', e)
        # Kill remaining workers before propagating the error.
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
        print(f'Prefetching complete. [{(time.time() - start)} sec.]')
    # Merge per-worker results back into the requested container type.
    if (target_data_type == 'ndarray'):
        if (not isinstance(gather_res[0], np.ndarray)):
            return np.concatenate([np.asarray(r) for r in gather_res], axis=0)
        return np.concatenate(gather_res, axis=0)
    elif (target_data_type == 'list'):
        out = []
        for r in gather_res:
            out.extend(r)
        return out
    else:
        return gather_res
class GELS(Function):
    """Differentiable least-squares solve: x = argmin_x ||A x - b||_2.

    Solves the normal equations (A^T A) x = A^T b via a Cholesky
    factorization, with an analytic backward pass for both A and b.
    Use as ``GELS.apply(A, b)``.
    """

    @staticmethod
    def forward(ctx, A, b):
        # BUGFIX: torch.cholesky was deprecated and removed in modern
        # PyTorch; torch.linalg.cholesky returns the *lower* triangular
        # factor, so all solves below use upper=False (mathematically
        # equivalent to the old upper=True formulation).
        gram = torch.matmul(A.transpose(-1, -2), A)
        chol = torch.linalg.cholesky(gram)
        ret = torch.cholesky_solve(torch.matmul(A.transpose(-1, -2), b), chol, upper=False)
        ctx.save_for_backward(chol, ret, A, b)
        return ret

    @staticmethod
    def backward(ctx, grad_output):
        (chol, x, a, b) = ctx.saved_tensors
        # z solves (A^T A) z = grad_output, reusing the stored factorization.
        z = torch.cholesky_solve(grad_output, chol, upper=False)
        xzt = torch.matmul(x, z.transpose(-1, -2))
        zx_sym = xzt + xzt.transpose(-1, -2)
        # d||Ax-b||/dA = -A (x z^T + z x^T) + b z^T ; d/db = A z.
        grad_A = -torch.matmul(a, zx_sym) + torch.matmul(b, z.transpose(-1, -2))
        grad_b = torch.matmul(a, z)
        return (grad_A, grad_b)
class ImportExportTagsAndTrackUserDataPlugin(SongsMenuPlugin):
    """Quod Libet plugin that exports tags/playlists/user data to JSON files
    and re-imports them, fuzzy-matching albums and tracks across versions.
    """

    PLUGIN_ID = _PLUGIN_ID
    PLUGIN_NAME = _('Import / Export')
    PLUGIN_DESC = _('Imports and exports tags and track user data.')
    PLUGIN_ICON = Icons.EDIT_COPY
    plugin_handles = each_song(is_finite)
    # Maps an exported album's identity to the JSON file holding its data.
    _album_id_to_export_path: MutableMapping[(AlbumId, Path)]

    def PluginPreferences(self, *args):
        """Build the preferences pane (confirmation, similarity, export options)."""
        vbox = Gtk.VBox(spacing=6)

        def asd_toggled(button, *args):
            CONFIG.need_user_check_if_number_of_albums_differs = button.get_active()

        def tsd_toggled(button, *args):
            CONFIG.need_user_check_if_number_of_tracks_differs = button.get_active()

        def de_toggled(button, *args):
            CONFIG.delete_exports_after_importing = button.get_active()

        def pp_toggled(button, *args):
            CONFIG.pretty_print_json = button.get_active()

        def mt_scale_changed(scale):
            CONFIG.max_track_similarity_to_need_user_check = scale.get_value()

        def ma_scale_changed(scale):
            CONFIG.max_album_similarity_to_need_user_check = scale.get_value()
        info_box = Gtk.VBox(spacing=6)
        info_frame = qltk.Frame(_('Further information'), child=info_box)
        vbox.pack_start(info_frame, False, True, 0)
        meta_markup = util.monospace(', '.join(MIGRATE))
        info_text = (_("The term 'track user data' includes the playlists in which the selected tracks are and the following metadata:\n\n%s\n\nBe aware that whatever you chose to export will be imported. If you exported the file stems (file names without extension), then, on import, the selected files will be renamed.\n\nAfter exporting an album you can import the data into another version of the album. Order and number of tracks can be different. The plugin matches the exported data to the new tracks, even if the names of the tracks are slightly different. The automatic matching is not always correct, so it is better to not reduce the following similarity values too much.") % meta_markup)
        info_lbl = Gtk.Label(label=info_text, use_markup=True, wrap=True)
        info_box.pack_start(info_lbl, True, True, 0)
        manual_box = Gtk.VBox(spacing=6)
        manual_frame = qltk.Frame(_('User interaction on import'), child=manual_box)
        vbox.pack_start(manual_frame, False, True, 0)
        tsd = Gtk.CheckButton(label=_('Require confirmation if number of tracks differs'))
        tsd.set_active(CONFIG.need_user_check_if_number_of_tracks_differs)
        tsd.connect('toggled', tsd_toggled)
        manual_box.pack_start(tsd, True, True, 0)
        asd = Gtk.CheckButton(label=_('Require confirmation if number of albums differs'))
        asd.set_active(CONFIG.need_user_check_if_number_of_albums_differs)
        asd.connect('toggled', asd_toggled)
        manual_box.pack_start(asd, True, True, 0)
        desc = _('Percentage below which the user will have to manually check and optionally change which track is matched with which.')
        perc_table = Gtk.Table(n_rows=2, n_columns=2)
        perc_table.set_col_spacings(6)
        perc_table.set_row_spacings(6)
        manual_box.pack_start(perc_table, True, True, 0)

        def format_perc(scale, value):
            return (_('%d %%') % (value * 100))

        def add_perc_scale_with_label(ratio, col, lbl_text, tooltip_text, on_change):
            # One labelled similarity slider (0..1, shown as a percentage).
            scale = Gtk.HScale(adjustment=Gtk.Adjustment.new(0, 0, 1, 0.01, 0.01, 0))
            scale.set_digits(2)
            scale.set_tooltip_text(tooltip_text)
            scale.set_value_pos(Gtk.PositionType.RIGHT)
            scale.set_value(ratio)
            scale.connect('format-value', format_perc)
            scale.connect('value-changed', on_change)
            label = Gtk.Label(label=lbl_text)
            label.set_alignment(0.0, 0.5)
            label.set_padding(0, 6)
            label.set_mnemonic_widget(scale)
            xoptions = (Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK)
            perc_table.attach(label, 0, 1, col, (col + 1), xoptions=xoptions)
            perc_table.attach(scale, 1, 2, col, (col + 1))
        add_perc_scale_with_label(CONFIG.max_track_similarity_to_need_user_check, 0, _('Track similarity:'), desc, mt_scale_changed)
        add_perc_scale_with_label(CONFIG.max_album_similarity_to_need_user_check, 1, _('Album similarity:'), desc, ma_scale_changed)
        export_box = Gtk.VBox(spacing=6)
        export_frame = qltk.Frame(_('Export files'), child=export_box)
        vbox.pack_start(export_frame, False, True, 0)
        pp = Gtk.CheckButton(label=_('Write pretty and clear JSON (slower)'))
        pp.set_active(CONFIG.pretty_print_json)
        pp.connect('toggled', pp_toggled)
        export_box.pack_start(pp, True, True, 0)
        de = Gtk.CheckButton(label=_("Delete export files after they've been imported"))
        de.set_active(CONFIG.delete_exports_after_importing)
        de.connect('toggled', de_toggled)
        export_box.pack_start(de, True, True, 0)
        return vbox

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._export_collectors = []
        # -1 means "import"; >= 0 indexes into EXPORT_OPTIONS; None = idle.
        self._import_or_export_option_index = None
        # Weighted attribute matchers used to pair current albums/tracks with
        # previously exported ones.
        self._album_id_matcher: ObjectListMatcher[AlbumId] = ObjectListMatcher({(lambda a: a.title): 9, (lambda a: a.artist): 4.5, (lambda a: a.tracks): 1.2, (lambda a: a.last_directory_parts): 1, (lambda a: a.discs): 0.8, (lambda a: a.id_value): 0.5})
        self._album_id_matcher.should_store_similarity_matrix = True
        self._album_id_matcher.should_go_through_every_attribute = True
        self._track_id_matcher: ObjectListMatcher[TrackId] = ObjectListMatcher({(lambda t: t.title): 8, (lambda t: t.artist): 3.5, (lambda t: t.track): 1.2, (lambda t: t.file_stem): 1, (lambda t: t.disc): 0.8})
        self._track_id_matcher.should_store_similarity_matrix = True
        # BUGFIX: this previously re-set the *album* matcher's flag a second
        # time, leaving the track matcher without it.
        self._track_id_matcher.should_go_through_every_attribute = True
        self._album_id_to_export_path = {}
        submenu = Gtk.Menu()
        self._init_collectors_and_menu(submenu)
        if submenu.get_children():
            self.set_submenu(submenu)
        else:
            self.set_sensitive(False)

    def _init_collectors_and_menu(self, submenu):
        """Populate the plugin submenu: Import, export options, open-dir."""
        import_item = Gtk.MenuItem(label=_('Import'))
        connect_obj(import_item, 'activate', self.__set_import_export_option_index, (- 1))
        submenu.append(import_item)
        submenu.append(SeparatorMenuItem())
        for (idx, (name, query)) in enumerate(EXPORT_OPTIONS):
            collector = track_data_collector_for(query)
            self._export_collectors.append(collector)
            item = Gtk.MenuItem(label=name)
            connect_obj(item, 'activate', self.__set_import_export_option_index, idx)
            submenu.append(item)
        submenu.append(SeparatorMenuItem())
        open_dir_item = Gtk.MenuItem(label=_('Open Export Directory'))

        def open_export_dir(_):
            show_files(path2fsn(EXPORT_DIR_PATH), [path2fsn(TAGS_AND_USERDATA_INDEX_FILE_PATH.name)])
        connect_obj(open_dir_item, 'activate', open_export_dir, None)
        submenu.append(open_dir_item)

    def __set_import_export_option_index(self, index):
        self._import_or_export_option_index = index

    def _error_msg(self, message):
        title = (_('Error in %s') % self.PLUGIN_NAME)
        ErrorMessage(app.window, title, message).run()

    def plugin_albums(self, albums):
        """Entry point: run the import or export selected from the submenu."""
        index = self._import_or_export_option_index
        if ((index is None) or (index >= len(self._export_collectors))):
            return
        if (index < 0):
            self.import_data_to_albums(albums)
        else:
            collect_data = self._export_collectors[index]
            self.export_albums(albums, collect_data)
        self._rewrite_index()
        self._import_or_export_option_index = None

    def import_data_to_albums(self, albums):
        if (not self._try_load_exports()):
            return
        for (exp_album_id, songs) in self._iter_export_album_id_matched_to_songs(albums):
            if (exp_album_id is not None):
                self.import_data(exp_album_id, songs)

    def _iter_export_album_id_matched_to_songs(self, albums):
        """Yield (exported AlbumId, songs) pairs, prompting the user if needed."""
        album_ids = [AlbumId.of_song(songs[0]) for songs in albums]
        exp_album_ids = list(self._album_id_to_export_path.keys())
        exp_indices = self._album_id_matcher.get_indices(album_ids, exp_album_ids)
        # BUGFIX: previously compared len(exp_album_ids) with itself, so the
        # "number of albums differs" confirmation could never trigger.
        size_differs = (len(album_ids) != len(exp_album_ids))
        need_check = (CONFIG.need_user_check_if_number_of_albums_differs and size_differs)
        need_check = (need_check or self._does_match_need_manual_check(self._album_id_matcher, exp_indices, CONFIG.max_album_similarity_to_need_user_check))
        if need_check:
            columns = [ColumnSpec(_('Discs'), (lambda a: str(a.discs)), False), ColumnSpec(_('Tracks'), (lambda a: str(a.tracks)), False), ColumnSpec(_('Title'), (lambda a: a.title), True), ColumnSpec(_('Artist(s)'), (lambda a: a.artist), True), ColumnSpec(_('End of path'), (lambda a: a.last_directory_parts), True)]
            prompt = MatchListsDialog(album_ids, exp_album_ids, exp_indices, columns, _('Match Albums'), _('Continue'), id_for_window_tracking=self.PLUGIN_ID)
            exp_indices = prompt.run()
        for (exp_idx, songs) in zip(exp_indices, albums, strict=False):
            if (exp_idx is not None):
                (yield (exp_album_ids[exp_idx], songs))

    def _try_load_exports(self) -> bool:
        """Load the export index; returns False when nothing can be imported."""
        index_path = TAGS_AND_USERDATA_INDEX_FILE_PATH
        if (not index_path.exists()):
            self._warning_nothing_to_import()
            return False
        try:
            with index_path.open(encoding='utf-8') as f:
                album_json_key_to_export_file_name = json.load(f)
        except ValueError:
            self._handle_broken_index()
            return False
        if (not album_json_key_to_export_file_name):
            self._warning_nothing_to_import()
            return False
        self._load_exports_in_index(album_json_key_to_export_file_name)
        return True

    def _warning_nothing_to_import(self):
        # BUGFIX: the dialog was constructed but never shown; .run() added to
        # match the ErrorMessage usage in _error_msg.
        WarningMessage(app.window, _('Nothing to import'), _('You have to export something before you can import.')).run()

    def _load_exports_in_index(self, album_json_key_to_export_file_name):
        # Silently skip entries whose file vanished or whose key is corrupt.
        for (key, file_name) in album_json_key_to_export_file_name.items():
            path = (EXPORT_DIR_PATH / file_name)
            if (not path.exists()):
                continue
            try:
                album_id = AlbumId(*json.loads(key))
            except ValueError:
                continue
            self._album_id_to_export_path[album_id] = path

    def _handle_broken_index(self):
        """Rename the unparseable index aside and notify the user."""
        index_path = TAGS_AND_USERDATA_INDEX_FILE_PATH
        now = cur_datetime_as_str()
        new_path = index_path.with_name(f'index-broken-{now}.{EXPORT_EXTENSION}')
        index_path.rename(new_path)
        self._error_msg(_('The index was corrupt.'))

    def import_data(self, export_album_id: AlbumId, songs: list[SongWrapper]):
        """Apply one exported album's data to *songs* and retire the export."""
        songs = [s for s in songs if is_writable(s)]
        if (not songs):
            return
        songs.sort(key=sort_key_for_song)
        export_path = self._album_id_to_export_path[export_album_id]
        changed_songs = self.import_data_and_get_changed(songs, export_path)
        if changed_songs:
            check_wrapper_changed(app.library, changed_songs)
        del self._album_id_to_export_path[export_album_id]
        if CONFIG.delete_exports_after_importing:
            try:
                export_path.unlink()
            except FileNotFoundError:
                pass
        else:
            move_export_to_used(export_path)

    def import_data_and_get_changed(self, songs: list[SongWrapper], source_path: Path) -> list[SongWrapper]:
        """Return the subset of *songs* actually modified by the import."""
        exported = self._try_read_source_json(source_path)
        if (exported is None):
            return []
        exported_indices = self._get_exported_indices_matched_to_songs(exported, songs)
        if (not exported_indices):
            return []
        changed_songs = []
        for (song, exp_idx) in zip(songs, exported_indices, strict=False):
            if (exp_idx is None):
                continue
            self._update_song(exported[exp_idx], song)
            if song._needs_write:
                changed_songs.append(song)
        return changed_songs

    def _try_read_source_json(self, path: Path):
        try:
            print_d(f'Loading from {str(path)!r}')
            with path.open(encoding='utf-8') as f:
                return json.load(f)
        except ValueError:
            print_e(f"Couldn't parse JSON in {path}.")
            self._error_msg((_("Couldn't parse JSON in %s") % path))
            return None
        except OSError:
            print_e(f"Couldn't read {path}")
            self._error_msg((_("Couldn't read %s") % path))
            return None

    def _update_song(self, exported_data, song):
        """Apply exported stem/playlists/tags to one song (sets _needs_write)."""
        file_stem = exported_data.pop(FILE_STEM_KEY, None)
        if (file_stem is not None):
            # Rename the file but keep its current extension.
            file_ext = extension_of_file_name(song('~basename'))
            new_name = f'{file_stem}{file_ext}'
            new_song_path = os.path.join(song('~dirname'), new_name)
            try:
                app.library.rename(song._song, new_song_path)
            except ValueError:
                print_e(f'Could not rename {song._song} to {new_song_path}.')
        for pl_name in exported_data.pop(PLAYLISTS_KEY, []):
            add_song_to_playlist(pl_name, song)
        for (tag_key, tag_value) in exported_data.items():
            if ((tag_key in song) and (song[tag_key] == tag_value)):
                continue
            song[tag_key] = tag_value
            song._needs_write = True

    def _rewrite_index(self):
        obj = {json.dumps(k): p.name for (k, p) in self._album_id_to_export_path.items()}
        self._rewrite_json(obj, TAGS_AND_USERDATA_INDEX_FILE_PATH)

    def _rewrite_json(self, obj, path):
        try:
            print_d(f'Writing to {str(path)!r}')
            with path.open('w+', encoding='utf-8') as f:
                json.dump(obj, f, indent=self._get_json_indent())
        except (ValueError, OSError):
            self._error_msg((_("Couldn't write '%s'") % path))
            print_e(f"Couldn't write {path} due to:")
            print_exc()

    def _get_exported_indices_matched_to_songs(self, exported, songs):
        """Match exported track records to *songs*, prompting the user if needed."""
        songs_ids = [TrackId.of_song(s) for s in songs]
        export_ids = [TrackId(*md.pop(IDENTIFIER_KEY)) for md in exported]
        export_ids_indices = self._track_id_matcher.get_indices(songs_ids, export_ids)
        size_differs = (len(exported) != len(songs))
        need_check = (CONFIG.need_user_check_if_number_of_tracks_differs and size_differs)
        need_check = (need_check or self._does_match_need_manual_check(self._track_id_matcher, export_ids_indices, CONFIG.max_track_similarity_to_need_user_check))
        if need_check:
            columns = [ColumnSpec(_('Disc'), (lambda t: t.disc_text), False), ColumnSpec(_('Track'), (lambda t: t.track_text), False), ColumnSpec(_('Title'), (lambda t: t.title), True), ColumnSpec(_('Artist(s)'), (lambda t: t.artist), True), ColumnSpec(_('File name'), (lambda t: t.file_name), True)]
            prompt = MatchListsDialog(songs_ids, export_ids, export_ids_indices, columns, _('Match Tracks'), _('Import'), id_for_window_tracking=self.PLUGIN_ID)
            return prompt.run()
        return export_ids_indices

    def _does_match_need_manual_check(self, matcher, b_indices, max_similarity_to_need_manual_check):
        """True when any matched pair's similarity is at or below the threshold."""
        if (max_similarity_to_need_manual_check <= 0.0):
            return False
        if (max_similarity_to_need_manual_check >= 1.0):
            return True
        sim_matrix = matcher.similarity_matrix
        for (a_idx, b_idx) in enumerate(b_indices):
            if (b_idx is None):
                continue
            sim = sim_matrix[a_idx][b_idx]
            if (sim <= max_similarity_to_need_manual_check):
                return True
        return False

    def _get_json_indent(self):
        return (4 if CONFIG.pretty_print_json else None)

    def export_albums(self, albums, collect_data):
        # Load existing exports first so re-exports reuse their paths.
        self._try_load_exports()
        for songs in albums:
            self.extract_data_and_export(songs, collect_data)

    def extract_data_and_export(self, songs, collect_data):
        """Write one album's collected track data to its (new or reused) path."""
        songs.sort(key=sort_key_for_song)
        songs_data = [collect_data(s._song) for s in songs]
        album_id = AlbumId.of_song(songs[0])
        prev_path = self._album_id_to_export_path.get(album_id)
        path = (new_export_path_for_album(album_id) if (prev_path is None) else prev_path)
        self._album_id_to_export_path[album_id] = path
        self._rewrite_json(songs_data, path)
def annotate(*, decision=None, output=None, varHeuristic=None, valHeuristic=None, filtering=None, prepro=None, search=None, restarts=None):
    """Build the list of solver annotations from the given keyword options.

    Each non-empty option is wrapped in its annotation class; a given
    annotation type may only be specified once.
    """
    annotations = []

    def _register(value, ann_cls):
        # Falsy values (None, empty) are simply skipped.
        if value:
            ann = ann_cls(value)
            assert type(ann) not in AnnEntities.items_types, 'This type of annotation can be specified only one time'
            annotations.append(EAnnotation(ann))
    pairs = ((decision, AnnotationDecision), (output, AnnotationOutput), (varHeuristic, AnnotationVarHeuristic), (valHeuristic, AnnotationValHeuristic), (filtering, AnnotationFiltering), (prepro, AnnotationPrepro), (search, AnnotationSearch), (restarts, AnnotationRestarts))
    for value, ann_cls in pairs:
        _register(value, ann_cls)
    return annotations
class CosineAnnealingRestartLR(_LRScheduler):
    """Cosine annealing learning-rate scheduler with warm restarts.

    Each entry of ``periods`` is the length of one cosine cycle; the
    matching entry of ``restart_weights`` scales that cycle's amplitude.
    """

    def __init__(self, optimizer, periods, restart_weights=(1,), eta_min=0, last_epoch=(- 1)):
        self.periods = periods
        self.restart_weights = restart_weights
        self.eta_min = eta_min
        assert (len(self.periods) == len(self.restart_weights)), 'periods and restart_weights should have the same length.'
        # Running prefix sums mark the epoch at which each cycle ends.
        running = 0
        cumulative = []
        for period in self.periods:
            running += period
            cumulative.append(running)
        self.cumulative_period = cumulative
        super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the per-parameter-group learning rates for the current epoch."""
        idx = get_position_from_periods(self.last_epoch, self.cumulative_period)
        weight = self.restart_weights[idx]
        nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1]
        period = self.periods[idx]
        phase = math.pi * ((self.last_epoch - nearest_restart) / period)
        return [self.eta_min + (weight * 0.5) * (base_lr - self.eta_min) * (1 + math.cos(phase)) for base_lr in self.base_lrs]
class WTFPython(commands.Cog):
    """Discord cog that searches the wtfpython README's table of contents.

    NOTE(review): this copy of the source is mangled -- the decorator names
    on the two bare ``(minutes=60)`` / ``(aliases=('wtf',))`` lines and the
    aiohttp call in ``fetch_readme`` have been stripped, leaving the file
    syntactically invalid.  Restore them from the original before running.
    """

    def __init__(self, bot: Bot):
        self.bot = bot
        # Maps a README header's markdown link text to its absolute URL.
        self.headers: dict[(str, str)] = {}
        self.fetch_readme.start()
    # NOTE(review): stripped decorator -- likely ``@tasks.loop(minutes=60)``.
    (minutes=60)
    async def fetch_readme(self) -> None:
        """Periodically re-fetch and re-parse the upstream README."""
        # NOTE(review): truncated -- presumably an aiohttp session GET of the
        # raw README URL.
        async with self.bot. as resp:
            log.trace('Fetching the latest WTF Python README.md')
            if (resp.status == 200):
                raw = (await resp.text())
                self.parse_readme(raw)

    def parse_readme(self, data: str) -> None:
        """Extract the table of contents and build the header -> URL map."""
        table_of_contents = re.search('\\[ Examples\\]\\(#-examples\\)\\n([\\w\\W]*)<!-- tocstop -->', data)[0].split('\n')
        for header in list(map(str.strip, table_of_contents)):
            match = re.search('\\[ (.*)\\]\\((.*)\\)', header)
            if match:
                # Anchor fragment from the markdown link, joined to the repo URL.
                hyper_link = match[0].split('(')[1].replace(')', '')
                self.headers[match[0]] = f'{BASE_URL}/{hyper_link}'

    def fuzzy_match_header(self, query: str) -> (str | None):
        """Return the best-matching header, or None below the certainty cutoff."""
        (match, certainty, _) = rapidfuzz.process.extractOne(query, self.headers.keys())
        return (match if (certainty > MINIMUM_CERTAINTY) else None)
    # NOTE(review): stripped decorator -- likely ``@commands.command(aliases=('wtf',))``.
    (aliases=('wtf',))
    async def wtf_python(self, ctx: commands.Context, *, query: (str | None)=None) -> None:
        """Reply with the repo link, or the best-matching README section."""
        if (query is None):
            no_query_embed = Embed(title='WTF Python?!', colour=constants.Colours.dark_green, description=f'''A repository filled with suprising snippets that can make you say WTF?!
[Go to the Repository]({BASE_URL})''')
            logo = File(LOGO_PATH, filename='wtf_logo.jpg')
            no_query_embed.set_thumbnail(url='attachment://wtf_logo.jpg')
            (await ctx.send(embed=no_query_embed, file=logo))
            return
        # Over-long queries are rejected outright instead of fuzzy-matched.
        if (len(query) > 50):
            embed = Embed(title=random.choice(constants.ERROR_REPLIES), description=ERROR_MESSAGE, colour=constants.Colours.soft_red)
            match = None
        else:
            match = self.fuzzy_match_header(query)
        if (not match):
            embed = Embed(title=random.choice(constants.ERROR_REPLIES), description=ERROR_MESSAGE, colour=constants.Colours.soft_red)
            (await ctx.send(embed=embed))
            return
        embed = Embed(title='WTF Python?!', colour=constants.Colours.dark_green, description=f'''Search result for '{query}': {match.split(']')[0].replace('[', '')}
[Go to Repository Section]({self.headers[match]})''')
        logo = File(LOGO_PATH, filename='wtf_logo.jpg')
        embed.set_thumbnail(url='attachment://wtf_logo.jpg')
        (await ctx.send(embed=embed, file=logo))

    def cog_unload(self) -> None:
        # Stop the background refresh task when the cog is removed.
        self.fetch_readme.cancel()
class CrossEntropyLoss(BaseLoss):
    """Sigmoid cross-entropy loss averaged over the batch.

    Labels are looked up in the example dict under the configured name.
    """

    def __init__(self, label_name):
        self._label_name = label_name

    def loss_fn(self, logits, examples):
        """Return mean sigmoid cross-entropy between logits and the labels."""
        targets = tf.to_float(examples[self._label_name])
        return self._cross_entropy_loss(logits, targets)

    def _cross_entropy_loss(self, logits, labels):
        per_example = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
        return tf.reduce_mean(per_example)
def add_image_net_computational_nodes_in_graph(session: tf.Session, logits_name: str, num_classes: int):
    """Add a labels placeholder plus top-1/top-5 accuracy and loss ops.

    The added tensors are intended to be retrieved later by name
    ('labels', 'top1-acc', 'top5-acc').
    """
    with session.graph.as_default():
        # Logits tensor of the already-built model, looked up by name.
        y_hat = session.graph.get_tensor_by_name(logits_name)
        y_hat_argmax = tf.argmax(y_hat, axis=1)
        # One-hot ground-truth labels fed at evaluation time.
        y = tf.placeholder(tf.int64, shape=[None, num_classes], name='labels')
        y_argmax = tf.argmax(y, axis=1)
        correct_prediction = tf.equal(y_hat_argmax, y_argmax)
        top1_acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='top1-acc')
        top5_acc = tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions=y_hat, targets=tf.cast(y_argmax, tf.int32), k=5), tf.float32), name='top5-acc')
        # NOTE(review): the loss op is neither named nor returned, so callers
        # can only reach it via graph traversal -- confirm this is intended.
        loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=y, logits=y_hat))
def main(argv: List[str]) -> None:
    """Convert Criteo per-day sparse .npy files to contiguous ids.

    Expects ``day_{0..DAYS-1}_sparse.npy`` inside ``--input_dir`` and writes
    remapped outputs to ``--output_dir``.

    Raises:
        ValueError: if any expected ``day_*_sparse.npy`` file is missing.
    """
    args = parse_args(argv)
    input_dir = args.input_dir
    output_dir = args.output_dir
    input_files = [os.path.join(input_dir, f'day_{i}_sparse.npy') for i in range(DAYS)]
    # BUGFIX: the previous check (`if not input_files`) could never trigger,
    # because the comprehension over range(DAYS) always yields DAYS entries.
    # Validate that the expected files actually exist instead.
    missing = [f for f in input_files if not os.path.isfile(f)]
    if missing:
        raise ValueError(f"There are no files that end with '_sparse.npy' in this directory: {input_dir}")
    print(f'Processing files in: {input_files}. Outputs will be saved to {output_dir}.')
    BinaryCriteoUtils.sparse_to_contiguous(input_files, output_dir, frequency_threshold=int(args.frequency_threshold))
    print('Done processing.')
def main():
    """Train, evaluate and/or predict with a TF question-answering model.

    Parses HF arguments, loads a SQuAD-style dataset and fast tokenizer,
    tokenizes examples into model features, then runs the phases selected by
    ``--do_train`` / ``--do_eval`` / ``--do_predict``.
    """
    # --- Argument parsing -------------------------------------------------
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # A lone .json argument is a file containing every argument.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    send_example_telemetry('run_qa', model_args, data_args, framework='tensorflow')
    # --- Output directory / checkpoint detection --------------------------
    output_dir = Path(training_args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    checkpoint = None
    if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
        if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
            checkpoint = output_dir
            logger.info(f'Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
        else:
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to continue regardless.')
    # --- Logging ----------------------------------------------------------
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    # --- Dataset loading ---------------------------------------------------
    if data_args.dataset_name is not None:
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
            extension = data_args.train_file.split('.')[-1]
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
            extension = data_args.validation_file.split('.')[-1]
        if data_args.test_file is not None:
            data_files['test'] = data_args.test_file
            extension = data_args.test_file.split('.')[-1]
        datasets = load_dataset(extension, data_files=data_files, field='data', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    # --- Config & tokenizer ------------------------------------------------
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at to find the model types that meet this requirement')
    # --- Column discovery ---------------------------------------------------
    if training_args.do_train:
        column_names = datasets['train'].column_names
    elif training_args.do_eval:
        column_names = datasets['validation'].column_names
    else:
        column_names = datasets['test'].column_names
    question_column_name = ('question' if 'question' in column_names else column_names[0])
    context_column_name = ('context' if 'context' in column_names else column_names[1])
    answer_column_name = ('answers' if 'answers' in column_names else column_names[2])
    # Padding side determines whether question or context comes first.
    pad_on_right = (tokenizer.padding_side == 'right')
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    if data_args.pad_to_max_length or isinstance(training_args.strategy, tf.distribute.TPUStrategy):
        logger.info("Padding all batches to max length because argument was set or we're on TPU.")
        padding = 'max_length'
    else:
        padding = False

    def prepare_train_features(examples):
        """Tokenize training examples and compute start/end token positions."""
        # Some questions carry leading whitespace which wastes truncation budget.
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
        tokenized_examples = tokenizer(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=padding)
        # Long contexts overflow into several features; map each back to its example.
        sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
        offset_mapping = tokenized_examples.pop('offset_mapping')
        tokenized_examples['start_positions'] = []
        tokenized_examples['end_positions'] = []
        for i, offsets in enumerate(offset_mapping):
            input_ids = tokenized_examples['input_ids'][i]
            # Unanswerable spans are labeled with the CLS token index.
            cls_index = input_ids.index(tokenizer.cls_token_id)
            sequence_ids = tokenized_examples.sequence_ids(i)
            sample_index = sample_mapping[i]
            answers = examples[answer_column_name][sample_index]
            if len(answers['answer_start']) == 0:
                tokenized_examples['start_positions'].append(cls_index)
                tokenized_examples['end_positions'].append(cls_index)
            else:
                start_char = answers['answer_start'][0]
                end_char = start_char + len(answers['text'][0])
                # Locate the token span covering the context in this feature.
                token_start_index = 0
                while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
                    token_start_index += 1
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
                    token_end_index -= 1
                # If the answer falls outside this span, label it as CLS.
                if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                    tokenized_examples['start_positions'].append(cls_index)
                    tokenized_examples['end_positions'].append(cls_index)
                else:
                    # Narrow the token indices onto the answer's character span.
                    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                        token_start_index += 1
                    tokenized_examples['start_positions'].append(token_start_index - 1)
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples['end_positions'].append(token_end_index + 1)
        return tokenized_examples

    processed_datasets = {}
    if training_args.do_train:
        if 'train' not in datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if data_args.max_train_samples is not None:
            # Select before mapping so preprocessing only touches the subset.
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        train_dataset = train_dataset.map(prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
        if data_args.max_train_samples is not None:
            # Re-cap: overflowing tokens may have increased the feature count.
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        processed_datasets['train'] = train_dataset

    def prepare_validation_features(examples):
        """Tokenize eval/test examples, keeping example ids and context offsets."""
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
        tokenized_examples = tokenizer(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=padding)
        sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
        tokenized_examples['example_id'] = []
        for i in range(len(tokenized_examples['input_ids'])):
            sequence_ids = tokenized_examples.sequence_ids(i)
            context_index = (1 if pad_on_right else 0)
            sample_index = sample_mapping[i]
            tokenized_examples['example_id'].append(examples['id'][sample_index])
            # Null out offsets that are not part of the context so
            # post-processing can tell answer tokens apart.
            tokenized_examples['offset_mapping'][i] = [(o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['offset_mapping'][i])]
        return tokenized_examples

    if training_args.do_eval:
        if 'validation' not in datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_examples = datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_examples), data_args.max_eval_samples)
            eval_examples = eval_examples.select(range(max_eval_samples))
        eval_dataset = eval_examples.map(prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        processed_datasets['validation'] = eval_dataset
    if training_args.do_predict:
        if 'test' not in datasets:
            raise ValueError('--do_predict requires a test dataset')
        predict_examples = datasets['test']
        if data_args.max_predict_samples is not None:
            predict_examples = predict_examples.select(range(data_args.max_predict_samples))
        predict_dataset = predict_examples.map(prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        processed_datasets['test'] = predict_dataset

    def post_processing_function(examples, features, predictions, stage='eval'):
        """Convert raw start/end logits into text answers + references."""
        predictions = postprocess_qa_predictions(examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, prefix=stage)
        if data_args.version_2_with_negative:
            formatted_predictions = [{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()]
        else:
            formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
        references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)

    metric = evaluate.load(('squad_v2' if data_args.version_2_with_negative else 'squad'))

    def compute_metrics(p: EvalPrediction):
        # Closes over `metric` above — nothing else may rebind that name.
        return metric.compute(predictions=p.predictions, references=p.label_ids)

    with training_args.strategy.scope():
        dataset_options = tf.data.Options()
        dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        num_replicas = training_args.strategy.num_replicas_in_sync
        # --- Model ---------------------------------------------------------
        if checkpoint is None:
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForQuestionAnswering.from_pretrained(model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
        if training_args.do_train:
            training_dataset = model.prepare_tf_dataset(processed_datasets['train'], shuffle=True, batch_size=(training_args.per_device_train_batch_size * num_replicas), tokenizer=tokenizer)
            training_dataset = training_dataset.with_options(dataset_options)
            num_train_steps = len(training_dataset) * training_args.num_train_epochs
            if training_args.warmup_steps > 0:
                num_warmup_steps = training_args.warmup_steps
            elif training_args.warmup_ratio > 0:
                num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
            else:
                num_warmup_steps = 0
            optimizer, schedule = create_optimizer(init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm)
            model.compile(optimizer=optimizer, jit_compile=training_args.xla, metrics=['accuracy'])
        else:
            model.compile(optimizer=None, jit_compile=training_args.xla, metrics=['accuracy'])
            training_dataset = None
        if training_args.do_eval:
            eval_dataset = model.prepare_tf_dataset(processed_datasets['validation'], shuffle=False, batch_size=(training_args.per_device_train_batch_size * num_replicas), tokenizer=tokenizer)
            eval_dataset = eval_dataset.with_options(dataset_options)
        else:
            eval_dataset = None
        if training_args.do_predict:
            predict_dataset = model.prepare_tf_dataset(processed_datasets['test'], shuffle=False, batch_size=(training_args.per_device_eval_batch_size * num_replicas), tokenizer=tokenizer)
            predict_dataset = predict_dataset.with_options(dataset_options)
        else:
            predict_dataset = None
        # --- Hub push setup --------------------------------------------------
        push_to_hub_model_id = training_args.push_to_hub_model_id
        model_name = model_args.model_name_or_path.split('/')[-1]
        if not push_to_hub_model_id:
            if data_args.dataset_name is not None:
                push_to_hub_model_id = f'{model_name}-finetuned-{data_args.dataset_name}'
            else:
                push_to_hub_model_id = f'{model_name}-finetuned-question-answering'
        model_card_kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'question-answering'}
        if data_args.dataset_name is not None:
            model_card_kwargs['dataset_tags'] = data_args.dataset_name
            if data_args.dataset_config_name is not None:
                model_card_kwargs['dataset_args'] = data_args.dataset_config_name
                model_card_kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
            else:
                model_card_kwargs['dataset'] = data_args.dataset_name
        if training_args.push_to_hub:
            callbacks = [PushToHubCallback(output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs)]
        else:
            callbacks = []
        # --- Training --------------------------------------------------------
        if training_args.do_train:
            model.fit(training_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks)
        # --- Evaluation ------------------------------------------------------
        if training_args.do_eval:
            logger.info('*** Evaluation ***')
            eval_predictions = model.predict(eval_dataset)
            if isinstance(eval_predictions.start_logits, tf.RaggedTensor):
                # Padded samples are ragged; pad with a very negative logit so
                # they never win the argmax during post-processing.
                eval_start_logits = eval_predictions.start_logits.to_tensor(default_value=-1000).numpy()
                eval_end_logits = eval_predictions.end_logits.to_tensor(default_value=-1000).numpy()
            else:
                eval_start_logits = eval_predictions.start_logits
                eval_end_logits = eval_predictions.end_logits
            post_processed_eval = post_processing_function(datasets['validation'], processed_datasets['validation'], (eval_start_logits, eval_end_logits))
            metrics = compute_metrics(post_processed_eval)
            logger.info('Evaluation metrics:')
            # BUGFIX: the loop variable must NOT be named `metric` — that would
            # rebind the `evaluate` metric object compute_metrics() closes over
            # and break the prediction branch below.
            for key, value in metrics.items():
                logger.info(f'{key}: {value:.3f}')
            if training_args.output_dir is not None:
                output_eval_file = os.path.join(training_args.output_dir, 'all_results.json')
                with open(output_eval_file, 'w') as writer:
                    writer.write(json.dumps(metrics))
        # --- Prediction ------------------------------------------------------
        if training_args.do_predict:
            logger.info('*** Predict ***')
            test_predictions = model.predict(predict_dataset)
            if isinstance(test_predictions.start_logits, tf.RaggedTensor):
                test_start_logits = test_predictions.start_logits.to_tensor(default_value=-1000).numpy()
                test_end_logits = test_predictions.end_logits.to_tensor(default_value=-1000).numpy()
            else:
                test_start_logits = test_predictions.start_logits
                test_end_logits = test_predictions.end_logits
            post_processed_test = post_processing_function(datasets['test'], processed_datasets['test'], (test_start_logits, test_end_logits))
            metrics = compute_metrics(post_processed_test)
            logger.info('Test metrics:')
            for key, value in metrics.items():
                logger.info(f'{key}: {value:.3f}')
        if training_args.output_dir is not None and not training_args.push_to_hub:
            model.save_pretrained(training_args.output_dir)
class SelectPresetWidget(QtWidgets.QWidget, Ui_SelectPresetWidget):
    """Widget that lets the user browse, edit and manage generation presets.

    Emits ``CanGenerate`` whenever the selected preset's validity changes.
    """

    CanGenerate = QtCore.Signal(bool)

    for_multiworld: bool = False
    _logic_settings_window: CustomizePresetDialog | None = None
    _preset_history: PresetHistoryDialog | None = None
    _has_set_from_last_selected: bool = False
    _preset_menu: PresetMenu
    _window_manager: WindowManager
    _options: Options
    _game: RandovaniaGame
    _can_generate: bool = False

    def __init__(self, parent: QtWidgets.QWidget | None = None):
        super().__init__(parent)
        self.setupUi(self)

    def setup_ui(self, game: RandovaniaGame, window_manager: WindowManager, options: Options):
        """Wire the preset tree, context menu and signals for the given game."""
        self._window_manager = window_manager
        self._options = options
        self._game = game
        self.create_preset_tree.game = game
        self.create_preset_tree.preset_manager = self._window_manager.preset_manager
        self.create_preset_tree.options = self._options
        self._preset_menu = PresetMenu(self)
        self.create_preset_tree.itemSelectionChanged.connect(self._on_select_preset)
        self.create_preset_tree.customContextMenuRequested.connect(self._on_tree_context_menu)
        self._preset_menu.action_customize.triggered.connect(self._on_customize_preset)
        self._preset_menu.action_delete.triggered.connect(self._on_delete_preset)
        self._preset_menu.action_history.triggered.connect(self._on_view_preset_history)
        self._preset_menu.action_export.triggered.connect(self._on_export_preset)
        self._preset_menu.action_duplicate.triggered.connect(self._on_duplicate_preset)
        self._preset_menu.action_map_tracker.triggered.connect(self._on_open_map_tracker_for_preset)
        self._preset_menu.action_required_tricks.triggered.connect(self._on_open_required_tricks_for_preset)
        self._preset_menu.action_import.triggered.connect(self._on_import_preset)
        self._preset_menu.action_view_deleted.triggered.connect(self._on_view_deleted)
        self.create_preset_description.linkActivated.connect(self._on_click_create_preset_description)
        self._update_preset_tree_items()

    def _update_preset_tree_items(self):
        self.create_preset_tree.update_items()

    @property
    def _current_preset_data(self) -> VersionedPreset | None:
        # BUGFIX: restored as a property — the rest of the class accesses this
        # as an attribute (e.g. `self._current_preset_data.get_preset()`).
        return self.create_preset_tree.current_preset_data

    def _add_new_preset(self, preset: VersionedPreset, *, parent: uuid.UUID | None):
        """Register a preset, remember its parent/selection, and select it."""
        with self._options as options:
            options.set_parent_for_preset(preset.uuid, parent)
            options.set_selected_preset_uuid_for(self._game, preset.uuid)
        self._window_manager.preset_manager.add_new_preset(preset)
        self._update_preset_tree_items()
        self.create_preset_tree.select_preset(preset)

    async def _on_customize_preset(self):
        """Open the customization dialog; save the result as a new preset."""
        if self._logic_settings_window is not None:
            self._logic_settings_window.raise_()
            return
        old_preset = self._current_preset_data.get_preset()
        if self._current_preset_data.is_included_preset:
            # Built-in presets cannot be edited in place; fork them first.
            parent_uuid = old_preset.uuid
            old_preset = old_preset.fork()
        else:
            parent_uuid = self._options.get_parent_for_preset(old_preset.uuid)
        editor = PresetEditor(old_preset, self._options)
        self._logic_settings_window = CustomizePresetDialog(self._window_manager, editor)
        self._logic_settings_window.on_preset_changed(editor.create_custom_preset_with())
        editor.on_changed = (lambda: self._logic_settings_window.on_preset_changed(editor.create_custom_preset_with()))
        result = await async_dialog.execute_dialog(self._logic_settings_window)
        self._logic_settings_window = None
        if result == QtWidgets.QDialog.DialogCode.Accepted:
            self._add_new_preset(VersionedPreset.with_preset(editor.create_custom_preset_with()), parent=parent_uuid)

    async def _on_delete_preset(self):
        """Delete the selected preset after user confirmation."""
        result = await async_dialog.warning(self, 'Delete preset?', f'Are you sure you want to delete preset {self._current_preset_data.name}?', buttons=(async_dialog.StandardButton.Yes | async_dialog.StandardButton.No), default_button=async_dialog.StandardButton.No)
        if result == async_dialog.StandardButton.Yes:
            self._window_manager.preset_manager.delete_preset(self._current_preset_data)
            self._update_preset_tree_items()
            self._on_select_preset()

    async def _on_view_preset_history(self):
        """Show the selected preset's history; optionally restore a version."""
        if self._preset_history is not None:
            return await async_dialog.warning(self, 'Dialog already open', 'Another preset history dialog is already open. Please close it first.')
        preset = self._current_preset_data
        assert preset is not None
        self._preset_history = PresetHistoryDialog(self._window_manager.preset_manager, preset)
        result = await async_dialog.execute_dialog(self._preset_history)
        new_preset = self._preset_history.selected_preset()
        self._preset_history = None
        if result == QtWidgets.QDialog.DialogCode.Accepted:
            assert new_preset is not None
            self._window_manager.preset_manager.add_new_preset(VersionedPreset.with_preset(new_preset))
            self._update_preset_tree_items()

    def _on_export_preset(self):
        """Save the selected preset to a user-chosen .rdvpreset file."""
        default_name = f'{self._current_preset_data.slug_name}.rdvpreset'
        path = common_qt_lib.prompt_user_for_preset_file(self._window_manager, new_file=True, name=default_name)
        if path is not None:
            self._current_preset_data.save_to_file(path)

    def _on_duplicate_preset(self):
        """Create a fork of the selected preset, parented to it."""
        old_preset = self._current_preset_data
        new_preset = VersionedPreset.with_preset(old_preset.get_preset().fork())
        self._add_new_preset(new_preset, parent=old_preset.uuid)

    async def _on_open_map_tracker_for_preset(self):
        await self._window_manager.open_map_tracker(self._current_preset_data.get_preset())

    def _on_open_required_tricks_for_preset(self):
        from randovania.gui.dialog.trick_usage_popup import TrickUsagePopup
        self._trick_usage_popup = TrickUsagePopup(self, self._window_manager, self._current_preset_data.get_preset())
        self._trick_usage_popup.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
        self._trick_usage_popup.open()

    def _on_import_preset(self):
        path = common_qt_lib.prompt_user_for_preset_file(self._window_manager, new_file=False)
        if path is not None:
            self.import_preset_file(path)

    def _on_view_deleted(self):
        raise RuntimeError('Feature not implemented')

    def import_preset_file(self, path: Path):
        """Import a preset file, resolving UUID conflicts with the user."""
        try:
            preset = VersionedPreset.from_file_sync(path)
            preset.get_preset()
        except (InvalidPreset, json.JSONDecodeError):
            QtWidgets.QMessageBox.critical(self._window_manager, 'Error loading preset', f"The file at '{path}' contains an invalid preset.")
            return
        existing_preset = self._window_manager.preset_manager.preset_for_uuid(preset.uuid)
        if existing_preset is not None:
            user_response = QtWidgets.QMessageBox.warning(self._window_manager, 'Preset ID conflict', "The new preset '{}' has the same ID as existing '{}'. Do you want to overwrite it?".format(preset.name, existing_preset.name), ((async_dialog.StandardButton.Yes | async_dialog.StandardButton.No) | async_dialog.StandardButton.Cancel), async_dialog.StandardButton.Cancel)
            if user_response == async_dialog.StandardButton.Cancel:
                return
            elif user_response == async_dialog.StandardButton.No:
                # Keep both: give the imported preset a fresh UUID.
                preset = VersionedPreset.with_preset(dataclasses.replace(preset.get_preset(), uuid=uuid.uuid4()))
        self._add_new_preset(preset, parent=None)

    def _on_select_preset(self):
        preset_data = self._current_preset_data
        self.on_preset_changed(preset_data)
        if preset_data is not None:
            with self._options as options:
                options.set_selected_preset_uuid_for(self._game, preset_data.uuid)

    def _on_tree_context_menu(self, pos: QtCore.QPoint):
        item: QtWidgets.QTreeWidgetItem = self.create_preset_tree.itemAt(pos)
        preset = None
        if item is not None:
            preset = self.create_preset_tree.preset_for_item(item)
        self._preset_menu.set_preset(preset)
        self._preset_menu.exec(QtGui.QCursor.pos())

    def preset(self) -> VersionedPreset | None:
        """Return the currently selected preset, if any."""
        return self._current_preset_data

    def on_options_changed(self, options: Options):
        """On first call, restore the last-selected preset for this game."""
        if not self._has_set_from_last_selected:
            self._has_set_from_last_selected = True
            preset_manager = self._window_manager.preset_manager
            preset = preset_manager.preset_for_uuid(options.selected_preset_uuid_for(self._game))
            if preset is None:
                preset = preset_manager.default_preset_for_game(self._game)
            self.create_preset_tree.select_preset(preset)

    def on_preset_changed(self, preset: VersionedPreset | None):
        """Update the description pane and the CanGenerate state."""
        can_generate = False
        if preset is None:
            description = 'Please select a preset from the list.'
        else:
            try:
                raw_preset = preset.get_preset()
                incompatible = (self.for_multiworld and raw_preset.settings_incompatible_with_multiworld())
                if incompatible:
                    description = ('The following settings are incompatible with multiworld:\n' + '\n'.join(incompatible))
                else:
                    can_generate = True
                    formatted_description = markdown.markdown(raw_preset.description)
                    description = f"<p style='font-weight:600;'>{raw_preset.name}</p><p>{formatted_description}</p>"
                    description += preset_describer.merge_categories(preset_describer.describe(raw_preset))
            except InvalidPreset as e:
                if isinstance(e.original_exception, UnsupportedVersion):
                    exception_desc = f'<p>{e.original_exception}</p>'
                else:
                    logging.warning(f'Invalid preset for {preset.name}')
                    exception_desc = '<pre>{}</pre>'.format('\n'.join(traceback.format_exception(e.original_exception)))
                description = f"<p>Preset {preset.name} can't be used as it contains errors.</p><p>Please edit the file named <a href='open-preset://{preset.uuid}'>{preset.uuid}</a> manually or delete this preset.</p>{exception_desc}"
        self.create_preset_description.setText(description)
        self._can_generate = can_generate
        self.CanGenerate.emit(can_generate)

    def _on_click_create_preset_description(self, link: str):
        """Handle open-preset:// links by opening the preset data directory."""
        info = re.match('^open-preset://([0-9a-f\\-]{36})$', link)
        if info is None:
            return
        path = self._window_manager.preset_manager.data_dir
        if path is None:
            return
        common_qt_lib.open_directory_in_explorer(path, common_qt_lib.FallbackDialog('Preset', f'The selected preset can be found at:\n{path}', self))

    def change_game(self, game: RandovaniaGame):
        """Switch the widget to a different game and reset the selection."""
        self._game = game
        self.create_preset_tree.game = game
        self.create_preset_tree.update_items()
        self.on_preset_changed(None)
class SMU():
def __init__(self, parent, channel, smu_type, name, **kwargs):
self._b1500 = weakref.proxy(parent)
channel = strict_discrete_set(channel, range(1, 11))
self.channel = channel
smu_type = strict_discrete_set(smu_type, ['HRSMU', 'MPSMU', 'HPSMU', 'MCSMU', 'HCSMU', 'DHCSMU', 'HVSMU', 'UHCU', 'HVMCU', 'UHVU'])
self.voltage_ranging = SMUVoltageRanging(smu_type)
self.current_ranging = SMUCurrentRanging(smu_type)
self.name = name
def write(self, string):
self._b1500.write(string)
def ask(self, string):
return self._b1500.ask(string)
def query_learn(self, query_type, command):
response = self._b1500.query_learn(query_type)
response = response[(command + str(self.channel))]
return response
def check_errors(self):
return self._b1500.check_errors()
def _query_status_raw(self):
return self._b1500.query_learn(str(self.channel))
def status(self):
return self._b1500.query_learn_header(str(self.channel))
def enable(self):
self.write(('CN %d' % self.channel))
def disable(self):
self.write(('CL %d' % self.channel))
def force_gnd(self):
self.write(('DZ %d' % self.channel))
def filter(self):
response = self._b1500.query_learn(30)
if ('FL' in response.keys()):
return False
elif (str(self.channel) in response['FL0']):
return False
elif (str(self.channel) in response['FL1']):
return True
else:
raise NotImplementedError('Filter Value cannot be read!')
def filter(self, setting):
setting = strict_discrete_set(int(setting), (0, 1))
self.write(('FL %d, %d' % (setting, self.channel)))
self.check_errors()
def series_resistor(self):
response = self.query_learn(53, 'SSR')
response = bool(int(response))
return response
_resistor.setter
def series_resistor(self, setting):
setting = strict_discrete_set(int(setting), (0, 1))
self.write(('SSR %d, %d' % (self.channel, setting)))
self.check_errors()
def meas_op_mode(self):
response = self.query_learn(46, 'CMM')
response = int(response)
return MeasOpMode(response)
_op_mode.setter
def meas_op_mode(self, op_mode):
op_mode = MeasOpMode.get(op_mode)
self.write(('CMM %d, %d' % (self.channel, op_mode.value)))
self.check_errors()
def adc_type(self):
response = self.query_learn(55, 'AAD')
response = int(response)
return ADCType(response)
_type.setter
def adc_type(self, adc_type):
adc_type = ADCType.get(adc_type)
self.write(('AAD %d, %d' % (self.channel, adc_type.value)))
self.check_errors()
def force(self, source_type, source_range, output, comp='', comp_polarity='', comp_range=''):
if (source_type.upper() == 'VOLTAGE'):
cmd = 'DV'
source_range = self.voltage_ranging.output(source_range).index
if (not (comp_range == '')):
comp_range = self.current_ranging.meas(comp_range).index
elif (source_type.upper() == 'CURRENT'):
cmd = 'DI'
source_range = self.current_ranging.output(source_range).index
if (not (comp_range == '')):
comp_range = self.voltage_ranging.meas(comp_range).index
else:
raise ValueError('Source Type must be Current or Voltage.')
cmd += (' %d, %d, %g' % (self.channel, source_range, output))
if (not (comp == '')):
cmd += (', %g' % comp)
if (not (comp_polarity == '')):
comp_polarity = CompliancePolarity.get(comp_polarity).value
cmd += (', %d' % comp_polarity)
if (not (comp_range == '')):
cmd += (', %d' % comp_range)
self.write(cmd)
self.check_errors()
def ramp_source(self, source_type, source_range, target_output, comp='', comp_polarity='', comp_range='', stepsize=0.001, pause=0.02):
if (source_type.upper() == 'VOLTAGE'):
source_type = 'VOLTAGE'
cmd = ('DV%d' % self.channel)
source_range = self.voltage_ranging.output(source_range).index
unit = 'V'
if (not (comp_range == '')):
comp_range = self.current_ranging.meas(comp_range).index
elif (source_type.upper() == 'CURRENT'):
source_type = 'CURRENT'
cmd = ('DI%d' % self.channel)
source_range = self.current_ranging.output(source_range).index
unit = 'A'
if (not (comp_range == '')):
comp_range = self.voltage_ranging.meas(comp_range).index
else:
raise ValueError('Source Type must be Current or Voltage.')
status = self._query_status_raw()
if ('CL' in status):
start = 0
elif (cmd in status):
start = float(status[cmd][1])
else:
log.info('{} in different state. Changing to {} Source.'.format(self.name, source_type))
start = 0
nop = np.ceil(abs(((target_output - start) / stepsize)))
nop = int(nop)
log.info('{0} ramping from {1}{2} to {3}{2} in {4} steps'.format(self.name, start, unit, target_output, nop))
outputs = np.linspace(start, target_output, nop, endpoint=False)
for output in outputs:
self.force(source_type, source_range, output, comp, comp_polarity, comp_range)
time.sleep(pause)
self.force(source_type, source_range, target_output, comp, comp_polarity, comp_range)
def meas_range_current(self):
response = self.query_learn(32, 'RI')
response = self.current_ranging.meas(response)
return response
_range_current.setter
def meas_range_current(self, meas_range):
meas_range_index = self.current_ranging.meas(meas_range).index
self.write(('RI %d, %d' % (self.channel, meas_range_index)))
self.check_errors()
def meas_range_voltage(self):
response = self.query_learn(32, 'RV')
response = self.voltage_ranging.meas(response)
return response
_range_voltage.setter
def meas_range_voltage(self, meas_range):
meas_range_index = self.voltage_ranging.meas(meas_range).index
self.write(('RV %d, %d' % (self.channel, meas_range_index)))
self.check_errors()
def meas_range_current_auto(self, mode, rate=50):
mode = strict_range(mode, range(1, 4))
if (mode == 1):
self.write(('RM %d, %d' % (self.channel, mode)))
else:
self.write(('RM %d, %d, %d' % (self.channel, mode, rate)))
self.write
def staircase_sweep_source(self, source_type, mode, source_range, start, stop, steps, comp, Pcomp=''):
if (source_type.upper() == 'VOLTAGE'):
cmd = 'WV'
source_range = self.voltage_ranging.output(source_range).index
elif (source_type.upper() == 'CURRENT'):
cmd = 'WI'
source_range = self.current_ranging.output(source_range).index
else:
raise ValueError('Source Type must be Current or Voltage.')
mode = SweepMode.get(mode).value
if (mode in [2, 4]):
if ((start >= 0) and (stop >= 0)):
pass
elif ((start <= 0) and (stop <= 0)):
pass
else:
raise ValueError('For Log Sweep Start and Stop Values must have the same polarity.')
steps = strict_range(steps, range(1, 10002))
cmd += ('%d, %d, %d, %g, %g, %g, %g' % (self.channel, mode, source_range, start, stop, steps, comp))
if (not (Pcomp == '')):
cmd += (', %g' % Pcomp)
self.write(cmd)
self.check_errors()
def synchronous_sweep_source(self, source_type, source_range, start, stop, comp, Pcomp=''):
    """Configure the synchronous sweep source (``WSV``/``WSI`` command).

    :param source_type: 'Voltage' or 'Current' (case-insensitive)
    :param source_range: output range, mapped to the instrument range index
    :param start, stop: sweep endpoints
    :param comp: compliance value
    :param Pcomp: optional power compliance, appended only when given
    :raises ValueError: if source_type is neither voltage nor current
    """
    kind = source_type.upper()
    if kind == 'VOLTAGE':
        header = 'WSV'
        range_index = self.voltage_ranging.output(source_range).index
    elif kind == 'CURRENT':
        header = 'WSI'
        range_index = self.current_ranging.output(source_range).index
    else:
        raise ValueError('Source Type must be Current or Voltage.')
    command = header + ('%d, %d, %g, %g, %g' % (self.channel, range_index, start, stop, comp))
    if Pcomp != '':
        command += ', %g' % Pcomp
    self.write(command)
    self.check_errors()
def sampling_source(self, source_type, source_range, base, bias, comp):
    """Configure the sampling-measurement source (``MV``/``MI`` command).

    :param source_type: 'Voltage' or 'Current' (case-insensitive)
    :param source_range: output range, mapped to the instrument range index
    :param base: base output value
    :param bias: bias output value
    :param comp: compliance value
    :raises ValueError: if source_type is neither voltage nor current
    """
    kind = source_type.upper()
    if kind == 'VOLTAGE':
        header = 'MV'
        range_index = self.voltage_ranging.output(source_range).index
    elif kind == 'CURRENT':
        header = 'MI'
        range_index = self.current_ranging.output(source_range).index
    else:
        raise ValueError('Source Type must be Current or Voltage.')
    command = header + ('%d, %d, %g, %g, %g' % (self.channel, range_index, base, bias, comp))
    self.write(command)
    self.check_errors()
def test_override():
    """qcore.override restores the previous value on exit -- including when the
    managed block raises -- and supports nesting."""

    class Holder():
        def __init__(self):
            self.v = None

    obj = Holder()
    obj.v = 'a'
    with qcore.override(obj, 'v', 'b'):
        assert_eq(obj.v, 'b')
        try:
            with qcore.override(obj, 'v', 'c'):
                assert_eq(obj.v, 'c')
                raise NotImplementedError()
        except NotImplementedError:
            pass
        # The inner override restored the outer value even though it raised.
        assert_eq(obj.v, 'b')
    # Leaving the outer override restores the original value.
    assert_eq(obj.v, 'a')
class PReNetTS(BaseDeepAD):
    """PReNet for time-series anomaly detection.

    A dual-input network scores pairs of sub-sequences; at inference each
    test sample is paired with random known-anomaly and unlabeled training
    samples and the pair scores are averaged into an anomaly score.
    """

    def __init__(self, epochs=100, batch_size=64, lr=0.001, network='Transformer', seq_len=30, stride=1, rep_dim=128, hidden_dims='512', act='GELU', bias=False, n_heads=8, d_model=512, attn='self_attn', pos_encoding='fixed', norm='BatchNorm', epoch_steps=(- 1), prt_steps=10, device='cuda', verbose=2, random_state=42):
        """Store network hyper-parameters; the network itself is built later
        in training_prepare()."""
        super(PReNetTS, self).__init__(model_name='PReNet', data_type='ts', epochs=epochs, batch_size=batch_size, lr=lr, network=network, seq_len=seq_len, stride=stride, epoch_steps=epoch_steps, prt_steps=prt_steps, device=device, verbose=verbose, random_state=random_state)
        # Hyper-parameters consumed by training_prepare() when building the net.
        self.hidden_dims = hidden_dims
        self.rep_dim = rep_dim
        self.act = act
        # NOTE(review): self.bias is stored but training_prepare() builds the
        # network with bias=False regardless -- confirm which is intended.
        self.bias = bias
        self.n_heads = n_heads
        self.d_model = d_model
        self.pos_encoding = pos_encoding
        self.norm = norm
        self.attn = attn
        return

    def training_prepare(self, X, y):
        """Build the pair loader, the dual-input network and the L1 criterion."""
        train_loader = PReNetLoader(X, y, batch_size=self.batch_size)
        net = DualInputNet(self.network, self.n_features, hidden_dims=self.hidden_dims, rep_dim=self.rep_dim, activation=self.act, n_heads=self.n_heads, d_model=self.d_model, attn=self.attn, pos_encoding=self.pos_encoding, norm=self.norm, seq_len=self.seq_len, bias=False).to(self.device)
        criterion = torch.nn.L1Loss(reduction='mean')
        if (self.verbose >= 2):
            print(net)
        return (train_loader, net, criterion)

    def inference_prepare(self, X):
        """Pre-build inference batches: each test sample is paired with ``a``
        randomly drawn known-anomaly and ``a`` unlabeled training samples."""
        y = self.train_label
        unlabeled_id = np.where((y == 0))[0]
        known_anom_id = np.where((y == 1))[0]
        # Use fewer pairings for larger test sets to bound inference cost.
        if (X.shape[0] > 100000):
            a = 10
        elif (X.shape[0] > 50000):
            a = 20
        else:
            a = 30
        X = torch.from_numpy(X)
        train_data = torch.from_numpy(self.train_data)
        # ``a`` support sets of anomaly pairs and ``a`` of unlabeled pairs,
        # each aligned index-wise with the test samples.
        x2_a_lst = []
        x2_u_lst = []
        for i in range(a):
            a_idx = np.random.choice(known_anom_id, X.shape[0], replace=True)
            u_idx = np.random.choice(unlabeled_id, X.shape[0], replace=True)
            x2_a = train_data[a_idx]
            x2_u = train_data[u_idx]
            x2_a_lst.append(x2_a)
            x2_u_lst.append(x2_u)
        # Manually batch (instead of a DataLoader) so each batch carries its
        # matching slices of every support set.
        test_loader = []
        n_batches = int(np.ceil((len(X) / self.batch_size)))
        for i in range(n_batches):
            left = (i * self.batch_size)
            right = min(((i + 1) * self.batch_size), len(X))
            batch_x1 = X[left:right]
            batch_x_sup1 = [x2[left:right] for x2 in x2_a_lst]
            batch_x_sup2 = [x2[left:right] for x2 in x2_u_lst]
            test_loader.append([batch_x1, batch_x_sup1, batch_x_sup2])
        # Per-sample losses are required at inference time.
        self.criterion.reduction = 'none'
        return test_loader

    def training_forward(self, batch_x, net, criterion):
        """One training step: predict the relation score of a pair and return
        the L1 loss against the pair label."""
        (batch_x1, batch_x2, batch_y) = batch_x
        batch_x1 = batch_x1.float().to(self.device)
        batch_x2 = batch_x2.float().to(self.device)
        batch_y = batch_y.float().to(self.device)
        pred = net(batch_x1, batch_x2).flatten()
        loss = criterion(pred, batch_y)
        return loss

    def inference_forward(self, batch_x, net, criterion):
        """Score one pre-built batch: average pair predictions over all
        anomaly and unlabeled pairings; returns (inputs, scores)."""
        (batch_x1, batch_x_sup1_lst, batch_x_sup2_lst) = batch_x
        batch_x1 = batch_x1.float().to(self.device)
        pred_s = []
        for batch_x2 in batch_x_sup1_lst:
            batch_x2 = batch_x2.float().to(self.device)
            pred = net(batch_x1, batch_x2).flatten()
            pred_s.append(pred)
        for batch_x2 in batch_x_sup2_lst:
            batch_x2 = batch_x2.float().to(self.device)
            pred = net(batch_x1, batch_x2).flatten()
            pred_s.append(pred)
        pred_s = torch.stack(pred_s)
        s = torch.mean(pred_s, dim=0)
        # No separate representation is exposed; return the input as "z".
        batch_z = batch_x1
        return (batch_z, s)
def get_config(name):
    """Return the dataset-specific configuration for *name*.

    :param name: dataset name (case-insensitive); only 'ARID' is supported.
    :return: configuration dict, e.g. ``{'num_classes': 11}``.
    :raises NotImplementedError: if the dataset name is unknown.
    """
    config = {}
    if (name.upper() == 'ARID'):
        config['num_classes'] = 11
    else:
        # Fixed the doubled quote in the original message ("'{}''").
        logging.error("Configs for dataset '{}' not found".format(name))
        # ``raise NotImplemented`` raised a TypeError (NotImplemented is a
        # sentinel, not an exception); raise the proper exception class.
        raise NotImplementedError("Configs for dataset '{}' not found".format(name))
    logging.debug("Target dataset: '{}', configs: {}".format(name.upper(), config))
    return config
class Conv2dSubSampler(LayerSubSampler):
    """Sub-sampler for Conv2d layers used during layer-output reconstruction."""

    def verify_layers(self, orig_layer: torch.nn.Module, pruned_layer: torch.nn.Module):
        """Check both layers are Conv2d with the default (1, 1) dilation."""
        assert isinstance(orig_layer, torch.nn.Conv2d)
        assert isinstance(pruned_layer, torch.nn.Conv2d)
        assert orig_layer.dilation == (1, 1), 'No Conv2d layers supported for dilation other than (1, 1)'
        assert pruned_layer.dilation == (1, 1), 'No Conv2d layers supported for dilation other than (1, 1)'

    def get_number_of_batches(self, data_loader: Iterator, orig_layer: torch.nn.Module, num_reconstruction_samples: int, samples_per_image: int) -> int:
        """Number of loader batches needed to collect the requested samples."""
        images_needed = int(num_reconstruction_samples / samples_per_image)
        return math.ceil(images_needed / data_loader.batch_size)

    def get_sub_sampled_data(self, orig_layer: torch.nn.Module, input_data: np.ndarray, output_data: np.ndarray, samples_per_image: int) -> Tuple[(np.ndarray, np.ndarray)]:
        """Delegate spatial sub-sampling to InputMatchSearch using the conv
        geometry (kernel size, stride, padding)."""
        conv_attrs = (orig_layer.kernel_size, orig_layer.stride, orig_layer.padding)
        return InputMatchSearch.subsample_data(conv_attrs, input_data, output_data, samples_per_image)
class Commit():
    """Value object describing one commit (hash, category, topic, title).

    Equality and hashing consider all four fields, so commits can be
    deduplicated in sets and used as dict keys.
    """

    def __init__(self, commit_hash, category, topic, title):
        self.commit_hash = commit_hash
        self.category = category
        self.topic = topic
        self.title = title

    def _key(self):
        # Single source of truth for the fields used by __eq__/__hash__.
        return (self.commit_hash, self.category, self.topic, self.title)

    def __eq__(self, other):
        if (not isinstance(other, self.__class__)):
            # Let Python fall back to the other operand's __eq__ / identity
            # instead of unconditionally claiming inequality.
            return NotImplemented
        return self._key() == other._key()

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None (unhashable);
        # keep the two consistent.
        return hash(self._key())

    def __repr__(self):
        return f'Commit({self.commit_hash}, {self.category}, {self.topic}, {self.title})'
def get_internal_env_config() -> dict[(str, Any)]:
    """Build the configuration mapping for Hatch's internal environments.

    Each internal environment uses its own name as its template and is
    validated before being added to the returned mapping.
    """
    from hatch.env.internal import build, static_analysis

    defaults = (
        ('hatch-build', build.get_default_config()),
        ('hatch-static-analysis', static_analysis.get_default_config()),
    )
    configs: dict[(str, Any)] = {}
    for name, cfg in defaults:
        cfg['template'] = name
        ensure_valid_environment(cfg)
        configs[name] = cfg
    return configs
def unique_type_in(l, tpe=None):
    """Return the single type shared by all leaf values of a (possibly nested)
    collection.

    Returns the common type, ``None`` if the input is None/empty (or all
    leaves are None), or ``False`` as soon as two leaves disagree in type.
    ``tpe`` threads the type found so far through the recursion.
    """
    if not isinstance(l, (list, tuple, set, frozenset)):
        # Leaf value.
        if l is None:
            return None
        if tpe is None:
            return type(l)
        return tpe if isinstance(l, tpe) else False
    if len(l) == 0:
        return None
    for item in l:
        found = unique_type_in(item, tpe)
        if found is False:
            return False
        if tpe is None:
            tpe = found
    return tpe
class TestDataDownload():
    """Tests for the aux-data download/registration helpers.

    NOTE(review): the bare ``(autouse=True)`` and ``.parametrize(...)`` lines
    in the original were mangled pytest decorators (their ``@pytest.fixture`` /
    ``@pytest.mark`` prefixes had been stripped, leaving syntax errors); they
    are restored here.
    """

    @pytest.fixture(autouse=True)
    def _setup_custom_configs(self, tmpdir):
        # Write fake composite/reader/writer configs into a temp config dir.
        _setup_custom_composite_config(tmpdir)
        _setup_custom_reader_config(tmpdir)
        _setup_custom_writer_config(tmpdir)
        self.tmpdir = tmpdir

    @pytest.mark.parametrize('comp_sensors', [tuple(), None, ('visir',)])
    @pytest.mark.parametrize('writers', [[], None, ['fake']])
    @pytest.mark.parametrize('readers', [[], None, ['fake']])
    def test_find_registerable(self, readers, writers, comp_sensors):
        """Explicit component lists limit which files get registered."""
        import satpy
        from satpy.aux_download import find_registerable_files
        with satpy.config.set(config_path=[self.tmpdir]), mock.patch('satpy.aux_download._FILE_REGISTRY', {}):
            found_files = find_registerable_files(readers=readers, writers=writers, composite_sensors=comp_sensors)
            _assert_reader_files_downloaded(readers, found_files)
            _assert_writer_files_downloaded(writers, found_files)
            _assert_comp_files_downloaded(comp_sensors, found_files)
            _assert_mod_files_downloaded(comp_sensors, found_files)

    def test_limited_find_registerable(self):
        """Empty component lists register nothing."""
        import satpy
        from satpy.aux_download import find_registerable_files
        file_registry = {}
        with satpy.config.set(config_path=[self.tmpdir]), mock.patch('satpy.aux_download._FILE_REGISTRY', file_registry):
            found_files = find_registerable_files(readers=[], writers=[], composite_sensors=[])
            assert (not found_files)

    def test_retrieve(self):
        """retrieve() downloads a registered file into the data dir."""
        import satpy
        from satpy.aux_download import find_registerable_files, retrieve
        file_registry = {}
        with satpy.config.set(config_path=[self.tmpdir], data_dir=str(self.tmpdir)), mock.patch('satpy.aux_download._FILE_REGISTRY', file_registry):
            comp_file = 'composites/README.rst'
            found_files = find_registerable_files()
            assert (comp_file in found_files)
            assert (not self.tmpdir.join(comp_file).exists())
            retrieve(comp_file)
            assert self.tmpdir.join(comp_file).exists()

    def test_offline_retrieve(self):
        """With download_aux=False, retrieve() only returns cached files."""
        import satpy
        from satpy.aux_download import find_registerable_files, retrieve
        file_registry = {}
        with satpy.config.set(config_path=[self.tmpdir], data_dir=str(self.tmpdir), download_aux=True), mock.patch('satpy.aux_download._FILE_REGISTRY', file_registry):
            comp_file = 'composites/README.rst'
            found_files = find_registerable_files()
            assert (comp_file in found_files)
            assert (not self.tmpdir.join(comp_file).exists())
            # Not cached yet, downloads disabled -> error.
            with satpy.config.set(download_aux=False):
                pytest.raises(RuntimeError, retrieve, comp_file)
            retrieve(comp_file)
            assert self.tmpdir.join(comp_file).exists()
            # Cached now, so offline retrieval succeeds.
            with satpy.config.set(download_aux=False):
                local_file = retrieve(comp_file)
                assert local_file

    def test_offline_retrieve_all(self):
        """retrieve_all() refuses to run with downloads disabled."""
        import satpy
        from satpy.aux_download import retrieve_all
        with satpy.config.set(config_path=[self.tmpdir], data_dir=str(self.tmpdir), download_aux=False):
            pytest.raises(RuntimeError, retrieve_all)

    def test_retrieve_all(self):
        """retrieve_all() downloads every file in the registry."""
        import satpy
        from satpy.aux_download import retrieve_all
        file_registry = {}
        file_urls = {}
        with satpy.config.set(config_path=[self.tmpdir], data_dir=str(self.tmpdir)), mock.patch('satpy.aux_download._FILE_REGISTRY', file_registry), mock.patch('satpy.aux_download._FILE_URLS', file_urls), mock.patch('satpy.aux_download.find_registerable_files'):
            comp_file = 'composites/README.rst'
            file_registry[comp_file] = None
            file_urls[comp_file] = README_URL
            assert (not self.tmpdir.join(comp_file).exists())
            retrieve_all()
            assert self.tmpdir.join(comp_file).exists()

    def test_no_downloads_in_tests(self):
        """Downloads are blocked inside the test suite; cached files still work."""
        import satpy
        from satpy.aux_download import register_file, retrieve
        file_registry = {}
        with satpy.config.set(config_path=[self.tmpdir], data_dir=str(self.tmpdir), download_aux=True), mock.patch('satpy.aux_download._FILE_REGISTRY', file_registry):
            cache_key = 'myfile.rst'
            register_file(README_URL, cache_key)
            assert (not self.tmpdir.join(cache_key).exists())
            pytest.raises(RuntimeError, retrieve, cache_key)
            # Pretend the file was previously downloaded.
            open(self.tmpdir.join(cache_key), 'w').close()
            with satpy.config.set(download_aux=False):
                retrieve(cache_key)

    def test_download_script(self):
        """The CLI entry point downloads all registered files to --data-dir."""
        import satpy
        from satpy.aux_download import retrieve_all_cmd
        file_registry = {}
        file_urls = {}
        with satpy.config.set(config_path=[self.tmpdir]), mock.patch('satpy.aux_download._FILE_REGISTRY', file_registry), mock.patch('satpy.aux_download._FILE_URLS', file_urls), mock.patch('satpy.aux_download.find_registerable_files'):
            comp_file = 'composites/README.rst'
            file_registry[comp_file] = None
            file_urls[comp_file] = README_URL
            assert (not self.tmpdir.join(comp_file).exists())
            retrieve_all_cmd(argv=['--data-dir', str(self.tmpdir)])
            assert self.tmpdir.join(comp_file).exists()
class Effect4458(BaseEffect):
    """Implant-set bonus: multiplies ``scanLadarStrengthPercent`` on all fitted
    implants requiring the Cybernetics skill by this implant's
    ``implantSetRepublicFleet`` attribute.

    NOTE(review): ``handler`` takes ``fit`` (not ``self``) as its first
    argument, so it appears to be called unbound by the effect framework --
    confirm against BaseEffect's calling convention.
    """
    runTime = 'early'
    type = 'passive'

    def handler(fit, implant, context, projectionRange, **kwargs):
        fit.appliedImplants.filteredItemMultiply((lambda target: target.item.requiresSkill('Cybernetics')), 'scanLadarStrengthPercent', implant.getModifiedItemAttr('implantSetRepublicFleet'), **kwargs)
def test_compose():
    """Compose: rejects non-callable configs, applies transforms in order,
    short-circuits to None when a transform returns None, and reprs its
    transform list."""
    # A bare string is not a valid transform spec.
    with pytest.raises(TypeError):
        Compose('LoadImageFromFile')
    target_keys = ['img', 'img_rename', 'img_metas']
    img = np.random.randn(256, 256, 3)
    results = dict(img=img, img_file='test_image.png')
    # The ('img', 'img_rename') key collects 'img' under both names.
    test_pipeline = [dict(type='Collect', keys=['img', ('img', 'img_rename')], meta_keys=['img_file'])]
    compose = Compose(test_pipeline)
    compose_results = compose(results)
    assert check_keys_equal(compose_results.keys(), target_keys)
    assert check_keys_equal(compose_results['img_metas'].data.keys(), ['img_file'])
    results = None

    class ExamplePipeline():
        def __call__(self, results):
            # Returning None must abort the whole pipeline.
            return None
    nonePipeline = ExamplePipeline()
    test_pipeline = [nonePipeline]
    compose = Compose(test_pipeline)
    compose_results = compose(results)
    assert (compose_results is None)
    assert (repr(compose) == (compose.__class__.__name__ + f'''(
    {nonePipeline}
)'''))
class LinOpWithoutGetParamNames(LinearOperator):
    """Test double: a matrix-backed LinearOperator that deliberately omits
    ``_get_param_names`` and implements nothing beyond the required ``_mv``."""

    def __init__(self, mat, is_hermitian=False):
        super(LinOpWithoutGetParamNames, self).__init__(shape=mat.shape, is_hermitian=is_hermitian, dtype=mat.dtype, device=mat.device)
        self.mat = mat
        # No optional methods are advertised.
        self.implemented_methods = []

    def _mv(self, x):
        # Treat the last axis of x as a column vector: (..., n) -> (..., m).
        column = x.unsqueeze(-1)
        return torch.matmul(self.mat, column).squeeze(-1)
def crop_image(img):
    """Center-crop a PIL-style image to a square of its shorter side.

    Returns the image unchanged when it is already square; otherwise crops
    equally from both sides (the extra pixel of an odd difference is removed
    from the right/bottom edge).
    """
    width, height = img.size
    if width == height:
        return img
    side = min(width, height)
    extra_w = width - side
    extra_h = height - side
    left = extra_w // 2
    top = extra_h // 2
    right = width - (extra_w // 2 + extra_w % 2)
    bottom = height - (extra_h // 2 + extra_h % 2)
    return img.crop((left, top, right, bottom))
def inference_small_config(x, c, trl_type, rank):
    """Build a small 3-scale ResNet with a selectable tensor-regression head.

    :param x: input image tensor
    :param c: mutable config dict shared with the conv/bn/stack helpers
    :param trl_type: head type -- 'cp' | 't' | 'tt' | 'tcl_gap'; anything else
        falls through to flatten + fully-connected
    :param rank: rank passed to the tensor-regression layer
    :return: logits tensor
    """
    c['bottleneck'] = False
    c['ksize'] = 3
    c['stride'] = 1
    # Scale 1: stem conv + 16-channel residual stack at stride 1.
    with tf.variable_scope('scale1'):
        c['conv_filters_out'] = 16
        c['block_filters_internal'] = 16
        c['stack_stride'] = 1
        x = conv(x, c)
        x = bn(x, c)
        x = activation(x)
        x = stack(x, c)
    # Scale 2: 32 channels, spatial downsampling by stride 2.
    with tf.variable_scope('scale2'):
        c['block_filters_internal'] = 32
        c['stack_stride'] = 2
        x = stack(x, c)
    # Scale 3: 64 channels, stride 2 again.
    with tf.variable_scope('scale3'):
        c['block_filters_internal'] = 64
        c['stack_stride'] = 2
        x = stack(x, c)
    if (trl_type == 'cp'):
        # CP-decomposed tensor regression layer.
        x = cprl(x, rank, NUM_CLASSES)
    elif (trl_type == 't'):
        # Tucker tensor regression layer.
        x = trl(x, rank, NUM_CLASSES)
    elif (trl_type == 'tt'):
        # Tensor-train tensor regression layer.
        x = ttrl(x, rank, NUM_CLASSES)
    elif (trl_type == 'tcl_gap'):
        # Tensor contraction layer: contract each activation mode down to
        # (8, 4, 2), then flatten and feed a fully-connected classifier.
        convshape = x.get_shape().as_list()[1:]
        weight_initializer = tf.contrib.layers.xavier_initializer()
        u1 = tf.get_variable('tcl_gap_{}'.format(1), shape=[8, convshape[0]], initializer=weight_initializer)
        u2 = tf.get_variable('tcl_gap_{}'.format(2), shape=[4, convshape[1]], initializer=weight_initializer)
        u3 = tf.get_variable('tcl_gap_{}'.format(3), shape=[2, convshape[2]], initializer=weight_initializer)
        x = mode_dot(x, u1, 1)
        x = mode_dot(x, u2, 2)
        x = mode_dot(x, u3, 3)
        # NOTE(review): uses FLAGS.test_size as the batch dimension -- this
        # graph is presumably built for evaluation only; confirm.
        x = tf.reshape(x, [tf.app.flags.FLAGS.test_size, (- 1)])
        if (c['num_classes'] != None):
            with tf.variable_scope('fc'):
                x = fc(x, c)
    else:
        # Default head: flatten the 8x8x64 feature map and classify.
        x = tf.reshape(x, [(- 1), ((8 * 8) * 64)])
        if (c['num_classes'] != None):
            with tf.variable_scope('fc'):
                x = fc(x, c)
    return x
# NOTE(review): this bare call looks like a mangled class decorator (e.g.
# ``@DATASETS.register_module()``) whose prefix was lost -- confirm against
# the original source before relying on registry registration.
_module()
class CocoCaptionOVDDataset(CocoDataset):
    """COCO dataset variant for open-vocabulary detection driven by captions:
    each image yields its captions, positive-category tags and one full-image
    pseudo box."""

    def prepare_data(self, idx):
        """Run the pipeline only for images that actually have captions."""
        data_info = self.get_data_info(idx)
        if data_info['has_caption']:
            return self.pipeline(data_info)
        else:
            return None

    def parse_data_info(self, raw_data_info: dict):
        """Convert one raw COCO image record into the pipeline's dict format."""
        img_info = raw_data_info['raw_img_info']
        data_info = {}
        img_path = osp.join(self.data_prefix['img'], img_info['file_name'])
        # No segmentation maps are used for this dataset.
        seg_map_path = None
        data_info['img_path'] = img_path
        data_info['img_id'] = img_info['img_id']
        data_info['seg_map_path'] = seg_map_path
        data_info['height'] = img_info['height']
        data_info['width'] = img_info['width']
        # Wrapped in a list: downstream expects per-image lists.
        data_info['captions'] = [img_info.get('captions', [])]
        pos_cat_ids = img_info.get('pos_category_ids', [])
        tags = [self.cat2label[cat_id] for cat_id in pos_cat_ids]
        data_info['tags'] = [tags]
        data_info['image_ids'] = [img_info['img_id']]
        has_caption = (len(img_info.get('captions', [])) > 0)
        data_info['has_caption'] = has_caption
        # Single pseudo instance covering the whole image, labeled class 0.
        instance = {}
        bbox = [0.0, 0.0, img_info['width'], img_info['height']]
        instance['ignore_flag'] = 0
        instance['bbox'] = bbox
        instance['bbox_label'] = 0
        data_info['instances'] = [instance]
        return data_info
def read_tables(config, c=None):
    """Load the tables this query needs with only their relevant columns.

    When a SQL context ``c`` is given, all four tables (including
    web_clickstreams) are registered with it; only (date_dim, web_page,
    web_sales) are returned.
    """
    table_reader = build_reader(data_format=config['file_format'], basepath=config['data_dir'], split_row_groups=config['split_row_groups'], backend=config['backend'])
    relevant_columns = {
        'date_dim': ['d_date_sk', 'd_date'],
        'web_page': ['wp_web_page_sk', 'wp_type'],
        'web_sales': ['ws_net_paid', 'ws_order_number', 'ws_sold_date_sk'],
        'web_clickstreams': ['wcs_user_sk', 'wcs_sales_sk', 'wcs_click_date_sk', 'wcs_click_time_sk', 'wcs_web_page_sk'],
    }
    date_dim_df = table_reader.read('date_dim', relevant_cols=relevant_columns['date_dim'])
    web_page_df = table_reader.read('web_page', relevant_cols=relevant_columns['web_page'])
    web_sales_df = table_reader.read('web_sales', relevant_cols=relevant_columns['web_sales'])
    wcs_df = table_reader.read('web_clickstreams', relevant_cols=relevant_columns['web_clickstreams'])
    if c:
        for table_name, frame in (('web_clickstreams', wcs_df), ('web_sales', web_sales_df), ('web_page', web_page_df), ('date_dim', date_dim_df)):
            c.create_table(table_name, frame, persist=False)
    return (date_dim_df, web_page_df, web_sales_df)
class ScriptError(Exception):
    """Error raised for NSAppleScript failures, wrapping its error-info dict.

    ``message`` and ``range`` are properties: ``__str__`` reads
    ``self.message`` / ``self.range`` without calling them, which only works
    when they are properties. The original defined them as plain methods (the
    ``@property`` decorators had been lost), so ``__str__`` concatenated a
    bound method -- restored here, consistent with the ``number`` and
    ``appname`` attributes built with ``property()``.
    """

    def __init__(self, errorinfo):
        # Copy so later mutation of the caller's dict cannot change the error.
        self._errorinfo = dict(errorinfo)

    def __repr__(self):
        return 'ScriptError({})'.format(self._errorinfo)

    @property
    def message(self):
        """str -- the error message, falling back to the brief message."""
        msg = self._errorinfo.get(NSAppleScriptErrorMessage)
        if (not msg):
            msg = self._errorinfo.get(NSAppleScriptErrorBriefMessage, 'Script Error')
        return msg

    number = property((lambda self: self._errorinfo.get(NSAppleScriptErrorNumber)), doc='int | None -- the error number, if given')
    appname = property((lambda self: self._errorinfo.get(NSAppleScriptErrorAppName)), doc='str | None -- the name of the application that reported the error, where relevant')

    @property
    def range(self):
        """(int, int) | None -- the (start, end) source range of the error."""
        range = self._errorinfo.get(NSAppleScriptErrorRange)
        if range:
            start = range.rangeValue().location
            end = (start + range.rangeValue().length)
            return (start, end)
        else:
            return None

    def __str__(self):
        msg = self.message
        # Append whichever optional details are present.
        for (s, v) in [(' ({})', self.number), (' app={!r}', self.appname), (' range={0[0]}-{0[1]}', self.range)]:
            if (v is not None):
                msg += s.format(v)
        return (msg.encode('ascii', 'replace') if (sys.version_info.major < 3) else msg)
class TestMonochromeColor(unittest.TestCase):
    """monochrome_color maps an RGB triple to pure black or pure white."""

    def test_main_functionality(self):
        cases = [
            ((255, 255, 255), 'white'),
            ((254, 254, 254), 'white'),
            ((255, 112, 112), 'white'),
            ((0, 0, 0), 'black'),
            ((1, 1, 1), 'black'),
            ((30, 0, 0), 'black'),
        ]
        for rgb, expected in cases:
            self.assertEqual(monochrome_color(rgb), COLORS[expected])
def compute_weights(labels, classes, count, verbose=False):
    """Compute inverse-frequency class weights normalised to sum to 1.

    :param labels: class names, used only for verbose printing
    :param classes: per-class sample counts
    :param count: total sample count
    :param verbose: print a "name weight count" table when True
    :return: list of weights (0 for empty classes), aligned with ``classes``
    """
    if verbose:
        print('')
    # First pass: total of inverse frequencies over non-empty classes.
    inv_freq_total = 0
    for n_samples in classes:
        if (n_samples / count) > 0:
            inv_freq_total += count / n_samples
    # Second pass: normalise each inverse frequency by that total.
    norm_total = 0
    weights = []
    for idx in range(len(classes)):
        n_samples = classes[idx]
        if (n_samples / count) > 0:
            weight = (count / n_samples) / inv_freq_total
            norm_total += weight
            if verbose:
                print('{0:>20s} {1:>1.3f} {2:>6d}'.format(labels[idx], weight, int(n_samples)))
            weights.append(weight)
        else:
            if verbose:
                print('{0:>20s} {1:>1.3f} {2:>6d}'.format(labels[idx], 0.0, int(n_samples)))
            weights.append(0)
    if verbose:
        print('')
    return weights
def main(args):
    """Score the sentences in args.input with a restored checkpoint and write
    one score per line to args.output."""
    tf.logging.set_verbosity(tf.logging.INFO)
    model_cls = models.get_model(args.model)
    # Defaults < model defaults < checkpoint params < CLI overrides.
    params = default_parameters()
    params = merge_parameters(params, model_cls.get_parameters())
    params = import_params(args.checkpoint, args.model, params)
    override_parameters(params, args)
    with tf.Graph().as_default():
        model = model_cls(params)
        inputs = read_files(args.input)
        features = get_features(inputs, params)
        score_fn = model.get_evaluation_func()
        scores = score_fn(features, params)
        sess_creator = tf.train.ChiefSessionCreator(config=session_config(params))
        tf.logging.info(('Loading %s' % args.checkpoint))
        var_list = tf.train.list_variables(args.checkpoint)
        values = {}
        reader = tf.train.load_checkpoint(args.checkpoint)
        # Only restore variables belonging to this model's own scope.
        for (name, shape) in var_list:
            if (not name.startswith(model_cls.get_name())):
                continue
            tensor = reader.get_tensor(name)
            values[name] = tensor
        ops = set_variables(tf.trainable_variables(), values, model_cls.get_name())
        assign_op = tf.group(*ops)
        with tf.train.MonitoredSession(session_creator=sess_creator) as sess:
            # Assign the checkpoint weights, then drain the input pipeline.
            sess.run(assign_op)
            fd = tf.gfile.Open(args.output, 'w')
            while (not sess.should_stop()):
                results = sess.run(scores)
                for value in results:
                    fd.write(('%f\n' % value))
            fd.close()
class SettingsDialog(QtWidgets.QDialog):
    """Application settings dialog: a tab widget plus Close and
    'Restore Defaults' buttons. Shows itself on construction."""

    def __init__(self, parent):
        super().__init__(parent)
        self.setWindowTitle(f'{constants.APPNAME} Settings')
        # Miscellaneous tab with its settings widgets in a grid.
        tabs = QtWidgets.QTabWidget()
        misc = QtWidgets.QWidget()
        misc_layout = QtWidgets.QGridLayout()
        misc.setLayout(misc_layout)
        misc_layout.addWidget(ImageStorageFormatWidget(), 0, 0)
        misc_layout.addWidget(ArrangeGapWidget(), 0, 1)
        tabs.addTab(misc, '&Miscellaneous')
        layout = QtWidgets.QVBoxLayout()
        self.setLayout(layout)
        layout.addWidget(tabs)
        # Close button plus an extra action button to restore defaults.
        buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.StandardButton.Close)
        buttons.rejected.connect(self.reject)
        reset_btn = QtWidgets.QPushButton('&Restore Defaults')
        # Don't let Enter trigger the destructive reset by default.
        reset_btn.setAutoDefault(False)
        reset_btn.clicked.connect(self.on_restore_defaults)
        buttons.addButton(reset_btn, QtWidgets.QDialogButtonBox.ButtonRole.ActionRole)
        layout.addWidget(buttons)
        self.show()

    def on_restore_defaults(self, *args, **kwargs):
        """Ask for confirmation, then reset all persisted settings."""
        reply = QtWidgets.QMessageBox.question(self, 'Restore defaults?', 'Do you want to restore all settings to their default values?')
        if (reply == QtWidgets.QMessageBox.StandardButton.Yes):
            BeeSettings().restore_defaults()
def _compute_dloss_by_dmin_dmax_and_dx(inputs: tf.Tensor, encoding_min: tf.Variable, encoding_max: tf.Variable, op_mode: tf.Variable, bitwidth: tf.Variable, is_symmetric: tf.Variable, grad: tf.Tensor) -> Tuple:
    """Backward pass of a fake-quantize op.

    Returns (dloss_by_dmin, dloss_by_dmax, dloss_by_dx) given the incoming
    gradient ``grad``; when ``op_mode`` equals 3 the input gradient is passed
    through unchanged.
    """
    x = tf.cast(inputs, tf.float32)
    bitwidth = tf.cast(bitwidth, tf.float32)
    op_mode = tf.cast(op_mode, tf.int8)
    encoding_min = tf.cast(encoding_min, tf.float32)
    encoding_max = tf.cast(encoding_max, tf.float32)
    # Guard against a degenerate (max <= min) encoding range.
    epsilon = tf.constant(1e-05, dtype=tf.float32)
    encoding_max = tf.math.maximum(encoding_max, tf.add(encoding_min, epsilon))
    (n, p) = _get_n_and_p(bitwidth, is_symmetric)
    # Quantizer geometry: 2^bitwidth - 1 steps over [min, max].
    steps = tf.cast((tf.pow(tf.cast(tf.constant(2), tf.float32), bitwidth) - 1), tf.float32)
    scaling = tf.cast(((encoding_max - encoding_min) / steps), tf.float32)
    rounded_offset = tf.round(((- encoding_min) / scaling))
    # Quantized-grid position of each input value.
    r_x_by_s_plus_round_o = (tf.round((x / scaling)) + rounded_offset)
    # Pass the gradient only where the value lands inside [n, p]; clipped
    # values contribute zero input gradient.
    inner_cond = tf.where(tf.less_equal(r_x_by_s_plus_round_o, p), tf.ones_like(r_x_by_s_plus_round_o), tf.zeros_like(r_x_by_s_plus_round_o))
    dloss_by_dx = (tf.where(tf.less_equal(n, r_x_by_s_plus_round_o), inner_cond, tf.zeros_like(r_x_by_s_plus_round_o)) * grad)
    dloss_by_dmax = tf.cast(_compute_dloss_by_dmax(x, grad, scaling, rounded_offset, bitwidth, is_symmetric), tf.float64)
    dloss_by_dmin = tf.cast(_compute_dloss_by_dmin_using_dmax(dloss_by_dmax), tf.float64)
    # op_mode 3: bypass quantization in the backward pass.
    dloss_by_dx = tf.cond(tf.equal(op_mode, 3), (lambda : tf.convert_to_tensor(grad)), (lambda : dloss_by_dx))
    return (dloss_by_dmin, dloss_by_dmax, dloss_by_dx)
def SetFlags(os, binType, type, defaultFlags, advanced=True):
    """Interactively build the ``<Flags>`` XML section of a config file.

    NOTE(review): parameters ``os`` and ``type`` shadow builtins/stdlib names;
    kept for interface compatibility.

    :param os: target OS string; 'winnt' enables the Windows-only prompts
    :param binType: 'exe' or 'sharedlib'; sharedlib defaults to no-window
    :param type: 'level3' or 'level4' behavior style
    :param defaultFlags: flag names emitted unconditionally
    :param advanced: include the advanced prompts
    :return: list of XML lines for the flags section
    """
    configLines = list()
    configLines.append('        <Flags>\n')
    usingUB = False
    # Emit the unconditional flags, remembering if UTILITYBURST is among them.
    for flag in defaultFlags:
        if (len(flag) > 0):
            configLines.append(('            <%s/>\n' % flag))
            if (flag == 'PCHEAP_CONFIG_LOADED_WITH_UTILITY_BURST'):
                usingUB = True
    if (type.lower() == 'level3'):
        if dsz.ui.Prompt('Perform IMMEDIATE CALLBACK?', False):
            configLines.append('            <PCHEAP_CONFIG_FLAG_CALLBACK_NOW/>\n')
        if (binType.lower() == 'exe'):
            if dsz.ui.Prompt('Enable QUICK SELF-DELETION?', False):
                configLines.append('            <PCHEAP_CONFIG_FLAG_QUICK_DELETE_SELF/>\n')
    elif (type.lower() == 'level4'):
        if dsz.ui.Prompt('Listen AT ALL TIMES?', False):
            configLines.append('            <PCHEAP_CONFIG_FLAG_24_HOUR/>\n')
    # UTILITYBURST is only offered when not already set by defaultFlags.
    if (advanced and (not usingUB) and (os == 'winnt')):
        if dsz.ui.Prompt('Configure for install with UTILITYBURST?', False):
            configLines.append('            <PCHEAP_CONFIG_LOADED_WITH_UTILITY_BURST/>\n')
    if (advanced and (os == 'winnt')):
        # Inverted prompt: declining the firewall update sets the ignore flag.
        if (not dsz.ui.Prompt('Update the Windows firewall when listening?')):
            configLines.append('            <PCHEAP_CONFIG_FLAG_IGNORE_WIN_FIREWALL/>\n')
    # Shared libraries default to not creating a window.
    defaultWindowFlag = False
    if (binType.lower() == 'sharedlib'):
        defaultWindowFlag = True
    if (advanced and (os == 'winnt')):
        if dsz.ui.Prompt('Disable window creation?', defaultWindowFlag):
            configLines.append('            <PCHEAP_CONFIG_FLAG_DONT_CREATE_WINDOW/>\n')
    elif defaultWindowFlag:
        # Non-advanced / non-Windows path: apply the default without asking.
        configLines.append('            <PCHEAP_CONFIG_FLAG_DONT_CREATE_WINDOW/>\n')
    if (advanced and (os == 'winnt') and (type.lower() == 'level4')):
        if dsz.ui.Prompt('Disable shared status memory creation?', False):
            configLines.append('            <PCHEAP_CONFIG_FLAG_DONT_CREATE_SECTION/>\n')
    configLines.append('        </Flags>\n')
    return configLines
# NOTE(review): the original bare ``.skipif(...)`` line was a mangled
# ``@pytest.mark.skipif`` decorator (a syntax error as written); restored.
@pytest.mark.skipif((not ((platform.system() == 'Windows') and randovania.is_frozen())), reason='only works in frozen Windows')
def test_find_bad_installation():
    """A pristine frozen install reports no missing/extra/modified files."""
    progress_update = MagicMock()
    hash_list: dict[(str, str)] = json_lib.read_path(randovania.get_data_path().joinpath('frozen_file_list.json'))
    result = installation_check.find_bad_installation(hash_list, progress_update)
    assert (result == ([], [], set()))
class DownsampleBlock(nn.Module):
    """Downsampling unit: an average-pool branch and a strided ESP branch are
    concatenated, then an input-image shortcut is added before activation."""

    def __init__(self, in_channels, out_channels, x0_channels, dilations):
        super(DownsampleBlock, self).__init__()
        # The ESP branch only produces the channels the pooled branch lacks.
        esp_channels = out_channels - in_channels
        self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        self.eesp = ESPBlock(in_channels=in_channels, out_channels=esp_channels, stride=2, dilations=dilations)
        self.shortcut_block = ShortcutBlock(in_channels=x0_channels, out_channels=out_channels)
        self.activ = nn.PReLU(out_channels)

    def forward(self, x, x0):
        pooled = self.pool(x)
        esp_out, _ = self.eesp(x, None)
        merged = torch.cat((pooled, esp_out), dim=1)
        # Downsample the original input alongside and add its shortcut.
        x0 = self.pool(x0)
        merged = merged + self.shortcut_block(x0)
        return (self.activ(merged), x0)
def test_shebang_matches():
    """shebang_matches: matches interpreter names but not mere substrings or
    path components, and rejects a bare '#!'."""
    python_re = 'python(2\\.\\d)?'
    matching = [
        '#!/usr/bin/env python\n',
        '#!/usr/bin/python2.4',
        '#!/usr/bin/startsomethingwith python',
        '#!C:\\Python2.4\\Python.exe',
    ]
    non_matching = [
        '#!/usr/bin/python-ruby',
        '#!/usr/bin/python/ruby',
    ]
    for line in matching:
        assert util.shebang_matches(line, python_re)
    for line in non_matching:
        assert not util.shebang_matches(line, python_re)
    assert not util.shebang_matches('#!', 'python')
# NOTE(review): this bare call looks like a mangled parametrization decorator
# (e.g. ``@parametrize_bool('is_required_a', 'is_required_b')``) whose prefix
# was lost -- confirm against the original source.
_bool('is_required_a', 'is_required_b')
def test_flat(debug_ctx, debug_trail, trail_select, is_required_a, is_required_b, acc_schema):
    """Dumper for a flat two-field model: field 'b' is filtered by a sieve,
    access errors respect the debug-trail mode, and dump-time exceptions are
    wrapped (or grouped) according to trail_select."""
    dumper_getter = make_dumper_getter(shape=shape(TestField('a', acc_schema.accessor_maker('a', is_required_a)), TestField('b', acc_schema.accessor_maker('b', is_required_b))), name_layout=OutputNameLayout(crown=OutDictCrown({'a': OutFieldCrown('a'), 'b': OutFieldCrown('b')}, sieves={'b': skipper}), extra_move=None), debug_trail=debug_trail, debug_ctx=debug_ctx)
    dumper = dumper_getter()
    # Plain values pass through; extra attribute 'c' is never collected.
    assert (dumper(acc_schema.dummy(a=1, b=2)) == {'a': 1, 'b': 2})
    assert (dumper(acc_schema.dummy(a=1, b=2, c=3)) == {'a': 1, 'b': 2})
    # The sieve drops Skip() only for 'b'; 'a' keeps Skip() as a value.
    assert (dumper(acc_schema.dummy(a=1, b=Skip())) == {'a': 1})
    assert (dumper(acc_schema.dummy(a=1, b=Skip(), c=3)) == {'a': 1})
    assert (dumper(acc_schema.dummy(a=Skip(), b=2)) == {'a': Skip(), 'b': 2})
    assert (dumper(acc_schema.dummy(a=Skip(), b=2, c=3)) == {'a': Skip(), 'b': 2})
    assert (dumper(acc_schema.dummy(a=Skip(), b=Skip())) == {'a': Skip()})
    assert (dumper(acc_schema.dummy(a=Skip(), b=Skip(), c=3)) == {'a': Skip()})
    # Missing required fields raise access errors, wrapped per trail mode.
    if is_required_a:
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')])])), (lambda : dumper(acc_schema.dummy(b=1))))
    if is_required_b:
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('b')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('b')])])), (lambda : dumper(acc_schema.dummy(a=1))))
    if (is_required_a and is_required_b):
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')]), with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('b')])])), (lambda : dumper(acc_schema.dummy())))
    # Optional fields are simply omitted when absent.
    if (not is_required_a):
        assert (dumper(acc_schema.dummy(b=1)) == {'b': 1})
        assert (dumper(acc_schema.dummy(b=Skip())) == {})
    if (not is_required_b):
        assert (dumper(acc_schema.dummy(a=1)) == {'a': 1})
        assert (dumper(acc_schema.dummy(a=Skip())) == {'a': Skip()})
    if ((not is_required_a) and (not is_required_b)):
        assert (dumper(acc_schema.dummy()) == {})
    # Exceptions raised while dumping are wrapped/grouped per trail mode.
    raises_exc(trail_select(disable=SomeError(), first=with_trail(SomeError(), [acc_schema.trail_element_maker('a')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(SomeError(), [acc_schema.trail_element_maker('a')])])), (lambda : dumper(acc_schema.dummy(a=SomeError(), b=Skip()))))
    raises_exc(trail_select(disable=SomeError(), first=with_trail(SomeError(), [acc_schema.trail_element_maker('b')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(SomeError(), [acc_schema.trail_element_maker('b')])])), (lambda : dumper(acc_schema.dummy(a=1, b=SomeError()))))
    raises_exc(trail_select(disable=SomeError(0), first=with_trail(SomeError(0), [acc_schema.trail_element_maker('a')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(SomeError(0), [acc_schema.trail_element_maker('a')]), with_trail(SomeError(1), [acc_schema.trail_element_maker('b')])])), (lambda : dumper(acc_schema.dummy(a=SomeError(0), b=SomeError(1)))))
class TestOpenAICompatibility():
    """Tests for the OpenAI-compatible API surface (models, completions, chat,
    and their streaming and error paths).

    Fix: the class defined ``test_chat_stream`` twice; the second definition
    shadowed the first, so the streaming-success test never ran. The second
    (bad-request) variant is renamed ``test_chat_stream_bad_request`` to match
    ``test_completions_stream_bad_request``.
    """

    def test_models(self, openai_testing_model):
        """Only the configured test model is listed, with a matching id."""
        models = openai.Model.list()
        assert (len(models['data']) == 1), 'Only the test model should be returned'
        assert (models.data[0].id == openai_testing_model), 'The test model id should match'

    def test_completions(self, openai_testing_model):
        """A basic completion echoes the model and produces the stub text."""
        completion = openai.Completion.create(model=openai_testing_model, prompt='Hello world', typical_p=0.1, max_tokens=2)
        assert (completion.model == openai_testing_model)
        assert completion.model
        assert (completion.choices[0].text == 'test_0 test_1 ')

    def test_chat(self, openai_testing_model):
        """A basic chat completion returns usage, an id and message content."""
        chat_completion = openai.ChatCompletion.create(model=openai_testing_model, messages=[{'role': 'user', 'content': 'Hello world'}], typical_p=1)
        assert chat_completion
        assert chat_completion.usage
        assert chat_completion.id
        assert isinstance(chat_completion.choices, list)
        assert chat_completion.choices[0].message.content

    def test_completions_bad_request(self, openai_testing_model):
        """An out-of-range temperature is rejected with a helpful message."""
        with pytest.raises(openai.error.InvalidRequestError) as exc_info:
            openai.Completion.create(model=openai_testing_model, prompt='Hello world', temperature=(- 0.1))
        assert ('temperature' in str(exc_info.value))

    def test_chat_bad_request(self, openai_testing_model):
        """Chat requests validate temperature the same way."""
        with pytest.raises(openai.error.InvalidRequestError) as exc_info:
            openai.ChatCompletion.create(model=openai_testing_model, messages=[{'role': 'user', 'content': 'Hello world'}], temperature=(- 0.1))
        assert ('temperature' in str(exc_info.value))

    def test_completions_stream(self, openai_testing_model):
        """Streaming completions yield multiple chunks with ids and text."""
        i = 0
        for completion in openai.Completion.create(model=openai_testing_model, prompt='Hello world', stream=True, typical_p=1):
            i += 1
            assert completion
            assert completion.id
            assert isinstance(completion.choices, list)
            assert isinstance(completion.choices[0].text, str)
        assert (i > 4)

    def test_chat_stream(self, openai_testing_model):
        """Streaming chat: first chunk carries the role, later chunks carry
        content deltas, and the final chunk has an empty delta plus a
        finish_reason."""
        i = 0
        for chat_completion in openai.ChatCompletion.create(model=openai_testing_model, messages=[{'role': 'user', 'content': 'Hello world'}], stream=True, temperature=0.4, frequency_penalty=0.02, typical_p=1):
            if (i == 0):
                assert chat_completion
                assert chat_completion.id
                assert isinstance(chat_completion.choices, list)
                assert chat_completion.choices[0].delta.role
            else:
                assert chat_completion
                assert chat_completion.id
                assert isinstance(chat_completion.choices, list)
                assert ((chat_completion.choices[0].delta == {}) or hasattr(chat_completion.choices[0].delta, 'content'))
            i += 1
        assert chat_completion
        assert chat_completion.id
        assert isinstance(chat_completion.choices, list)
        assert (chat_completion.choices[0].delta == {})
        assert chat_completion.choices[0].finish_reason
        assert (i > 4)

    def test_completions_stream_bad_request(self, openai_testing_model):
        """Streamed completions surface validation errors as APIError."""
        with pytest.raises(openai.error.APIError) as exc_info:
            for _ in openai.Completion.create(model=openai_testing_model, prompt='Hello world', stream=True, temperature=(- 0.1)):
                pass
        assert ('temperature' in str(exc_info.value))

    def test_chat_stream_bad_request(self, openai_testing_model):
        """Streamed chat surfaces validation errors as APIError."""
        with pytest.raises(openai.error.APIError) as exc_info:
            for _chat_completion in openai.ChatCompletion.create(model=openai_testing_model, messages=[{'role': 'user', 'content': 'Hello world'}], stream=True, temperature=(- 0.1)):
                pass
        assert ('temperature' in str(exc_info.value))
class Xmate3RobotiqDefaultConfig():
    """Default robot configuration: Xmate3 arm with a Robotiq 2F-140 gripper.

    Bundles the URDF location and physics overrides, joint naming, PD gains,
    the available controller presets and the default camera rig.
    """

    def __init__(self) -> None:
        # {ASSET_DIR} placeholder is substituted by the asset loader at runtime.
        self.urdf_path = '{ASSET_DIR}/xmate3_robotiq/xmate3_robotiq.urdf'
        # High-friction material on both finger pads so grasped objects do not
        # slip; patch radii widen the contact patch for stable grasps.
        self.urdf_config = dict(_materials=dict(gripper=dict(static_friction=2.0, dynamic_friction=2.0, restitution=0.0)), link=dict(left_inner_finger_pad=dict(material='gripper', patch_radius=0.1, min_patch_radius=0.1), right_inner_finger_pad=dict(material='gripper', patch_radius=0.1, min_patch_radius=0.1)))
        # Seven-DoF arm joints, base to wrist.
        self.arm_joint_names = ['joint1', 'joint2', 'joint3', 'joint4', 'joint5', 'joint6', 'joint7']
        # PD gains / force limits shared by all arm controllers below.
        self.arm_stiffness = 1000.0
        self.arm_damping = 100.0
        self.arm_force_limit = 100
        # Both driver joints are controlled; the mimic controller keeps them in sync.
        self.gripper_joint_names = ['robotiq_2f_140_left_driver_joint', 'robotiq_2f_140_right_driver_joint']
        self.gripper_stiffness = 1000.0
        self.gripper_damping = 100.0
        self.gripper_force_limit = 100
        # Frame used as the end-effector reference for EE-space controllers.
        self.ee_link_name = 'grasp_convenient_link'

    def controllers(self):
        """Return the dict of named controller presets (deep-copied per call)."""
        # Absolute joint positions, unnormalized action space.
        arm_pd_joint_pos = PDJointPosControllerConfig(self.arm_joint_names, None, None, self.arm_stiffness, self.arm_damping, self.arm_force_limit, normalize_action=False)
        # Delta joint positions clamped to +/-0.1 rad per step.
        arm_pd_joint_delta_pos = PDJointPosControllerConfig(self.arm_joint_names, (- 0.1), 0.1, self.arm_stiffness, self.arm_damping, self.arm_force_limit, use_delta=True)
        # End-effector-space delta position / pose controllers.
        arm_pd_ee_delta_pos = PDEEPosControllerConfig(self.arm_joint_names, (- 0.1), 0.1, self.arm_stiffness, self.arm_damping, self.arm_force_limit, ee_link=self.ee_link_name)
        arm_pd_ee_delta_pose = PDEEPoseControllerConfig(self.arm_joint_names, (- 0.1), 0.1, 0.1, self.arm_stiffness, self.arm_damping, self.arm_force_limit, ee_link=self.ee_link_name)
        # Mimic controller drives both fingers together; upper bound is the
        # 0.068 m joint limit plus a small margin.
        gripper_pd_joint_pos = PDJointPosMimicControllerConfig(self.gripper_joint_names, 0, (0.068 + 0.01), self.gripper_stiffness, self.gripper_damping, self.gripper_force_limit)
        # NOTE(review): the key 'arm_pd_joint_delta_pos' breaks the 'pd_*'
        # naming of its siblings — presumably it was meant to be
        # 'pd_joint_delta_pos'; confirm against callers before renaming.
        controller_configs = dict(pd_joint_pos=dict(arm=arm_pd_joint_pos, gripper=gripper_pd_joint_pos), arm_pd_joint_delta_pos=dict(arm=arm_pd_joint_delta_pos, gripper=gripper_pd_joint_pos), pd_ee_delta_pos=dict(arm=arm_pd_ee_delta_pos, gripper=gripper_pd_joint_pos), pd_ee_delta_pose=dict(arm=arm_pd_ee_delta_pose, gripper=gripper_pd_joint_pos))
        # Deep copy so callers can mutate their preset without affecting others.
        return deepcopy_dict(controller_configs)

    def cameras(self):
        """Return the default base- and hand-mounted camera configurations."""
        return [CameraConfig(uid='base_camera', p=[0.0, 0.0, 0.0], q=[1, 0, 0, 0], width=128, height=128, fov=1.5707, near=0.01, far=10, actor_uid='camera_base_link', hide_link=False), CameraConfig(uid='hand_camera', p=[0.0, 0.0, 0.0], q=[1, 0, 0, 0], width=128, height=128, fov=1.5707, near=0.01, far=10, actor_uid='camera_hand_link', hide_link=False)]
def get_acq_time_exp(start_time, nlines):
    """Build the expected per-scanline acquisition-time array.

    The first and last lines get NaT (no valid time); the interior lines get
    ``start_time`` plus a 1 ms increment per line.

    Fix: the array length was hard-coded to 464, so any ``nlines != 464``
    raised a broadcasting error on the interior assignment; it now honors
    the ``nlines`` parameter (behavior is unchanged for nlines == 464).

    :param start_time: anything ``np.datetime64`` accepts (e.g. ISO string).
    :param nlines: total number of scanlines (>= 2).
    :return: ``datetime64[ns]`` array of shape ``(nlines,)``.
    """
    tline_exp = np.zeros(nlines, dtype='datetime64[ms]')
    tline_exp[0] = np.datetime64('NaT')
    tline_exp[(- 1)] = np.datetime64('NaT')
    tline_exp[1:(- 1)] = np.datetime64(start_time)
    # 0, 1, 2, ... ms offsets for the nlines - 2 interior lines.
    tline_exp[1:(- 1)] += np.arange((nlines - 2)).astype('timedelta64[ms]')
    return tline_exp.astype('datetime64[ns]')
def calculateSSIM():
    """Compare the ground-truth slide with its synthesized counterpart.

    Reads both images relative to the module-level ``BASE_TRUTH_DIR`` using
    the module-level ``SLIDE_NAME`` and hands them to ``compare_images`` for
    the similarity report.
    """
    base = str(BASE_TRUTH_DIR)
    slide = str(SLIDE_NAME)
    original_name = f'{base}/6400/{slide}'
    print(original_name)
    original = cv2.imread(original_name)
    synthesized_name = f'{base}/H/{slide}'
    print(synthesized_name)
    synthesized = cv2.imread(synthesized_name)
    compare_images(original, synthesized, 'Original vs. synthesized')
# Fix: the line above the class was a bare string expression — clearly a
# mangled `@patch(...)` class decorator (every test method already accepts the
# injected mock argument). NOTE(review): assumes `patch` is imported alongside
# `MagicMock` at the top of the file — confirm.
@patch('torchx.runner.events.record')
class LogEventTest(unittest.TestCase):
    """Tests for the `log_event` context manager and its TorchxEvent payload."""

    def assert_torchx_event(self, expected: TorchxEvent, actual: TorchxEvent) -> None:
        """Compare the fields of two TorchxEvents that the tests care about."""
        self.assertEqual(expected.session, actual.session)
        self.assertEqual(expected.app_id, actual.app_id)
        self.assertEqual(expected.api, actual.api)
        self.assertEqual(expected.app_image, actual.app_image)
        self.assertEqual(expected.source, actual.source)

    def test_create_context(self, _) -> None:
        """Entering log_event builds a TorchxEvent with the given metadata."""
        cfg = json.dumps({'test_key': 'test_value'})
        context = log_event('test_call', 'local', 'test_app_id', app_image='test_app_image_id', runcfg=cfg)
        expected_torchx_event = TorchxEvent('test_app_id', 'local', 'test_call', 'test_app_id', app_image='test_app_image_id', runcfg=cfg)
        self.assert_torchx_event(expected_torchx_event, context._torchx_event)

    def test_record_event(self, record_mock: MagicMock) -> None:
        """Exiting the context records timing fields on the event."""
        cfg = json.dumps({'test_key': 'test_value'})
        with log_event('test_call', 'local', 'test_app_id', app_image='test_app_image_id', runcfg=cfg) as ctx:
            pass
        # Timing fields are copied from the actual event since they are not
        # deterministic.
        expected_torchx_event = TorchxEvent('test_app_id', 'local', 'test_call', 'test_app_id', app_image='test_app_image_id', runcfg=cfg, cpu_time_usec=ctx._torchx_event.cpu_time_usec, wall_time_usec=ctx._torchx_event.wall_time_usec)
        self.assert_torchx_event(expected_torchx_event, ctx._torchx_event)

    def test_record_event_with_exception(self, record_mock: MagicMock) -> None:
        """An exception raised inside the context is captured on the event."""
        cfg = json.dumps({'test_key': 'test_value'})
        with self.assertRaises(RuntimeError):
            with log_event('test_call', 'local', 'test_app_id', cfg) as ctx:
                raise RuntimeError('test error')
        self.assertTrue(('test error' in ctx._torchx_event.raw_exception))
class InteractionProjectionArchTest(unittest.TestCase):
    """Shape and correctness tests for InteractionProjectionArch.

    Naming: D = embedding dim, B = batch size, F = number of sparse features,
    F1/F2 = projected feature counts of the two interaction branches. The
    output width is D (dense passthrough) + F1 * F2 (branch1 @ branch2.T).
    """

    def test_basic(self) -> None:
        """Small config: output is (B, D + F1*F2)."""
        D = 3
        B = 10
        keys = ['f1', 'f2']  # NOTE(review): only used to derive F
        F = len(keys)
        F1 = 2
        F2 = 2
        # Each branch consumes the concatenated dense + flattened sparse input
        # of width F*D + D and projects to Fi*D.
        I1 = DenseArch(in_features=((2 * D) + D), layer_sizes=[(2 * D), (F1 * D)])
        I2 = DenseArch(in_features=((2 * D) + D), layer_sizes=[(2 * D), (F2 * D)])
        inter_arch = InteractionProjectionArch(num_sparse_features=F, interaction_branch1=I1, interaction_branch2=I2)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        self.assertEqual(concat_dense.size(), (B, (D + (F1 * F2))))

    def test_larger(self) -> None:
        """Same shape check with a larger configuration."""
        D = 8
        B = 20
        keys = ['f1', 'f2', 'f3', 'f4']
        F = len(keys)
        F1 = 4
        F2 = 4
        I1 = DenseArch(in_features=((4 * D) + D), layer_sizes=[(4 * D), (F1 * D)])
        I2 = DenseArch(in_features=((4 * D) + D), layer_sizes=[(4 * D), (F2 * D)])
        inter_arch = InteractionProjectionArch(num_sparse_features=F, interaction_branch1=I1, interaction_branch2=I2)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        self.assertEqual(concat_dense.size(), (B, (D + (F1 * F2))))

    def test_correctness(self) -> None:
        """With identity branches the output is pinned to a golden tensor."""
        D = 4
        B = 3
        keys = ['f1', 'f2', 'f3', 'f4']
        F = len(keys)
        # With nn.Identity branches, F1 = F2 = F + 1 (dense row + F sparse rows).
        F1 = 5
        F2 = 5
        I1 = nn.Identity()
        I2 = nn.Identity()
        inter_arch = InteractionProjectionArch(num_sparse_features=F, interaction_branch1=I1, interaction_branch2=I2)
        # Fixed seed so the golden tensor below is reproducible.
        torch.manual_seed(0)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        self.assertEqual(concat_dense.size(), (B, (D + (F1 * F2))))
        # Golden values captured from a known-good run at seed 0.
        expected = torch.tensor([[0.4963, 0.7682, 0.0885, 0.132, 0.5057, 0.6874, 0.5756, 0.8082, 0.6656, 0.5402, 0.3672, 0.5765, 0.8837, 0.271, 0.754, 0.9349, 0.7424, 1.0666, 0.7297, 1.3209, 1.2713, 1.2888, 1.9248, 0.9367, 0.4865, 0.7688, 0.9932, 1.3475, 0.8313], [0.3074, 0.6341, 0.4901, 0.8964, 1.0706, 0.8757, 1.0621, 1.3669, 0.6122, 0.9342, 0.7316, 0.7294, 1.0603, 0.3866, 0.2011, 0.2153, 0.3768, 0.3638, 0.2154, 1.0712, 0.8293, 1.3, 1.4564, 0.8369, 0.3655, 0.444, 0.6148, 1.0776, 0.5871], [0.4556, 0.6323, 0.3489, 0.4017, 0.7294, 1.3899, 0.9493, 0.6186, 0.7565, 0.9535, 1.5688, 0.8992, 0.7077, 1.0088, 1.1206, 1.9778, 1.1639, 0.8642, 1.1966, 1.1827, 1.8592, 1.3003, 0.9441, 1.1177, 0.473, 0.7631, 0.4304, 0.3937, 0.323]])
        self.assertTrue(torch.allclose(concat_dense, expected, rtol=0.0001, atol=0.0001))
def check_models_are_in_init():
    """Raise if any public model class is missing from the top-level transformers init."""
    exported_names = dir(transformers)
    missing = []
    for module in get_model_modules():
        for model in get_models(module, include_pretrained=True):
            if model[0] not in exported_names:
                missing.append(model[0])
    # Private models are intentionally not re-exported.
    missing = [name for name in missing if not is_a_private_model(name)]
    if len(missing) > 0:
        raise Exception(f"The following models should be in the main init: {','.join(missing)}.")
class ModelEMA(object):
    """Exponential moving average of a model's floating-point state.

    Keeps a frozen, eval-mode deep copy of ``model`` whose floating-point
    state_dict entries are blended toward the live model on every
    :meth:`update` call. The effective decay ramps from 0 toward ``decay``
    as ``updates`` grows (warmup over roughly 2000 steps).
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # The shadow copy never trains; gradients are disabled on it.
        self.ema = deepcopy(model).eval()
        self.updates = updates
        # Ramped decay: near zero early on so noisy initial weights wash out.
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000.0))
        for param in self.ema.parameters():
            param.requires_grad_(False)

    def update(self, model):
        """Blend the live model's state into the EMA copy, in place."""
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            live_state = model.state_dict()
            for name, ema_tensor in self.ema.state_dict().items():
                # Integer buffers (e.g. step counters) are left untouched.
                if ema_tensor.dtype.is_floating_point:
                    # ema = d * ema + (1 - d) * live, on the aliased storage.
                    ema_tensor.mul_(d)
                    ema_tensor.add_(live_state[name].detach(), alpha=(1.0 - d))
def load_pretrain(model, pretrained_path):
    """Load pretrained weights into *model*, tolerating common prefix mismatches.

    Strips a leading 'module.' (DataParallel) prefix, and if the keys still do
    not match the model, retries with a 'features.' prefix added (backbone used
    as a feature extractor). Loading is non-strict, so extra/missing keys are
    allowed by torch.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt /
    SystemExit) is narrowed to ``except Exception``.

    :param model: the torch module to load into (mutated in place).
    :param pretrained_path: checkpoint path for ``torch.load``.
    :return: the same *model*, for chaining.
    """
    logger.info('load pretrained model from {}'.format(pretrained_path))
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    pretrained_dict = torch.load(pretrained_path, map_location=device)
    if ('state_dict' in pretrained_dict.keys()):
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    try:
        check_keys(model, pretrained_dict)
    except Exception:
        # Keys don't line up directly; assume the checkpoint is a backbone and
        # the model wraps it under a 'features.' attribute.
        logger.info('[Warning]: using pretrain as features. Adding "features." as prefix')
        pretrained_dict = {('features.' + k): v for (k, v) in pretrained_dict.items()}
        check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model
def build_tqdm(n: int, message: typing.Optional[str]=None) -> typing.Tuple[(typing.Callable, typing.Callable)]:
    """Build host-callback hooks that drive a tqdm bar from inside jitted JAX loops.

    Fix: ``jax.jax.lax.cond`` (double ``jax``) is corrected to ``jax.lax.cond``;
    the original raised AttributeError on the first iteration.

    :param n: total number of loop iterations.
    :param message: bar description; defaults to "Running for {n:,} iterations".
    :return: ``(_update_progress_bar, close_tqdm)`` — call the first with the
        loop counter every iteration; wrap the loop result with the second so
        the bar is closed on the final iteration.
    """
    if (message is None):
        message = f'Running for {n:,} iterations'
    tqdm_bars = {}
    # Update at most ~20 times regardless of n; the remainder is flushed at the end.
    if (n > 20):
        print_rate = int((n / 20))
    else:
        print_rate = 1
    remainder = (n % print_rate)

    def _define_tqdm(arg, transform):
        # Runs on the host: create the bar lazily on the first iteration.
        tqdm_bars[0] = tqdm(range(n))
        tqdm_bars[0].set_description(message, refresh=False)

    def _update_tqdm(arg, transform):
        # Runs on the host: advance the bar by `arg` iterations.
        tqdm_bars[0].update(arg)

    def _update_progress_bar(iter_num):
        # Create on iteration 0 (was jax.jax.lax.cond — fixed).
        _ = jax.lax.cond((iter_num == 0), (lambda _: host_callback.id_tap(_define_tqdm, None, result=iter_num)), (lambda _: iter_num), operand=None)
        # Periodic update every `print_rate` iterations, except at the tail.
        _ = jax.lax.cond((((iter_num % print_rate) == 0) & (iter_num != (n - remainder))), (lambda _: host_callback.id_tap(_update_tqdm, print_rate, result=iter_num)), (lambda _: iter_num), operand=None)
        # Flush the leftover iterations at the tail.
        _ = jax.lax.cond((iter_num == (n - remainder)), (lambda _: host_callback.id_tap(_update_tqdm, remainder, result=iter_num)), (lambda _: iter_num), operand=None)

    def _close_tqdm(arg, transform):
        tqdm_bars[0].close()

    def close_tqdm(result, iter_num):
        # Pass `result` through unchanged; close the bar on the last iteration.
        return jax.lax.cond((iter_num == (n - 1)), (lambda _: host_callback.id_tap(_close_tqdm, None, result=result)), (lambda _: result), operand=None)

    return (_update_progress_bar, close_tqdm)
class Effect6786(BaseEffect):
    """Industrial Command Ships hull bonus applied to shield command bursts."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # The same hull bonus applies to every shield-command burst attribute;
        # iterate instead of repeating five near-identical calls.
        for attr in ('warfareBuff4Value', 'warfareBuff1Value', 'warfareBuff2Value', 'warfareBuff3Value', 'buffDuration'):
            fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), attr, src.getModifiedItemAttr('shipBonusICS3'), skill='Industrial Command Ships', **kwargs)
class AutopartUsage_TestCase(CommandSequenceTest):
    """autopart must not be combined with explicit partitioning commands."""

    def runTest(self):
        # Each of these is valid on its own.
        for valid in ('part / --size=2048', 'partition / --size=2048', 'autopart', 'raid / --level=1 --device=md0 raid.01', 'logvol / --vgname=foo --size=2000 --name=bar', 'volgroup foo pv.01'):
            self.assert_parse(valid)
        # autopart combined with any explicit storage command (in either
        # order) must be rejected.
        for invalid in ('part / --size=2048\nautopart', 'autopart\npart / --size=2048', 'partition / --size=2048\nautopart', 'autopart\npartition / --size=2048', 'raid / --level=1 --device=md0 raid.01\nautopart', 'autopart\nraid / --level=1 --device=md0 raid.01', 'logvol / --vgname=foo --size=2000 --name=bar\nautopart', 'autopart\nlogvol / --vgname=foo --size=2000 --name=bar', 'volgroup foo pv.01\nautopart', 'autopart\nvolgroup foo pv.01'):
            self.assert_parse_error(invalid)
        # A longer mixed sequence is rejected as well.
        long_incorrect_sequence = '\npart / --size=2048\npartition /opt --size=2048\nautopart\nraid / --level=1 --device=md0 raid.01\nlogvol / --vgname=foo --size=2000 --name=bar\nvolgroup foo pv.01\n'
        self.assert_parse_error(long_incorrect_sequence)
def _get_filenames_to_download(channels, granules):
    """Yield download URLs for the geolocation and band files of *granules*.

    Geolocation products are selected by which band groups appear in
    *channels* (DNB / I-bands / M-bands), followed by the per-channel files.
    """
    # Geolocation files, one product per requested band group, in fixed order.
    for marker, urls in (('DNB', GDNBO_URLS), ('I', GITCO_URLS), ('M', GMTCO_URLS)):
        if any((marker in chan) for chan in channels):
            yield from _yield_specific_granules(urls, granules)
    # Band data files for every requested channel.
    for channel in channels:
        yield from _yield_specific_granules(FILES__1229[channel], granules)
def _parse_datetime_header(value: str) -> datetime.datetime:
match = re.match('^(?P<datetime>.*?)(?P<tzoffset>[+-]\\d{4})?$', value)
dt = datetime.datetime.strptime(match.group('datetime'), '%Y-%m-%d %H:%M')
tzoffset = match.group('tzoffset')
if (tzoffset is not None):
(plus_minus_s, rest) = (tzoffset[0], tzoffset[1:])
(hours_offset_s, mins_offset_s) = (rest[:2], rest[2:])
plus_minus = int(f'{plus_minus_s}1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
net_mins_offset = (hours_offset * 60)
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
tzoffset = FixedOffsetTimezone(net_mins_offset)
dt = dt.replace(tzinfo=tzoffset)
return dt |
def resp_update_push_rules_project():
    # Fixture-style generator: registers mocked GET/PUT endpoints for a
    # project's push rules and yields the active RequestsMock.
    # NOTE(review): the ``url=`` string literals below look truncated /
    # corrupted (the quote swallows the rest of the call, leaving invalid
    # syntax) — restore the real endpoint URLs (presumably the project's
    # push-rules API path). TODO confirm against the project history.
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.GET, url=' json=push_rules_content, content_type='application/json', status=200)
        rsps.add(method=responses.PUT, url=' json=push_rules_content, content_type='application/json', status=201)
        (yield rsps)
def checkStyle():
    """Run the project's style checks; return a non-zero code on failure.

    Three passes: (1) flake8 with the mandatory error set over all code,
    (2) a line-ending scan of .py/.rst files, (3) flake8 with the recommended
    (non-mandatory) set over the current git diff only.

    Fix: files were opened with mode 'U', which was deprecated since 3.4 and
    removed in Python 3.11 (ValueError). The default text mode already enables
    universal newlines and populates ``fh.newlines``.
    """
    print('flake8: check all code against mandatory error set...')
    errors = ','.join(FLAKE_MANDATORY)
    cmd = (['flake8', ('--select=' + errors)] + FLAKE_CHECK_PATHS)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output = proc.stdout.read().decode('utf-8')
    ret = proc.wait()
    printFlakeOutput(output)
    print('check line endings in all files...')
    count = 0
    # None means "no newline seen yet"; '\n' is the only accepted ending.
    allowedEndings = set([None, '\n'])
    for (path, dirs, files) in os.walk('.'):
        if path.startswith((('.' + os.path.sep) + '.tox')):
            continue
        for f in files:
            if (os.path.splitext(f)[1] not in ('.py', '.rst')):
                continue
            filename = os.path.join(path, f)
            # Default text mode: universal newlines, fh.newlines records what
            # was actually seen ('U' mode was removed in Python 3.11).
            with open(filename) as fh:
                _ = fh.readlines()
                endings = set((fh.newlines if isinstance(fh.newlines, tuple) else (fh.newlines,)))
                endings -= allowedEndings
                if (len(endings) > 0):
                    print(((('\x1b[0;31m' + 'File has invalid line endings: ') + ('%s' % filename)) + '\x1b[0m'))
                    ret = (ret | 2)
            count += 1
    print(('checked line endings in %d files' % count))
    print('flake8: check new code against recommended error set...')
    diff = subprocess.check_output(['git', 'diff'])
    proc = subprocess.Popen(['flake8', '--diff', ('--ignore=' + errors)], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    proc.stdin.write(diff)
    proc.stdin.close()
    output = proc.stdout.read().decode('utf-8')
    ret |= printFlakeOutput(output)
    if (ret == 0):
        print('style test passed.')
    else:
        print(('style test failed: %d' % ret))
    return ret
# NOTE(review): the bare ``_config`` expression below looks like a mangled
# decorator (presumably ``@<something>_config``, supplying the layout stack
# for the ``manager`` fixture) — restore the original decorator. As written
# this line raises NameError at import time. TODO confirm against history.
_config
def test_spiral_bottom_anticlockwise(manager):
    # Cycle to the fourth layout (the spiral bottom/anticlockwise variant),
    # then verify window geometry after each client is added: each new window
    # halves the remaining space, spiralling anticlockwise from the bottom.
    manager.c.next_layout()
    manager.c.next_layout()
    manager.c.next_layout()
    manager.test_window('one')
    assert_dimensions(manager, 0, 0, 798, 598)
    manager.test_window('two')
    assert_dimensions(manager, 0, 0, 798, 298)
    manager.test_window('three')
    assert_dimensions(manager, 0, 0, 398, 298)
    manager.test_window('four')
    assert_dimensions(manager, 0, 150, 398, 148)
    manager.test_window('five')
    assert_dimensions(manager, 200, 150, 198, 148)
# Fix: the line above the class was the bare remnant ``(frozen=True)`` — a
# mangled ``@dataclass(frozen=True)`` decorator (the class body already uses
# dataclasses.field). NOTE(review): assumes ``dataclass`` is imported next to
# ``field`` at the top of the file — confirm.
@dataclass(frozen=True)
class InputShape(BaseShape, Generic[T]):
    """Description of a constructor's input: its fields, parameters and kwargs.

    ``params`` describe how ``constructor`` is called; every param must bind to
    a field in ``fields_dict`` and vice versa. ``_validate`` enforces these
    invariants plus parameter ordering rules.
    """

    fields: VarTuple[InputField]
    params: VarTuple[Param]
    kwargs: Optional[ParamKwargs]
    constructor: Callable[(..., T)]
    # Derived lookup table; excluded from init/hash/repr/compare.
    fields_dict: Mapping[(str, InputField)] = field(init=False, hash=False, repr=False, compare=False)

    # NOTE(review): predicate-style name suggests this may have been a
    # @property originally — left as a method to avoid changing the interface.
    def allow_kwargs(self) -> bool:
        return (self.kwargs is not None)

    def _validate(self):
        super()._validate()
        # Fix: the original computed ``duplicates`` as every param name whose
        # name was in the set of all names — i.e. always ALL names. Count
        # occurrences so the error reports only the actual duplicates.
        from collections import Counter
        name_counts = Counter(param.name for param in self.params)
        duplicates = {name for (name, count) in name_counts.items() if count > 1}
        if duplicates:
            raise ValueError(f'Parameter names {duplicates} are duplicated')
        # Every param must reference an existing field...
        wild_params = {param.name: param.field_id for param in self.params if (param.field_id not in self.fields_dict)}
        if wild_params:
            raise ValueError(f'Parameters {wild_params} bind to non-existing fields')
        # ...and every field must be reachable from some param.
        wild_fields = (self.fields_dict.keys() - {param.field_id for param in self.params})
        if wild_fields:
            raise ValueError(f'Fields {wild_fields} do not bound to any parameter')
        for (past, current) in pairs(self.params):
            # Kinds must be non-decreasing (POS_ONLY < POS_OR_KW < KW_ONLY).
            if (past.kind.value > current.kind.value):
                raise ValueError(f'Inconsistent order of fields, {current.kind} must be after {past.kind}')
            # Required fields cannot follow optional ones, except keyword-only.
            if (self.fields_dict[past.field_id].is_optional and self.fields_dict[current.field_id].is_required and (current.kind != ParamKind.KW_ONLY)):
                raise ValueError(f'All not required fields must be after required ones except {ParamKind.KW_ONLY} fields')
        for param in self.params:
            # An optional positional-only param has no way to be omitted-by-name.
            if ((param.kind == ParamKind.POS_ONLY) and self.fields_dict[param.field_id].is_optional):
                raise ValueError(f'Field {param.field_id!r} can not be positional only and optional')
class SentenceBERT():
    """Thin wrapper pairing query-side and document-side SentenceTransformers.

    ``model_path`` is either a single model name (one shared encoder) or a
    ``(query_model, doc_model)`` tuple of names.
    """

    def __init__(self, model_path: Union[(str, Tuple)]=None, sep: str=' ', **kwargs):
        self.sep = sep
        if isinstance(model_path, str):
            # A single model serves both queries and documents.
            shared = SentenceTransformer(model_path)
            self.q_model = shared
            self.doc_model = shared
        elif isinstance(model_path, tuple):
            self.q_model = SentenceTransformer(model_path[0])
            self.doc_model = SentenceTransformer(model_path[1])

    def encode_queries(self, queries: List[str], batch_size: int=16, **kwargs) -> Union[(List[Tensor], np.ndarray, Tensor)]:
        """Embed query strings with the query-side encoder."""
        return self.q_model.encode(queries, batch_size=batch_size, **kwargs)

    def encode_corpus(self, corpus: List[Dict[(str, str)]], batch_size: int=8, **kwargs) -> Union[(List[Tensor], np.ndarray, Tensor)]:
        """Embed corpus entries, prefixing the title (when present) joined by ``sep``."""
        sentences = []
        for doc in corpus:
            if 'title' in doc:
                sentences.append((doc['title'] + self.sep + doc['text']).strip())
            else:
                sentences.append(doc['text'].strip())
        return self.doc_model.encode(sentences, batch_size=batch_size, **kwargs)
def _transverse_mercator__to_cf(conversion):
    """Translate a transverse-mercator *conversion* into CF grid-mapping attributes."""
    params = _to_dict(conversion)
    cf_attrs = {'grid_mapping_name': 'transverse_mercator'}
    cf_attrs['latitude_of_projection_origin'] = params['latitude_of_natural_origin']
    cf_attrs['longitude_of_central_meridian'] = params['longitude_of_natural_origin']
    cf_attrs['false_easting'] = params['false_easting']
    cf_attrs['false_northing'] = params['false_northing']
    cf_attrs['scale_factor_at_central_meridian'] = params['scale_factor_at_natural_origin']
    return cf_attrs
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    """Load SQuAD features for training or evaluation, caching them on disk.

    In distributed runs, rank 0 builds the feature cache while the other ranks
    wait at a barrier, then everyone loads from the cache.

    :param args: namespace with paths, model type/name and sequence settings.
    :param tokenizer: tokenizer used when converting examples to features.
    :param evaluate: use ``args.predict_file`` instead of ``args.train_file``.
    :param output_examples: also return the raw examples and features.
    :return: a TensorDataset, or ``(dataset, examples, features)`` when
        ``output_examples`` is set.
    """
    # Non-master ranks wait here so only rank 0 builds the cache (train only).
    if ((args.local_rank not in [(- 1), 0]) and (not evaluate)):
        torch.distributed.barrier()
    input_file = (args.predict_file if evaluate else args.train_file)
    # Cache key includes split, model name and max sequence length.
    cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format(('dev' if evaluate else 'train'), list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length)))
    # When output_examples is requested we must re-read the raw file anyway,
    # so the cache is bypassed.
    if (os.path.exists(cached_features_file) and (not args.overwrite_cache) and (not output_examples)):
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info('Creating features from dataset file at %s', input_file)
        examples = read_squad_examples(input_file=input_file, is_training=(not evaluate), version_2_with_negative=args.version_2_with_negative)
        # XLNet uses different CLS/PAD segment conventions and doc-first order.
        features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=(not evaluate), cls_token_segment_id=(2 if (args.model_type in ['xlnet']) else 0), pad_token_segment_id=(3 if (args.model_type in ['xlnet']) else 0), cls_token_at_end=(True if (args.model_type in ['xlnet']) else False), sequence_a_is_doc=(True if (args.model_type in ['xlnet']) else False))
        if (args.local_rank in [(- 1), 0]):
            logger.info('Saving features into cached file %s', cached_features_file)
            torch.save(features, cached_features_file)
    # Rank 0 releases the other ranks now that the cache exists (train only).
    if ((args.local_rank == 0) and (not evaluate)):
        torch.distributed.barrier()
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
    all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
    if evaluate:
        # Evaluation keeps an example index so predictions can be mapped back.
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask)
    else:
        all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask)
    if output_examples:
        return (dataset, examples, features)
    return dataset
def test_import_dotted_library(capsys: CaptureFixture, caplog: LogCaptureFixture) -> None:
    """Loading a dotted module must route the import's stdout/stderr to logging."""
    caplog.set_level(logging.INFO)
    # Force a real (mocked) import by evicting the cached module.
    original_module = sys.modules.pop('xml.etree.ElementTree')
    expected_out = 'INFO (TEST): Welcome to cElementTree!'
    expected_err = 'WARNING (TEST): Monkey-patched version of cElementTree'

    def make_noisy_import(expected_out, expected_err):
        # Stand-in for importlib.import_module that writes to both streams.
        def noisy_import(*args, **kwargs):
            print(f'{expected_out} args={args} kwargs={kwargs}')
            print(expected_err, file=sys.stderr)
        return noisy_import

    try:
        with unittest.mock.patch('importlib.import_module', side_effect=make_noisy_import(expected_out, expected_err)):
            modutils.load_module_from_name('xml.etree.ElementTree')
        (out, err) = capsys.readouterr()
        # The output must show up in the log capture, not on the raw streams.
        assert expected_out in caplog.text
        assert expected_err in caplog.text
        assert not out
        assert not err
    finally:
        # Always restore the real module for the rest of the test session.
        sys.modules['xml.etree.ElementTree'] = original_module
def create_dumped_response():
    """Normalize dumped GitHub issues into the shape the mocked API returns.

    Drops fields the comparison does not model and trims the trailing 'Z'
    from every timestamp so they match the client's serialization.
    """
    dumped_issues = _load_dumped_issues()
    for issue in dumped_issues:
        # Fields not modelled by the comparison.
        for unwanted in ('milestone', 'performed_via_github_app', 'draft'):
            issue.pop(unwanted, None)
        # Strip the trailing 'Z' from the top-level timestamps.
        for key in ('closed_at', 'created_at', 'updated_at'):
            if issue[key] is not None:
                issue[key] = issue[key][:(- 1)]
        pull_request = issue.get('pull_request')
        if pull_request is not None:
            # merged_at is either trimmed like the others or dropped entirely.
            if pull_request.get('merged_at') is not None:
                pull_request['merged_at'] = pull_request['merged_at'][:(- 1)]
            else:
                pull_request.pop('merged_at', None)
    return {'data': dumped_issues}
def with_implementation(fn: object, implementation_fn: Impl) -> Iterator[None]:
    # Temporarily registers `implementation_fn` as the impl for `fn` in the
    # argspec cache, yielding while the override is active.
    # NOTE(review): the Iterator[None] return and bare yields suggest this is
    # wrapped with @contextmanager at the definition site — confirm.
    if (fn in ArgSpecCache.DEFAULT_ARGSPECS):
        # Known callable: just swap the impl on its cached argspec.
        with qcore.override(ArgSpecCache.DEFAULT_ARGSPECS[fn], 'impl', implementation_fn):
            (yield)
    else:
        # Unknown callable: build an argspec for it, falling back to a fully
        # generic (*args, **kwargs) signature when none can be derived.
        checker = pyanalyze.checker.Checker()
        argspec = checker.arg_spec_cache.get_argspec(fn, impl=implementation_fn)
        if (argspec is None):
            argspec = Signature.make([SigParameter('args', ParameterKind.VAR_POSITIONAL), SigParameter('kwargs', ParameterKind.VAR_KEYWORD)], callable=fn, impl=implementation_fn)
        # Install an extended copy of the default argspec table for the
        # duration of the context.
        known_argspecs = dict(ArgSpecCache.DEFAULT_ARGSPECS)
        known_argspecs[fn] = argspec
        with qcore.override(ArgSpecCache, 'DEFAULT_ARGSPECS', known_argspecs):
            (yield)
# Fix: the line above the def was the remnant ``_cache(maxsize=5000)`` — a
# mangled ``@lru_cache(maxsize=5000)`` decorator. NOTE(review): assumes
# ``lru_cache`` is imported at the top of the file — confirm.
@lru_cache(maxsize=5000)
def to_checksum_address(address: AddressTypes) -> ChecksumAddress:
    """Return the EIP-55 mixed-case checksum encoding of *address*.

    Fix: the digit test read ``char in ''`` (always False — the digit string
    literal was stripped); restored to ``'0123456789'``. Output is unaffected
    because upper()/lower() are no-ops on digits, but the intent is restored.
    """
    out = ''
    # Hash of the lowercase hex address drives the per-nibble letter casing.
    v = int.from_bytes(keccak(bytes(address.hex(), 'ascii')), byteorder='big')
    for (i, char) in enumerate(address.hex()):
        if (char in '0123456789'):
            # Digits have no case; they pass through unchanged.
            out += char
        else:
            # Uppercase the letter when the corresponding nibble's top bit
            # of the hash is set (EIP-55).
            out += (char.upper() if (v & (2 ** (255 - (4 * i)))) else char.lower())
    return ChecksumAddress(AddressHex(HexStr(('0x' + out))))
# Fix: the line above the class was the remnant ``(frozen=True, order=True)``
# — a mangled ``@dataclasses.dataclass(...)`` decorator (the body uses
# dataclasses.field throughout). The method decorators below were stripped by
# the same mangling: ``from_json_with_categories`` takes ``cls`` so it must be
# a classmethod, and ``as_json`` reads ``super().as_json`` without calling it,
# which only works if it (and the base's) are properties.
@dataclasses.dataclass(frozen=True, order=True)
class AmmoPickupDefinition(JsonDataclass):
    """Static definition of an ammo pickup loaded from the game database JSON."""

    game: RandovaniaGame = dataclasses.field(metadata={'init_from_extra': True})
    name: str = dataclasses.field(metadata={'init_from_extra': True})
    model_name: str
    offworld_models: frozendict[(RandovaniaGame, str)]
    items: tuple[(str, ...)]
    preferred_location_category: LocationCategory
    broad_category: PickupCategory = dataclasses.field(metadata={'init_from_extra': True})
    additional_resources: frozendict[(str, int)] = dataclasses.field(default_factory=frozendict, metadata=EXCLUDE_DEFAULT)
    unlocked_by: (str | None) = dataclasses.field(default=None, metadata=EXCLUDE_DEFAULT)
    temporary: (str | None) = dataclasses.field(default=None, metadata=EXCLUDE_DEFAULT)
    allows_negative: (bool | None) = dataclasses.field(default=None, metadata=EXCLUDE_DEFAULT)
    description: (str | None) = dataclasses.field(default=None, metadata=EXCLUDE_DEFAULT)
    extra: frozendict = dataclasses.field(default_factory=frozendict, metadata=EXCLUDE_DEFAULT)

    def __post_init__(self) -> None:
        # temporary and unlocked_by must be set together, and a temporary
        # lock only supports a single item.
        if (self.temporary is not None):
            if (self.unlocked_by is None):
                raise ValueError('If temporaries is set, unlocked_by must be set.')
            if (len(self.items) != 1):
                raise ValueError(f'If temporaries is set, only one item is supported. Got {len(self.items)} instead')
        elif (self.unlocked_by is not None):
            raise ValueError('If temporaries is not set, unlocked_by must not be set.')

    @classmethod
    def from_json_with_categories(cls, name: str, game: RandovaniaGame, pickup_categories: dict[(str, PickupCategory)], value: dict) -> Self:
        """Alternate constructor resolving broad_category from its name."""
        return cls.from_json(value, game=game, name=name, broad_category=pickup_categories[value['broad_category']])

    @property
    def as_json(self) -> dict:
        """Serialized form; re-adds broad_category by name on top of the base."""
        return {'broad_category': self.broad_category.name, **super().as_json}

    @property
    def pickup_category(self) -> PickupCategory:
        # All ammo pickups share the fixed ammo category.
        return AMMO_PICKUP_CATEGORY

    def create_resource_lock(self, resource_database: ResourceDatabase) -> (ResourceLock | None):
        """Build the ResourceLock for temporary ammo, or None when unlocked."""
        if (self.unlocked_by is not None):
            # __post_init__ guarantees temporary accompanies unlocked_by.
            assert (self.temporary is not None)
            return ResourceLock(locked_by=resource_database.get_item(self.unlocked_by), item_to_lock=resource_database.get_item(self.items[0]), temporary_item=resource_database.get_item(self.temporary))
        return None
def calculate_mro(info: TypeInfo, obj_type: (Callable[([], Instance)] | None)=None) -> None:
    """Compute and store *info*'s method resolution order (mutates *info*).

    Also refreshes ``fallback_to_any`` from the computed bases and clears the
    subtype caches, which the new MRO may have invalidated.
    """
    linearized = linearize_hierarchy(info, obj_type)
    assert linearized, f'Could not produce a MRO at all for {info}'
    info.mro = linearized
    info.fallback_to_any = any(base.fallback_to_any for base in info.mro)
    type_state.reset_all_subtype_caches_for(info)
def pytest_collection_modifyitems(items, config):
    """Deselect tests filtered out by the --sanity / --non-interactive options."""
    sanity = config.getoption('--sanity', False)
    non_interactive = config.getoption('--non-interactive', False)
    kept = []
    dropped = []
    for item in items:
        target = dropped if _skip_item(item, sanity, non_interactive) else kept
        target.append(item)
    if dropped:
        # Mutate the list in place so pytest sees the reduced collection,
        # and report the deselection through the hook.
        items[:] = kept
        config.hook.pytest_deselected(items=dropped)
class GenericTests(SphinxIntegrationTests):
    """Checks the HTML that the Sphinx build emits for generic Markdown input."""

    build_path = 'tests/sphinx_generic'

    def test_headings(self):
        # h1-h4 each get a permalink anchor.
        output = self.read_file('index.html')
        self.assertIn('<h1>Heading 1<a class="headerlink" href="#heading-1" title="Permalink to this headline"></a></h1>', output)
        self.assertIn('<h2>Heading 2<a class="headerlink" href="#heading-2" title="Permalink to this headline"></a></h2>', output)
        self.assertIn('<h3>Heading 3<a class="headerlink" href="#heading-3" title="Permalink to this headline"></a></h3>', output)
        self.assertIn('<h4>Heading 4<a class="headerlink" href="#heading-4" title="Permalink to this headline"></a></h4>', output)

    def test_links(self):
        output = self.read_file('index.html')
        # NOTE(review): the first two and the last assertion below contain
        # truncated/corrupted string literals (the href URLs were stripped,
        # leaving invalid syntax) — restore the full expected HTML snippets.
        # TODO confirm against the project history.
        self.assertIn('This is a <a class="reference external" href=" output)
        self.assertIn('This is a <a class="reference external" href=" link</a>', output)
        self.assertIn('This is a <a class="reference external" href="/example">relative link</a>', output)
        self.assertIn('This is a <a class="reference internal" href="#"><span class="doc">pending ref</span></a>', output)
        self.assertIn('External link to Markdown file: <a class="reference external" href=" output)

    def test_image(self):
        # Alt text with embedded quotes must be entity-escaped.
        output = self.read_file('index.html')
        self.assertIn('<p><img alt="foo "handle quotes"" src="image.png" /></p>', output)

    def test_paragraph(self):
        output = self.read_file('index.html')
        self.assertIn('<p>Foo</p>', output)
        self.assertIn('<p>Bar</p>', output)

    def test_lists(self):
        # Unordered and ordered lists render as simple ul/ol.
        output = self.read_file('index.html')
        self.assertIn('<ul class="simple">\n<li>Item A</li>\n<li>Item B</li>\n<li>Item C</li>\n</ul>', output)
        self.assertIn('<ol class="simple">\n<li>Item 1</li>\n<li>Item 2</li>\n<li>Item 3</li>\n</ol>', output)

    def test_code(self):
        # Fenced code blocks are pygments-highlighted.
        output = self.read_file('index.html')
        self.assertIn('<pre><span></span><span class="ch">#!/bin/sh</span>\n<span class="n">python</span>\n</pre>', output)

    def test_thematic_break(self):
        output = self.read_file('index.html')
        self.assertIn('<p>Foo</p>\n<hr class="docutils" />\n<p>Bar</p>', output)
class TPlaylistMenu(TestCase):
    """Tests for the playlist context menu's "new playlist" action."""

    # Two library songs; SONG is shared so identity checks are possible.
    SONG = AudioFile({'title': 'two', 'artist': 'mu', '~filename': dummy_path('/dev/zero')})
    SONGS = [AudioFile({'title': 'one', 'artist': 'piman', '~filename': dummy_path('/dev/null')}), SONG]

    def setUp(self):
        # Guard: only proceed when the playlist dir is inside the temp dir
        # (or on Windows), so tearDown can't wipe a real user directory.
        self.assertTrue(((_TEMP_DIR in _DEFAULT_PLAYLIST_DIR) or (os.name == 'nt')), msg=("Failing, don't want to delete %s" % _DEFAULT_PLAYLIST_DIR))
        try:
            os.mkdir(_DEFAULT_PLAYLIST_DIR)
        except OSError:
            # Already exists — fine.
            pass
        quodlibet.config.init()
        self.lib = FileLibrary()
        self.lib.librarian = SongLibrarian()
        for af in self.SONGS:
            af.sanitize()
        self.lib.add(self.SONGS)

    def tearDown(self):
        self.lib.destroy()
        self.lib.librarian.destroy()
        quodlibet.config.quit()

    def test__on_new_playlist_activate(self):
        # Activating "new playlist" must create a playlist containing exactly
        # the selected songs, named by the (stubbed) name chooser.
        main = qltk.MenuItem('Menu')
        menu = StubbedPlaylistMenu(self.SONGS, PlaylistLibrary(SongFileLibrary()))
        main.set_submenu(menu)
        pl = menu._on_new_playlist_activate(main, self.SONGS)
        assert pl, 'No playlists added'
        assert (pl.name == FIXED_NAME), 'Wrong name used'
        assert (pl.songs == self.SONGS)
def multiline_merge(lines, current_event, re_after, re_before):
    """Fold continuation lines into multi-line events.

    A line matching ``re_before`` continues the current event, as does a line
    whose predecessor matches ``re_after``. Any other line closes the
    accumulated event (joined with newlines) and starts a new one.
    ``current_event`` is mutated in place and keeps the unfinished trailing
    event so the caller can flush it later.
    """
    merged = []
    for line in lines:
        continues = bool(re_before and re_before.match(line)) or bool(
            re_after and current_event and re_after.match(current_event[-1])
        )
        if continues:
            current_event.append(line)
        else:
            # Close out whatever was accumulated before opening a new event.
            if current_event:
                merged.append('\n'.join(current_event))
                current_event.clear()
            current_event.append(line)
    return merged
class Sync(Cog):
    """Keep the site API in sync with the guild's roles and users.

    On cog load, waits for the guild to be chunked and runs a full
    role/user sync; afterwards, the listeners mirror individual
    role/member/user changes to the site API as they happen.

    NOTE(review): this block appears to be decompiler output — decorator
    names were stripped, leaving residue expressions between methods
    (``()``, ``(name='sync')`` which was a syntax error,
    ``_permissions(administrator=True)``, ``_group.command(...)``), and
    ``sync_group`` had no body. The decorators and the empty group body
    below are reconstructed from that residue and standard discord.py
    patterns; confirm against the original source.
    """

    def __init__(self, bot: Bot) -> None:
        self.bot = bot

    async def cog_load(self) -> None:
        """Wait until the guild is chunked, then run all syncers."""
        await self.bot.wait_until_guild_available()
        guild = self.bot.get_guild(constants.Guild.id)
        if guild is None:
            return

        attempts = 0
        while True:
            attempts += 1
            if guild.chunked:
                log.info('Guild was found to be chunked after %d attempt(s).', attempts)
                break
            if attempts == MAX_ATTEMPTS:
                log.info('Guild not chunked after %d attempts, calling chunk manually.', MAX_ATTEMPTS)
                await guild.chunk()
                break
            log.info('Attempt %d/%d: Guild not yet chunked, checking again in 10s.', attempts, MAX_ATTEMPTS)
            await asyncio.sleep(10)

        log.info('Starting syncers.')
        for syncer in (_syncers.RoleSyncer, _syncers.UserSyncer):
            await syncer.sync(guild)

    async def patch_user(self, user_id: int, json: dict[str, Any], ignore_404: bool = False) -> None:
        """PATCH the given user on the site.

        A 404 response is swallowed (and logged unless ``ignore_404``)
        because it usually means the join event that creates the user has
        not been processed yet.
        """
        try:
            await self.bot.api_client.patch(f'bot/users/{user_id}', json=json)
        except ResponseCodeError as e:
            if e.response.status != 404:
                raise
            if not ignore_404:
                log.warning('Unable to update user, got 404. Assuming race condition from join event.')

    @Cog.listener()
    async def on_guild_role_create(self, role: Role) -> None:
        """Mirror a newly created role to the site."""
        if role.guild.id != constants.Guild.id:
            return
        await self.bot.api_client.post('bot/roles', json={
            'colour': role.colour.value,
            'id': role.id,
            'name': role.name,
            'permissions': role.permissions.value,
            'position': role.position,
        })

    @Cog.listener()
    async def on_guild_role_delete(self, role: Role) -> None:
        """Delete a removed role from the site."""
        if role.guild.id != constants.Guild.id:
            return
        await self.bot.api_client.delete(f'bot/roles/{role.id}')

    @Cog.listener()
    async def on_guild_role_update(self, before: Role, after: Role) -> None:
        """Push role changes (name/colour/permissions/position) to the site."""
        if after.guild.id != constants.Guild.id:
            return
        was_updated = (
            before.name != after.name
            or before.colour != after.colour
            or before.permissions != after.permissions
            or before.position != after.position
        )
        if was_updated:
            await self.bot.api_client.put(f'bot/roles/{after.id}', json={
                'colour': after.colour.value,
                'id': after.id,
                'name': after.name,
                'permissions': after.permissions.value,
                'position': after.position,
            })

    @Cog.listener()
    async def on_member_join(self, member: Member) -> None:
        """Create or update the member on the site when they join the guild."""
        if member.guild.id != constants.Guild.id:
            return
        packed = {
            'discriminator': int(member.discriminator),
            'id': member.id,
            'in_guild': True,
            'name': member.name,
            'roles': sorted(role.id for role in member.roles),
        }
        got_error = False
        try:
            # Try an update first; a 404 means the user does not exist yet.
            await self.bot.api_client.put(f'bot/users/{member.id}', json=packed)
        except ResponseCodeError as e:
            if e.response.status != 404:
                raise
            got_error = True
        if got_error:
            await self.bot.api_client.post('bot/users', json=packed)

    @Cog.listener()
    async def on_member_remove(self, member: Member) -> None:
        """Mark the member as no longer in the guild on the site."""
        if member.guild.id != constants.Guild.id:
            return
        await self.patch_user(member.id, json={'in_guild': False})

    @Cog.listener()
    async def on_member_update(self, before: Member, after: Member) -> None:
        """Push role-membership changes to the site."""
        if after.guild.id != constants.Guild.id:
            return
        if before.roles != after.roles:
            updated_information = {'roles': sorted(role.id for role in after.roles)}
            await self.patch_user(after.id, json=updated_information)

    @Cog.listener()
    async def on_user_update(self, before: User, after: User) -> None:
        """Push name/discriminator changes to the site."""
        attrs = ('name', 'discriminator')
        if any(getattr(before, attr) != getattr(after, attr) for attr in attrs):
            updated_information = {'name': after.name, 'discriminator': int(after.discriminator)}
            # ignore_404: the user may legitimately not exist on the site
            # (e.g. they are not in the synced guild).
            await self.patch_user(after.id, json=updated_information, ignore_404=True)

    @commands.group(name='sync', invoke_without_command=True)
    @commands.has_permissions(administrator=True)
    async def sync_group(self, ctx: Context) -> None:
        """Run synchronisations between the bot and the site manually."""

    @sync_group.command(name='roles')
    @commands.has_permissions(administrator=True)
    async def sync_roles_command(self, ctx: Context) -> None:
        """Manually synchronise the guild's roles with the site."""
        await _syncers.RoleSyncer.sync(ctx.guild, ctx)

    @sync_group.command(name='users')
    @commands.has_permissions(administrator=True)
    async def sync_users_command(self, ctx: Context) -> None:
        """Manually synchronise the guild's users with the site."""
        await _syncers.UserSyncer.sync(ctx.guild, ctx)
class _FlaskLoginClient(FlaskClient):
    """Test client whose session is pre-populated with a logged-in user.

    Accepts two extra keyword arguments: ``user`` (the user to log in; a
    falsy value yields an anonymous session) and ``fresh_login`` (whether
    the simulated login is "fresh", default True).
    """

    def __init__(self, *args, **kwargs):
        user = kwargs.pop('user', None)
        fresh = kwargs.pop('fresh_login', True)
        super().__init__(*args, **kwargs)
        with self.session_transaction() as session:
            if not user:
                session['_user_id'] = 'anonymous'
            else:
                session['_user_id'] = user.uuid
                session['user_id'] = user.uuid
                session['_fresh'] = fresh
                session['login_time'] = datetime.datetime.now()
def test_check_credits(skip_qtbot, preset_manager):
    """The credits event node must not be offered as a starting location."""
    default = preset_manager.default_preset_for_game(RandovaniaGame.METROID_PRIME).get_preset()
    preset = dataclasses.replace(default, uuid=uuid.UUID('b41fde84-1f57-4b79-8cd6-3e5a78077fa6'))
    editor = PresetEditor(preset, MagicMock())
    window = PresetMetroidStartingArea(
        editor, default_database.game_description_for(preset.game), MagicMock())
    skip_qtbot.addWidget(window)

    credits_node = NodeIdentifier.create('End of Game', 'Credits', 'Event - Credits')
    assert window._starting_location_for_node.get(credits_node) is None
class RecvFL2SendRTL(Component):
    """Adapter that accepts messages through an FL (function-level) recv
    interface and forwards them through an RTL send interface.

    At most one message is buffered at a time in ``s.entry``; the FL
    ``recv`` call blocks (by yielding to the parent greenlet) until the
    buffered entry has been drained by the RTL side.

    NOTE(review): the update blocks below are passed to ``U(...)`` in
    ``add_constraints`` but carry no ``@update``/``@s.update`` decorator —
    it looks like decorators were stripped from this source; confirm
    against the original.
    """

    def recv(s, msg):
        # Block until the single-entry buffer is free, then store the message.
        # switch(0) yields control back to the simulator's parent greenlet.
        while (s.entry is not None):
            greenlet.getcurrent().parent.switch(0)
        s.entry = msg

    def construct(s, MsgType):
        # Rebind s.recv from the plain method above to an FL interface
        # wrapping that method.
        s.recv = RecvIfcFL(method=s.recv)
        s.send = SendIfcRTL(MsgType)
        s.entry = None  # one-deep message buffer

        # Drop the buffered entry once the receiver accepted it (send.en set).
        def up_clear():
            if (s.send.en & (s.entry is not None)):
                s.entry = None

        # Drive send.en/send.msg when an entry is buffered and the
        # receiver is ready; otherwise deassert send.en.
        def up_fl_send_rtl():
            if (s.send.rdy & (s.entry is not None)):
                s.send.en = 1
                s.send.msg = s.entry
            else:
                s.send.en = 0

        # Scheduling: the FL recv method runs before the block driving the
        # send wires, and up_clear runs before any block writing send.en.
        s.add_constraints((M(s.recv) < U(up_fl_send_rtl)), (U(up_clear) < WR(s.send.en)))

    def line_trace(s):
        # Compact "<recv>()<send>" trace of both interfaces.
        return '{}(){}'.format(s.recv, s.send)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.