code
stringlengths
281
23.7M
class PackagesDistributionsEggTest(
    fixtures.EggInfoPkg,
    fixtures.EggInfoPkgPipInstalledNoToplevel,
    fixtures.EggInfoPkgPipInstalledNoModules,
    fixtures.EggInfoPkgSourcesFallback,
    unittest.TestCase,
):
    """Exercise packages_distributions() against several egg-info package fixtures."""

    def test_packages_distributions_on_eggs(self):
        dist_map = packages_distributions()

        def import_names_from_package(package_name):
            # Every import name whose owning-distribution list mentions the package.
            return {
                import_name
                for import_name, owners in dist_map.items()
                if package_name in owners
            }

        assert import_names_from_package('egginfo-pkg') == {'mod'}
        assert import_names_from_package('egg_with_module-pkg') == {'egg_with_module'}
        assert import_names_from_package('egg_with_no_modules-pkg') == set()
        assert import_names_from_package('sources_fallback-pkg') == {'sources_fallback'}
class RedButton(DefaultObject):
    """A large red button protected by a glass lid, with a blinking lamp."""

    def at_object_creation(self):
        """Set initial description, state flags, cmdset and scripts."""
        self.db.desc = (
            'This is a large red button, inviting yet evil-looking. '
            'A closed glass lid protects it.'
        )
        self.db.lid_open = False
        self.db.lamp_works = True
        self.db.lid_locked = False
        self.cmdset.add_default(cmdsetexamples.DefaultCmdSet, permanent=True)
        self.scripts.add(scriptexamples.ClosedLidState)
        self.scripts.add(scriptexamples.BlinkButtonEvent)

    def open_lid(self):
        """Open the glass lid, exposing the button; no-op if already open."""
        if self.db.lid_open:
            return
        new_desc = self.db.desc_lid_open
        if not new_desc:
            new_desc = (
                'This is a large red button, inviting yet evil-looking. '
                'Its glass cover is open and the button exposed.'
            )
        self.db.desc = new_desc
        self.db.lid_open = True
        self.scripts.validate()
        self.scripts.add(scriptexamples.OpenLidState)
        self.scripts.add(scriptexamples.CloseLidEvent)

    def close_lid(self):
        """Close the glass lid; no-op if already closed."""
        if not self.db.lid_open:
            return
        new_desc = self.db.desc_lid_closed
        if not new_desc:
            new_desc = (
                'This is a large red button, inviting yet evil-looking. '
                'Its glass cover is closed, protecting it.'
            )
        self.db.desc = new_desc
        self.db.lid_open = False
        self.scripts.validate()
        self.scripts.add(scriptexamples.ClosedLidState)

    def break_lamp(self, feedback=True):
        """Mark the lamp as broken; optionally announce it to the room."""
        self.db.lamp_works = False
        broken_desc = self.db.desc_lamp_broken
        if broken_desc:
            self.db.desc = broken_desc
        else:
            self.db.desc += '\nThe big red button has stopped blinking for the time being.'
        if feedback and self.location:
            self.location.msg_contents('The lamp flickers, the button going dark.')
        self.scripts.validate()

    def press_button(self, pobject):
        """Trigger the button's deactivation and blind the presser."""
        self.scripts.add(scriptexamples.DeactivateButtonEvent)
        pobject.scripts.add(scriptexamples.BlindedState)

    def blink(self):
        """Emit one of several random blink messages to the room (or stay silent)."""
        room = self.location
        if not room:
            return
        roll = random.random()
        if roll < 0.2:
            message = 'The red button flashes briefly.'
        elif roll < 0.4:
            message = 'The red button blinks invitingly.'
        elif roll < 0.6:
            message = 'The red button flashes. You know you wanna push it!'
        else:
            # Remaining 40% of the time: no blink message at all.
            return
        room.msg_contents(message)
def setUpModule():
    """Build a small water molecule and converge a B3LYP(VWN5) RKS reference.

    Populates the module-level ``mol`` and ``mf`` used by the tests.
    """
    global mol, mf
    mol = gto.M()
    # Geometry given in Bohr with a minimal basis; output silenced to /dev/null.
    mol.atom = 'O 0. 0. 0.\n H 0. -1. 2.\n H 0. 1. 2.'
    mol.unit = 'Bohr'
    mol.basis = 'sto3g'
    mol.verbose = 4
    mol.output = '/dev/null'
    mol.build()

    mf = dft.RKS(mol)
    # Throwaway chkfile path so repeated runs do not collide.
    mf.chkfile = tempfile.NamedTemporaryFile().name
    mf.grids.level = 3
    mf.xc = 'b3lyp5'
    # Tight convergence so downstream gradient tests are stable.
    mf.conv_tol = 1e-14
    mf.conv_tol_grad = 1e-09
    mf.kernel()
class SEResNet(nn.Module):
    """SE-ResNet backbone with dilated later stages.

    NOTE(review): the original ``forward`` referenced ``self.avgpool`` and
    ``self.fc``, neither of which was ever created in ``__init__`` -- calling
    the model raised AttributeError.  Both are now defined; the new
    ``num_classes`` parameter defaults to 1000 so existing callers are
    unaffected.  ``strides[0]`` and ``zero_init_residual`` are accepted but
    unused, exactly as in the original (kept for caller compatibility).
    """

    def __init__(self, block, layers, strides=(2, 2, 2, 2), dilations=(1, 1, 2, 4),
                 zero_init_residual=True, num_classes=1000):
        super(SEResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = FixedBatchNorm(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # layer1 deliberately uses stride 1: spatial reduction is done by maxpool.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, dilation=dilations[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3])
        # BUG FIX: forward() uses avgpool/fc, which the original never defined.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Preserved from the original: external code may read inplanes after init.
        self.inplanes = 1024

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual units; add a 1x1 downsample when shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                FixedBatchNorm(planes * block.expansion),
            )
        # First unit carries the stride/downsample; dilation=1 here matches the original.
        layers = [block(self.inplanes, planes, stride, downsample, dilation=1)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class LagPrec():
    """Diagonal (Jacobi-style) preconditioner with a level shift.

    Dividing by the shifted diagonal approximates applying A^-1; tiny entries
    are clamped to avoid division blow-ups.
    """

    def __init__(self, Adiag=None, level_shift=None, **kwargs):
        self.Adiag = Adiag
        self.level_shift = level_shift

    def __call__(self, x):
        """Scale ``x`` in place by the shifted diagonal and return it."""
        shifted = self.Adiag + self.level_shift
        # Clamp near-zero entries so the division below stays finite.
        shifted[abs(shifted) < 1e-08] = 1e-08
        x /= shifted
        return x
class ConvBlock(nn.Module):
    """LeNet-style feature extractor: two conv+ReLU+maxpool stages, then flatten.

    For 3x32x32 input the output is a (N, 400) tensor (16 * 5 * 5 features).
    """

    def __init__(self):
        super(ConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

    def forward(self, x):
        # Both stages share the same 2x2 pooling layer.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        return x.view(-1, 16 * 5 * 5)
class SmartlineV1(Instrument):
    """Thyracont Smartline vacuum gauge, protocol version 1.

    ASCII protocol over serial: every frame is prefixed with a 3-digit device
    address and suffixed with a single checksum character.
    """

    def __init__(self, adapter, name='Thyracont Vacuum Gauge V1', address=1,
                 baud_rate=9600, **kwargs):
        super().__init__(
            adapter,
            name,
            includeSCPI=False,
            write_termination='\r',
            read_termination='\r',
            asrl=dict(baud_rate=baud_rate),
            **kwargs,
        )
        self.address = address

    def read(self):
        """Read a reply, verify its trailing checksum, and return the payload."""
        msg = super().read()
        chksum = calculate_checksum(msg[:-1])
        if msg[-1] != chksum:
            raise ConnectionError(
                f'checksum error in received message {msg} with checksum '
                f'{chksum} but received {msg[-1]}'
            )
        return msg[:-1]

    def write(self, command):
        """Send ``command`` with the address prefix and checksum suffix added."""
        fullcmd = f'{self.address:03d}' + command
        super().write(fullcmd + calculate_checksum(fullcmd))

    def check_set_errors(self):
        """Validate the reply to a setting command; 'N'/'X' codes signal failure."""
        reply = self.read()
        if len(reply) < 4:
            raise ConnectionError(f"Reply of instrument ('{reply}') too short.")
        if reply[3] in ['N', 'X']:
            raise ConnectionError(f"Reply from Instrument indicates an error '{reply}'")
        return []

    device_type = Instrument.measurement(
        'T', 'Get the device type.',
        cast=str,
        preprocess_reply=lambda s: s[4:],
    )

    # Mantissa (4 digits, /1000) times 10^(exponent - 20) gives mbar.
    pressure = Instrument.measurement(
        'M', 'Get the pressure measurement in mbar.',
        cast=str,
        preprocess_reply=lambda s: s[4:],
        get_process=lambda s: (float(s[:4]) / 1000) * 10 ** (int(s[4:]) - 20),
    )

    display_unit = Instrument.control(
        'U', 'u%06d', "Control the display's pressure unit.",
        cast=int,
        preprocess_reply=lambda s: s[4:],
        values={'mbar': 0, 'Torr': 1, 'hPa': 2},
        map_values=True,
        validator=strict_discrete_set,
    )

    cathode_enabled = Instrument.control(
        'I', 'i%d', 'Control the hot/cold cathode state of the pressure gauge.',
        cast=int,
        preprocess_reply=lambda s: s[4:],
        values={True: 1, False: 0},
        map_values=True,
        validator=strict_discrete_set,
        check_set_errors=True,
    )
def test_log_in_runtest_logreport(pytester: Pytester) -> None:
    """A logger call in pytest_runtest_logreport must reach the log file once per phase."""
    log_file = str(pytester.path.joinpath('pytest.log'))
    pytester.makeini(
        '\n [pytest]\n log_file={}\n log_file_level = INFO\n log_cli=true\n '.format(log_file)
    )
    pytester.makeconftest(
        '\n import logging\n logger = logging.getLogger(__name__)\n\n def pytest_runtest_logreport(report):\n logger.info("logreport")\n '
    )
    pytester.makepyfile('\n def test_first():\n assert True\n ')
    pytester.runpytest()
    with open(log_file, encoding='utf-8') as handle:
        contents = handle.read()
    # setup / call / teardown each produce one report, hence three occurrences.
    assert contents.count('logreport') == 3
class TestEmbDimBucketer(unittest.TestCase):
    """Unit tests for EmbDimBucketer's bucketing policies."""

    def setUp(self) -> None:
        super().setUp()

    def gen_tables(self) -> Tuple[List[ShardedEmbeddingTable], int]:
        """Generate tables whose embedding dims cycle over a fixed set of buckets."""
        num_tables = 103
        num_buckets = 11
        dims = random.sample(range(1024), num_buckets)
        tables: List[ShardedEmbeddingTable] = [
            ShardedEmbeddingTable(
                name=f'table_{i}',
                embedding_dim=dims[i % num_buckets],
                num_embeddings=random.randint(100, 500000),
                data_type=DataType.FP16,
                compute_kernel=EmbeddingComputeKernel.FUSED_UVM_CACHING,
            )
            for i in range(num_tables)
        ]
        return tables, len(dims)

    def gen_single_dim_tables(self) -> List[ShardedEmbeddingTable]:
        """Generate tables that all share one embedding dim (16)."""
        num_tables = 47
        return [
            ShardedEmbeddingTable(
                name=f'table_{i}',
                embedding_dim=16,
                num_embeddings=random.randint(100, 500000),
                data_type=DataType.FP16,
            )
            for i in range(num_tables)
        ]

    def test_single_bucket_tables(self) -> None:
        tables = self.gen_single_dim_tables()
        bucketer = EmbDimBucketer(tables, EmbDimBucketerPolicy.CACHELINE_BUCKETS)
        self.assertTrue(bucketer.bucket_count() == 1)

    def test_single_bucket_policy(self) -> None:
        tables, _ = self.gen_tables()
        bucketer = EmbDimBucketer(tables, EmbDimBucketerPolicy.SINGLE_BUCKET)
        self.assertTrue(bucketer.bucket_count() == 1)

    def test_cacheline_bucket_policy(self) -> None:
        tables, _ = self.gen_tables()
        bucketer = EmbDimBucketer(tables, EmbDimBucketerPolicy.CACHELINE_BUCKETS)
        # Every bucket index must be mapped to by at least one table.
        for i in range(bucketer.bucket_count()):
            self.assertTrue(i in bucketer.emb_dim_buckets.values())

    def test_all_bucket_policy(self) -> None:
        tables, num_buckets = self.gen_tables()
        bucketer = EmbDimBucketer(tables, EmbDimBucketerPolicy.ALL_BUCKETS)
        self.assertTrue(bucketer.bucket_count() == num_buckets)
        for i in range(bucketer.bucket_count()):
            self.assertTrue(i in bucketer.emb_dim_buckets.values())

    def test_should_do_dim_bucketing(self) -> None:
        tables, _ = self.gen_tables()
        self.assertFalse(should_do_dim_bucketing(tables))
def main():
    """Find ayah markers in a page scan and strip them from the RGBA image.

    argv: rgb_image, left_template, right_template, image_path [, output_dir]
    """
    rgb_image_filename = sys.argv[1]
    left_template_filename = sys.argv[2]
    right_template_filename = sys.argv[3]
    image_path = sys.argv[4]

    img_rgb = cv2.imread(rgb_image_filename)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    # Template-match both marker variants at a 0.75 confidence threshold.
    left = find_ayat(img_gray, cv2.imread(left_template_filename, 0), 0.75)
    right = find_ayat(img_gray, cv2.imread(right_template_filename, 0), 0.75)

    image = Image.open(image_path).convert('RGBA')
    output_directory = sys.argv[5] if len(sys.argv) > 5 else 'no_markers'
    os.makedirs(output_directory, exist_ok=True)

    if len(left) > 0 or len(right) > 0:
        print('processing %s' % image_path)
        remove_markers(
            image, left, right,
            os.path.join(output_directory, os.path.basename(image_path)),
        )
def args():
    """Parse command-line options for the keypoint-network test script."""
    parser = argparse.ArgumentParser(description='Test keypoints network')
    # Required experiment configuration file.
    parser.add_argument('--cfg', help='experiment configure file name',
                        required=True, default='config.yaml', type=str)
    parser.add_argument('--exp_name', help='experiment name', default='test', type=str)
    # Boolean toggles.
    parser.add_argument('--use_dt', help='if use detection results or',
                        action='store_true', default=False)
    parser.add_argument('--flip_test', help='', action='store_true', default=False)
    parser.add_argument('--param_flop', help=' ', action='store_true', default=False)
    parser.add_argument('--visualize', help='visualize', action='store_true', default=False)
    # Model / hardware / evaluation options.
    parser.add_argument('--test_model', help='test model', type=str)
    parser.add_argument('--gpu', help='gpu ids', type=str)
    parser.add_argument('--margin', help='margin_to_border', type=float, default=1.25)
    parser.add_argument('--dataset',
                        help='run test.py on which dataset. options: test or val',
                        default='val')
    return parser.parse_args()
def remove_all_but_the_largest_connected_component(image: np.ndarray, for_which_classes: list, volume_per_voxel: float, minimum_valid_object_size: dict = None):
    """Zero out all but the largest connected component of each class, in place.

    A class entry may be a single label or a list/tuple of labels treated as one
    region.  Smaller components are removed unless they exceed the per-class
    ``minimum_valid_object_size`` (in physical volume).  Returns the modified
    image plus per-class sizes of the largest removed and the kept component.
    """
    if for_which_classes is None:
        # Default: every non-background label present in the image.
        for_which_classes = np.unique(image)
        for_which_classes = for_which_classes[for_which_classes > 0]
    assert 0 not in for_which_classes, 'cannot remove background'

    largest_removed = {}
    kept_size = {}
    for c in for_which_classes:
        if isinstance(c, (list, tuple)):
            c = tuple(c)  # tuples are hashable, so usable as dict keys
            mask = np.zeros_like(image, dtype=bool)
            for cl in c:
                mask[image == cl] = True
        else:
            mask = image == c

        lmap, num_objects = label(mask.astype(int))
        object_sizes = {
            obj_id: (lmap == obj_id).sum() * volume_per_voxel
            for obj_id in range(1, num_objects + 1)
        }

        largest_removed[c] = None
        kept_size[c] = None
        if num_objects > 0:
            maximum_size = max(object_sizes.values())
            kept_size[c] = maximum_size
            for obj_id, size in object_sizes.items():
                if size == maximum_size:
                    continue
                remove = True
                if minimum_valid_object_size is not None:
                    # Keep small-but-valid objects above the configured threshold.
                    remove = size < minimum_valid_object_size[c]
                if remove:
                    image[(lmap == obj_id) & mask] = 0
                    if largest_removed[c] is None:
                        largest_removed[c] = size
                    else:
                        largest_removed[c] = max(largest_removed[c], size)
    return image, largest_removed, kept_size
def test_mixed_markers4(item_names_for):
    """Relative ordering (after='test_3') combined with absolute order indices.

    NOTE(review): the embedded snippet appears extraction-mangled -- '.order'
    lost its '@pytest.mark' prefix; the literal is preserved verbatim here.
    """
    source = '\n import pytest\n\n .order(2)\n def test_1():\n pass\n\n .order(index=1, after="test_3")\n def test_2():\n pass\n\n def test_3():\n pass\n '
    assert item_names_for(source) == ['test_3', 'test_2', 'test_1']
# NOTE(review): the decorator line was mangled in this dump ('_hook(...)');
# restored to Classy Vision's registration decorator -- confirm against the repo.
@register_hook('torchscript')
class TorchscriptHook(ClassyHook):
    """Hook that torchscripts the base model (via trace or script) and saves it
    to ``torchscript_folder`` when the task ends."""

    on_phase_start = ClassyHook._noop
    on_phase_end = ClassyHook._noop
    on_step = ClassyHook._noop

    def __init__(self, torchscript_folder: str, use_trace: bool = True,
                 trace_strict: bool = True, device: str = 'cpu') -> None:
        """Args:
            torchscript_folder: directory to write the torchscripted model into.
            use_trace: trace with a dummy input if True, otherwise script.
            trace_strict: passed through to torch.jit.trace.
            device: device to move the torchscript module to before saving.
        """
        super().__init__()
        assert isinstance(torchscript_folder, str), 'torchscript_folder must be a string specifying the torchscript directory'
        self.torchscript_folder: str = torchscript_folder
        self.use_trace: bool = use_trace
        self.trace_strict: bool = trace_strict
        self.device: str = device

    def torchscript_using_trace(self, model):
        """Trace the model with a dummy input; returns None if input_shape is unknown."""
        input_shape = model.input_shape if hasattr(model, 'input_shape') else None
        if not input_shape:
            logging.warning("This model doesn't implement input_shape.Cannot save torchscripted model.")
            return
        input_data = get_model_dummy_input(
            model,
            input_shape,
            input_key=model.input_key if hasattr(model, 'input_key') else None,
        )
        # BUG FIX: `with (eval_model(model) and torch.no_grad()):` entered only
        # ONE context manager (whichever the `and` expression evaluated to).
        # Use the multi-manager form so eval mode AND no_grad are both active.
        with eval_model(model), torch.no_grad():
            torchscript = torch.jit.trace(model, input_data, strict=self.trace_strict)
        return torchscript

    def torchscript_using_script(self, model):
        """Script the model (no example input required)."""
        # BUG FIX: same single-context-manager bug as in torchscript_using_trace.
        with eval_model(model), torch.no_grad():
            torchscript = torch.jit.script(model)
        return torchscript

    def save_torchscript(self, task) -> None:
        """Torchscript task.base_model and write it to the configured folder."""
        model = task.base_model
        torchscript = (
            self.torchscript_using_trace(model)
            if self.use_trace
            else self.torchscript_using_script(model)
        )
        logging.info("Saving torchscript to '{}'...".format(self.torchscript_folder))
        torchscript = torchscript.to(self.device)
        torchscript_name = f'{self.torchscript_folder}/{TORCHSCRIPT_FILE}'
        with PathManager.open(torchscript_name, 'wb') as f:
            torch.jit.save(torchscript, f)

    def on_start(self, task) -> None:
        """Fail fast (primary rank only) if the output folder is missing."""
        if not is_primary():
            return
        if not PathManager.exists(self.torchscript_folder):
            err_msg = "Torchscript folder '{}' does not exist.".format(self.torchscript_folder)
            raise FileNotFoundError(err_msg)

    def on_end(self, task) -> None:
        """Save the torchscripted model on the primary rank at task end."""
        if not is_primary():
            return
        self.save_torchscript(task)
def compute_lambda(hcore: npt.NDArray, sparse_int_obj: SparseFactorization) -> SparseHamiltonianProperties:
    """Compute the one- and two-body lambda norms for a sparse k-point factorization.

    The one-body term folds the exchange (-0.5 * sum_Q (pQ|Qq)/Nk) and Coulomb
    (+ sum_Q (pq|QQ)/Nk) contractions into hcore before taking the entrywise
    1-norm; the two-body term sums |ERI| over all momentum-conserving blocks.

    Args:
        hcore: list/array of core Hamiltonians, one per k-point.
        sparse_int_obj: sparse-factorization object providing ERIs and k-point maps.

    Returns:
        SparseHamiltonianProperties with total, one-body and two-body lambdas and
        the number of symmetry-unique terms above threshold.
    """
    # Cleanup vs original: removed an unused `import time` and the duplicate
    # `nkpts = len(kpts)` reassignment.
    kpts = sparse_int_obj.kmf.kpts
    nkpts = len(kpts)

    one_body_mat = np.empty(nkpts, dtype=object)
    lambda_one_body = 0.0
    for kidx in range(nkpts):
        h1_pos = np.zeros_like(hcore[kidx])
        h1_neg = np.zeros_like(hcore[kidx])
        for qidx in range(nkpts):
            # Exchange-like contraction (k q q k).
            eri_kqqk_pqrs = sparse_int_obj.get_eri_exact([kidx, qidx, qidx, kidx])
            h1_neg -= np.einsum('prrq->pq', eri_kqqk_pqrs, optimize=True) / nkpts
            # Coulomb-like contraction (k k q q).
            eri_kkqq_pqrs = sparse_int_obj.get_eri_exact([kidx, kidx, qidx, qidx])
            h1_pos += np.einsum('pqrr->pq', eri_kkqq_pqrs, optimize=True) / nkpts
        one_body_mat[kidx] = hcore[kidx] + 0.5 * h1_neg + h1_pos
        lambda_one_body += np.sum(np.abs(one_body_mat[kidx].real)) + np.sum(np.abs(one_body_mat[kidx].imag))

    lambda_two_body = 0.0
    for kidx in range(nkpts):
        for kpidx in range(nkpts):
            for qidx in range(nkpts):
                # Momentum transfer maps: k - q and k' - q indices.
                kmq_idx = sparse_int_obj.k_transfer_map[qidx, kidx]
                kpmq_idx = sparse_int_obj.k_transfer_map[qidx, kpidx]
                test_eri_block = sparse_int_obj.get_eri([kidx, kmq_idx, kpmq_idx, kpidx]) / nkpts
                lambda_two_body += np.sum(np.abs(test_eri_block.real)) + np.sum(np.abs(test_eri_block.imag))

    lambda_tot = lambda_one_body + lambda_two_body
    return SparseHamiltonianProperties(
        lambda_total=lambda_tot,
        lambda_one_body=lambda_one_body,
        lambda_two_body=lambda_two_body,
        num_sym_unique=sparse_int_obj.get_total_unique_terms_above_thresh(return_nk_counter=False),
    )
def test_valid_organization(app):
    """A session cookie for an organization namespace must fail validation."""
    org = model.user.get_namespace_user('buynlarge')
    org.uuid = str(uuid.uuid4())
    org.verified = True
    org.save()

    login_user(LoginWrappedDBUser(org.uuid, org))

    outcome = validate_session_cookie()
    assert outcome.authed_user is None
    assert outcome.context.identity is None
    assert not outcome.has_nonrobot_user
    assert outcome.error_message == 'Cannot login to organization'
def get_cone_chart(paths_data_frame, series_list, names_list, title=None, log_sacle=True):
    """Build a cone chart: thin background path lines plus emphasized main series.

    Note: the ``log_sacle`` keyword name is kept as-is [sic] for caller
    compatibility; it is forwarded to LineChart(log_scale=...).
    """
    chart = LineChart(log_scale=log_sacle)

    # Thin background lines, one per simulated path column.
    for column in paths_data_frame:
        chart.add_decorator(DataElementDecorator(paths_data_frame[column], linewidth=1))

    legend = LegendDecorator(key='legend')
    palette = ['black', 'red', 'green', 'purple', 'lime']
    for idx in range(len(series_list)):
        series = series_list[idx]
        label = names_list[idx]
        element = DataElementDecorator(series, color=palette[idx % len(palette)], linewidth=3)
        chart.add_decorator(element)
        legend.add_entry(element, label)
        # Emphasize the final point of each main series.
        last_x = series.index[-1]
        chart.add_decorator(PointEmphasisDecorator(element, (last_x, series[last_x]), move_point=False))

    chart.add_decorator(legend)
    if title is not None:
        chart.add_decorator(TitleDecorator(title, 'title'))
    return chart
class TestFunction():
    """Tests for pytest.Function collection, equality, and parametrization.

    NOTE(review): this block was extraction-mangled -- decorators lost their
    '@pytest.mark' / '@pytest.fixture' / '@staticmethod' prefixes.  The two
    code-level ones are restored below (the class would not even parse without
    them); the same mangling is visible INSIDE the makepyfile()/getitems()
    string literals (e.g. bare '.parametrize'), but those are runtime strings
    and are preserved byte-for-byte here -- confirm against upstream pytest.
    """

    def test_getmodulecollector(self, pytester: Pytester) -> None:
        item = pytester.getitem('def test_func(): pass')
        modcol = item.getparent(pytest.Module)
        assert isinstance(modcol, pytest.Module)
        assert hasattr(modcol.obj, 'test_func')

    # RESTORED: was mangled to a bare '.filterwarnings(...)' expression.
    @pytest.mark.filterwarnings('default')
    def test_function_as_object_instance_ignored(self, pytester: Pytester) -> None:
        pytester.makepyfile('\n class A(object):\n def __call__(self, tmp_path):\n 0/0\n\n test_a = A()\n ')
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['collected 0 items', "*test_function_as_object_instance_ignored.py:2: *cannot collect 'test_a' because it is not a function."])

    # RESTORED: no `self` parameter yet called as self.make_function(...) --
    # consistent only as a staticmethod, whose decorator was stripped.
    @staticmethod
    def make_function(pytester: Pytester, **kwargs: Any) -> Any:
        from _pytest.fixtures import FixtureManager
        config = pytester.parseconfigure()
        session = Session.from_config(config)
        session._fixturemanager = FixtureManager(session)
        return pytest.Function.from_parent(parent=session, **kwargs)

    def test_function_equality(self, pytester: Pytester) -> None:
        def func1():
            pass

        def func2():
            pass

        f1 = self.make_function(pytester, name='name', callobj=func1)
        assert f1 == f1
        f2 = self.make_function(pytester, name='name', callobj=func2, originalname='foobar')
        assert f1 != f2

    def test_repr_produces_actual_test_id(self, pytester: Pytester) -> None:
        f = self.make_function(pytester, name='test[\\xe5]', callobj=self.test_repr_produces_actual_test_id)
        assert repr(f) == '<Function test[\\xe5]>'

    def test_issue197_parametrize_emptyset(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n .parametrize('arg', [])\n def test_function(arg):\n pass\n ")
        reprec = pytester.inline_run()
        reprec.assertoutcome(skipped=1)

    def test_single_tuple_unwraps_values(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n .parametrize(('arg',), [(1,)])\n def test_function(arg):\n assert arg == 1\n ")
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)

    def test_issue213_parametrize_value_no_equal(self, pytester: Pytester) -> None:
        pytester.makepyfile('\n import pytest\n class A(object):\n def __eq__(self, other):\n raise ValueError("not possible")\n .parametrize(\'arg\', [A()])\n def test_function(arg):\n assert arg.__class__.__name__ == "A"\n ')
        reprec = pytester.inline_run('--fulltrace')
        reprec.assertoutcome(passed=1)

    def test_parametrize_with_non_hashable_values(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n archival_mapping = {\n '1.0': {'tag': '1.0'},\n '1.2.2a1': {'tag': 'release-1.2.2a1'},\n }\n\n import pytest\n .parametrize('key value'.split(),\n archival_mapping.items())\n def test_archival_to_version(key, value):\n assert key in archival_mapping\n assert value == archival_mapping[key]\n ")
        rec = pytester.inline_run()
        rec.assertoutcome(passed=2)

    def test_parametrize_with_non_hashable_values_indirect(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n archival_mapping = {\n '1.0': {'tag': '1.0'},\n '1.2.2a1': {'tag': 'release-1.2.2a1'},\n }\n\n import pytest\n\n \n def key(request):\n return request.param\n\n \n def value(request):\n return request.param\n\n .parametrize('key value'.split(),\n archival_mapping.items(), indirect=True)\n def test_archival_to_version(key, value):\n assert key in archival_mapping\n assert value == archival_mapping[key]\n ")
        rec = pytester.inline_run()
        rec.assertoutcome(passed=2)

    def test_parametrize_overrides_fixture(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n \n def value():\n return 'value'\n\n .parametrize('value',\n ['overridden'])\n def test_overridden_via_param(value):\n assert value == 'overridden'\n\n .parametrize('somevalue', ['overridden'])\n def test_not_overridden(value, somevalue):\n assert value == 'value'\n assert somevalue == 'overridden'\n\n .parametrize('other,value', [('foo', 'overridden')])\n def test_overridden_via_multiparam(other, value):\n assert other == 'foo'\n assert value == 'overridden'\n ")
        rec = pytester.inline_run()
        rec.assertoutcome(passed=3)

    def test_parametrize_overrides_parametrized_fixture(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n (params=[1, 2])\n def value(request):\n return request.param\n\n .parametrize('value',\n ['overridden'])\n def test_overridden_via_param(value):\n assert value == 'overridden'\n ")
        rec = pytester.inline_run()
        rec.assertoutcome(passed=1)

    def test_parametrize_overrides_indirect_dependency_fixture(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n fix3_instantiated = False\n\n \n def fix1(fix2):\n return fix2 + '1'\n\n \n def fix2(fix3):\n return fix3 + '2'\n\n \n def fix3():\n global fix3_instantiated\n fix3_instantiated = True\n return '3'\n\n .parametrize('fix2', ['2'])\n def test_it(fix1):\n assert fix1 == '21'\n assert not fix3_instantiated\n ")
        rec = pytester.inline_run()
        rec.assertoutcome(passed=1)

    def test_parametrize_with_mark(self, pytester: Pytester) -> None:
        items = pytester.getitems("\n import pytest\n .foo\n .parametrize('arg', [\n 1,\n pytest.param(2, marks=[pytest.mark.baz, pytest.mark.bar])\n ])\n def test_function(arg):\n pass\n ")
        keywords = [item.keywords for item in items]
        assert ('foo' in keywords[0]) and ('bar' not in keywords[0]) and ('baz' not in keywords[0])
        assert ('foo' in keywords[1]) and ('bar' in keywords[1]) and ('baz' in keywords[1])

    def test_parametrize_with_empty_string_arguments(self, pytester: Pytester) -> None:
        items = pytester.getitems(" import pytest\n\n .parametrize('v', ('', ' '))\n .parametrize('w', ('', ' '))\n def test(v, w): ...\n ")
        names = {item.name for item in items}
        assert names == {'test[-]', 'test[ -]', 'test[- ]', 'test[ - ]'}

    def test_function_equality_with_callspec(self, pytester: Pytester) -> None:
        items = pytester.getitems("\n import pytest\n .parametrize('arg', [1,2])\n def test_function(arg):\n pass\n ")
        assert items[0] != items[1]
        assert not (items[0] == items[1])

    def test_pyfunc_call(self, pytester: Pytester) -> None:
        item = pytester.getitem('def test_func(): raise ValueError')
        config = item.config

        class MyPlugin1():
            def pytest_pyfunc_call(self):
                raise ValueError

        class MyPlugin2():
            def pytest_pyfunc_call(self):
                return True

        config.pluginmanager.register(MyPlugin1())
        config.pluginmanager.register(MyPlugin2())
        config.hook.pytest_runtest_setup(item=item)
        config.hook.pytest_pyfunc_call(pyfuncitem=item)

    def test_multiple_parametrize(self, pytester: Pytester) -> None:
        modcol = pytester.getmodulecol("\n import pytest\n .parametrize('x', [0, 1])\n .parametrize('y', [2, 3])\n def test1(x, y):\n pass\n ")
        colitems = modcol.collect()
        assert colitems[0].name == 'test1[2-0]'
        assert colitems[1].name == 'test1[2-1]'
        assert colitems[2].name == 'test1[3-0]'
        assert colitems[3].name == 'test1[3-1]'

    def test_issue751_multiple_parametrize_with_ids(self, pytester: Pytester) -> None:
        modcol = pytester.getmodulecol("\n import pytest\n .parametrize('x', [0], ids=['c'])\n .parametrize('y', [0, 1], ids=['a', 'b'])\n class Test(object):\n def test1(self, x, y):\n pass\n def test2(self, x, y):\n pass\n ")
        colitems = modcol.collect()[0].collect()
        assert colitems[0].name == 'test1[a-c]'
        assert colitems[1].name == 'test1[b-c]'
        assert colitems[2].name == 'test2[a-c]'
        assert colitems[3].name == 'test2[b-c]'

    def test_parametrize_skipif(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n m = pytest.mark.skipif('True')\n\n .parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_skip_if(x):\n assert x < 2\n ")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['* 2 passed, 1 skipped in *'])

    def test_parametrize_skip(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n m = pytest.mark.skip('')\n\n .parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_skip(x):\n assert x < 2\n ")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['* 2 passed, 1 skipped in *'])

    def test_parametrize_skipif_no_skip(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n m = pytest.mark.skipif('False')\n\n .parametrize('x', [0, 1, m(2)])\n def test_skipif_no_skip(x):\n assert x < 2\n ")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['* 1 failed, 2 passed in *'])

    def test_parametrize_xfail(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n m = pytest.mark.xfail('True')\n\n .parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_xfail(x):\n assert x < 2\n ")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['* 2 passed, 1 xfailed in *'])

    def test_parametrize_passed(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n m = pytest.mark.xfail('True')\n\n .parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_xfail(x):\n pass\n ")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['* 2 passed, 1 xpassed in *'])

    def test_parametrize_xfail_passed(self, pytester: Pytester) -> None:
        pytester.makepyfile("\n import pytest\n\n m = pytest.mark.xfail('False')\n\n .parametrize('x', [0, 1, m(2)])\n def test_passed(x):\n pass\n ")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['* 3 passed in *'])

    def test_function_originalname(self, pytester: Pytester) -> None:
        items = pytester.getitems("\n import pytest\n\n .parametrize('arg', [1,2])\n def test_func(arg):\n pass\n\n def test_no_param():\n pass\n ")
        originalnames = []
        for x in items:
            assert isinstance(x, pytest.Function)
            originalnames.append(x.originalname)
        assert originalnames == ['test_func', 'test_func', 'test_no_param']

    def test_function_with_square_brackets(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile('\n locals()["test_foo[name]"] = lambda: None\n ')
        result = pytester.runpytest('-v', str(p1))
        result.stdout.fnmatch_lines(['test_function_with_square_brackets.py::test_foo[[]name[]] PASSED *', '*= 1 passed in *'])
def test_matplotlib_completions(config, workspace):
    """Jedi completions after 'plt.' should include matplotlib's plot function."""
    source = 'import matplotlib.pyplot as plt; plt.'
    # Cursor at the very end of the single-line document.
    position = {'line': 0, 'character': len(source)}
    document = Document(DOC_URI, workspace, source)
    completions = pylsp_jedi_completions(config, document, position)
    assert completions
    assert any('plot' in item['label'] for item in completions)
# NOTE(review): the decorator above `main` was mangled to a bare '()' in this
# dump; restored as click's command decorator per the project's CLI scripts --
# confirm against the repository.
@click.command()
def main():
    """Create a branch with updated dependency constraints and open a PR.

    Aborts if the working tree is dirty; always restores the previous branch
    and deletes the scratch branch, even on failure.
    """
    project_root = Path(__file__).parent / '..'
    os.chdir(project_root)

    if git_repo_has_changes():
        print('Your git repo has uncommitted changes. Commit or stash before continuing.')
        sys.exit(1)

    previous_branch = shell(
        'git rev-parse --abbrev-ref HEAD', check=True, capture_output=True, encoding='utf8'
    ).stdout.strip()
    shell('git fetch origin', check=True)

    timestamp = time.strftime('%Y-%m-%dT%H-%M-%S', time.gmtime())
    branch_name = f'update-constraints-{timestamp}'
    shell(f'git checkout -b {branch_name} origin/main', check=True)

    try:
        shell('bin/update_dependencies.py', check=True)
        if not git_repo_has_changes():
            print('Done: no constraint updates required.')
            return
        shell('git commit -a -m "Update dependencies"', check=True)
        # NOTE(review): the original triple-quoted literal lost its newlines in
        # the dump (making dedent pointless); restored as a multi-line body --
        # confirm exact wording upstream.
        body = textwrap.dedent(f'''
            Update the versions of our dependencies.

            PR generated by `{os.path.basename(__file__)}`.
        ''')
        run(
            ['gh', 'pr', 'create', '--repo=pypa/cibuildwheel', '--base=main',
             '--title=Update dependencies', f"--body='{body}'"],
            check=True,
        )
        print('Done.')
    finally:
        # Best-effort cleanup: discard changes, return to the prior branch,
        # and delete the scratch branch.
        shell('git checkout -- .', check=True)
        shell(f'git checkout {previous_branch}', check=True)
        shell(f'git branch -D --force {branch_name}', check=True)
def get_metadata_value(property_name):
    """Query a setuptools metadata field from the project's setup.py.

    Runs ``setup.py -q --<property_name>`` (e.g. ``--version``) in the project
    root two directories up from this file and returns the stripped stdout.

    Raises:
        subprocess.CalledProcessError: if setup.py exits non-zero (check=True).
    """
    import sys  # local import: file's top-of-file import block is outside this chunk

    setup_py_dir = os.path.join(os.path.dirname(__file__), '..', '..')
    setup_py_file = os.path.join(setup_py_dir, 'setup.py')
    # BUG FIX: use the *current* interpreter rather than whatever 'python'
    # resolves to on PATH (which may be a different version or missing).
    out = subprocess.run(
        [sys.executable, setup_py_file, '-q', '--%s' % property_name],
        stdout=subprocess.PIPE,
        cwd=setup_py_dir,
        check=True,
    )
    property_value = out.stdout.decode().strip()
    return property_value
class MLP(nn.Module):
    """Multi-layer perceptron: Linear -> [BatchNorm] -> activation -> [Dropout] per layer.

    ``hidden_layers`` may be a single int or a non-empty list; the last entry
    defines ``out_features``.
    """

    def __init__(self, in_features=2048, hidden_layers=[], activation='relu', bn=True, dropout=0.0):
        super().__init__()
        if isinstance(hidden_layers, int):
            hidden_layers = [hidden_layers]
        assert len(hidden_layers) > 0
        self.out_features = hidden_layers[-1]

        if activation == 'relu':
            make_act = functools.partial(nn.ReLU, inplace=True)
        elif activation == 'leaky_relu':
            make_act = functools.partial(nn.LeakyReLU, inplace=True)
        else:
            raise NotImplementedError

        modules = []
        prev_dim = in_features
        for width in hidden_layers:
            modules.append(nn.Linear(prev_dim, width))
            if bn:
                modules.append(nn.BatchNorm1d(width))
            modules.append(make_act())
            if dropout > 0:
                modules.append(nn.Dropout(dropout))
            prev_dim = width
        self.mlp = nn.Sequential(*modules)

    def forward(self, x):
        return self.mlp(x)
# NOTE(review): the decorator was mangled to a bare '(HAS_SELF_TYPE)' in this
# dump; restored as the test suite's version-gating helper -- confirm upstream.
@requires(HAS_SELF_TYPE)
def test_self_type():
    """Round-trip (dump/load) a recursive model annotated with typing.Self."""
    from dataclasses import dataclass  # local import: file's import block not in this chunk

    # BUG FIX: without @dataclass, WithSelf(a=1) raises TypeError and instances
    # compare by identity, so every assertion below would fail.  The decorator
    # appears to have been stripped by extraction; restored.
    @dataclass
    class WithSelf():
        a: int
        next: Optional[typing.Self] = None

    dumped_data = {'a': 1, 'next': None}
    loaded_data = WithSelf(a=1)
    assert retort.dump(loaded_data) == dumped_data
    assert retort.load(dumped_data, WithSelf) == loaded_data

    dumped_data = {'a': 1, 'next': {'a': 2, 'next': None}}
    loaded_data = WithSelf(a=1, next=WithSelf(a=2))
    assert retort.dump(loaded_data) == dumped_data
    assert retort.load(dumped_data, WithSelf) == loaded_data

    dumped_data = {'a': 1, 'next': {'a': 2, 'next': {'a': 3, 'next': None}}}
    loaded_data = WithSelf(a=1, next=WithSelf(a=2, next=WithSelf(a=3)))
    assert retort.dump(loaded_data) == dumped_data
    assert retort.load(dumped_data, WithSelf) == loaded_data
def main():
    """Re-key a checkpoint so its weights load under the 'backbone.bottom_up.' namespace."""
    args = parse_args()
    source_weights = torch.load(args.source_model)['model']
    prefix = 'backbone.bottom_up.'
    # Same tensors, prefixed keys.
    converted_weights = {prefix + key: value for key, value in source_weights.items()}
    torch.save(converted_weights, args.output_model)
def statusCheck(probecheck=False):
    """Verify this system supports suspend/resume testing.

    Prints a human-readable report of each capability check and returns ''
    when everything required is available, otherwise a short string naming
    the first blocking problem. With probecheck=True, additionally registers
    the default kprobes.
    """
    status = ''
    pprint(('Checking this system (%s)...' % platform.node()))
    # Root access is mandatory for everything below.
    res = sysvals.colorText('NO (No features of this tool will work!)')
    if sysvals.rootCheck(False):
        res = 'YES'
    pprint((' have root access: %s' % res))
    if (res != 'YES'):
        pprint(' Try running this script with sudo')
        return 'missing root access'
    # The sysfs power interface must be present.
    res = sysvals.colorText('NO (No features of this tool will work!)')
    if os.path.exists(sysvals.powerfile):
        res = 'YES'
    pprint((' is sysfs mounted: %s' % res))
    if (res != 'YES'):
        return 'sysfs is missing'
    # Validate the requested suspend mode (except for 'command' mode, which
    # runs an arbitrary user command instead of a kernel power mode).
    if (sysvals.suspendmode != 'command'):
        res = sysvals.colorText('NO')
        modes = getModes()
        if (sysvals.suspendmode in modes):
            res = 'YES'
        else:
            status = ('%s mode is not supported' % sysvals.suspendmode)
        pprint((' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res)))
        if (res == 'NO'):
            pprint((' valid power modes are: %s' % modes))
            pprint(' please choose one with -m')
    # ftrace support (only fatal when callgraph tracing was requested).
    res = sysvals.colorText('NO')
    ftgood = sysvals.verifyFtrace()
    if ftgood:
        res = 'YES'
    elif sysvals.usecallgraph:
        status = 'ftrace is not properly supported'
    pprint((' is ftrace supported: %s' % res))
    # kprobes are optional; without them dev-source tracing is disabled.
    if sysvals.usekprobes:
        res = sysvals.colorText('NO')
        sysvals.usekprobes = sysvals.verifyKprobes()
        if sysvals.usekprobes:
            res = 'YES'
        else:
            sysvals.usedevsrc = False
        pprint((' are kprobes supported: %s' % res))
    # Prefer ftrace events as the timeline source; fall back to dmesg when
    # any required trace event is missing.
    res = 'DMESG'
    if ftgood:
        sysvals.usetraceevents = True
        for e in sysvals.traceevents:
            if (not os.path.exists((sysvals.epath + e))):
                sysvals.usetraceevents = False
        if sysvals.usetraceevents:
            res = 'FTRACE (all trace events found)'
    pprint((' timeline data source: %s' % res))
    # rtcwake support (only fatal when -rtcwake was requested).
    res = sysvals.colorText('NO')
    if (sysvals.rtcpath != ''):
        res = 'YES'
    elif sysvals.rtcwake:
        status = 'rtcwake is not properly supported'
    pprint((' is rtcwake supported: %s' % res))
    # Optional helper tools: report presence only, never fatal.
    pprint(' optional commands this tool may use for info:')
    no = sysvals.colorText('MISSING')
    yes = sysvals.colorText('FOUND', 32)
    for c in ['turbostat', 'mcelog', 'lspci', 'lsusb']:
        if (c == 'turbostat'):
            res = (yes if sysvals.haveTurbostat() else no)
        else:
            res = (yes if sysvals.getExec(c) else no)
        pprint((' %s: %s' % (c, res)))
    if (not probecheck):
        return status
    # probecheck: register the default (and optionally dev-source) kprobes.
    if sysvals.usekprobes:
        for name in sysvals.tracefuncs:
            sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
        if sysvals.usedevsrc:
            for name in sysvals.dev_tracefuncs:
                sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
        sysvals.addKprobes(True)
    return status
class TestDNSCache(unittest.TestCase):
    """Unit tests for the DNS record cache (r.DNSCache)."""

    def test_order(self):
        """With two equal-key records, get() returns the most recently added."""
        record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        entry = r.DNSEntry('a', const._TYPE_SOA, const._CLASS_IN)
        cached_record = cache.get(entry)
        assert (cached_record == record2)

    def test_adding_same_record_to_cache_different_ttls_with_get(self):
        """Re-adding an identical record with a new TTL updates the cached entry."""
        record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 10, b'a')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        entry = r.DNSEntry(record2.name, const._TYPE_A, const._CLASS_IN)
        cached_record = cache.get(entry)
        assert (cached_record == record2)

    def test_adding_same_record_to_cache_different_ttls_with_get_all(self):
        """get_all_by_details() must not return duplicates after a TTL update."""
        record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 10, b'a')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        cached_records = cache.get_all_by_details('a', const._TYPE_A, const._CLASS_IN)
        assert (cached_records == [record2])

    def test_cache_empty_does_not_leak_memory_by_leaving_empty_list(self):
        """Removing a name's last records must drop the name key entirely."""
        record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert ('a' in cache.cache)
        cache.async_remove_records([record1, record2])
        assert ('a' not in cache.cache)

    def test_cache_empty_multiple_calls(self):
        """Same as above; guards against regressions on repeated add/remove."""
        record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert ('a' in cache.cache)
        cache.async_remove_records([record1, record2])
        assert ('a' not in cache.cache)
class TestPlotSummaryVariables(TestCase):
    """Tests for pybamm.plot_summary_variables."""

    def test_plot(self):
        """Solve a short degradation experiment and check the summary-plot axes.

        Verifies, for a single solution and for a list of two solutions, that
        nine axes are produced and that each axis plots the expected summary
        variable against cycle number.
        """
        model = pybamm.lithium_ion.SPM({'SEI': 'ec reaction limited'})
        parameter_values = pybamm.ParameterValues('Mohtat2020')
        # Three repetitions of a discharge/rest/charge/hold/rest cycle.
        experiment = pybamm.Experiment(([('Discharge at C/10 for 10 hours or until 3.3 V', 'Rest for 1 hour', 'Charge at 1 A until 4.1 V', 'Hold at 4.1 V until 50 mA', 'Rest for 1 hour')] * 3))
        # The nine summary variables the plot is expected to show, in order.
        output_variables = ['Capacity [A.h]', 'Loss of lithium inventory [%]', 'Total capacity lost to side reactions [A.h]', 'Loss of active material in negative electrode [%]', 'Loss of active material in positive electrode [%]', 'x_100', 'x_0', 'y_100', 'y_0']
        sim = pybamm.Simulation(model, experiment=experiment, parameter_values=parameter_values)
        sol = sim.solve(initial_soc=1)
        # Single-solution call: one line per axis.
        axes = pybamm.plot_summary_variables(sol, testing=True)
        axes = axes.flatten()
        self.assertEqual(len(axes), 9)
        for (output_var, ax) in zip(output_variables, axes):
            self.assertEqual(ax.get_xlabel(), 'Cycle number')
            self.assertEqual(ax.get_ylabel(), output_var)
            (cycle_number, var) = ax.get_lines()[0].get_data()
            np.testing.assert_array_equal(cycle_number, sol.summary_variables['Cycle number'])
            np.testing.assert_array_equal(var, sol.summary_variables[output_var])
        # Multi-solution call: two lines per axis (same solution twice here).
        axes = pybamm.plot_summary_variables([sol, sol], labels=['SPM', 'SPM'], testing=True)
        axes = axes.flatten()
        self.assertEqual(len(axes), 9)
        for (output_var, ax) in zip(output_variables, axes):
            self.assertEqual(ax.get_xlabel(), 'Cycle number')
            self.assertEqual(ax.get_ylabel(), output_var)
            (cycle_number, var) = ax.get_lines()[0].get_data()
            np.testing.assert_array_equal(cycle_number, sol.summary_variables['Cycle number'])
            np.testing.assert_array_equal(var, sol.summary_variables[output_var])
            (cycle_number, var) = ax.get_lines()[1].get_data()
            np.testing.assert_array_equal(cycle_number, sol.summary_variables['Cycle number'])
            np.testing.assert_array_equal(var, sol.summary_variables[output_var])
def parseEtree(inFileName, silence=False, print_warnings=True, mapping=None, reverse_mapping=None, nsmap=None):
    """Parse an XML file into the generated object model and an etree element.

    Returns ``(rootObj, rootElement, mapping, reverse_node_mapping)``. Unless
    ``silence`` is set, the rebuilt XML is echoed to stdout; collected parse
    warnings go to stderr when ``print_warnings`` is set.
    """
    parser = None
    doc = parsexml_(inFileName, parser)
    gds_collector = GdsCollector_()
    rootNode = doc.getroot()
    (rootTag, rootClass) = get_root_tag(rootNode)
    # Fall back to 'scpd' as the document root when the tag is unrecognized.
    if (rootClass is None):
        rootTag = 'scpd'
        rootClass = scpd
    rootObj = rootClass.factory()
    rootObj.build(rootNode, gds_collector_=gds_collector)
    if (mapping is None):
        mapping = {}
    if (reverse_mapping is None):
        reverse_mapping = {}
    # Rebuild an etree from the object model, filling the node mappings.
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping, reverse_mapping_=reverse_mapping, nsmap_=nsmap)
    reverse_node_mapping = rootObj.gds_reverse_node_mapping(mapping)
    # Release the parsed tree unless the module is configured to keep it.
    if (not SaveElementTreeNode):
        doc = None
        rootNode = None
    if (not silence):
        content = etree_.tostring(rootElement, pretty_print=True, xml_declaration=True, encoding='utf-8')
        sys.stdout.write(str(content))
        sys.stdout.write('\n')
    # Emit any collected warnings, framed by separator lines.
    if (print_warnings and (len(gds_collector.get_messages()) > 0)):
        separator = (('-' * 50) + '\n')
        sys.stderr.write(separator)
        sys.stderr.write('----- Warnings -- count: {} -----\n'.format(len(gds_collector.get_messages())))
        gds_collector.write_messages(sys.stderr)
        sys.stderr.write(separator)
    return (rootObj, rootElement, mapping, reverse_node_mapping)
class Rect():
    """Axis-aligned rectangle spanning the corners (x1, y1) and (x2, y2)."""

    def __init__(self, x1, y1, x2, y2):
        self.x1, self.y1 = x1, y1
        self.x2, self.y2 = x2, y2

    def __repr__(self):
        return ('Rect(%d, %d to %d, %d)' % (self.x1, self.y1, self.x2, self.y2))

    def intersects(self, other):
        """Return True if the interiors overlap.

        Strict comparisons: rectangles that merely share an edge or a corner
        do NOT intersect.
        """
        disjoint = (self.x1 >= other.x2 or self.x2 <= other.x1
                    or self.y1 >= other.y2 or self.y2 <= other.y1)
        return not disjoint
def gdboutput(pipe):
    """Reader loop for one of gdb's output pipes (stdout or stderr).

    Runs until the pipe closes: parses each GDB/MI line, routes command
    results, run-status changes and console output to the appropriate views,
    then performs session-teardown UI updates when gdb's stdout closes.
    """
    global gdb_process
    global gdb_lastresult
    global gdb_lastline
    global gdb_last_console_line
    global gdb_stack_frame
    global gdb_run_status
    global gdb_stack_index
    # MI command result lines look like "<token>^...", async exec lines "<token>*...".
    command_result_regex = re.compile('^\\d+\\^')
    run_status_regex = re.compile('(^\\d*\\*)([^,]+)')
    while True:
        try:
            raw = pipe.readline()
            if (len(raw) == 0):
                # EOF: gdb closed this pipe.
                log_debug(('gdb_%s: broken pipe\n' % ('stdout' if (pipe == gdb_process.stdout) else 'stderr')))
                break
            line = raw.strip().decode(sys.getdefaultencoding())
            log_debug(('gdb_%s: %s\n' % (('stdout' if (pipe == gdb_process.stdout) else 'stderr'), line)))
            gdb_session_view.add_line(('%s\n' % line), False)
            # Only gdb's stdout carries MI records worth parsing further.
            if (pipe != gdb_process.stdout):
                continue
            # Command completion record: hand the result to the waiting caller.
            if (command_result_regex.match(line) is not None):
                gdb_lastresult.put(line)
            run_status = run_status_regex.match(line)
            if (run_status is not None):
                gdb_run_status = run_status.group(2)
                reason = re.search('(?<=reason=")[a-zA-Z0-9\\-]+(?=")', line)
                if ((reason is not None) and reason.group(0).startswith('exited')):
                    # Inferior exited: shut gdb down cleanly.
                    log_debug(('gdb: exiting %s' % line))
                    run_cmd('-gdb-exit')
                elif ((not ('running' in gdb_run_status)) and (not gdb_shutting_down)):
                    # Stopped (not running, not tearing down): sync thread
                    # selection and the editor cursor to the stop location.
                    thread_id = re.search('thread-id="(\\d+)"', line)
                    if (thread_id is not None):
                        gdb_threads_view.select_thread(int(thread_id.group(1)))
                    sublime.set_timeout(update_cursor, 0)
            if (not line.startswith('(gdb)')):
                gdb_lastline = line
            # '~' records are console-output strings; unescape and display them.
            if line.startswith('~'):
                console_line = line[2:(- 1)].replace('\\n', '\n').replace('\\"', '"').replace('\\t', '\t')
                if (not gdb_python_command_running):
                    gdb_console_view.add_line(console_line, False)
                    gdb_last_console_line = console_line[:(- 1)]
            # Anything that is not a recognized MI record is raw program/gdb
            # output: forward it verbatim to the console view.
            if (not (line.startswith('(gdb)') or line.startswith('~') or line.startswith('=') or line.startswith('&"') or command_result_regex.match(line) or run_status_regex.match(line))):
                console_line = raw.decode(sys.getdefaultencoding())
                console_line = '\n'.join(console_line.splitlines())
                gdb_console_view.add_line(('%s\n' % console_line), False)
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # appears deliberate (keep the reader thread alive) — consider
        # narrowing to "except Exception".
        except:
            traceback.print_exc()
    # stdout closing means the gdb session itself ended: tear down UI state.
    if (pipe == gdb_process.stdout):
        log_debug('GDB session ended\n')
        gdb_session_view.add_line('GDB session ended\n')
        sublime.set_timeout(session_ended_status_message, 0)
        gdb_stack_frame = None
        global gdb_cursor_position
        gdb_stack_index = (- 1)
        gdb_cursor_position = 0
        gdb_run_status = None
        sublime.set_timeout(update_view_markers, 0)
        for view in gdb_views:
            sublime.set_timeout(view.on_session_ended, 0)
        sublime.set_timeout(cleanup, 0)
class BertForMaskedLM(BertPreTrainedModel):
    """BERT with a masked-language-modeling head on top."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # No pooling layer: MLM makes per-token predictions, not per-sequence.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        # Initialize weights and apply any final processing.
        self.post_init()

    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple[torch.Tensor], MaskedLMOutput)]:
        """Run BERT and score every position against the vocabulary.

        When ``labels`` is given, also computes the MLM cross-entropy loss
        (positions labeled -100 are ignored by CrossEntropyLoss's default
        ignore_index). Returns a MaskedLMOutput, or a plain tuple when
        ``return_dict`` is falsy.
        """
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        # (batch, seq, hidden) -> (batch, seq, vocab) token logits.
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if (labels is not None):
            loss_fct = CrossEntropyLoss()
            # Flatten to (batch*seq, vocab) vs (batch*seq,) for the loss.
            masked_lm_loss = loss_fct(prediction_scores.view((- 1), self.config.vocab_size), labels.view((- 1)))
        if (not return_dict):
            # Tuple output: logits plus any extra encoder outputs, with the
            # loss prepended when it was computed.
            output = ((prediction_scores,) + outputs[2:])
            return (((masked_lm_loss,) + output) if (masked_lm_loss is not None) else output)
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class TrainingConfig(FairseqDataclass):
    """Aggregated fairseq training configuration, grouped by concern."""

    # NOTE(review): these defaults are single instances created once at
    # class-definition time; unless FairseqDataclass replaces them
    # per-instance, they are shared by every TrainingConfig — confirm the
    # dataclass machinery handles this (dataclasses normally require
    # field(default_factory=...) for mutable defaults).
    common: CommonParams = CommonParams()
    distributed_training: DistributedTrainingParams = DistributedTrainingParams()
    dataset: DatasetParams = DatasetParams()
    optimization: OptimizationParams = OptimizationParams()
    checkpoint: CheckpointParams = CheckpointParams()
    bmuf: FairseqBMUFConfig = FairseqBMUFConfig()
class S3StoragePlugin(StoragePlugin):
    """StoragePlugin backed by an S3 bucket via aiobotocore.

    ``root`` must follow the pattern ``[BUCKET]/[PATH]``: the first component
    is the bucket name, the remainder becomes the object-key prefix.
    """

    def __init__(self, root: str, storage_options: Optional[Dict[(str, Any)]]=None) -> None:
        # aiobotocore is optional; fail early with a clear message if absent.
        try:
            from aiobotocore.session import get_session
        except ImportError:
            raise RuntimeError('S3 support requires aiobotocore. Please make sure aiobotocore is installed.')
        components = root.split('/')
        if (len(components) < 2):
            raise RuntimeError(f'The S3 root path must follow the following pattern: [BUCKET]/[PATH] (got {root})')
        self.bucket: str = components[0]
        self.root: str = '/'.join(components[1:])
        self.session = get_session()

    def _key(self, path: str) -> str:
        """Join the configured key prefix with *path* using '/' separators.

        Fixed: the previous per-method ``os.path.join`` produced
        backslash-separated keys on Windows; S3 object keys always use
        forward slashes. posixpath.join matches os.path.join's POSIX
        semantics exactly, so behavior on POSIX hosts is unchanged.
        """
        import posixpath  # stdlib; local import keeps this fix self-contained
        return posixpath.join(self.root, path)

    async def write(self, write_io: WriteIO) -> None:
        """Upload the buffer in *write_io* to ``<root>/<write_io.path>``."""
        if isinstance(write_io.buf, bytes):
            stream = io.BytesIO(write_io.buf)
        elif isinstance(write_io.buf, memoryview):
            stream = MemoryviewStream(write_io.buf)
        else:
            raise TypeError(f'Unrecognized buffer type: {type(write_io.buf)}')
        async with self.session.create_client('s3') as client:
            key = self._key(write_io.path)
            (await client.put_object(Bucket=self.bucket, Key=key, Body=stream))

    async def read(self, read_io: ReadIO) -> None:
        """Download an object (optionally a byte range) into ``read_io.buf``."""
        async with self.session.create_client('s3') as client:
            key = self._key(read_io.path)
            byte_range = read_io.byte_range
            if (byte_range is None):
                response = (await client.get_object(Bucket=self.bucket, Key=key))
            else:
                # HTTP Range is inclusive on both ends, hence the end - 1.
                response = (await client.get_object(Bucket=self.bucket, Key=key, Range=f'bytes={byte_range[0]}-{(byte_range[1] - 1)}'))
            async with response['Body'] as stream:
                read_io.buf = io.BytesIO((await stream.read()))

    async def delete(self, path: str) -> None:
        """Delete the single object at ``<root>/<path>``."""
        async with self.session.create_client('s3') as client:
            key = self._key(path)
            (await client.delete_object(Bucket=self.bucket, Key=key))

    async def delete_dir(self, path: str) -> None:
        """Recursive deletion is not supported by this plugin."""
        raise NotImplementedError()

    async def close(self) -> None:
        """Nothing to release; S3 clients are created per operation."""
        pass
def test_explicit_path(temp_dir, helpers):
    """A custom metadata hook is loaded from an explicitly configured 'path'."""
    config = {'path': f'foo/{DEFAULT_BUILD_SCRIPT}'}
    file_path = ((temp_dir / 'foo') / DEFAULT_BUILD_SCRIPT)
    file_path.ensure_parent_dir_exists()
    # Write a minimal MetadataHookInterface subclass at the configured path;
    # foo() exposes the plugin name and root for the assertion below.
    file_path.write_text(helpers.dedent('\n from hatchling.metadata.plugin.interface import MetadataHookInterface\n\n class CustomHook(MetadataHookInterface):\n def update(self, metadata):\n pass\n\n def foo(self):\n return self.PLUGIN_NAME, self.root\n '))
    with temp_dir.as_cwd():
        hook = CustomMetadataHook(str(temp_dir), config)
        assert (hook.foo() == ('custom', str(temp_dir)))
class ActivateAccount(DeferredAction):
    """Deferred action that activates a SystemAccount upon email verification.

    If the deadline passes before verification, the account reservation is
    cancelled instead.
    """
    __tablename__ = 'activateaccount'
    __mapper_args__ = {'polymorphic_identity': 'activateaccount'}
    # Joined-table inheritance from DeferredAction.
    id = Column(Integer, ForeignKey(DeferredAction.id, ondelete='CASCADE'), primary_key=True)
    system_account_id = Column(Integer, ForeignKey('systemaccount.id'), index=True)
    system_account = relationship(SystemAccount)

    def __init__(self, system_account=None, **kwargs):
        # Deadline = now + N days, N taken from site configuration.
        config = ExecutionContext.get_context().config
        deadline = (datetime.now() + timedelta(days=config.accounts.request_verification_timeout))
        self.system_account = system_account
        super().__init__(deadline=deadline, **kwargs)

    def success_action(self):
        """Verification arrived in time: activate the account."""
        self.system_account.activate()

    def deadline_action(self):
        """Deadline expired: release the reserved account."""
        Session.flush()
        self.system_account.cancel_reservation()
class SuperResModel(UNetModel):
    """UNet that performs super-resolution conditioning.

    The low-resolution image is upsampled to the target spatial size and
    concatenated with the noisy input along the channel dimension, hence the
    doubled input-channel count passed to the base UNet.
    """

    def __init__(self, in_channels, *args, **kwargs):
        # Channels double because the upsampled low-res image is concatenated.
        super().__init__((in_channels * 2), *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        # x is NCHW: take the spatial target size from dims 2 and 3.
        (_, _, new_height, new_width) = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode='bilinear')
        x = th.cat([x, upsampled], dim=1)
        return super().forward(x, timesteps, **kwargs)

    def get_feature_vectors(self, x, timesteps, low_res=None, **kwargs):
        # Fixed: previously unpacked the shape as NHWC ((_, H, W, _)), which is
        # inconsistent with forward() and with PyTorch's NCHW layout — it
        # resized low_res to (channels, height) instead of (height, width).
        (_, _, new_height, new_width) = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode='bilinear')
        x = th.cat([x, upsampled], dim=1)
        return super().get_feature_vectors(x, timesteps, **kwargs)
def train(G_loss, D_loss, G_vars, D_vars, global_step):
    """Build Adam training ops for the generator and discriminator.

    Adds histogram summaries for every trainable variable and for all
    non-None gradients (discriminator first, then generator). Only the
    generator's op advances ``global_step``.

    Returns:
        (G_train_op, D_train_op) tuple of training ops.
    """
    G_optim = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1)
    D_optim = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1)
    G_grads = G_optim.compute_gradients(G_loss, var_list=G_vars)
    D_grads = D_optim.compute_gradients(D_loss, var_list=D_vars)
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)
    # Gradient summaries: discriminator first, then generator.
    for grad_list in (D_grads, G_grads):
        for (grad, var) in grad_list:
            if grad is not None:
                tf.histogram_summary((var.op.name + '/gradients'), grad)
    G_train_op = G_optim.apply_gradients(G_grads, global_step=global_step)
    D_train_op = D_optim.apply_gradients(D_grads)
    return (G_train_op, D_train_op)
def test_select_column_using_expression_with_table_qualifier_without_column_alias():
    """Every source column feeding a qualified arithmetic expression should map
    to the (unaliased) expression string in the target table."""
    sql = 'INSERT INTO tab1\nSELECT a.col1 + a.col2 + a.col3 + a.col4\nFROM tab2 a'
    expression = 'a.col1 + a.col2 + a.col3 + a.col4'
    expected = [
        (ColumnQualifierTuple(column, 'tab2'), ColumnQualifierTuple(expression, 'tab1'))
        for column in ('col1', 'col2', 'col3', 'col4')
    ]
    assert_column_lineage_equal(sql, expected)
def validate_app_oauth_token(token):
    """Validate an OAuth access token and return a ValidateResult.

    Never raises: unknown, expired, or disabled-granter tokens all produce a
    ValidateResult carrying an error message instead.
    """
    validated = model.oauth.validate_access_token(token)
    if (not validated):
        logger.warning('OAuth access token could not be validated: %s', token)
        return ValidateResult(AuthKind.oauth, error_message='OAuth access token could not be validated')
    if (validated.expires_at <= datetime.utcnow()):
        logger.warning('OAuth access with an expired token: %s', token)
        return ValidateResult(AuthKind.oauth, error_message='OAuth access token has expired')
    # The user who granted the token must still be enabled.
    if (not validated.authorized_user.enabled):
        return ValidateResult(AuthKind.oauth, error_message='Granter of the oauth access token is disabled')
    # NOTE(review): scope_set is computed and logged but not returned —
    # presumably callers re-derive scopes from the token object; confirm.
    scope_set = scopes_from_scope_string(validated.scope)
    logger.debug('Successfully validated oauth access token with scope: %s', scope_set)
    return ValidateResult(AuthKind.oauth, oauthtoken=validated)
class MCHEvictionPolicy(abc.ABC):
    """Abstract interface for managed-collision-hashing (MCH) eviction policies.

    NOTE(review): the bare ``pass`` bodies below strongly suggest these
    methods carried @abc.abstractmethod (and metadata_info possibly
    @property) decorators that were lost from this copy — confirm against
    version control.
    """

    def __init__(self, metadata_info: List[MCHEvictionPolicyMetadataInfo], threshold_filtering_func: Optional[Callable[([torch.Tensor], Tuple[(torch.Tensor, Union[(float, torch.Tensor)])])]]=None) -> None:
        # threshold_filtering_func maps incoming ids to (mask, threshold) —
        # assumed from the signature; confirm against concrete policies.
        self._metadata_info = metadata_info
        self._threshold_filtering_func = threshold_filtering_func

    def metadata_info(self) -> List[MCHEvictionPolicyMetadataInfo]:
        """Describe the metadata buffers this policy maintains."""
        pass

    def record_history_metadata(self, current_iter: int, incoming_ids: torch.Tensor, history_metadata: Dict[(str, torch.Tensor)]) -> None:
        """Record per-iteration metadata for the incoming ids."""
        pass

    def coalesce_history_metadata(self, current_iter: int, history_metadata: Dict[(str, torch.Tensor)], unique_ids_counts: torch.Tensor, unique_inverse_mapping: torch.Tensor, additional_ids: Optional[torch.Tensor]=None, threshold_mask: Optional[torch.Tensor]=None) -> Dict[(str, torch.Tensor)]:
        """Reduce accumulated history metadata over the unique incoming ids."""
        pass

    def update_metadata_and_generate_eviction_scores(self, current_iter: int, mch_size: int, coalesced_history_argsort_mapping: torch.Tensor, coalesced_history_sorted_unique_ids_counts: torch.Tensor, coalesced_history_mch_matching_elements_mask: torch.Tensor, coalesced_history_mch_matching_indices: torch.Tensor, mch_metadata: Dict[(str, torch.Tensor)], coalesced_history_metadata: Dict[(str, torch.Tensor)]) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Update stored metadata; return (evicted_indices, replacement_indices)."""
        pass

    def _compute_selected_eviction_and_replacement_indices(self, pivot: int, eviction_scores: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Shared helper: pick which slots to evict and which candidates replace them.

        Convention (from the index arithmetic below): positions < pivot are
        existing MCH slots, positions >= pivot are incoming candidates. After
        a stable descending sort of all scores, any candidate landing inside
        the kept top-``pivot`` region replaces a slot that fell out of it.
        """
        argsorted_eviction_scores = torch.argsort(eviction_scores, descending=True, stable=True)
        # Candidates (original index >= pivot) that made it into the kept region.
        selected_new_ids_mask = (argsorted_eviction_scores[:pivot] >= pivot)
        # Existing slots (original index < pivot) that fell out of the kept region.
        evicted_ids_mask = (argsorted_eviction_scores[pivot:] < pivot)
        evicted_indices = argsorted_eviction_scores[pivot:][evicted_ids_mask]
        # Subtract pivot to convert candidate positions to candidate-local indices.
        selected_new_indices = (argsorted_eviction_scores[:pivot][selected_new_ids_mask] - pivot)
        return (evicted_indices, selected_new_indices)
@pytest.fixture(scope='module')
def input_text_message_content():
    """Module-scoped fixture providing an InputTextMessageContent built from
    the shared TestInputTextMessageContentBase constants.

    Fixed: the decorator had been mangled to a bare ``(scope='module')``
    fragment — a SyntaxError; restored the ``@pytest.fixture`` head.
    """
    return InputTextMessageContent(
        TestInputTextMessageContentBase.message_text,
        parse_mode=TestInputTextMessageContentBase.parse_mode,
        entities=TestInputTextMessageContentBase.entities,
        disable_web_page_preview=TestInputTextMessageContentBase.disable_web_page_preview,
    )
class NAG(Optimizer):
    """Nesterov Accelerated Gradient optimizer (fairseq-style).

    Remembers the previous learning rate ('lr_old') per group so the momentum
    buffer can be rescaled when the learning rate changes between steps.

    NOTE(review): in upstream fairseq, supports_memory_efficient_fp16 and
    supports_flat_params are @property — those decorators appear lost from
    this copy; confirm against version control.
    """

    def __init__(self, params, lr=required, momentum=0, weight_decay=0):
        defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
        super(NAG, self).__init__(params, defaults)

    def supports_memory_efficient_fp16(self):
        # Safe with FP16 params: updates are computed in an FP32 copy (see step).
        return True

    def supports_flat_params(self):
        return True

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and returns
                the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            lr = group['lr']
            lr_old = group.get('lr_old', lr)
            # Rescale factor applied to the momentum buffer when lr changed.
            lr_correct = (lr / lr_old)
            for p in group['params']:
                if (p.grad is None):
                    continue
                # Work in FP32 even for FP16/BF16 parameters.
                p_data_fp32 = p.data
                if (p_data_fp32.dtype in {torch.float16, torch.bfloat16}):
                    p_data_fp32 = p_data_fp32.float()
                d_p = p.grad.data.float()
                param_state = self.state[p]
                if ('momentum_buffer' not in param_state):
                    param_state['momentum_buffer'] = torch.zeros_like(d_p)
                else:
                    # Keep the buffer on the gradient's device/dtype.
                    param_state['momentum_buffer'] = param_state['momentum_buffer'].to(d_p)
                buf = param_state['momentum_buffer']
                # Multiplicative weight decay applied before the update.
                if (weight_decay != 0):
                    p_data_fp32.mul_((1 - (lr * weight_decay)))
                # Nesterov look-ahead: momentum applied twice (lr-corrected),
                # then the gradient scaled by (1 + momentum).
                p_data_fp32.add_(buf, alpha=((momentum * momentum) * lr_correct))
                p_data_fp32.add_(d_p, alpha=((- (1 + momentum)) * lr))
                # Update the velocity buffer for the next step.
                buf.mul_((momentum * lr_correct)).add_(d_p, alpha=(- lr))
                # Copy the FP32 result back into low-precision storage.
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    p.data.copy_(p_data_fp32)
            # Remember this lr so the next step can compute lr_correct.
            group['lr_old'] = lr
        return loss
class Effect5521(BaseEffect):
    """Command Ships elite bonus: boosts every damage type of Heavy Missile charges."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Apply the hull's Command Ships bonus to each of the four damage
        # attributes on charges requiring the Heavy Missiles skill.
        for damageType in ('em', 'explosive', 'kinetic', 'thermal'):
            fit.modules.filteredChargeBoost(
                lambda mod: mod.charge.requiresSkill('Heavy Missiles'),
                '{0}Damage'.format(damageType),
                ship.getModifiedItemAttr('eliteBonusCommandShips2'),
                skill='Command Ships',
                **kwargs)
class _DeformConv(Function):
    """Custom autograd Function wrapping the C/CUDA deformable-convolution kernels.

    NOTE(review): the bare ``_differentiable`` and ``_cache(maxsize=128)``
    statements below look like mangled decorators (likely @once_differentiable
    and @lru_cache(maxsize=128)), and the methods are conventionally
    @staticmethod in this autograd-Function style — confirm against version
    control.
    """

    def forward(ctx, input, offset, weight, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, im2col_step=64):
        """Compute the deformable convolution output.

        Stashes the conv hyper-parameters and the tensors needed for backward
        on ``ctx``. CUDA inputs use the _C extension kernel; CPU inputs fall
        back to ``deform_conv2d`` (which cannot handle deformable_groups != 1).
        """
        if ((input is not None) and (input.dim() != 4)):
            raise ValueError('Expected 4D tensor as input, got {}D tensor instead.'.format(input.dim()))
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(input, offset, weight)
        output = input.new_empty(_DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride))
        # Two scratch buffers required by the C kernels (columns/ones).
        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]
        if (not input.is_cuda):
            if (deformable_groups != 1):
                raise NotImplementedError('Deformable Conv with deformable_groups != 1 is not supported on CPUs!')
            return deform_conv2d(input, offset, weight, stride=stride, padding=padding, dilation=dilation)
        else:
            # The kernel processes the batch in chunks of cur_im2col_step.
            cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
            assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
            _C.deform_conv_forward(input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
        return output

    # NOTE(review): likely a mangled @once_differentiable decorator.
    _differentiable
    def backward(ctx, grad_output):
        """Compute gradients w.r.t. input, offset and weight (CUDA only).

        Returns None for every non-tensor forward argument (stride, padding,
        dilation, groups, deformable_groups, im2col_step).
        """
        (input, offset, weight) = ctx.saved_tensors
        grad_input = grad_offset = grad_weight = None
        if (not grad_output.is_cuda):
            raise NotImplementedError('Deformable Conv is not supported on CPUs!')
        else:
            cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
            assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
            # Input/offset gradients are computed together by one kernel.
            if (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                _C.deform_conv_backward_input(input, offset, grad_output, grad_input, grad_offset, weight, ctx.bufs_[0], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                _C.deform_conv_backward_filter(input, offset, grad_output, grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, cur_im2col_step)
        return (grad_input, grad_offset, grad_weight, None, None, None, None, None, None)

    def _output_size(input, weight, padding, dilation, stride):
        """Compute the (N, C_out, H_out, W_out) output shape of the convolution."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range((input.dim() - 2)):
            in_size = input.size((d + 2))
            pad = padding[d]
            kernel = ((dilation[d] * (weight.size((d + 2)) - 1)) + 1)
            stride_ = stride[d]
            # Standard conv output-size formula per spatial dimension.
            output_size += (((((in_size + (2 * pad)) - kernel) // stride_) + 1),)
        if (not all(map((lambda s: (s > 0)), output_size))):
            raise ValueError('convolution input is too small (output would be {})'.format('x'.join(map(str, output_size))))
        return output_size

    # NOTE(review): likely a mangled @lru_cache(maxsize=128) decorator.
    _cache(maxsize=128)
    def _cal_im2col_step(input_size, default_size):
        """Pick the largest batch-chunk size <= default_size dividing input_size.

        Returns input_size itself when it already fits; otherwise searches the
        divisors of input_size (via factors up to sqrt) for the largest
        cofactor not exceeding default_size.
        """
        if (input_size <= default_size):
            return input_size
        best_step = 1
        for step in range(2, min((int(math.sqrt(input_size)) + 1), default_size)):
            if ((input_size % step) == 0):
                if ((input_size // step) <= default_size):
                    return (input_size // step)
                best_step = step
        return best_step
class VerifyHandler(BaseHandler):
    """Handles email-verification links of the form /verify/<base64 code>."""

    async def get(self, code):
        userid = None
        try:
            async with self.db.transaction() as sql_session:
                # Outer code decrypts (site key, user 0) to (userid, inner_code).
                verified_code = base64.b64decode(code)
                (userid, verified_code) = (await self.db.user.decrypt(0, verified_code, sql_session=sql_session))
                user = (await self.db.user.get(userid, fields=('id', 'email', 'email_verified'), sql_session=sql_session))
                assert user
                assert (not user['email_verified'])
                # Inner code decrypts (user key) to (email, issue timestamp).
                (email, time_time) = (await self.db.user.decrypt(userid, verified_code, sql_session=sql_session))
                # Links expire after 30 days.
                assert ((time.time() - time_time) < (((30 * 24) * 60) * 60))
                assert (user['email'] == email)
                (await self.db.user.mod(userid, email_verified=True, mtime=time.time(), sql_session=sql_session))
                (await self.finish(''))
        except Exception as e:
            # Any failure (bad code, expired, already verified) is scored as a
            # potentially malicious probe.
            self.evil((+ 5))
            logger_Web_Handler.error('UserID: %s verify email failed! Reason: %s', (userid or '-1'), e)
            self.set_status(400)
            (await self.finish(''))
def select_components(components, exclude_components=None):
    """Resolve user-supplied component names into a sorted list of repositories.

    Accepts group aliases (ALL, DEMO, CLIENT, CLUSTER, ...), '.' for the
    current directory's basename, standard repository names, and shortened
    names; unknown entries are reported and skipped. Optionally filters the
    result through exclude_components.
    """
    short_component_names = [shorten_component_name(name) for name in REPO_LIST_ALL]
    # Group aliases expand to whole repository lists.
    group_aliases = {
        'ALL': REPO_LIST_ALL,
        'DEMO': REPO_LIST_DEMO_RUNNABLE,
        'CLIENT': REPO_LIST_CLIENT,
        'CLUSTER': REPO_LIST_CLUSTER,
        'CLUSTER-INFRASTRUCTURE': REPO_LIST_CLUSTER_INFRASTRUCTURE,
        'CLUSTER-RUNTIMEBATCH': REPO_LIST_CLUSTER_RUNTIME_BATCH,
    }
    output = set()
    for component in components:
        if component in group_aliases:
            output.update(group_aliases[component])
        elif component == '.':
            # '.' means "the repository I'm currently inside".
            output.add(os.path.basename(os.getcwd()))
        elif component in REPO_LIST_ALL:
            output.add(component)
        elif component in short_component_names:
            output.add(find_standard_component_name(component))
        else:
            display_message('Ignoring unknown component {0}.'.format(component))
    if exclude_components:
        output = exclude_components_from_selection(output, exclude_components)
    return sorted(output)
class SpeechTransformerEncoder(TransformerEncoder):
    """Transformer encoder variant configured for speech inputs.

    Option-derived attributes are intentionally assigned *before*
    super().__init__ — presumably the base constructor reads them while
    building layers; preserve this ordering (confirm against the base class).
    """

    def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
        self.death_rate = opt.death_rate  # stochastic-depth drop rate
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.layer_modules = list()
        self.asynchronous = opt.asynchronous
        self.max_memory_size = opt.max_memory_size
        self.extra_context_size = opt.extra_context_size
        self.experimental = opt.experimental
        self.unidirectional = opt.unidirectional
        self.reversible = opt.src_reversible
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention
        self.checkpointing = opt.checkpointing
        self.mpw = opt.multilingual_partitioned_weights
        self.multilingual_linear_projection = opt.multilingual_linear_projection
        self.mln = opt.multilingual_layer_norm
        self.no_input_scale = opt.no_input_scale
        # NOTE(review): learnable_position_encoding is assigned twice (also
        # above) and mln is re-assigned again below — redundant but harmless.
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.max_pos_length = opt.max_pos_length
        super().__init__(opt, dicts, positional_encoder, encoder_type, language_embeddings)
        # Replace the base positional encoder: none when positions are
        # learnable, sinusoidal otherwise.
        if self.learnable_position_encoding:
            self.positional_encoder = None
        else:
            self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        self.d_head = (self.model_size // self.n_heads)
        if self.multilingual_linear_projection:
            # One (model_size x model_size) projection matrix per language,
            # Glorot-style normal init.
            self.linear_proj = nn.Parameter(torch.Tensor(opt.n_languages, self.model_size, self.model_size))
            std_ = math.sqrt((2.0 / (self.model_size + self.model_size)))
            torch.nn.init.normal_(self.linear_proj, 0.0, std_)
        self.mln = opt.multilingual_layer_norm
        # Final layer norm unless ReZero residuals are enabled.
        if (not opt.rezero):
            self.postprocess_layer = PrePostProcessing(opt.model_size, opt.dropout, sequence='n', multilingual=self.mln, n_languages=opt.n_languages)
        else:
            self.postprocess_layer = Identity()
@pytest.mark.usefixtures('session_app_data')
def test_session_report_subprocess(tmp_path):
    """Creating a venv via ``python -m virtualenv`` prints the expected report.

    Fixed: the decorator had been mangled to a bare ``.usefixtures(...)``
    fragment — a SyntaxError; restored the ``@pytest.mark`` head.
    """
    out = check_output([sys.executable, '-m', 'virtualenv', str(tmp_path), '--activators', 'powershell', '--without-pip'], text=True, encoding='utf-8')
    lines = out.split('\n')
    # Expected report lines: creation summary, creator line, activators line.
    regexes = ['created virtual environment .* in \\d+ms', ' creator .*', ' activators .*']
    _match_regexes(lines, regexes)
def get_packages(root_dir='aitom', exclude_dir_roots=('aitom/tomominer/core/src', 'aitom/tomominer/core/cython')):
    """Walk ``root_dir`` and return dotted package names for every directory
    not located under one of ``exclude_dir_roots``.

    Improvements: the manual exclude-flag loop is replaced by ``any()``, and
    the mutable default list became a tuple (same contents, safe default).
    Note: assumes '/'-separated walk paths (POSIX); directory paths are
    converted to dotted module paths verbatim.
    """
    packages = []
    for (root, dirs, files) in os.walk(root_dir):
        # Skip any directory that lives under an excluded subtree.
        if any(root.startswith(excluded) for excluded in exclude_dir_roots):
            continue
        packages.append(root.replace('/', '.'))
    return packages
def run_step(context):
    """pypyr step: write a payload (or the whole context) to a YAML file.

    Expects context key 'fileWriteYaml' containing:
        path (required): destination file path; parent dirs are created.
        payload (optional): object to serialize; when absent, the entire
            formatted context is written instead.
        encoding (optional): file encoding; defaults to the config default.
    """
    logger.debug('started')
    context.assert_key_has_value('fileWriteYaml', __name__)
    input_context = context.get_formatted('fileWriteYaml')
    assert_key_has_value(obj=input_context, key='path', caller=__name__, parent='fileWriteYaml')
    out_path = Path(input_context['path'])
    # sentinel distinguishes "payload absent" from "payload explicitly None".
    payload = input_context.get('payload', sentinel)
    encoding = input_context.get('encoding', config.default_encoding)
    # Round-trip parser preserves YAML formatting/comments in the context.
    yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context()
    logger.debug('opening destination file for writing: %s', out_path)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    if (payload is sentinel):
        # No explicit payload: dump the entire context, formatted.
        payload = context.get_formatted_value(context)
    else:
        # NOTE(review): re-reads the already-fetched payload value — redundant
        # but harmless; presumably kept for symmetry with the sentinel branch.
        payload = input_context['payload']
    with open(out_path, 'w', encoding=encoding) as outfile:
        yaml_writer.dump(payload, outfile)
    logger.info('formatted context content and wrote to %s', out_path)
    logger.debug('done')
def test_resnet():
    """Exercise ResNet construction errors, norm_eval, stage freezing,
    forward feature shapes, checkpointing, and zero-init residuals."""
    # Invalid constructor arguments must raise.
    with pytest.raises(KeyError):
        ResNet(20)  # unsupported depth
    with pytest.raises(AssertionError):
        ResNet(50, num_stages=0)
    with pytest.raises(AssertionError):
        ResNet(50, num_stages=5)
    with pytest.raises(AssertionError):
        # strides/dilations length must match num_stages
        ResNet(50, strides=(1,), dilations=(1, 1), num_stages=3)
    with pytest.raises(TypeError):
        model = ResNet(50)
        model.init_weights(pretrained=0)  # pretrained must be str or None
    with pytest.raises(AssertionError):
        ResNet(50, style='tensorflow')

    # norm_eval=True keeps BatchNorm layers in eval mode even after train().
    model = ResNet(50, norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # Same, with torchvision pretrained weights.
    model = ResNet(depth=50, norm_eval=True)
    model.init_weights('torchvision://resnet50')
    model.train()
    assert check_norm_state(model.modules(), False)

    # frozen_stages freezes stem + first N stages: no grads, BN in eval.
    frozen_stages = 1
    model = ResNet(50, frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    assert (model.norm1.training is False)
    for layer in [model.conv1, model.norm1]:
        for param in layer.parameters():
            assert (param.requires_grad is False)
    for i in range(1, (frozen_stages + 1)):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert (mod.training is False)
        for param in layer.parameters():
            assert (param.requires_grad is False)

    # ResNet-18 (BasicBlock): all four stage outputs and their shapes.
    model = ResNet(18, out_indices=(0, 1, 2, 3))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 4)
    assert (feat[0].shape == (1, 64, 56, 56))
    assert (feat[1].shape == (1, 128, 28, 28))
    assert (feat[2].shape == (1, 256, 14, 14))
    assert (feat[3].shape == (1, 512, 7, 7))

    # ResNet-50 (Bottleneck): all four stage outputs (4x the channels).
    model = ResNet(50, out_indices=(0, 1, 2, 3))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 4)
    assert (feat[0].shape == (1, 256, 56, 56))
    assert (feat[1].shape == (1, 512, 28, 28))
    assert (feat[2].shape == (1, 1024, 14, 14))
    assert (feat[3].shape == (1, 2048, 7, 7))

    # Only the first three stages requested.
    model = ResNet(50, out_indices=(0, 1, 2))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 3)
    assert (feat[0].shape == (1, 256, 56, 56))
    assert (feat[1].shape == (1, 512, 28, 28))
    assert (feat[2].shape == (1, 1024, 14, 14))

    # A single output index returns a bare tensor, not a tuple.
    model = ResNet(50, out_indices=(3,))
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (feat.shape == (1, 2048, 7, 7))

    # with_cp=True must propagate gradient checkpointing to every block.
    model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 4)
    assert (feat[0].shape == (1, 256, 56, 56))
    assert (feat[1].shape == (1, 512, 28, 28))
    assert (feat[2].shape == (1, 1024, 14, 14))
    assert (feat[3].shape == (1, 2048, 7, 7))

    # zero_init_residual=True zero-inits each block's last norm layer.
    model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert all_zeros(m.norm2)

    # ...and with zero_init_residual=False the norms must NOT be all-zero.
    model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert (not all_zeros(m.norm3))
        elif isinstance(m, BasicBlock):
            assert (not all_zeros(m.norm2))
def pytest_addoption(parser: Parser) -> None:
    """Register junit-xml command-line options and ini settings."""
    group = parser.getgroup('terminal reporting')
    # --junitxml validates the path argument via filename_arg.
    group.addoption('--junitxml', '--junit-xml', action='store', dest='xmlpath', metavar='path',
                    type=functools.partial(filename_arg, optname='--junitxml'), default=None,
                    help='Create junit-xml style report file at given path')
    group.addoption('--junitprefix', '--junit-prefix', action='store', metavar='str', default=None,
                    help='Prepend prefix to classnames in junit-xml output')
    # ini settings controlling report content and schema flavor.
    parser.addini('junit_suite_name', 'Test suite name for JUnit report', default='pytest')
    parser.addini('junit_logging',
                  'Write captured log messages to JUnit report: one of no|log|system-out|system-err|out-err|all',
                  default='no')
    parser.addini('junit_log_passing_tests',
                  'Capture log information for passing tests to JUnit report: ',
                  type='bool', default=True)
    parser.addini('junit_duration_report', 'Duration time to report: one of total|call', default='total')
    parser.addini('junit_family', 'Emit XML for schema: one of legacy|xunit1|xunit2', default='xunit2')
class ProcessWrapper():
    """Builds command-line arguments for and runs/terminates Faceswap tasks
    driven by the GUI's tk variables."""

    def __init__(self, pathscript=None):
        logger.debug('Initializing %s: (pathscript: %s)', self.__class__.__name__, pathscript)
        self.tk_vars = get_config().tk_vars
        self.set_callbacks()
        self.pathscript = pathscript
        self.command = None
        self.statusbar = get_config().statusbar
        self.task = FaceswapControl(self)
        logger.debug('Initialized %s', self.__class__.__name__)

    def set_callbacks(self):
        """Register write-traces on the action/generate tk variables."""
        logger.debug('Setting tk variable traces')
        self.tk_vars['action'].trace('w', self.action_command)
        self.tk_vars['generate'].trace('w', self.generate_command)

    def action_command(self, *args):
        """Run the selected task, or terminate it if one is already running."""
        if (not self.tk_vars['action'].get()):
            return
        (category, command) = self.tk_vars['action'].get().split(',')
        if self.tk_vars['runningtask'].get():
            self.task.terminate()
        else:
            self.command = command
            args = self.prepare(category)
            self.task.execute_script(command, args)
        # Reset so the same action can be triggered again.
        self.tk_vars['action'].set(None)

    def generate_command(self, *args):
        """Print the cli command that would run, without executing it."""
        if (not self.tk_vars['generate'].get()):
            return
        (category, command) = self.tk_vars['generate'].get().split(',')
        args = self.build_args(category, command=command, generate=True)
        self.tk_vars['consoleclear'].set(True)
        logger.debug(' '.join(args))
        print(' '.join(args))
        self.tk_vars['generate'].set(None)

    def prepare(self, category):
        """Flag GUI running state, start the progress bar, and build argv."""
        logger.debug('Preparing for execution')
        self.tk_vars['runningtask'].set(True)
        self.tk_vars['consoleclear'].set(True)
        if (self.command == 'train'):
            self.tk_vars['istraining'].set(True)
        print('Loading...')
        self.statusbar.status_message.set('Executing - {}.py'.format(self.command))
        # Tasks without a measurable total get an indeterminate bar.
        mode = ('indeterminate' if (self.command in ('effmpeg', 'train')) else 'determinate')
        self.statusbar.progress_start(mode)
        args = self.build_args(category)
        self.tk_vars['display'].set(self.command)
        logger.debug('Prepared for execution')
        return args

    def build_args(self, category, command=None, generate=False):
        """Assemble the argv list for the faceswap script.

        generate=True builds a display-only command (no -u, args quoted).
        """
        logger.debug('Build cli arguments: (category: %s, command: %s, generate: %s)',
                     category, command, generate)
        command = (self.command if (not command) else command)
        script = '{}.{}'.format(category, 'py')
        pathexecscript = os.path.join(self.pathscript, script)
        args = ([sys.executable] if generate else [sys.executable, '-u'])
        args.extend([pathexecscript, command])
        cli_opts = get_config().cli_opts
        for cliopt in cli_opts.gen_cli_arguments(command):
            args.extend(cliopt)
            if ((command == 'train') and (not generate)):
                self.init_training_session(cliopt)
        if (not generate):
            args.append('-gui')
        if generate:
            # Quote args containing spaces unless they look like list/tuple reprs.
            args = [('"{}"'.format(arg)
                     if ((' ' in arg) and (not arg.startswith(('[', '('))) and (not arg.endswith((']', ')'))))
                     else arg)
                    for arg in args]
        logger.debug('Built cli arguments: (%s)', args)
        return args

    def init_training_session(self, cliopt):
        """Record the model name/dir for the training session.

        Fix: the original signature omitted `self`, so the bound call
        self.init_training_session(cliopt) in build_args raised TypeError.
        """
        session = get_config().session
        if (cliopt[0] == '-t'):
            session.modelname = cliopt[1].lower().replace('-', '_')
            logger.debug("modelname: '%s'", session.modelname)
        if (cliopt[0] == '-m'):
            session.modeldir = cliopt[1]
            logger.debug("modeldir: '%s'", session.modeldir)

    def terminate(self, message):
        """Reset GUI state after the faceswap process exits."""
        logger.debug('Terminating Faceswap processes')
        self.tk_vars['runningtask'].set(False)
        if (self.task.command == 'train'):
            self.tk_vars['istraining'].set(False)
        self.statusbar.progress_stop()
        self.statusbar.status_message.set(message)
        self.tk_vars['display'].set(None)
        get_images().delete_preview()
        get_config().session.__init__()
        self.command = None
        logger.debug('Terminated Faceswap processes')
        print('Process exited.')
def binary_op(name: str, arg_types: list[RType], return_type: RType, c_function_name: str,
              error_kind: int, var_arg_type: (RType | None) = None,
              truncated_type: (RType | None) = None, ordering: (list[int] | None) = None,
              extra_int_constants: (list[tuple[(int, RType)]] | None) = None,
              steals: StealsDescription = False, is_borrowed: bool = False,
              priority: int = 1) -> CFunctionDescription:
    """Register a C primitive implementation for a binary operator.

    Multiple descriptions may be registered per operator name; `priority`
    orders candidates when more than one matches.
    """
    # Avoid the shared-mutable-default pitfall for the constants list.
    if (extra_int_constants is None):
        extra_int_constants = []
    ops = binary_ops.setdefault(name, [])
    desc = CFunctionDescription(name, arg_types, return_type, var_arg_type, truncated_type,
                                c_function_name, error_kind, steals, is_borrowed, ordering,
                                extra_int_constants, priority)
    ops.append(desc)
    return desc
class _FileStreamCloser(_StreamCloser, _FileCloser):
    """Stream closer that spools output to a temp file, replaying it to the
    real writer in chunks only on success."""

    def __init__(self, write, close_on_exit, is_binary, temp_file, chunk_size, delete_failures):
        _StreamCloser.__init__(self, write, close_on_exit)
        _FileCloser.__init__(self, temp_file, delete_failures)
        self.is_binary = is_binary
        self.chunk_size = chunk_size
        write_mode = 'wb' if is_binary else 'w'
        self.fp = self._make_stream(-1, write_mode)

    def _success(self):
        # Replay the spooled temp file through the success writer.
        read_mode = 'rb' if self.is_binary else 'r'
        with open(self.temp_file, read_mode) as src:
            while chunk := src.read(self.chunk_size):
                self._write_on_success(chunk)

    def _failure(self):
        # Delegate cleanup (temp-file deletion policy) to the file closer.
        _FileCloser._failure(self)
def main():
    """CLI entry point: parse -mode/-model/-cfg overrides, seed RNGs, and
    dispatch to the requested Model routine."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-mode')
    parser.add_argument('-model')
    parser.add_argument('-cfg', nargs='*')
    args = parser.parse_args()
    cfg.init_handler(args.model)
    # Dataset name is the suffix after the last '-' in the model id.
    cfg.dataset = args.model.split('-')[(- 1)]
    if args.cfg:
        # Apply k=v overrides, coercing v to the existing attribute's type.
        for pair in args.cfg:
            (k, v) = tuple(pair.split('='))
            dtype = type(getattr(cfg, k))
            if (dtype == type(None)):
                # Cannot infer a target type from a None-valued attribute.
                raise ValueError()
            if (dtype is bool):
                # NOTE(review): any value other than the literal 'False'
                # becomes True (e.g. '0' -> True) -- confirm intent.
                v = (False if (v == 'False') else True)
            else:
                v = dtype(v)
            setattr(cfg, k, v)
    logging.info(str(cfg))
    if cfg.cuda:
        torch.cuda.set_device(cfg.cuda_device)
        logging.info('Device: {}'.format(torch.cuda.current_device()))
    cfg.mode = args.mode
    # Seed all RNG sources for reproducibility.
    torch.manual_seed(cfg.seed)
    torch.cuda.manual_seed(cfg.seed)
    random.seed(cfg.seed)
    np.random.seed(cfg.seed)
    m = Model(args.model.split('-')[(- 1)])
    m.count_params()
    # Dispatch on the requested mode.
    if (args.mode == 'train'):
        m.load_glove_embedding()
        m.train()
    elif (args.mode == 'adjust'):
        m.load_model()
        m.train()
    elif (args.mode == 'test'):
        m.load_model()
        m.eval()
    elif (args.mode == 'rl'):
        m.load_model()
        m.reinforce_tune()
    elif (args.mode == 'interact'):
        m.load_model()
        m.interactive()
    elif (args.mode == 'vocab'):
        m.load_glove_embedding()
def test_qat():
    """QAT smoke test: per-epoch training updates wrapped-layer weights
    while the 'tf' quant scheme keeps output encodings frozen, and the
    save callback emits exactly one artifact of each kind."""
    if (version.parse(tf.version.VERSION) >= version.parse('2.00')):
        model = dense_functional()
        rand_inp = np.random.randn(10, 5)
        rand_out = np.random.randn(10, 2)
        qsim = QuantizationSimModel(model, quant_scheme='tf', default_param_bw=8, default_output_bw=8)
        qsim.compute_encodings((lambda m, _: m.predict(rand_inp)), None)
        qsim.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                           loss=tf.keras.losses.MeanSquaredError())
        # Baseline weights / encoding max for the wrapped dense layer.
        running_weights = [tf.keras.backend.get_value(param)
                           for param in qsim.model.layers[1]._layer_to_wrap.weights]
        running_dense_output_quantizer_encoding_max = tf.keras.backend.get_value(
            qsim.model.layers[1].output_quantizers[0]._encoding_max)
        with tempfile.TemporaryDirectory() as tmp_dir:
            epochs = 10
            save_model_callback = SaveModelWithoutQuantsimWrappersCallback(qsim, tmp_dir, 'test_qat')
            for i in range(epochs):
                _ = qsim.model.fit(x=rand_inp, y=rand_out, batch_size=1, callbacks=save_model_callback)
                ending_weights = [tf.keras.backend.get_value(param)
                                  for param in qsim.model.layers[1]._layer_to_wrap.weights]
                new_dense_output_quantizer_encoding_max = tf.keras.backend.get_value(
                    qsim.model.layers[1].output_quantizers[0]._encoding_max)
                # Every weight tensor must have changed this epoch...
                for (idx, weight) in enumerate(running_weights):
                    assert (not np.array_equal(weight, ending_weights[idx]))
                # ...but 'tf' scheme encodings are computed once and frozen.
                assert np.array_equal(new_dense_output_quantizer_encoding_max,
                                      running_dense_output_quantizer_encoding_max)
                running_weights = ending_weights
                running_dense_output_quantizer_encoding_max = new_dense_output_quantizer_encoding_max
            # The save callback should leave exactly one h5, one encodings
            # file, one yaml, and one saved-model folder.
            h5s = encodings = yamls = saved_models_folders = 0
            for file in os.listdir(tmp_dir):
                if file.endswith('h5'):
                    h5s += 1
                elif file.endswith('encodings'):
                    encodings += 1
                elif file.endswith('yaml'):
                    yamls += 1
                else:
                    saved_models_folders += 1
            for file_count in [h5s, encodings, yamls, saved_models_folders]:
                assert (file_count == 1), f'QAT Save Callback did not work'
def check_package_data(dist, attr, value):
    """Validate the package_data attribute: a dict mapping package-name
    strings to lists of wildcard-pattern strings."""
    if not isinstance(value, dict):
        raise DistutilsSetupError(
            '{!r} must be a dictionary mapping package names to lists of string wildcard patterns'.format(attr)
        )
    for pkg_name, patterns in value.items():
        if not isinstance(pkg_name, str):
            raise DistutilsSetupError(
                'keys of {!r} dict must be strings (got {!r})'.format(attr, pkg_name)
            )
        # Each value must be a list of strings.
        assert_string_list(dist, 'values of {!r} dict'.format(attr), patterns)
class Migration(migrations.Migration):
    # Makes Sponsorship.renewal nullable/blank and updates its help text.
    dependencies = [('sponsors', '0097_sponsorship_renewal')]
    operations = [migrations.AlterField(model_name='sponsorship', name='renewal', field=models.BooleanField(blank=True, help_text='If true, it means the sponsorship is a renewal of a previous sponsorship and will use the renewal template for contracting.', null=True))]
def check_link_path(link: Link) -> int:
    """Check that a link's target exists on disk.

    Relative URIs are resolved against the directory of the file that
    contains the link. Returns 0 on success, 1 on a missing target.
    """
    if os.path.isabs(link.uri):
        target = link.uri
    else:
        target = os.path.join(os.path.dirname(link.file), link.uri)
    if not os.path.exists(target):
        fail(link, 'NoFile ' + target)
        return 1
    ok(link)
    return 0
class Effect7077(BaseEffect):
    """Passive effect: multiplies the damage of fitted 'Precursor Weapon'
    modules by this module's damageMultiplier (stacking-penalized)."""
    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # NOTE(review): no `self` -- handlers appear to be called unbound
        # by the effect framework; confirm against other BaseEffect classes.
        fit.modules.filteredItemMultiply((lambda mod: (mod.item.group.name == 'Precursor Weapon')), 'damageMultiplier', module.getModifiedItemAttr('damageMultiplier'), stackingPenalties=True, **kwargs)
class PreactivatedBottleneckTransformation(nn.Module):
    """Pre-activation 3D bottleneck (BN -> ReLU -> Conv per branch):
    1x1 reduce -> 3x3 (grouped) -> 1x1 expand."""

    def __init__(self, dim_in, dim_out, temporal_stride, spatial_stride, num_groups, dim_inner,
                 temporal_kernel_size=3, temporal_conv_1x1=True, spatial_stride_1x1=False,
                 inplace_relu=True, bn_eps=1e-05, bn_mmt=0.1, disable_pre_activation=False, **kwargs):
        super(PreactivatedBottleneckTransformation, self).__init__()
        # Put the temporal kernel on the 1x1 conv or on the 3x3 conv.
        (temporal_kernel_size_1x1, temporal_kernel_size_3x3) = ((temporal_kernel_size, 1) if temporal_conv_1x1 else (1, temporal_kernel_size))
        # Apply the spatial stride on the 1x1 conv or on the 3x3 conv.
        (str1x1, str3x3) = ((spatial_stride, 1) if spatial_stride_1x1 else (1, spatial_stride))
        self.disable_pre_activation = disable_pre_activation
        # First-branch pre-activation can be disabled (e.g. right after stem).
        if (not disable_pre_activation):
            self.branch2a_bn = nn.BatchNorm3d(dim_in, eps=bn_eps, momentum=bn_mmt)
            self.branch2a_relu = nn.ReLU(inplace=inplace_relu)
        # branch2a conv always exists (forward() calls it unconditionally).
        self.branch2a = nn.Conv3d(dim_in, dim_inner, kernel_size=[temporal_kernel_size_1x1, 1, 1], stride=[1, str1x1, str1x1], padding=[(temporal_kernel_size_1x1 // 2), 0, 0], bias=False)
        self.branch2b_bn = nn.BatchNorm3d(dim_inner, eps=bn_eps, momentum=bn_mmt)
        self.branch2b_relu = nn.ReLU(inplace=inplace_relu)
        self.branch2b = nn.Conv3d(dim_inner, dim_inner, [temporal_kernel_size_3x3, 3, 3], stride=[temporal_stride, str3x3, str3x3], padding=[(temporal_kernel_size_3x3 // 2), 1, 1], groups=num_groups, bias=False)
        self.branch2c_bn = nn.BatchNorm3d(dim_inner, eps=bn_eps, momentum=bn_mmt)
        self.branch2c_relu = nn.ReLU(inplace=inplace_relu)
        self.branch2c = nn.Conv3d(dim_inner, dim_out, kernel_size=[1, 1, 1], stride=[1, 1, 1], padding=[0, 0, 0], bias=False)
        # Marker attribute; presumably consumed by weight init elsewhere --
        # TODO confirm.
        self.branch2c.final_transform_op = True

    def forward(self, x):
        # Branch 2a: optional pre-activation, then reducing conv.
        if (not self.disable_pre_activation):
            x = self.branch2a_bn(x)
            x = self.branch2a_relu(x)
        x = self.branch2a(x)
        # Branch 2b: grouped 3x3 conv with pre-activation.
        x = self.branch2b_bn(x)
        x = self.branch2b_relu(x)
        x = self.branch2b(x)
        # Branch 2c: expanding 1x1 conv with pre-activation.
        x = self.branch2c_bn(x)
        x = self.branch2c_relu(x)
        x = self.branch2c(x)
        return x
def test_requirement_source_fix_explicit_subdep_resolver_error(req_file):
    """Fixing an already-resolved sub-dependency (jinja2 of flask) must not
    invoke dependency resolution."""
    source = _init_requirement([(req_file(), 'flask==2.0.1')])
    flask_deps = source.collect()
    # Find the jinja2 sub-dependency among flask's resolved dependencies.
    jinja_dep: (ResolvedDependency | None) = None
    for dep in flask_deps:
        if (isinstance(dep, ResolvedDependency) and (dep.canonical_name == 'jinja2')):
            jinja_dep = dep
            break
    assert (jinja_dep is not None)
    # NOTE(review): mock_resolver is never injected into `source`; the call
    # recorder only demonstrates zero resolve calls on this stub -- confirm
    # this still tests what the name claims.
    mock_resolver = pretend.stub(resolve=pretend.call_recorder((lambda _reqs, _req_hashes: [])))
    source.fix(ResolvedFixVersion(dep=jinja_dep, version=Version('4.0.0')))
    assert (len(mock_resolver.resolve.calls) == 0)
def test_connection_request() -> None:
    """A valid WebSocket upgrade request parses; handshake-negotiation
    headers are consumed while transport/unknown headers pass through."""
    event = _make_connection_request([(b'Host', b'localhost'), (b'Connection', b'Keep-Alive, Upgrade'), (b'Upgrade', b'websocket'), (b'Sec-WebSocket-Version', b'13'), (b'Sec-WebSocket-Key', generate_nonce()), (b'X-Foo', b'bar')])
    assert (event.extensions == [])
    assert (event.host == 'localhost')
    assert (event.subprotocols == [])
    assert (event.target == '/')
    headers = normed_header_dict(event.extra_headers)
    # Host and negotiated extension/protocol headers are absorbed into the
    # event's dedicated fields, not left in extra_headers.
    assert (b'host' not in headers)
    assert (b'sec-websocket-extensions' not in headers)
    assert (b'sec-websocket-protocol' not in headers)
    # Transport-level upgrade headers and unrecognized headers remain.
    assert (headers[b'connection'] == b'Keep-Alive, Upgrade')
    assert (headers[b'sec-websocket-version'] == b'13')
    assert (headers[b'upgrade'] == b'websocket')
    assert (headers[b'x-foo'] == b'bar')
def set_subfolders_for_roots_JIF(root, radiometry_depth):
    """Return glob templates for the JIF dataset splits under `root`.

    lr/lrc share the Sentinel-2 L2A glob; hr/hr_pan share the high-res
    glob, which includes an '8bit' subfolder when radiometry_depth == 8.
    Each value ends with a path separator (trailing empty component).
    """
    lr_glob = os.path.join(root, 'lr_dataset', '*', 'L2A', '')
    if radiometry_depth == 8:
        hr_glob = os.path.join(root, 'hr_dataset', '8bit', '*', '')
    else:
        hr_glob = os.path.join(root, 'hr_dataset', '*', '')
    return {'lr': lr_glob, 'lrc': lr_glob, 'hr': hr_glob, 'hr_pan': hr_glob}
class AnsiCmd(object):
    """Produces ANSI escape strings; escapes are suppressed when stdout is
    not a terminal, unless forceAnsi overrides the check."""

    def __init__(self, forceAnsi):
        self.forceAnsi = forceAnsi

    def _enabled(self):
        # Emit escapes only on a real terminal, or when explicitly forced.
        return sys.stdout.isatty() or self.forceAnsi

    def cmdReset(self):
        """Escape that resets all attributes, or '' when disabled."""
        return ESC + '[0m' if self._enabled() else ''

    def cmdColour(self, colour):
        """Escape selecting a raw colour code, or '' when disabled."""
        return ESC + '[' + colour + 'm' if self._enabled() else ''

    def cmdColourNamed(self, colour):
        """Like cmdColour but looks the code up by name; raises
        AnsiColourException for unknown names."""
        try:
            return self.cmdColour(COLOURS_NAMED[colour])
        except KeyError:
            raise AnsiColourException('Unknown Colour %s' % colour)

    def cmdBold(self):
        """Escape enabling bold text, or '' when disabled."""
        return ESC + '[1m' if self._enabled() else ''

    def cmdUnderline(self):
        """Escape enabling underlined text, or '' when disabled."""
        return ESC + '[4m' if self._enabled() else ''
class TestRequired(TestNameCheckVisitorBase):
    """Checks Required/NotRequired handling in TypedDict definitions.

    NOTE(review): the bare `_passes()` calls in the original appear to be
    stripped `@assert_passes()` decorators; restored here -- confirm the
    decorator name against version control.
    """

    @assert_passes()
    def test_typing_extensions(self):
        from typing_extensions import NotRequired, Required, TypedDict

        # Total dict: Required/NotRequired override the default per key.
        class RNR(TypedDict):
            a: int
            b: Required[str]
            c: NotRequired[float]

        def take_rnr(td: RNR) -> None:
            assert_is_value(td, TypedDictValue({'a': (True, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))}))

        # total=False: keys are optional unless marked Required.
        class NotTotal(TypedDict, total=False):
            a: int
            b: Required[str]
            c: NotRequired[float]

        def take_not_total(td: NotTotal) -> None:
            assert_is_value(td, TypedDictValue({'a': (False, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))}))

        # Qualifiers inside string (forward-ref) annotations.
        class Stringify(TypedDict):
            a: 'int'
            b: 'Required[str]'
            c: 'NotRequired[float]'

        def take_stringify(td: Stringify) -> None:
            assert_is_value(td, TypedDictValue({'a': (True, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))}))

    @assert_passes()
    def test_typeddict_from_call(self):
        from typing import Any, Optional
        from typing_extensions import NotRequired, Required, TypedDict

        class Stringify(TypedDict):
            a: 'int'
            b: 'Required[str]'
            c: 'NotRequired[float]'

        def make_td() -> Any:
            return Stringify

        def return_optional() -> Optional[Stringify]:
            return None

        # TypedDict produced by a call expression in an annotation.
        def return_call() -> Optional[make_td()]:
            return None

        def capybara() -> None:
            assert_is_value(return_optional(), (KnownValue(None) | TypedDictValue({'a': (True, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))})))
            assert_is_value(return_call(), (KnownValue(None) | TypedDictValue({'a': (True, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))})))

    @assert_passes()
    def test_typing(self):
        # Same checks with typing.TypedDict instead of typing_extensions'.
        from typing import TypedDict
        from typing_extensions import NotRequired, Required

        class RNR(TypedDict):
            a: int
            b: Required[str]
            c: NotRequired[float]

        def take_rnr(td: RNR) -> None:
            assert_is_value(td, TypedDictValue({'a': (True, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))}))

        class NotTotal(TypedDict, total=False):
            a: int
            b: Required[str]
            c: NotRequired[float]

        def take_not_total(td: NotTotal) -> None:
            assert_is_value(td, TypedDictValue({'a': (False, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))}))

        class Stringify(TypedDict):
            a: 'int'
            b: 'Required[str]'
            c: 'NotRequired[float]'

        def take_stringify(td: Stringify) -> None:
            assert_is_value(td, TypedDictValue({'a': (True, TypedValue(int)), 'b': (True, TypedValue(str)), 'c': (False, TypedValue(float))}))

    @assert_passes()
    def test_unsupported_location(self):
        # Required/NotRequired outside a TypedDict body is invalid per spec.
        from typing_extensions import NotRequired, Required

        def f(x: Required[int]) -> None:
            pass

        def g() -> Required[int]:
            return 3

        class Capybara():
            x: Required[int]
            y: NotRequired[int]
def parse_id666(data):
    """Parse an ID666 (SPC header) text-format tag block into a dict.

    Fixed-offset byte fields are decoded to ASCII strings (NULs stripped);
    empty fields are dropped. '~#length' (seconds) stays an int when byte
    130 indicates the text-format layout.
    """
    tags = {
        'title': data[:32],
        'album': data[32:64],
        'dumper': data[64:80],
        'comments': data[80:112],
    }
    # A byte below ASCII 'A' at offset 130 marks the text-format layout:
    # three-digit length at 123 and artist shifted one byte later.
    if data[130:(130 + 1)] < b'A':
        try:
            tags['~#length'] = int(data[123:126].strip(b'\x00'))
        except ValueError:
            pass
        tags['artist'] = data[131:163]
    else:
        tags['artist'] = data[130:162]
    # Decode byte fields; keep internal keys (~#...) untouched.
    for key in list(tags):
        if key.startswith('~#'):
            continue
        text = tags[key].replace(b'\x00', b'').decode('ascii', 'ignore')
        if text:
            tags[key] = text
        else:
            del tags[key]
    return tags
def test_exporter_can_export_requirements_txt_with_directory_packages_and_markers(tmp_path: Path, poetry: Poetry, fixture_root_uri: str) -> None:
    """Directory dependencies keep their environment markers (unioned with
    the package's python constraint) in requirements.txt output."""
    poetry.locker.mock_lock_data({'package': [{'name': 'foo', 'version': '1.2.3', 'optional': False, 'python-versions': '*', 'marker': "python_version < '3.7'", 'source': {'type': 'directory', 'url': 'sample_project', 'reference': ''}}], 'metadata': {'python-versions': '*', 'content-hash': '', 'files': {'foo': []}}})
    set_package_requires(poetry)
    exporter = Exporter(poetry, NullIO())
    exporter.export('requirements.txt', tmp_path, 'requirements.txt')
    with (tmp_path / 'requirements.txt').open(encoding='utf-8') as f:
        content = f.read()
    # Expect a single line: path requirement plus the combined marker.
    expected = f'''foo {fixture_root_uri}/sample_project ; {MARKER_PY27.union(MARKER_PY36_ONLY)}
'''
    assert (content == expected)
def _get_rw_sharding_perf(batch_sizes: List[int], world_size: int, local_world_size: int,
                          input_lengths: List[float], emb_dim: int, input_data_type_size: float,
                          table_data_type_size: float, fwd_a2a_comm_data_type_size: float,
                          bwd_a2a_comm_data_type_size: float, fwd_sr_comm_data_type_size: float,
                          bwd_sr_comm_data_type_size: float, num_poolings: List[float],
                          device_bw: float, inter_host_bw: float, intra_host_bw: float,
                          bwd_compute_multiplier: float, is_pooled: bool,
                          is_weighted: bool = False) -> Perf:
    """Estimate forward/backward compute and comms time for row-wise sharding.

    Sizes are in bytes; times are sizes divided by the relevant bandwidth.
    Returns a Perf with compute and comms estimates for both passes.
    """
    # Per-rank lookup count: rows are split evenly across world_size ranks.
    batch_inputs = (sum([((x * y) * z) for (x, y, z) in zip(input_lengths, num_poolings, batch_sizes)]) / world_size)
    # Pooled output is one vector per (pooling, sample); sequence output is
    # one vector per lookup.
    batch_outputs = (sum([(x * y) for (x, y) in zip(num_poolings, batch_sizes)]) if is_pooled else batch_inputs)
    input_read_size = math.ceil(((batch_inputs * world_size) * input_data_type_size))
    if is_weighted:
        # Weighted features also read one per-id weight per lookup.
        input_read_size *= 2
    embedding_lookup_size = (((batch_inputs * world_size) * emb_dim) * table_data_type_size)
    # Pooled outputs move via reduce-scatter; sequence outputs via all-to-all.
    fwd_output_write_size = ((((batch_outputs * world_size) * emb_dim) * fwd_sr_comm_data_type_size) if is_pooled else (((batch_outputs * world_size) * emb_dim) * fwd_a2a_comm_data_type_size))
    bwd_output_write_size = ((((batch_outputs * world_size) * emb_dim) * bwd_sr_comm_data_type_size) if is_pooled else (((batch_outputs * world_size) * emb_dim) * bwd_a2a_comm_data_type_size))
    # Cross-host bandwidth applies once the job spans multiple hosts.
    comms_bw = (inter_host_bw if (world_size > local_world_size) else intra_host_bw)
    fwd_comms = (fwd_output_write_size / comms_bw)
    fwd_compute = (((input_read_size + embedding_lookup_size) + fwd_output_write_size) / device_bw)
    bwd_comms = (bwd_output_write_size / comms_bw)
    # Extra device-side copy cost for batching gradients before comms.
    bwd_batched_copy = ((bwd_output_write_size * BATCHED_COPY_PERF_FACTOR) / device_bw)
    # Weighted tables run an additional kernel to compute indice-weight grads.
    bwd_grad_indice_weights_kernel = ((fwd_compute * WEIGHTED_KERNEL_MULTIPLIER) if is_weighted else 0)
    # Backward compute scales forward compute by an empirical multiplier.
    bwd_compute = (fwd_compute * bwd_compute_multiplier)
    return Perf(fwd_compute=fwd_compute, fwd_comms=fwd_comms, bwd_compute=(bwd_compute + bwd_grad_indice_weights_kernel), bwd_comms=(bwd_comms + bwd_batched_copy))
def _ListBoxTruncInfo(win):
    """Build (title, rect, font, format) truncation-check data for every
    item of a list box control."""
    fmt = win32defines.DT_SINGLELINE | win32defines.DT_NOPREFIX
    results = []
    for title in win.texts():
        rect = win.client_rects()[0]
        # Shrink the client rect slightly to match list-box text padding.
        rect.right -= 2
        rect.bottom -= 1
        results.append((title, rect, win.font(), fmt))
    return results
def chunked(iterable, n, strict=False):
    """Yield lists of up to n items from iterable.

    With strict=True a final short chunk raises ValueError, and n must
    not be None.
    """
    # iter(callable, sentinel): keep taking n-item lists until an empty one.
    chunks = iter(partial(take, n, iter(iterable)), [])
    if not strict:
        return chunks
    if n is None:
        raise ValueError('n must not be None when using strict mode.')

    def _validated():
        for chunk in chunks:
            if len(chunk) != n:
                raise ValueError('iterable is not divisible by n.')
            yield chunk

    return iter(_validated())
def test_nested_sequence():
    """Perform a sequence in which one intent wraps a nested effect
    generator, dispatched via nested_sequence."""
    # NOTE(review): WrappedIntent uses attr.ib() fields but has no @attr.s
    # decorator here -- likely lost in formatting; confirm against VCS.
    class WrappedIntent(object):
        effect = attr.ib()
        value = attr.ib()

    def internal():
        # Nested generator consumed by nested_sequence below.
        (yield Effect(1))
        (yield Effect(2))
        return 'wrap'

    def code_under_test():
        r = (yield Effect(WrappedIntent(internal(), 'field')))
        r2 = (yield Effect(MyIntent('a')))
        return (r, r2)

    # Expected intents in order; the wrapped intent's inner effects are
    # matched by the nested sequence.
    seq = [(WrappedIntent(_ANY, 'field'), nested_sequence([(1, const('r1')), (2, const('r2'))])), (MyIntent('a'), const('result2'))]
    eff = code_under_test()
    assert (perform_sequence(seq, eff) == ('wrap', 'result2'))
def parseContent(openId, MsgContent):
    """Dispatch a chat text message to a command handler, else echo a
    default robot reply.

    NOTE(review): several string literals below are empty -- they look like
    mojibake-stripped non-ASCII (Chinese) command keywords. As written,
    replace('', '1') inserts '1' between every character and every
    find('') test is trivially >= 0, so only the first branch can run.
    Recover the original literals from version control.
    """
    openId = openId.lower()
    try:
        # Normalize separators and tokenize the message.
        list_content = MsgContent.replace(',', ' ').replace(',', ' ').replace('.', ' ').replace(':', ' ').replace('', '1').replace('', '2').replace('', '').strip().split()
        if ((len(list_content) < 1) or (list_content[0] not in LST_INSTR)):
            return replyMsg('ROBOT', MsgContent)
        else:
            instr_flg = list_content[0]
            # Drop the instruction token; the rest are its arguments.
            list_content.remove(list_content[0])
            if (instr_flg.find('') >= 0):
                return addTask(openId, list_content)
            elif (instr_flg.find('') >= 0):
                return cancelTask(openId)
            elif (instr_flg.find('') >= 0):
                return HELP_INFO
            elif ((instr_flg.find('CONFIG') >= 0) and (openId == ADMINOPENID)):
                # Admin-only configuration dump.
                return getAdminConfig()
            else:
                return replyMsg('ROBOT', MsgContent)
    except Exception as e:
        # Propagate; the else/finally no-ops are kept from the original.
        raise
    else:
        pass
    finally:
        pass
def retrieve_available_artifacts():
    """Scan the current working directory and return a mapping of each
    subdirectory name to an Artifact recording its path(s)."""

    class Artifact():
        """Named artifact plus the list of {'name', 'path'} entries found."""

        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({'name': self.name, 'path': path})

    available: Dict[str, Artifact] = {}
    for entry in os.listdir():
        if not os.path.isdir(entry):
            continue
        # One Artifact per directory name; reuse it if already present.
        artifact = available.setdefault(entry, Artifact(entry))
        artifact.add_path(entry)
    return available
def test_no_rerun_on_strict_xfail_with_only_rerun_flag(testdir):
    """A strict-xfail unexpected pass fails outright and is not rerun when
    --only-rerun filters on a different error.

    Fix: the generated test file's decorator was missing its
    `@pytest.mark` prefix (`.xfail(...)`), which would have made the
    generated module unparseable; restored.
    """
    testdir.makepyfile(
        """
        import pytest

        @pytest.mark.xfail(strict=True)
        def test_xfail():
            assert True
        """
    )
    result = testdir.runpytest('--reruns', '1', '--only-rerun', 'RuntimeError')
    # Strict xfail that passes is a failure, and must not be rerun.
    assert_outcomes(result, passed=0, failed=1, rerun=0)
class CoffeeMakerMode(IntEnum):
    """Operating states reported by the coffee maker.

    CamelCase members are aliases mirroring the vendor's original names.
    """
    # Fallback member for unrecognised wire values (module-level sentinel).
    _UNKNOWN = _UNKNOWN
    REFILL = 0
    Refill = 0
    PLACE_CARAFE = 1
    PlaceCarafe = 1
    REFILL_WATER = 2
    RefillWater = 2
    READY = 3
    Ready = 3
    BREWING = 4
    Brewing = 4
    BREWED = 5
    Brewed = 5
    CLEANING_BREWING = 6
    CleaningBrewing = 6
    CLEANING_SOAKING = 7
    CleaningSoaking = 7
    BREW_FAILED_CARAFE_REMOVED = 8
    BrewFailCarafeRemoved = 8

    # Fix: _missing_ must be a classmethod -- the enum machinery invokes
    # cls._missing_(value); as a plain function the call received `value`
    # as `cls` and raised TypeError. The return annotation is quoted so it
    # resolves as a forward reference regardless of postponed evaluation.
    @classmethod
    def _missing_(cls, value: Any) -> "CoffeeMakerMode":
        # Map unknown values to the _UNKNOWN sentinel instead of raising.
        return cls._UNKNOWN
def test_vertical_perspective_operation():
    """VerticalPerspectiveConversion stores its method name and exposes all
    constructor parameters (as floats) via _to_dict."""
    aeaop = VerticalPerspectiveConversion(viewpoint_height=10, latitude_topocentric_origin=1, longitude_topocentric_origin=2, false_easting=3, false_northing=4, ellipsoidal_height_topocentric_origin=5)
    assert (aeaop.name == 'unknown')
    assert (aeaop.method_name == 'Vertical Perspective')
    # Integer inputs come back as WKT parameter floats.
    assert (_to_dict(aeaop) == {'Latitude of topocentric origin': 1.0, 'Longitude of topocentric origin': 2.0, 'Ellipsoidal height of topocentric origin': 5.0, 'Viewpoint height': 10.0, 'False easting': 3.0, 'False northing': 4.0})
class MemCOW(COW):
    """Copy-on-write layer that keeps modified data in memory (BytesIO)
    rather than an on-disk overlay file."""

    def __init__(self, addr, imagefd, logger, seek_lock):
        self.addr = addr
        self.imagefd = imagefd
        # Lock serialising seek+read access to the shared image fd.
        self.seek_lock = seek_lock
        self.logger = helpers.get_child_logger(logger, 'FS')
        self.logger.info('Copy-On-Write for {0} in Memory'.format(addr))
        # In-memory backing store for written data.
        self.fh = io.BytesIO()
        # Page numbers that have been copied into self.fh.
        self.pages = []
# NOTE(review): the decorators below are reconstructed from stripped
# fragments in the original ('()', '_context', bare option tuples) --
# confirm names/order against version control.
@click.command()
@click.pass_context
@click.option('--add', '-a', help='Name of api key to add')
@click.option('--list', '-l', is_flag=True, help='List all API keys')
@click.option('--super', '-s', is_flag=True, help="API Key has super user priviledges (has access to other application's data)")
def key(ctx, add, list, super):
    """Add and/or list API keys."""
    try:
        keys = APIKey.query.all()
        # Reject duplicate names before creating anything.
        if (add in [k.name for k in keys]):
            click.echo('Error: An API Key with that name already exists.')
            return
        if add:
            api_key = add_api_key(add, super)
            click.echo('Key added: {0}'.format(api_key))
        if list:
            for ak in APIKey.query.all():
                click.echo(ak)
    except OperationalError as oe:
        # The table likely does not exist yet.
        click.echo("An error occurred. Be sure to init the db with the 'initdb' command.")
class CmdConfigureTest(unittest.TestCase):
    """Tests for the torchx `configure` CLI subcommand: each variant should
    write a .torchxconfig into the working directory."""

    def setUp(self) -> None:
        # Fresh parser + temp working dir per test; cwd restored in tearDown.
        self.parser = argparse.ArgumentParser()
        self.cmd_configure = CmdConfigure()
        self.cmd_configure.add_arguments(self.parser)
        self.test_dir = tempfile.mkdtemp(prefix='torchx_cmd_configure_test')
        self._old_cwd = os.getcwd()
        os.chdir(self.test_dir)

    def tearDown(self) -> None:
        os.chdir(self._old_cwd)
        shutil.rmtree(self.test_dir)

    def _args(self, sys_args: List[str]) -> argparse.Namespace:
        # Helper: parse a raw argv list with the configured parser.
        return self.parser.parse_args(sys_args)

    def test_configure_print(self) -> None:
        # --print must not write a file; just exercise both print modes.
        self.cmd_configure.run(self._args(['--print']))
        self.cmd_configure.run(self._args(['--print', '--all']))

    def test_configure(self) -> None:
        os.chdir(self.test_dir)
        self.cmd_configure.run(self._args([]))
        self.assertTrue((Path(self.test_dir) / '.torchxconfig').exists())

    def test_configure_all(self) -> None:
        self.cmd_configure.run(self._args(['--all']))
        self.assertTrue((Path(self.test_dir) / '.torchxconfig').exists())

    def test_configure_local_cwd(self) -> None:
        self.cmd_configure.run(self._args(['--schedulers', 'local_cwd']))
        self.assertTrue((Path(self.test_dir) / '.torchxconfig').exists())
class OptimizationTest(unittest.TestCase):
    """AdamW convergence test: with zero weight decay the parameter should
    converge to the regression target."""

    def assertListAlmostEqual(self, list1, list2, tol):
        """Element-wise almost-equal comparison of two equal-length lists."""
        self.assertEqual(len(list1), len(list2))
        for expected, actual in zip(list1, list2):
            self.assertAlmostEqual(expected, actual, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = torch.nn.MSELoss()
        # weight_decay=0 so nothing pulls w away from the MSE minimum.
        optimizer = AdamW(params=[w], lr=0.2, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            # Manually clear gradients between steps.
            w.grad.detach_()
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=0.01)
# Fix: the decorator fragment `.skipif(...)` was missing its `@pytest.mark`
# prefix (invalid syntax); restored. Assumes `pytest` is imported at the
# top of this file -- TODO confirm against VCS.
@pytest.mark.skipif((platform.system() != 'Linux'), reason='test requires /proc/self/ mechanism')
def test_open_file_usage_never_exceeds_1000(runner, monkeypatch, tmp_path):
    """Checking ~2000 instance files must not keep ~2000 fds open at once
    (counted via /proc/self/fd)."""
    schema_path = (tmp_path / 'schema.json')
    schema_path.write_text('{}')
    args = ['--schemafile', str(schema_path)]
    for i in range(2000):
        instance_path = (tmp_path / f'file{i}.json')
        instance_path.write_text('{}')
        args.append(str(instance_path))
    checker = None

    def fake_execute(argv):
        # Capture the built checker instead of running the real check.
        nonlocal checker
        checker = build_checker(argv)

    monkeypatch.setattr('check_jsonschema.cli.main_command.execute', fake_execute)
    res = runner.invoke(cli_main, args)
    assert (res.exit_code == 0), res.stderr
    assert (checker is not None)
    assert (len(os.listdir('/proc/self/fd')) < 2000)
    # Fix: the original's final assertion was `assert X, 2000` -- an
    # always-true assert with 2000 as its message; the comparison against
    # the earlier bound was clearly intended.
    for (_fname, _data) in checker._instance_loader.iter_files():
        assert (len(os.listdir('/proc/self/fd')) < 2000)
class DevNetTS(BaseDeepAD):
    """Deviation Networks (DevNet) for weakly-supervised time-series anomaly detection.

    Wraps a sequence network (selected by `network`, e.g. 'Transformer' or
    'ConvSeq') trained with a deviation loss (`DevLoss`) using the labelled
    anomalies (y == 1). The generic train/inference loop lives in
    `BaseDeepAD`; this class only builds the data loaders, network, and
    loss, and defines the per-batch forward passes.
    """

    def __init__(self, epochs=100, batch_size=64, lr=0.001, network='Transformer', seq_len=100, stride=1, rep_dim=128, hidden_dims='100,50', act='ReLU', bias=False, n_heads=8, d_model=512, attn='self_attn', pos_encoding='fixed', norm='LayerNorm', margin=5.0, l=5000, epoch_steps=(- 1), prt_steps=10, device='cuda', verbose=2, random_state=42):
        # Generic training options are delegated to the base class.
        super(DevNetTS, self).__init__(data_type='ts', model_name='DevNet', epochs=epochs, batch_size=batch_size, lr=lr, network=network, seq_len=seq_len, stride=stride, epoch_steps=epoch_steps, prt_steps=prt_steps, device=device, verbose=verbose, random_state=random_state)
        # Deviation-loss hyperparameters: confidence margin and the sample
        # count `l` passed through to DevLoss.
        self.margin = margin
        self.l = l
        # Network-architecture options; which apply depends on `network`
        # (see training_prepare). NOTE(review): `rep_dim` is accepted but
        # never stored or used here — confirm against BaseDeepAD.
        self.hidden_dims = hidden_dims
        self.act = act
        self.bias = bias
        self.n_heads = n_heads
        self.d_model = d_model
        self.attn = attn
        self.pos_encoding = pos_encoding
        self.norm = norm
        return

    def training_prepare(self, X, y):
        """Build the class-balanced train loader, scoring network, and DevLoss.

        Returns:
            (train_loader, net, criterion) consumed by the base-class loop.
        """
        # Class-balanced sampling: weight each sample inversely to its
        # class frequency so labelled anomalies are drawn as often as normals.
        n_anom = np.where((y == 1))[0].shape[0]
        n_norm = (self.n_samples - n_anom)
        weight_map = {0: (1.0 / n_norm), 1: (1.0 / n_anom)}
        dataset = TensorDataset(torch.from_numpy(X).float(), torch.from_numpy(y).long())
        sampler = WeightedRandomSampler(weights=[weight_map[label.item()] for (data, label) in dataset], num_samples=len(dataset), replacement=True)
        train_loader = DataLoader(dataset, batch_size=self.batch_size, sampler=sampler)
        # Single-output scoring head: n_output=1 anomaly score per sequence.
        network_params = {'n_features': self.n_features, 'n_hidden': self.hidden_dims, 'n_output': 1, 'activation': self.act, 'bias': self.bias}
        if (self.network == 'Transformer'):
            # Transformer needs attention and positional-encoding options.
            network_params['n_heads'] = self.n_heads
            network_params['d_model'] = self.d_model
            network_params['pos_encoding'] = self.pos_encoding
            network_params['norm'] = self.norm
            network_params['attn'] = self.attn
            network_params['seq_len'] = self.seq_len
        elif (self.network == 'ConvSeq'):
            network_params['seq_len'] = self.seq_len
        network_class = get_network(self.network)
        net = network_class(**network_params).to(self.device)
        criterion = DevLoss(margin=self.margin, l=self.l)
        if (self.verbose >= 2):
            print(net)
        return (train_loader, net, criterion)

    def inference_prepare(self, X):
        """Build the test loader and switch the loss to per-sample reduction."""
        test_loader = DataLoader(X, batch_size=self.batch_size, drop_last=False, shuffle=False)
        # 'none' keeps one loss/score value per sample at inference time.
        self.criterion.reduction = 'none'
        return test_loader

    def training_forward(self, batch_x, net, criterion):
        """One training step: score a (data, label) batch and return the loss."""
        (batch_x, batch_y) = batch_x
        batch_x = batch_x.float().to(self.device)
        batch_y = batch_y.to(self.device)
        pred = net(batch_x)
        loss = criterion(batch_y, pred)
        return loss

    def inference_forward(self, batch_x, net, criterion):
        """Score a test batch; returns (representation, flat anomaly scores)."""
        batch_x = batch_x.float().to(self.device)
        s = net(batch_x)
        s = s.view((- 1))
        # No separate representation is computed; the raw input stands in.
        batch_z = batch_x
        return (batch_z, s)
@pytest.mark.end_to_end()
def test_collect_produces_that_is_not_str_or_path(tmp_path, capsys):
    """A produces-mark value that is not str/Path must fail collection.

    Fix: restores the `@pytest.mark.end_to_end()` decorator and the
    `@pytask.mark.produces(True)` line inside the task source, both of which
    lost their `@...mark` prefix; without the produces mark the expected
    COLLECTION_FAILED outcome cannot occur.
    """
    source = """
    import pytask

    @pytask.mark.produces(True)
    def task_with_non_path_dependency():
        pass
    """
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    session = build(paths=tmp_path)
    assert session.exit_code == ExitCode.COLLECTION_FAILED
    assert session.collection_reports[0].outcome == CollectionOutcome.FAIL
    exc_info = session.collection_reports[0].exc_info
    assert isinstance(exc_info[1], NodeNotCollectedError)
    captured = capsys.readouterr().out
    assert "'.depends_on'" in captured
def test_nested_process_search_dv_over_100_terms(s1_product: SentinelOne):
    """Searches with more than 100 terms are split into <=100-term DV queries."""
    terms = list(range(1, 106))
    # Expected rendered IN-lists: the first 100 terms, then the remaining 5.
    first_chunk = ('("' + '", "'.join(str(t) for t in terms[:100])) + '")'
    second_chunk = ('("' + '", "'.join(str(t) for t in terms[100:])) + '")'
    s1_product._queries = {}
    s1_product._pq = False
    tag = Tag('list_of_105_terms')
    s1_product.nested_process_search(tag, {'process_name': terms}, {})
    queries = s1_product._queries[tag]
    assert len(queries) == 2
    # Both chunks share the time window of the first generated query.
    sdate = queries[0].start_date
    edate = queries[0].end_date
    assert Query(sdate, edate, 'ProcessName', 'in contains anycase', first_chunk, None) in queries
    assert Query(sdate, edate, 'ProcessName', 'in contains anycase', second_chunk, None) in queries
class SumDiffOp(Op):
    """PyTensor Op that computes both x + y and x - y in a single node."""

    def make_node(self, x, y):
        # Two outputs with the upcast dtype of the inputs and x's ndim;
        # static shape information is left fully unknown (all None).
        x = pt.as_tensor_variable(x)
        y = pt.as_tensor_variable(y)
        outdim = x.type.ndim
        output1 = TensorType(dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=((None,) * outdim))()
        output2 = TensorType(dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=((None,) * outdim))()
        return Apply(self, inputs=[x, y], outputs=[output1, output2])

    def perform(self, node, inputs, output_storage):
        # Numeric implementation: write sum and difference into the output cells.
        (x, y) = inputs
        (z1, z2) = output_storage
        z1[0] = (x + y)
        z2[0] = (x - y)

    def infer_shape(self, fgraph, node, i0_shapes):
        # Both outputs share the shape of the first input.
        return [i0_shapes[0], i0_shapes[0]]

    def grad(self, inputs, output_grads):
        # d(x+y)/dx = d(x-y)/dx = 1  ->  grad wrt x is og1 + og2
        # d(x+y)/dy = 1, d(x-y)/dy = -1  ->  grad wrt y is og1 - og2
        # A None output gradient means that output is unused; treat it as zero.
        (og1, og2) = output_grads
        if (og1 is None):
            og1 = pt.zeros_like(og2)
        if (og2 is None):
            og2 = pt.zeros_like(og1)
        return [(og1 + og2), (og1 - og2)]
class encoder(nn.Module):
    """MLP encoder: four stacked linear layers mapping 2 -> 64 -> 128 -> 256 -> 512.

    NOTE(review): the constructor parameters (in_features, hidden_features,
    out_features, act_layer, drop) are all ignored — the layer widths are
    hard-coded and no activation/dropout is applied. Kept as-is to preserve
    the public interface; confirm with callers before wiring them up.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hard-coded layer widths; the input is assumed 2-dimensional.
        widths = (2, 64, 128, 256, 512)
        self.fc1 = nn.Linear(widths[0], widths[1])
        self.fc3 = nn.Linear(widths[1], widths[2])
        self.fc4 = nn.Linear(widths[2], widths[3])
        self.fc5 = nn.Linear(widths[3], widths[4])

    def forward(self, x):
        # Pure linear stack — no nonlinearity between layers.
        for layer in (self.fc1, self.fc3, self.fc4, self.fc5):
            x = layer(x)
        return x
class SecuredMethod(BoundFunctionWrapper):
    """Bound-method wrapper that routes read/write access through the parent's permission check.

    Fix: removed the redundant `__init__(*args, **kwargs)` override that only
    delegated to the superclass — inheritance already provides it.
    """

    def _self_read_check(self, *args, **kwargs):
        # Delegate the read-permission decision to the owning wrapper,
        # passing the bound instance as the access target.
        return self._self_parent.check_right(self.read_check, self._self_instance, *args, **kwargs)

    def _self_write_check(self, *args, **kwargs):
        # Same as _self_read_check but for the write permission.
        return self._self_parent.check_right(self.write_check, self._self_instance, *args, **kwargs)
@pytest.mark.end_to_end()
def test_collect_task_with_ignore_from_config(runner, tmp_path):
    """Tasks matched by the config's `ignore` patterns must not be collected.

    Fix: restores the `@pytest.mark.end_to_end()` decorator and the
    `@pytask.mark.depends_on` / `@pytask.mark.produces` lines inside both
    task sources, which lost their `@...mark` prefix.
    """
    source = """
    import pytask

    @pytask.mark.depends_on("in_1.txt")
    @pytask.mark.produces("out_1.txt")
    def task_example_1():
        pass
    """
    tmp_path.joinpath('task_example_1.py').write_text(textwrap.dedent(source))
    source = """
    @pytask.mark.depends_on("in_2.txt")
    @pytask.mark.produces("out_2.txt")
    def task_example_2():
        pass
    """
    tmp_path.joinpath('task_example_2.py').write_text(textwrap.dedent(source))
    tmp_path.joinpath('in_1.txt').touch()
    config = """
    [tool.pytask.ini_options]
    ignore = ["task_example_2.py"]
    """
    tmp_path.joinpath('pyproject.toml').write_text(textwrap.dedent(config))

    result = runner.invoke(cli, ['collect', tmp_path.as_posix()])
    assert result.exit_code == ExitCode.OK
    captured = result.output.replace('\n', '').replace(' ', '')
    assert '<Module' in captured
    assert 'task_example_1.py>' in captured
    assert 'task_example_2.py>' not in captured
    assert '<Function' in captured
    assert 'task_example_1>' in captured
    assert '<Function' in captured
    assert 'task_example_2>' not in captured

    # With --nodes, dependencies and products are listed as well.
    result = runner.invoke(cli, ['collect', tmp_path.as_posix(), '--nodes'])
    assert result.exit_code == ExitCode.OK
    captured = result.output.replace('\n', '').replace(' ', '')
    assert '<Module' in captured
    assert 'task_example_1.py>' in captured
    assert 'task_example_2.py>' not in captured
    assert '<Function' in captured
    assert 'task_example_1>' in captured
    assert '<Dependency' in captured
    assert 'in_1.txt>' in captured
    assert '<Product' in captured
    assert 'out_1.txt>' in captured
class DoubleSubVector(_DoubleVectorBase, _matrix_ext.DoubleSubVector):
    """A double-precision vector view over a slice of an existing buffer."""

    def __init__(self, obj, start=0, length=None):
        """Create a sub-vector view of `obj` covering [start, start+length).

        Args:
            obj: A `DoubleVectorBase` instance, or any 1-D vector-like object
                convertible to a contiguous float64 array.
            start (int): Index of the first element of the view.
            length (int or None): Number of elements; defaults to everything
                after `start`.

        Raises:
            ValueError: If `obj` is not 1-D.
            IndexError: If `start` or `length` fall outside `obj`.
        """
        if (not isinstance(obj, _kaldi_vector.DoubleVectorBase)):
            # Fix: ascontiguousarray copies only when needed, matching the old
            # numpy.array(..., copy=False, order='C') behavior; under NumPy 2.0
            # copy=False raises whenever a copy would be required.
            obj = numpy.ascontiguousarray(obj, dtype=numpy.float64)
            if (obj.ndim != 1):
                raise ValueError('obj should be a 1-D vector like object.')
        obj_len = len(obj)
        if (not (0 <= start <= obj_len)):
            raise IndexError('start={0} should be in the range [0,{1}] when len(obj)={1}.'.format(start, obj_len))
        max_len = (obj_len - start)
        if (length is None):
            length = max_len
        if (not (0 <= length <= max_len)):
            raise IndexError('length={} should be in the range [0,{}] when start={} and len(obj)={}.'.format(length, max_len, start, obj_len))
        super(DoubleSubVector, self).__init__(obj, start, length)
def preprocess_triplet_data(samples: List[Tuple[(EntityContext, EntityContext, EntityContext)]], tokenizer: PreTrainedTokenizer, max_seq_length=64, disable_tqdm=False):
    """Tokenize (anchor, positive, negative) entity contexts into triplet examples.

    Flattens every triplet into nine raw sentences (left context, entity,
    right context for each member), tokenizes them in one batch, then
    regroups the parsed examples back into per-sample triplets.
    """
    raw_sentences = []
    for ent_ctx_a, ent_ctx_b, ent_ctx_c in samples:
        for ctx in (ent_ctx_a, ent_ctx_b, ent_ctx_c):
            raw_sentences += [ctx.left_context, ctx.entity, ctx.right_context]
    tokenizer_output = tokenizer(raw_sentences, truncation='do_not_truncate')
    dataset = parse_tokenizer_output(tokenizer_output, max_seq_length, disable_tqdm=disable_tqdm)
    # Regroup the flat example list: entries 3i, 3i+1, 3i+2 belong to sample i.
    return [tuple(dataset[(3 * i):((3 * i) + 3)]) for i in range(len(samples))]
def get_labels(sample, context_mode):
    """Build binary extractive-summary labels for user and agent utterances.

    Collects annotated question (user) and answer (agent) utterance ids from
    every QA pair of `sample`, then converts them into per-utterance binary
    vectors — over the whole dialogue when `context_mode == 'both'`, or per
    speaker role ('Q'/'A') otherwise. The combined label vector always spans
    the whole dialogue.
    """
    user_labels = []
    agent_labels = []
    for qa in sample['QA']:
        user_labels.extend(qa['QueSummUttIDs'])
        agent_labels.extend(qa['AnsSummLongUttIDs'])
    dialogue = sample['Dialogue']
    unique_user = list(set(user_labels))
    unique_agent = list(set(agent_labels))
    if context_mode == 'both':
        b_user_labels = binary_label(unique_user, len(dialogue))
        b_agent_labels = binary_label(unique_agent, len(dialogue))
    else:
        b_user_labels = binary_label_single(unique_user, dialogue, 'Q')
        b_agent_labels = binary_label_single(unique_agent, dialogue, 'A')
    b_final_labels = binary_label(list(set(user_labels + agent_labels)), len(dialogue))
    return (b_user_labels, b_agent_labels, b_final_labels)
def test_parse_summary_line_always_plural() -> None:
    """parse_summary_nouns must normalize singular nouns to plural dict keys."""
    singular_lines = ['some output 1', 'some output 2', '======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====', 'done.']
    expected = {'errors': 1, 'failed': 1, 'passed': 1, 'warnings': 1}
    assert pytester_mod.RunResult.parse_summary_nouns(singular_lines) == expected

    plural_lines = ['some output 1', 'some output 2', '======= 1 failed, 1 passed, 2 warnings, 2 errors in 0.13s ====', 'done.']
    expected = {'errors': 2, 'failed': 1, 'passed': 1, 'warnings': 2}
    assert pytester_mod.RunResult.parse_summary_nouns(plural_lines) == expected
@pytest.mark.parametrize('output, version', [(MUSL_AMD64, _MuslVersion(1, 2)), (MUSL_I386, _MuslVersion(1, 2)), (MUSL_AARCH64, _MuslVersion(1, 1)), (MUSL_INVALID, None), (MUSL_UNKNOWN, None)], ids=['amd64-1.2.2', 'i386-1.2.1', 'aarch64-1.1.24', 'invalid', 'unknown'])
def test_parse_musl_version(output, version):
    """ldd output parses to the expected _MuslVersion, or None when unrecognizable.

    Fix: restores the `@pytest.mark.parametrize` decorator whose
    `@pytest.mark` prefix was stripped, leaving invalid syntax.
    """
    assert _parse_musl_version(output) == version
@pytest.mark.parametrize('text, deleted, rest', [pytest.param('test foobar| delete', ' delete', 'test foobar|', marks=fixme), ('test foobar| delete', ' ', 'test foobar|delete'), pytest.param('test foo|delete bar', 'delete', 'test foo| bar', marks=fixme), ('test foo|delete bar', 'delete ', 'test foo|bar'), pytest.param('test foo<bar> delete', ' delete', 'test foobar|', marks=fixme), ('test foo<bar>delete', 'bardelete', 'test foo|')])
def test_rl_kill_word(lineedit, text, deleted, rest):
    """rl_kill_word kills text from the cursor ('|') forward as annotated.

    Fix: restores the `@pytest.mark.parametrize` decorator whose
    `@pytest.mark` prefix was stripped, leaving invalid syntax.
    """
    _validate_deletion(lineedit, readlinecommands.rl_kill_word, [], text, deleted, rest)
class SelectiveKernel(nn.Module):
    """Selective Kernel convolution module.

    Runs the input through several conv branches with different kernel
    sizes / dilations, weights the branch outputs with a learned per-branch
    attention (`SelectiveKernelAttn`), and sums them.
    """

    def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, rd_ratio=(1.0 / 16), rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None):
        super(SelectiveKernel, self).__init__()
        out_channels = (out_channels or in_channels)
        kernel_size = (kernel_size or [3, 5])  # default: one 3x3 and one 5x5 branch
        _kernel_valid(kernel_size)
        if (not isinstance(kernel_size, list)):
            # A scalar kernel size means two identical branches.
            kernel_size = ([kernel_size] * 2)
        if keep_3x3:
            # Keep every branch at 3x3 and emulate the larger kernels via
            # dilation instead (fewer parameters for the same receptive field).
            dilation = [((dilation * (k - 1)) // 2) for k in kernel_size]
            kernel_size = ([3] * len(kernel_size))
        else:
            dilation = ([dilation] * len(kernel_size))
        self.num_paths = len(kernel_size)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.split_input = split_input
        if self.split_input:
            # Each path consumes an equal slice of the input channels.
            assert ((in_channels % self.num_paths) == 0)
            in_channels = (in_channels // self.num_paths)
        groups = min(out_channels, groups)
        conv_kwargs = dict(stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_layer=drop_layer)
        self.paths = nn.ModuleList([ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) for (k, d) in zip(kernel_size, dilation)])
        attn_channels = (rd_channels or make_divisible((out_channels * rd_ratio), divisor=rd_divisor))
        self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels)

    def forward(self, x):
        """Apply all branches, attention-weight them, and sum over branches."""
        if self.split_input:
            x_split = torch.split(x, (self.in_channels // self.num_paths), 1)
            x_paths = [op(x_split[i]) for (i, op) in enumerate(self.paths)]
        else:
            x_paths = [op(x) for op in self.paths]
        # Stack branch outputs along a new path dim, scale each branch by its
        # attention weight, then collapse the path dim by summation.
        x = torch.stack(x_paths, dim=1)
        x_attn = self.attn(x)
        x = (x * x_attn)
        x = torch.sum(x, dim=1)
        return x
class AttentiveConvNet(Classifier):
    """Attentive Convolution text classifier (light or advanced variant).

    Fix: `get_highway` was declared without `self` but is invoked as
    `self.get_highway(...)`, which raised a TypeError whenever the ADVANCED
    variant was constructed; the signature now includes `self`.
    """

    def __init__(self, dataset, config):
        super(AttentiveConvNet, self).__init__(dataset, config)
        self.attentive_conv_net_type = config.AttentiveConvNet.type
        self.attention_type = config.AttentiveConvNet.attention_type
        self.dim = config.embedding.dimension
        self.attention_dim = self.dim
        self.margin_size = config.AttentiveConvNet.margin_size
        # An odd margin keeps the conv padding symmetric around each token.
        assert ((self.margin_size % 2) == 1), 'AttentiveConvNet margin size should be odd!'
        self.radius = int((self.margin_size / 2))
        if (self.attentive_conv_net_type == AttentiveConvNetType.ADVANCED):
            # Advanced variant doubles the attention dim by concatenating a
            # context highway and a self highway for both source and focus.
            self.attention_dim *= 2
            self.x_context_highway = self.get_highway(self.dim, self.margin_size)
            self.x_self_highway = self.get_highway(self.dim, 1)
            self.a_context_highway = self.get_highway(self.dim, self.margin_size)
            self.a_self_highway = self.get_highway(self.dim, 1)
            self.beneficiary_highway = self.get_highway(self.dim, 1)
        if (self.attention_type == AttentionType.DOT):
            self.dot_product_attention = DotProductAttention(1.0)
        elif (self.attention_type == AttentionType.BILINEAR):
            self.bilinear_matrix = init_tensor(torch.empty(self.attention_dim, self.attention_dim)).to(config.device)
            self.dot_product_attention = DotProductAttention(1.0)
        elif (self.attention_type == AttentionType.ADDITIVE_PROJECTION):
            self.additive_projection = AdditiveAttention2D(self.attention_dim)
        else:
            raise TypeError(('Unsupported AttentionType: %s.' % self.attention_type))
        self.attentive_conv = init_tensor(torch.empty(self.attention_dim, self.dim)).to(config.device)
        self.x_conv = torch.nn.Sequential(torch.nn.Conv1d(self.dim, self.dim, self.margin_size, padding=self.radius), torch.nn.Tanh())
        self.bias = torch.zeros([self.dim]).to(config.device)
        self.hidden_size = config.AttentiveConvNet.hidden_size
        self.hidden1_matrix = init_tensor(torch.empty(self.dim, self.hidden_size)).to(config.device)
        self.hidden2_matrix = init_tensor(torch.empty(self.hidden_size, self.hidden_size)).to(config.device)
        self.linear = torch.nn.Linear((self.dim + (2 * self.hidden_size)), len(dataset.label_map))

    def get_highway(self, dimension, margin_size):
        """Build a Highway block of gated/forward Conv1d layers over `margin_size` windows."""
        radius = int((margin_size / 2))
        transformer_gate = torch.nn.Sequential(torch.nn.Conv1d(dimension, dimension, margin_size, padding=radius), torch.nn.Sigmoid())
        transformer_forward = torch.nn.Sequential(torch.nn.Conv1d(dimension, dimension, margin_size, padding=radius), torch.nn.Tanh())
        return Highway(transformer_gate, transformer_forward)

    def get_parameter_optimizer_dict(self):
        """Collect all trainable parameters/tensors for the optimizer."""
        params = super(AttentiveConvNet, self).get_parameter_optimizer_dict()
        if (self.attentive_conv_net_type == AttentiveConvNetType.ADVANCED):
            params.append({'params': self.x_context_highway.parameters()})
            params.append({'params': self.x_self_highway.parameters()})
            params.append({'params': self.a_context_highway.parameters()})
            params.append({'params': self.a_self_highway.parameters()})
            params.append({'params': self.beneficiary_highway.parameters()})
        if (self.attention_type == AttentionType.DOT):
            params.append({'params': self.dot_product_attention.parameters()})
        elif (self.attention_type == AttentionType.BILINEAR):
            params.append({'params': self.bilinear_matrix})
            params.append({'params': self.dot_product_attention.parameters()})
        elif (self.attention_type == AttentionType.ADDITIVE_PROJECTION):
            params.append({'params': self.additive_projection.parameters()})
        params.append({'params': self.attentive_conv})
        params.append({'params': self.x_conv.parameters()})
        params.append({'params': self.hidden1_matrix})
        params.append({'params': self.hidden2_matrix})
        params.append({'params': self.linear.parameters()})
        return params

    def forward(self, batch):
        (embedding, _, _) = self.get_embedding(batch)
        if (self.attentive_conv_net_type == AttentiveConvNetType.LIGHT):
            # Light variant feeds the raw embedding everywhere.
            (x_multi_granularity, a_multi_granularity, x_beneficiary) = (embedding, embedding, embedding)
        elif (self.attentive_conv_net_type == AttentiveConvNetType.ADVANCED):
            # Highways operate channel-first, hence the permutes around them.
            embedding = embedding.permute(0, 2, 1)
            source_context = self.x_context_highway(embedding)
            source_self = self.x_self_highway(embedding)
            x_multi_granularity = torch.cat([source_context, source_self], 1).permute(0, 2, 1)
            focus_context = self.a_context_highway(embedding)
            focus_self = self.a_self_highway(embedding)
            a_multi_granularity = torch.cat([focus_context, focus_self], 1).permute(0, 2, 1)
            x_beneficiary = self.beneficiary_highway(embedding).permute(0, 2, 1)
        else:
            raise TypeError(('Unsupported AttentiveConvNetType: %s.' % self.attentive_conv_net_type))
        if (self.attention_type == AttentionType.DOT):
            attentive_context = self.dot_product_attention(x_multi_granularity, a_multi_granularity, a_multi_granularity)
        elif (self.attention_type == AttentionType.BILINEAR):
            x_trans = x_multi_granularity.matmul(self.bilinear_matrix)
            attentive_context = self.dot_product_attention(x_trans, a_multi_granularity, a_multi_granularity)
        elif (self.attention_type == AttentionType.ADDITIVE_PROJECTION):
            attentive_context = self.additive_projection(a_multi_granularity, x_multi_granularity)
        attentive_conv = attentive_context.matmul(self.attentive_conv)
        x_conv = self.x_conv(x_beneficiary.permute(0, 2, 1)).permute(0, 2, 1)
        attentive_convolution = torch.tanh(((attentive_conv + x_conv) + self.bias)).permute(0, 2, 1)
        # Max-pool over the sequence, then stack two extra projections of the
        # pooled features before the final classification layer.
        hidden = torch.nn.functional.max_pool1d(attentive_convolution, kernel_size=attentive_convolution.size()[(- 1)]).squeeze()
        hidden1 = hidden.matmul(self.hidden1_matrix)
        hidden2 = hidden1.matmul(self.hidden2_matrix)
        hidden_layer = torch.cat([hidden, hidden1, hidden2], 1)
        return self.dropout(self.linear(hidden_layer))
class PPOMoleculeGenerator:
    """Molecule generator that optimises a SMILES RNN actor-critic with PPO."""

    def __init__(self, model: SmilesRnnActorCritic, max_seq_length, device) -> None:
        self.model = model
        self.max_seq_length = max_seq_length
        self.device = device
        self.sampler = SmilesRnnSampler(device=device, batch_size=512)

    def optimise(self, objective: ScoringFunction, start_population: list, **kwargs) -> List[OptResult]:
        """Run PPO training against `objective`; returns results best-first.

        `start_population` is accepted for interface parity but unsupported.
        """
        if start_population:
            logger.warning('PPO algorithm does not support (yet) a starting population')
        # Required PPO hyperparameters (KeyError if any are missing).
        num_epochs = kwargs['num_epochs']
        episode_size = kwargs['optimize_episode_size']
        batch_size = kwargs['optimize_batch_size']
        entropy_weight = kwargs['entropy_weight']
        kl_div_weight = kwargs['kl_div_weight']
        clip_param = kwargs['clip_param']
        trainer = PPOTrainer(
            self.model,
            objective,
            device=self.device,
            max_seq_length=self.max_seq_length,
            batch_size=batch_size,
            num_epochs=num_epochs,
            clip_param=clip_param,
            episode_size=episode_size,
            entropy_weight=entropy_weight,
            kl_div_weight=kl_div_weight,
        )
        trainer.train()
        return sorted(trainer.smiles_history, reverse=True)

    def sample(self, num_mols) -> List[str]:
        """Sample `num_mols` SMILES strings from the current policy."""
        return self.sampler.sample(self.model.smiles_rnn, num_to_sample=num_mols, max_seq_len=self.max_seq_length)
def fully_connected(shape, inputs, num_outputs, scope, use_xavier=True, stddev=0.001, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
    """Fully connected layer with optional batch norm and nonlinearity (TF1 graph style).

    Args:
        shape: static shape of `inputs`; only shape[-1] (the input width) is read.
        inputs: input tensor to multiply with the weight matrix.
        num_outputs: output feature width.
        scope: tf.variable_scope name for the layer's variables.
        use_xavier: passed to the weight initializer helper.
        stddev: truncated-normal stddev used when not using Xavier init.
        weight_decay: L2 weight-decay coefficient for the weights.
        activation_fn: applied last when not None (default relu).
        bn: apply batch normalization before the activation.
        bn_decay: batch-norm decay parameter — assumed required when bn is
            True; TODO confirm against batch_norm_for_fc.
        is_training: training-phase flag forwarded to batch norm when bn is True.

    Returns:
        The layer output tensor (shape determined by `inputs` and num_outputs).
    """
    with tf.variable_scope(scope) as sc:  # `sc` is unused; scope only namespaces variables
        num_input_units = shape[(- 1)]
        weights = _variable_with_weight_decay('weights', shape=[num_input_units, num_outputs], use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
        outputs = tf.matmul(inputs, weights)
        biases = _variable_on_cpu('biases', [num_outputs], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return outputs