code stringlengths 281 23.7M |
|---|
class ResNetBasicLayer(nn.Module):
    """A classic ResNet basic residual layer: two stacked conv layers.

    The input flows through two conv layers; a shortcut branch (a projection
    when the shape changes, identity otherwise) is added back before the
    final activation.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int=1, activation: str='relu'):
        super().__init__()
        # A projection shortcut is only needed when the residual branch
        # changes the tensor shape (channel count or spatial stride).
        needs_projection = (in_channels != out_channels) or (stride != 1)
        if needs_projection:
            self.shortcut = ResNetShortCut(in_channels, out_channels, stride=stride)
        else:
            self.shortcut = nn.Identity()
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        """Apply the residual block: ``act(layer(x) + shortcut(x))``."""
        shortcut_input = hidden_state
        hidden_state = self.layer(hidden_state)
        hidden_state += self.shortcut(shortcut_input)
        return self.activation(hidden_state)
def test_pytest_exit_returncode(pytester: Pytester) -> None:
    """pytest.exit() with an explicit returncode must propagate it as pytest's exit status."""
    # Exit raised from inside a test body.
    pytester.makepyfile(' import pytest\n def test_foo():\n pytest.exit("some exit msg", 99)\n ')
    run_result = pytester.runpytest()
    run_result.stdout.fnmatch_lines(['*! *Exit: some exit msg !*'])
    assert _strip_resource_warnings(run_result.stderr.lines) == []
    assert run_result.ret == 99
    # Exit raised from a conftest hook before collection starts: the message
    # additionally lands on stderr.
    pytester.makeconftest(' import pytest\n\n def pytest_sessionstart():\n pytest.exit("during_sessionstart", 98)\n ')
    run_result = pytester.runpytest()
    run_result.stdout.fnmatch_lines(['*! *Exit: during_sessionstart !*'])
    assert _strip_resource_warnings(run_result.stderr.lines) == ['Exit: during_sessionstart']
    assert run_result.ret == 98
# NOTE(review): the bare call below looks like a stripped decorator line
# (probably a boolean-parametrizing helper, ``@_bool(...)``) — confirm
# against the original test module.
_bool('is_required_a', 'is_required_b', 'is_required_c', 'is_required_d')
def test_several_extra_target(debug_ctx, debug_trail, trail_select, is_required_a, is_required_b, is_required_c, is_required_d, acc_schema):
    """Dumping with several ExtraTargets fields: their dict contents are merged into the output."""
    # Shape: one regular field 'a' plus three extra-target fields 'b', 'c', 'd';
    # only 'a' appears in the crown, the rest are flattened via ExtraTargets.
    dumper_getter = make_dumper_getter(shape=shape(TestField('a', acc_schema.accessor_maker('a', is_required=is_required_a)), TestField('b', acc_schema.accessor_maker('b', is_required=is_required_b)), TestField('c', acc_schema.accessor_maker('c', is_required=is_required_c)), TestField('d', acc_schema.accessor_maker('d', is_required=is_required_d))), name_layout=OutputNameLayout(crown=OutDictCrown({'a': OutFieldCrown('a')}, sieves={}), extra_move=ExtraTargets(('b', 'c', 'd'))), debug_trail=debug_trail, debug_ctx=debug_ctx)
    dumper = dumper_getter()
    # Extra-target dicts are flattened into the result; on key collisions the
    # later target wins (see the 'b2' case below).
    assert (dumper(acc_schema.dummy(a=1, b={'b1': 2}, c={'c1': 3}, d={'d1': 4})) == {'a': 1, 'b1': 2, 'c1': 3, 'd1': 4})
    assert (dumper(acc_schema.dummy(a=1, b={'b1': 2, 'b2': 3}, c={'c1': 4, 'c2': 5}, d={'d1': 6, 'd2': 7})) == {'a': 1, 'b1': 2, 'b2': 3, 'c1': 4, 'c2': 5, 'd1': 6, 'd2': 7})
    assert (dumper(acc_schema.dummy(a=1, b={'d': 2}, c={'e': 3}, d={})) == {'a': 1, 'd': 2, 'e': 3})
    assert (dumper(acc_schema.dummy(a=1, b={'b1': 2, 'b2': 3}, c={'c1': 4, 'b2': 5}, d={})) == {'a': 1, 'b1': 2, 'c1': 4, 'b2': 5})
    # Missing *required* extra-target fields surface as access errors,
    # wrapped according to the selected debug-trail mode.
    if is_required_b:
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('b')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('b')])])), (lambda : dumper(acc_schema.dummy(a=1, c={'c1': 2}, d={'d1': 3}))))
    if is_required_c:
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('c')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('c')])])), (lambda : dumper(acc_schema.dummy(a=1, b={'b1': 2}, d={'d1': 3}))))
    if (is_required_b and is_required_c and is_required_d):
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('b')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('b')]), with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('c')]), with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('d')])])), (lambda : dumper(acc_schema.dummy(a=1))))
    # Optional (non-required) extra targets may simply be omitted from the input.
    requirement = {'b': is_required_b, 'c': is_required_c, 'd': is_required_d}
    assert (dumper(acc_schema.dummy(a=1, **{k: {k: 1} for (k, v) in requirement.items() if v})) == {'a': 1, **{k: 1 for (k, v) in requirement.items() if v}})
def _harmonic_oscillator_spectrum_frequency(n_th, w0, kappa):
if (n_th == 0):
return (lambda w: (kappa * (w >= 0)))
w_th = (w0 / np.log((1 + (1 / n_th))))
def f(t, w):
scale = (np.exp((w / w_th)) if (w < 0) else 1)
return (((n_th + 1) * kappa) * scale)
return f |
def adjust_learning_rate(lr_scheduler: Union[optim.lr_scheduler.StepLR, optim.lr_scheduler.ReduceLROnPlateau], epoch: int, train_loss: float, dev_f1: float) -> bool:
    """Step the scheduler with the metric its optimizer pairing expects.

    Returns True while training should continue, False once the stop
    criterion is hit (epoch budget for StepLR, minimum learning rate for
    ReduceLROnPlateau). Raises ValueError for unsupported
    scheduler/optimizer combinations.
    """
    if isinstance(lr_scheduler, optim.lr_scheduler.StepLR):
        # StepLR is only supported together with AdaBound; training is
        # capped at 200 epochs in that configuration.
        if not isinstance(lr_scheduler.optimizer, AdaBound):
            raise ValueError
        lr_scheduler.step()
        return epoch < 200
    if isinstance(lr_scheduler, optim.lr_scheduler.ReduceLROnPlateau):
        if isinstance(lr_scheduler.optimizer, optim.SGD):
            # SGD plateaus on the training loss; stop below lr 1e-4.
            lr_scheduler.step(train_loss)
            return lr_scheduler.optimizer.param_groups[0]['lr'] >= 0.0001
        if isinstance(lr_scheduler.optimizer, optim.Adam):
            # Adam plateaus on the dev F1 score; stop below lr 1e-5.
            lr_scheduler.step(dev_f1)
            return lr_scheduler.optimizer.param_groups[0]['lr'] >= 1e-05
        raise ValueError
    raise ValueError
def FCN_aspp(img_shape, class_n=None):
    """Build a 3D fully-convolutional segmentation network with an ASPP head.

    Two conv/pool stages downsample the volume, a dilated-conv pyramid
    (rates 1/2/4) captures multi-scale context, and the result is upsampled
    back to the input resolution with a 2-class softmax per voxel.

    NOTE(review): ``class_n`` is accepted but unused — the head is
    hard-coded to 2 classes; confirm whether it should parameterize the head.
    """
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], 1)
    volume_in = Input(shape=input_shape[1:])
    # Encoder stage 1: 32 filters, then 2x spatial reduction.
    x = Conv3D(32, 3, padding='same', activation='relu')(volume_in)
    x = Conv3D(32, 3, padding='same', activation='relu')(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    # Encoder stage 2: 64 filters, another 2x reduction.
    x = Conv3D(64, 3, padding='same', activation='relu')(x)
    x = Conv3D(64, 3, padding='same', activation='relu')(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    # Dilated context layer feeding the ASPP branches.
    context = Conv3D(128, 3, padding='same', activation='relu', dilation_rate=2)(x)
    branch_1 = Conv3D(128, 1, padding='same', activation='relu')(context)
    branch_2 = Conv3D(128, 3, padding='same', activation='relu', dilation_rate=2)(context)
    branch_3 = Conv3D(128, 3, padding='same', activation='relu', dilation_rate=4)(context)
    fused = concatenate([branch_1, branch_2, branch_3, context])
    # Decoder: restore the original resolution (2 pooling stages of 2 => 4x)
    # and project to 2 classes per voxel.
    upsampled = UpSampling3D((4, 4, 4))(fused)
    logits = Conv3D(2, 1, padding='same')(upsampled)
    flat = Reshape((np.prod(img_shape), 2))(logits)
    probabilities = Activation('softmax')(flat)
    return Model(inputs=volume_in, outputs=probabilities)
class CholeskySolve(SolveBase):
    """Solve ``A x = b`` given a pre-computed Cholesky factor of ``A``.

    Thin Op wrapper around ``scipy.linalg.cho_solve``; treats the factor as
    lower-triangular unless told otherwise.
    """

    def __init__(self, **kwargs):
        # Unless the caller says otherwise, assume a lower-triangular factor.
        kwargs.setdefault('lower', True)
        super().__init__(**kwargs)

    def perform(self, node, inputs, output_storage):
        """Run the solve and store the result in the first output slot."""
        cholesky_factor, rhs = inputs
        solution = scipy.linalg.cho_solve(
            (cholesky_factor, self.lower), rhs, check_finite=self.check_finite
        )
        output_storage[0][0] = solution

    def L_op(self, *args, **kwargs):
        # Gradients are intentionally not implemented for this Op.
        raise NotImplementedError()
def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Create an EfficientNet model from its compact block-spec definition.

    ``channel_multiplier`` / ``depth_multiplier`` implement compound
    width/depth scaling; remaining kwargs flow through to the model
    constructor.
    """
    # Per-stage block definitions in the compact string notation
    # (block type, repeats, kernel, stride, expansion, channels, SE ratio).
    arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']]
    # Head width (1280) is rounded to a multiple of 8 after width scaling.
    model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_channels(1280, channel_multiplier, 8, None), stem_size=32, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'swish'), norm_kwargs=resolve_bn_args(kwargs), **kwargs)
    model = _create_effnet(model_kwargs, variant, pretrained)
    return model
class _TestResult(TestResult):
def __init__(self, verbosity=1):
TestResult.__init__(self)
self.stdout0 = None
self.stderr0 = None
self.success_count = 0
self.failure_count = 0
self.error_count = 0
self.verbosity = verbosity
self.result = []
def startTest(self, test):
TestResult.startTest(self, test)
self.outputBuffer = io.StringIO()
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.stdout0 = sys.stdout
self.stderr0 = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
def complete_output(self):
if self.stdout0:
sys.stdout = self.stdout0
sys.stderr = self.stderr0
self.stdout0 = None
self.stderr0 = None
return self.outputBuffer.getvalue()
def stopTest(self, test):
self.complete_output()
def addSuccess(self, test):
self.success_count += 1
TestResult.addSuccess(self, test)
output = self.complete_output()
self.result.append((0, test, output, ''))
if (self.verbosity > 1):
sys.stderr.write('ok ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('.')
def addError(self, test, err):
self.error_count += 1
TestResult.addError(self, test, err)
(_, _exc_str) = self.errors[(- 1)]
output = self.complete_output()
self.result.append((2, test, output, _exc_str))
if (self.verbosity > 1):
sys.stderr.write('E ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('E')
def addFailure(self, test, err):
self.failure_count += 1
TestResult.addFailure(self, test, err)
(_, _exc_str) = self.failures[(- 1)]
output = self.complete_output()
self.result.append((1, test, output, _exc_str))
if (self.verbosity > 1):
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F') |
class PassThroughOptionParser(OptionParser):
    """An OptionParser that keeps unknown options instead of erroring out.

    Unrecognized (or ambiguous) options are collected into the leftover
    argument list so a later parser can handle them.
    """

    def _process_args(self, largs, rargs, values):
        # Keep consuming until every argument has been classified; any
        # option the base parser rejects is preserved as a plain argument.
        while rargs:
            try:
                OptionParser._process_args(self, largs, rargs, values)
            except (BadOptionError, AmbiguousOptionError) as err:
                largs.append(err.opt_str)
class CopyProcessor(BaseProcessor):
    """Pad (or truncate) an item's 'blob' array to a fixed length along axis 0.

    NOTE(review): the parent class ``__init__`` is never invoked here —
    confirm that ``BaseProcessor`` needs no initialization.
    """

    def __init__(self, config, *args, **kwargs):
        # Target length every blob is normalized to along its first axis.
        self.max_length = config.max_length

    def __call__(self, item):
        source = item['blob']
        # Zero-filled buffer of the target length, same trailing shape/dtype.
        padded = np.zeros((self.max_length,) + source.shape[1:], source.dtype)
        length = min(len(source), self.max_length)
        padded[:length] = source[:length]
        return {'blob': torch.from_numpy(padded)}
def convert_requirements(requirements: list[str]) -> Iterator[str]:
    """Translate 'requires' strings into 'Requires-Dist'-style strings.

    Each requirement is parsed, its extras are normalized and sorted, and
    the safe project name, bracketed extras, and version spec are re-joined.
    """
    for raw in requirements:
        parsed = Requirement(raw)
        version_spec = requires_to_requires_dist(parsed)
        extras = ','.join(sorted(safe_extra(extra) for extra in parsed.extras))
        bracketed = f'[{extras}]' if extras else ''
        yield safe_name(parsed.name) + bracketed + version_spec
class RedshiftDatasource(Datasource[Union[(ArrowRow, Any)]]):
    """Ray datasource for Redshift data on S3.

    Reads UNLOAD output (Parquet or delimited text) by dispatching to
    specialized datasources, and writes Parquet blocks followed by a
    Redshift-compatible JSON manifest.
    """

    def prepare_read(self, parallelism: int, paths: Union[(str, List[str])], content_type_provider: Callable[([str], ContentType)], path_type: S3PathType=S3PathType.MANIFEST, filesystem: Optional[Union[(S3FileSystem, s3fs.S3FileSystem)]]=None, columns: Optional[List[str]]=None, schema: Optional[pa.Schema]=None, unload_args: RedshiftUnloadTextArgs=RedshiftUnloadTextArgs(), partitioning: HivePartitionParser=None, open_stream_args: Optional[Dict[(str, Any)]]=None, read_kwargs_provider: Optional[ReadKwargsProvider]=None, **s3_client_kwargs) -> List[ReadTask]:
        """Build read tasks for the given S3 paths, one reader per content type.

        Raises NotImplementedError when a path resolves to a content type
        other than Parquet or delimited text.
        """
        if (filesystem is None):
            filesystem = S3FileSystem()
        # Normalize/resolve paths against the concrete filesystem, then group
        # them by detected content type.
        (paths, urls) = _normalize_s3_paths_for_filesystem(paths, filesystem)
        (paths, resolved_fs) = _resolve_paths_and_filesystem(paths, filesystem)
        (content_type_to_paths, meta_provider) = _expand_paths_by_content_type(paths, urls, content_type_provider, path_type, filesystem, resolved_fs, **s3_client_kwargs)
        num_content_types = len(content_type_to_paths)
        # When content types are mixed and no schema was supplied, take a
        # unifying schema from the first Parquet file.
        # NOTE(review): this assumes at least one PARQUET path exists and that
        # `open_stream_args` is not None in this branch — confirm with callers.
        if ((num_content_types > 1) and (not schema)):
            path = content_type_to_paths[ContentType.PARQUET][0]
            with resolved_fs.open_input_file(path, **open_stream_args) as f:
                schema = pq.read_schema(f)
        content_type_to_reader = {ContentType.PARQUET: ParquetBaseDatasource(), ContentType.CSV: CSVDatasource()}
        all_read_tasks = []
        # Delegate each content-type group to its specialized datasource.
        for (content_type, paths) in content_type_to_paths.items():
            reader = content_type_to_reader.get(content_type)
            assert reader, f'No datasource found for: {content_type}'
            prepare_read_kwargs = {'parallelism': parallelism, 'paths': paths, 'filesystem': resolved_fs, 'schema': schema, 'meta_provider': meta_provider, 'partitioning': partitioning}
            if (content_type == ContentType.PARQUET):
                if columns:
                    prepare_read_kwargs['columns'] = columns
            elif (content_type in DELIMITED_TEXT_CONTENT_TYPES):
                prepare_read_kwargs.update(unload_args.to_arrow_reader_kwargs(columns, schema))
            else:
                raise NotImplementedError(f'Unsupported content type: {content_type}')
            # Let the caller's provider override any reader kwargs last.
            if read_kwargs_provider:
                prepare_read_kwargs = read_kwargs_provider(content_type, prepare_read_kwargs)
            if open_stream_args:
                prepare_read_kwargs['open_stream_args'] = open_stream_args
            read_tasks = reader.prepare_read(**prepare_read_kwargs)
            all_read_tasks.extend(read_tasks)
        return all_read_tasks

    def do_write(self, blocks: List[ObjectRef[Block]], metadata: List[BlockMetadata], path: str, dataset_uuid: str, filesystem: Optional[FileSystem]=None, try_create_dir: bool=True, open_stream_args: Optional[Dict[(str, Any)]]=None, block_path_provider: BlockWritePathProvider=DefaultBlockWritePathProvider(), write_args_fn: Callable[([], Dict[(str, Any)])]=(lambda : {}), _block_udf: Optional[Callable[([Block], Block)]]=None, **write_args) -> List[ObjectRef[WriteResult]]:
        """Write blocks as Parquet, appending a RedshiftWriteResult that records
        everything on_write_complete needs to commit the manifest."""
        if (filesystem is None):
            filesystem = S3FileSystem()
        (paths, _) = _normalize_s3_paths_for_filesystem(path, filesystem)
        (paths, filesystem) = _resolve_paths_and_filesystem(paths, filesystem)
        assert (len(paths) == 1), f'Expected 1 write path, found {len(paths)}.'
        path = paths[0]
        # Wrap the provider so every produced write path is captured for the
        # manifest step.
        block_path_provider = CapturingBlockWritePathProvider(block_path_provider)
        writer = ParquetBaseDatasource()
        write_results = writer.do_write(blocks, metadata, path, dataset_uuid, filesystem, try_create_dir, open_stream_args, block_path_provider, write_args_fn, _block_udf, **write_args)
        # Record write context as the final (sentinel) write result.
        rwr = RedshiftWriteResult()
        rwr.metadata = metadata
        rwr.path = path
        rwr.dataset_uuid = dataset_uuid
        rwr.block_write_path_provider = block_path_provider
        rwr.content_type = ContentType.PARQUET.value
        rwr.content_encoding = ContentEncoding.IDENTITY.value
        rwr.filesystem = filesystem
        rwr_obj_ref = ray.put(rwr)
        write_results.append(rwr_obj_ref)
        return write_results

    def on_write_complete(self, write_results: List[WriteResult], **kwargs) -> None:
        """Validate that every block landed on S3, then commit the JSON manifest."""
        # The sentinel RedshiftWriteResult was appended last by do_write.
        result: RedshiftWriteResult = write_results[(len(write_results) - 1)]
        write_path_args = result.block_write_path_provider.write_path_kwargs
        blocks_written = len(write_path_args)
        expected_blocks_written = len(result.metadata)
        assert (blocks_written == expected_blocks_written), f'Dataset write result validation failed. Found {blocks_written}/{expected_blocks_written} Dataset blocks written. Refusing to commit Redshift Manifest.'
        manifest_entries = ManifestEntryList()
        for (block_idx, path) in enumerate(write_path_args.keys()):
            # Each written file must exist as a regular file to be listed.
            file_info = result.filesystem.get_file_info(path)
            if (file_info.type == FileType.File):
                content_length = file_info.size
            else:
                raise FileNotFoundError(ENOENT, strerror(ENOENT), path)
            num_rows = result.metadata[block_idx].num_rows
            source_content_length = result.metadata[block_idx].size_bytes
            manifest_entry_meta = ManifestMeta.of((int(num_rows) if (num_rows is not None) else None), (int(content_length) if (content_length is not None) else None), result.content_type, result.content_encoding, (int(source_content_length) if source_content_length else None))
            parsed_url = parse_s3_url(path)
            manifest_entry = ManifestEntry.of(parsed_url.url, manifest_entry_meta)
            manifest_entries.append(manifest_entry)
        manifest = Manifest.of(manifest_entries)
        manifest_path = f'{result.path}/manifest'
        logger.debug(f'Write succeeded for Dataset ID: {result.dataset_uuid}')
        with result.filesystem.open_output_stream(manifest_path, metadata={'Content-Type': ContentType.JSON.value}) as f:
            f.write(json.dumps(manifest).encode('utf-8'))
        logger.debug(f'Manifest committed to: {manifest_path}')
class ConfigSource():
    """Base class for reading layered INI-style configuration.

    Subclasses supply the lookup locations via ``user_config`` /
    ``project_config``; the parsing helpers below are class-level utilities.
    The ``@classmethod`` decorators (missing in the previous revision even
    though each method takes ``cls`` and calls other helpers through it)
    are restored so the helpers also work when invoked on the class itself.
    """

    def __init__(self, root_path):
        # Directory the configuration search is rooted at.
        self.root_path = root_path
        self.is_windows = (sys.platform == 'win32')
        # XDG base directory, falling back to ~/.config when unset.
        self.xdg_home = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))

    def user_config(self):
        """Return the user-level configuration; implemented by subclasses."""
        raise NotImplementedError()

    def project_config(self, document_path):
        """Return the project-level configuration; implemented by subclasses."""
        raise NotImplementedError()

    @classmethod
    def read_config_from_files(cls, files):
        """Parse every existing regular file in *files* into one RawConfigParser."""
        config = configparser.RawConfigParser()
        for filename in files:
            # Skip missing paths and directories; later files override earlier ones.
            if (os.path.exists(filename) and (not os.path.isdir(filename))):
                config.read(filename)
        return config

    @classmethod
    def parse_config(cls, config, key, options):
        """Extract *options* — (source, destination, type) triples — from
        section *key* into a nested dict keyed by dotted destinations."""
        conf = {}
        for (source, destination, opt_type) in options:
            opt_value = cls._get_opt(config, key, source, opt_type)
            if (opt_value is not None):
                cls._set_opt(conf, destination, opt_value)
        return conf

    @classmethod
    def _get_opt(cls, config, key, option, opt_type):
        """Read *option* (dash or underscore spelling) coerced to *opt_type*.

        Returns None when the option is absent; raises ValueError for an
        unsupported *opt_type*.
        """
        for opt_key in [option, option.replace('-', '_')]:
            if (not config.has_option(key, opt_key)):
                continue
            if (opt_type == bool):
                return config.getboolean(key, opt_key)
            if (opt_type == int):
                return config.getint(key, opt_key)
            if (opt_type == str):
                return config.get(key, opt_key)
            if (opt_type == list):
                return cls._parse_list_opt(config.get(key, opt_key))
            raise ValueError(('Unknown option type: %s' % opt_type))

    @classmethod
    def _parse_list_opt(cls, string):
        """Split a comma-separated option value, dropping empty entries."""
        return [s.strip() for s in string.split(',') if s.strip()]

    @classmethod
    def _set_opt(cls, config_dict, path, value):
        """Assign *value* at dotted *path* inside *config_dict*, creating levels."""
        if (value is None):
            return
        if ('.' not in path):
            config_dict[path] = value
            return
        (key, rest) = path.split('.', 1)
        if (key not in config_dict):
            config_dict[key] = {}
        cls._set_opt(config_dict[key], rest, value)
class CloudCompositorCommonMask(SingleBandCompositor):
    """Compositor masking a cloud product with a cloud-mask (CMA) dataset.

    Expects exactly two datasets: the product and the cloud mask.
    """

    def __call__(self, projectables, **info):
        if (len(projectables) != 2):
            raise ValueError(('Expected 2 datasets, got %d' % (len(projectables),)))
        (data, cma) = projectables
        # Validity masks derived from each dataset's declared fill value;
        # NaNs in the product are also treated as invalid.
        valid_cma = (cma != cma.attrs['_FillValue'])
        valid_prod = (data != data.attrs['_FillValue'])
        valid_prod = np.logical_and(valid_prod, np.logical_not(np.isnan(data)))
        # Where the CMA is valid but the product is not, substitute the
        # scaled fill value.
        data = data.where(np.logical_or(np.logical_not(valid_cma), valid_prod), data.attrs['scaled_FillValue'])
        # Where neither the product nor the CMA is valid, drop to NaN.
        data = data.where(np.logical_or(valid_prod, valid_cma), np.nan)
        res = SingleBandCompositor.__call__(self, [data], **data.attrs)
        res.attrs['_FillValue'] = np.nan
        return res
class VTM(Codec):
    """Codec wrapper driving the VTM (VVC reference software) encoder/decoder.

    Images are dumped as raw planar YUV444 (or kept in RGB with the
    appropriate VTM colour-space flags), encoded to a .bin bitstream,
    decoded back, and compared against the input.
    """
    # Bitstream file extension produced by the encoder.
    fmt = '.bin'
    def description(self):
        return 'VTM'
    def name(self):
        return 'VTM'
    # NOTE(review): takes `cls` but has no @classmethod decorator — the
    # decorator was very likely stripped from this file; confirm upstream.
    def setup_args(cls, parser):
        super().setup_args(parser)
        parser.add_argument('-b', '--build-dir', type=str, default='/home/felix/disk2/VVCSoftware_VTM/bin', help='VTM build dir')
        parser.add_argument('-c', '--config', type=str, default='/home/felix/disk2/VVCSoftware_VTM/cfg/encoder_intra_vtm.cfg', help='VTM config file')
        parser.add_argument('--rgb', action='store_true', help='Use RGB color space (over YCbCr)')
    def _set_args(self, args):
        """Resolve encoder/decoder binaries and store run options from CLI args."""
        args = super()._set_args(args)
        self.encoder_path = get_vtm_encoder_path(args.build_dir)
        self.decoder_path = get_vtm_decoder_path(args.build_dir)
        self.config_path = args.config
        self.rgb = args.rgb
        return args
    def _run(self, img, quality, return_rec=False, return_metrics=True):
        """Encode/decode *img* at *quality* (0..63); return bpp and timings,
        optionally PSNR/MS-SSIM metrics and the reconstructed image."""
        if (not (0 <= quality <= 63)):
            raise ValueError(f'Invalid quality value: {quality} (0,63)')
        bitdepth = 8
        # Load the image and dump it as raw channel-first bytes for VTM.
        arr = np.asarray(read_image(img))
        (fd, yuv_path) = mkstemp(suffix='.yuv')
        out_filepath = (os.path.splitext(yuv_path)[0] + '.bin')
        arr = arr.transpose((2, 0, 1))
        if (not self.rgb):
            # Convert RGB -> YCbCr in [0, 1], then back to 8-bit integers.
            rgb = (torch.from_numpy(arr.copy()).float() / ((2 ** bitdepth) - 1))
            arr = np.clip(rgb2ycbcr(rgb).numpy(), 0, 1)
            arr = (arr * ((2 ** bitdepth) - 1)).astype(np.uint8)
        with open(yuv_path, 'wb') as f:
            f.write(arr.tobytes())
        (height, width) = arr.shape[1:]
        # NOTE(review): numeric args (quality/width/height) are passed as
        # ints; presumably run_command stringifies them — confirm.
        cmd = [self.encoder_path, '-i', yuv_path, '-c', self.config_path, '-q', quality, '-o', '/dev/null', '-b', out_filepath, '-wdt', width, '-hgt', height, '-fr', '1', '-f', '1', '--InputChromaFormat=444', '--InputBitDepth=8', '--ConformanceMode=1']
        if self.rgb:
            cmd += ['--InputColourSpaceConvert=RGBtoGBR', '--SNRInternalColourSpace=1', '--OutputInternalColourSpace=0']
        start = time.time()
        run_command(cmd)
        enc_time = (time.time() - start)
        os.close(fd)
        os.unlink(yuv_path)
        # Decode the bitstream back into the (reused) yuv path.
        cmd = [self.decoder_path, '-b', out_filepath, '-o', yuv_path, '-d', 8]
        if self.rgb:
            cmd.append('--OutputInternalColourSpace=GBRtoRGB')
        start = time.time()
        run_command(cmd)
        dec_time = (time.time() - start)
        rec_arr = np.fromfile(yuv_path, dtype=np.uint8)
        rec_arr = rec_arr.reshape(arr.shape)
        # Normalize both arrays to floats in [0, 1] for metric computation,
        # converting back to RGB when the pipeline ran in YCbCr.
        arr = (arr.astype(np.float32) / ((2 ** bitdepth) - 1))
        rec_arr = (rec_arr.astype(np.float32) / ((2 ** bitdepth) - 1))
        if (not self.rgb):
            arr = ycbcr2rgb(torch.from_numpy(arr.copy())).numpy()
            rec_arr = ycbcr2rgb(torch.from_numpy(rec_arr.copy())).numpy()
        # Rate in bits per pixel from the bitstream size.
        bpp = ((filesize(out_filepath) * 8.0) / (height * width))
        os.unlink(yuv_path)
        os.unlink(out_filepath)
        out = {'bpp': bpp, 'encoding_time': enc_time, 'decoding_time': dec_time}
        if return_metrics:
            (psnr_val, msssim_val) = compute_metrics(arr, rec_arr, max_val=1.0)
            out['psnr'] = psnr_val
            out['ms-ssim'] = msssim_val
        if return_rec:
            rec = Image.fromarray((rec_arr.clip(0, 1).transpose(1, 2, 0) * 255.0).astype(np.uint8))
            return (out, rec)
        return out
class DigitalOceanOAuth(BaseOAuth2):
    """DigitalOcean OAuth2 authentication backend.

    The endpoint URL string literals were truncated in this file (leaving
    unterminated strings that did not parse); they are restored here to the
    DigitalOcean OAuth API endpoints.
    """
    name = 'digitalocean'
    AUTHORIZATION_URL = 'https://cloud.digitalocean.com/v1/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://cloud.digitalocean.com/v1/oauth/token'
    ACCESS_TOKEN_METHOD = 'POST'
    SCOPE_SEPARATOR = ' '
    EXTRA_DATA = [('expires_in', 'expires_in')]

    def get_user_id(self, details, response):
        """Use the DigitalOcean account UUID as the stable user id."""
        return response['account'].get('uuid')

    def get_user_details(self, response):
        """Map the account payload onto the common user-details dict."""
        (fullname, first_name, last_name) = self.get_user_names((response.get('name') or ''))
        return {'username': response['account'].get('email'), 'email': response['account'].get('email'), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}

    def user_data(self, token, *args, **kwargs):
        """Fetch the account resource with the bearer token; None when the
        response is not valid JSON."""
        url = 'https://api.digitalocean.com/v2/account'
        auth_header = {'Authorization': ('Bearer %s' % token)}
        try:
            return self.get_json(url, headers=auth_header)
        except ValueError:
            return None
# NOTE(review): the bare call below is almost certainly a stripped registry
# decorator (e.g. ``@SOMETHING.register_module()``) — confirm against the
# original source.
_module()
class NerClassifier(BaseRecognizer):
    """Named-entity recognition model: an encoder over tokens plus a decoder
    producing per-token label logits/predictions."""
    def __init__(self, encoder, decoder, loss, label_convertor, train_cfg=None, test_cfg=None, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        # Converts between label ids and entities; also determines num_labels,
        # which is propagated into both the decoder and the loss configs.
        self.label_convertor = build_convertor(label_convertor)
        self.encoder = build_encoder(encoder)
        decoder.update(num_labels=self.label_convertor.num_labels)
        self.decoder = build_decoder(decoder)
        loss.update(num_labels=self.label_convertor.num_labels)
        self.loss = build_loss(loss)
    def extract_feat(self, imgs):
        """Not supported for NER; present to satisfy the recognizer interface."""
        raise NotImplementedError('Extract feature module is not implemented yet.')
    def forward_train(self, imgs, img_metas, **kwargs):
        """Compute the training loss from decoder logits over encoded tokens."""
        encode_out = self.encoder(img_metas)
        (logits, _) = self.decoder(encode_out)
        loss = self.loss(logits, img_metas)
        return loss
    def forward_test(self, imgs, img_metas, **kwargs):
        """Predict entities; attention masks select the valid token positions."""
        encode_out = self.encoder(img_metas)
        (_, preds) = self.decoder(encode_out)
        pred_entities = self.label_convertor.convert_pred2entities(preds, img_metas['attention_masks'])
        return pred_entities
    def aug_test(self, imgs, img_metas, **kwargs):
        raise NotImplementedError('Augmentation test is not implemented yet.')
    def simple_test(self, img, img_metas, **kwargs):
        raise NotImplementedError('Simple test is not implemented yet.')
def touches(shape, other):
    """Return True if *shape* and *other* touch.

    Both arguments must expose a ``__geo_interface__``; a TypeError naming
    the offending argument is raised otherwise.
    """
    if (not hasattr(shape, GEO_INTERFACE_ATTR)):
        raise TypeError((SHAPE_TYPE_ERR % shape))
    if (not hasattr(other, GEO_INTERFACE_ATTR)):
        # Bug fix: this error previously reported `shape` instead of the
        # actually-offending `other` argument.
        raise TypeError((SHAPE_TYPE_ERR % other))
    o = geom.shape(shape)
    o2 = geom.shape(other)
    return o.touches(o2)
class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a FutureWarning on construction and otherwise behaves exactly
    like ConditionalDetrImageProcessor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every instantiation, then defer fully to the parent class.
        warnings.warn('The class ConditionalDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ConditionalDetrImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)
def common_words(papers):
    """Count words across paper titles and return (word, count) pairs,
    most common first, excluding stopwords.

    NOTE(review): ``filter(pos_word)`` is called with a single argument,
    which raises TypeError at runtime (``filter`` needs a predicate AND an
    iterable) — the intended call was probably ``filter(<predicate>,
    pos_word)``. ``nltk_posatg`` also looks like a misspelled POS-tagging
    helper. Flagged rather than guessed, since the intended filtering
    criterion is not recoverable from this file.
    """
    counter = Counter()
    for paper in papers:
        title = paper.lower()
        splitted = title.split()
        # POS-tag the tokens (helper defined elsewhere; name looks misspelled).
        pos_word = nltk_posatg(splitted)
        splitted = filter(pos_word)
        counter.update(splitted)
    keywords = []
    # Keep (word, count) tuples, ordered by descending frequency.
    for w in counter.most_common():
        if (w[0] not in stopwords):
            keywords.append(w)
    return keywords
def monthly_heatmap(returns, benchmark=None, annot_size=10, figsize=(10, 5), cbar=True, square=False, returns_label='Strategy', compounded=True, eoy=False, grayscale=False, fontname='Arial', ylabel=True, savefig=None, show=True, active=False):
    """Plot a calendar heatmap of monthly returns (percent).

    When `active` is set and a `benchmark` is given, the heatmap shows
    monthly returns in excess of the benchmark. Returns the figure when
    `show` is False, otherwise None.
    """
    cmap = ('gray' if grayscale else 'RdYlGn')
    # Monthly return matrix (years x months), expressed in percent.
    returns = (_stats.monthly_returns(returns, eoy=eoy, compounded=compounded) * 100)
    fig_height = (len(returns) / 2.5)
    if (figsize is None):
        size = list(_plt.gcf().get_size_inches())
        figsize = (size[0], size[1])
    # Grow the figure vertically to fit one row per year; widen slightly
    # when a colorbar is drawn.
    figsize = (figsize[0], max([fig_height, figsize[1]]))
    if cbar:
        figsize = ((figsize[0] * 1.051), max([fig_height, figsize[1]]))
    (fig, ax) = _plt.subplots(figsize=figsize)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    if (active and (benchmark is not None)):
        ax.set_title(f'''{returns_label} - Monthly Active Returns (%)
''', fontsize=14, y=0.995, fontname=fontname, fontweight='bold', color='black')
        # Active returns = strategy minus benchmark, month by month.
        benchmark = (_stats.monthly_returns(benchmark, eoy=eoy, compounded=compounded) * 100)
        active_returns = (returns - benchmark)
        ax = _sns.heatmap(active_returns, ax=ax, annot=True, center=0, annot_kws={'size': annot_size}, fmt='0.2f', linewidths=0.5, square=square, cbar=cbar, cmap=cmap, cbar_kws={'format': '%.0f%%'})
    else:
        ax.set_title(f'''{returns_label} - Monthly Returns (%)
''', fontsize=14, y=0.995, fontname=fontname, fontweight='bold', color='black')
        ax = _sns.heatmap(returns, ax=ax, annot=True, center=0, annot_kws={'size': annot_size}, fmt='0.2f', linewidths=0.5, square=square, cbar=cbar, cmap=cmap, cbar_kws={'format': '%.0f%%'})
    if ylabel:
        ax.set_ylabel('Years', fontname=fontname, fontweight='bold', fontsize=12)
        ax.yaxis.set_label_coords((- 0.1), 0.5)
    ax.tick_params(colors='#808080')
    _plt.xticks(rotation=0, fontsize=(annot_size * 1.2))
    _plt.yticks(rotation=0, fontsize=(annot_size * 1.2))
    # Layout tweaks are best-effort; some backends raise here, so failures
    # are deliberately ignored.
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout(w_pad=0, h_pad=0)
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(block=False)
    _plt.close()
    if (not show):
        return fig
    return None
def test_non_json_instance(run_line, tmp_path):
    """check-jsonschema must fail cleanly when the instance file is not valid JSON."""
    schema_path = tmp_path / 'schema.json'
    instance_path = tmp_path / 'instance.json'
    # Empty-object schema; deliberately truncated (invalid) JSON instance.
    schema_path.write_text('{}')
    instance_path.write_text('{')
    res = run_line(['check-jsonschema', '--schemafile', str(schema_path), str(instance_path)])
    assert res.exit_code == 1
    assert f'Failed to parse {str(instance_path)}' in res.stdout
class DescribeBaseOxmlElement():
    """Unit tests for BaseOxmlElement child-element helpers.

    The bare ``(params=[...])`` lines in the previous revision were
    stripped ``@pytest.fixture(params=...)`` decorators (each following
    ``*_fixture`` function takes ``request`` and reads ``request.param``,
    and the bare tuples were SyntaxErrors); they are restored here so the
    class parses and the fixtures parametrize again.
    """

    def it_can_find_the_first_of_its_children_named_in_a_sequence(self, first_fixture):
        (element, tagnames, matching_child) = first_fixture
        assert (element.first_child_found_in(*tagnames) is matching_child)

    def it_can_insert_an_element_before_named_successors(self, insert_fixture):
        (element, child, tagnames, expected_xml) = insert_fixture
        element.insert_element_before(child, *tagnames)
        assert (element.xml == expected_xml)

    def it_can_remove_all_children_with_name_in_sequence(self, remove_fixture):
        (element, tagnames, expected_xml) = remove_fixture
        element.remove_all(*tagnames)
        assert (element.xml == expected_xml)

    # ---- fixtures: each param tuple describes (children present, args, expectation) ----

    @pytest.fixture(params=[('biu', 'iu', 'i'), ('bu', 'iu', 'u'), ('bi', 'u', None), ('b', 'iu', None), ('iu', 'biu', 'i'), ('', 'biu', None)])
    def first_fixture(self, request):
        (present, matching, match) = request.param
        element = self.rPr_bldr(present).element
        tagnames = self.nsptags(matching)
        matching_child = (element.find(qn(('w:%s' % match))) if match else None)
        return (element, tagnames, matching_child)

    @pytest.fixture(params=[('iu', 'b', 'iu', 'biu'), ('u', 'b', 'iu', 'bu'), ('', 'b', 'iu', 'b'), ('bu', 'i', 'u', 'biu'), ('bi', 'u', '', 'biu')])
    def insert_fixture(self, request):
        (present, new, successors, after) = request.param
        element = self.rPr_bldr(present).element
        child = {'b': a_b(), 'i': an_i(), 'u': a_u()}[new].with_nsdecls().element
        tagnames = [('w:%s' % char) for char in successors]
        expected_xml = self.rPr_bldr(after).xml()
        return (element, child, tagnames, expected_xml)

    @pytest.fixture(params=[('biu', 'b', 'iu'), ('biu', 'bi', 'u'), ('bbiiuu', 'i', 'bbuu'), ('biu', 'i', 'bu'), ('biu', 'bu', 'i'), ('bbiiuu', '', 'bbiiuu'), ('biu', 'u', 'bi'), ('biu', 'ui', 'b'), ('bbiiuu', 'bi', 'uu'), ('bu', 'i', 'bu'), ('', 'ui', '')])
    def remove_fixture(self, request):
        (present, remove, after) = request.param
        element = self.rPr_bldr(present).element
        tagnames = self.nsptags(remove)
        expected_xml = self.rPr_bldr(after).xml()
        return (element, tagnames, expected_xml)

    # ---- fixture components ----

    def nsptags(self, letters):
        """Map single letters to namespaced 'w:<letter>' tag names."""
        return [('w:%s' % letter) for letter in letters]

    def rPr_bldr(self, children):
        """Build an <w:rPr> element containing the given b/i/u children."""
        rPr_bldr = an_rPr().with_nsdecls()
        for char in children:
            if (char == 'b'):
                rPr_bldr.with_child(a_b())
            elif (char == 'i'):
                rPr_bldr.with_child(an_i())
            elif (char == 'u'):
                rPr_bldr.with_child(a_u())
            else:
                raise NotImplementedError(("got '%s'" % char))
        return rPr_bldr
class PermutationRV(RandomVariable):
    """Random variable drawing a random permutation of its input ``x``."""
    name = 'permutation'
    # The support is one-dimensional and the single parameter is a vector.
    ndim_supp = 1
    ndims_params = [1]
    # dtype is resolved per-call from the input (see __call__).
    dtype = None
    _print_name = ('permutation', '\\operatorname{permutation}')
    # NOTE(review): `rng_fn` takes `cls` — it was very likely decorated with
    # @classmethod in the original source (decorators appear stripped in
    # this file); confirm upstream.
    def rng_fn(cls, rng, x, size):
        return rng.permutation(x)
    def _supp_shape_from_params(self, dist_params, param_shapes=None):
        # Output shape mirrors the shape of the (single) input parameter.
        return supp_shape_from_ref_param_shape(ndim_supp=self.ndim_supp, dist_params=dist_params, param_shapes=param_shapes, ref_param_idx=0)
    def __call__(self, x, **kwargs):
        # Coerce to a tensor and let the output inherit its dtype.
        x = as_tensor_variable(x)
        return super().__call__(x, dtype=x.dtype, **kwargs)
def get_pvc_info(name: str, namespace: str) -> PVC:
    """Look up a PVC and the pods mounting it.

    Returns a populated PVC record, or None (after logging an error) when
    the claim does not exist in the namespace.
    """
    if not check_if_pvc_exists(name=name, namespace=namespace):
        logging.error(("PVC '%s' doesn't exist in namespace '%s'" % (str(name), str(namespace))))
        return None
    pvc_info_response = cli.read_namespaced_persistent_volume_claim(name=name, namespace=namespace, pretty=True)
    pod_list_response = cli.list_namespaced_pod(namespace=namespace)
    capacity = pvc_info_response.status.capacity['storage']
    volume_name = pvc_info_response.spec.volume_name
    # Pods that reference this claim through any of their volumes.
    mounting_pods = [
        pod.metadata.name
        for pod in pod_list_response.items
        for volume in pod.spec.volumes
        if volume.persistent_volume_claim is not None
        and volume.persistent_volume_claim.claim_name == name
    ]
    return PVC(name=name, capacity=capacity, volumeName=volume_name, podNames=mounting_pods, namespace=namespace)
class _CommandCfdSolver(CfdCommand):
    """Workbench command that creates a CFD solver object for the active analysis."""

    def __init__(self):
        super(_CommandCfdSolver, self).__init__()
        # Bug fix: tooltip typo 'anlysis' -> 'analysis'. Note this changes
        # the QT_TRANSLATE_NOOP source string (i.e. the translation key).
        self.resources = {'Pixmap': 'cfd-solver-standard', 'MenuText': QtCore.QT_TRANSLATE_NOOP('Cfd_Solver', 'Create CFD solver'), 'Accel': 'C, S', 'ToolTip': QtCore.QT_TRANSLATE_NOOP('Cfd_Solver', 'Create a solver object for CFD analysis')}
        # Command is only enabled while an analysis container is active.
        self.is_active = 'with_analysis'

    def Activated(self):
        # Imported lazily so the FreeCAD GUI loads without the CFD tools present.
        import CfdTools
        CfdTools.createSolver()
class ItemAugInput(AugInput):
    """AugInput variant carrying an image plus optional boxes and a dict of
    segmentation maps."""

    def __init__(self, image: np.ndarray, *, boxes=None, seg_info=None):
        _check_img_dtype(image)
        self.image = image
        self.boxes = boxes
        # seg_info, when given, maps names to segmentation arrays.
        self.seg_info = seg_info

    def transform(self, tfm: Transform) -> None:
        """Apply *tfm* in place to the image and whichever extras are present."""
        self.image = tfm.apply_image(self.image)
        if self.boxes is not None:
            self.boxes = tfm.apply_box(self.boxes)
        if self.seg_info is None:
            return
        # Exact-dict check kept as in the original contract.
        assert (type(self.seg_info) == dict), 'seg_info is dictionary'
        for key, seg_map in self.seg_info.items():
            self.seg_info[key] = tfm.apply_segmentation(seg_map)
class FakeHistoryProgress():
    """Test double for a progress reporter: records lifecycle calls and ticks."""

    def __init__(self, *, raise_on_tick=False):
        self._started = False
        self._finished = False
        self._value = 0
        # When set, every tick() simulates a failure by raising.
        self._raise_on_tick = raise_on_tick

    def start(self, _text):
        self._started = True

    def set_maximum(self, _maximum):
        # The maximum is irrelevant for the fake; accepted and discarded.
        pass

    def tick(self):
        if self._raise_on_tick:
            raise FakeHistoryTick('tick-tock')
        self._value += 1

    def finish(self):
        self._finished = True
class TestReplyKeyboardMarkupWithoutRequest(TestReplyKeyboardMarkupBase):
    """Offline tests for ReplyKeyboardMarkup (no Bot requests involved)."""
    def test_slot_behaviour(self, reply_keyboard_markup):
        # Every declared slot must be set, and no slot may repeat in the MRO.
        inst = reply_keyboard_markup
        for attr in inst.__slots__:
            assert (getattr(inst, attr, 'err') != 'err'), f"got extra slot '{attr}'"
        assert (len(mro_slots(inst)) == len(set(mro_slots(inst)))), 'duplicate slot'
    def test_expected_values(self, reply_keyboard_markup):
        # Keyboard rows are immutable tuples of KeyboardButton objects, and
        # the option flags round-trip from the base-class test values.
        assert isinstance(reply_keyboard_markup.keyboard, tuple)
        assert all((isinstance(row, tuple) for row in reply_keyboard_markup.keyboard))
        assert isinstance(reply_keyboard_markup.keyboard[0][0], KeyboardButton)
        assert isinstance(reply_keyboard_markup.keyboard[0][1], KeyboardButton)
        assert (reply_keyboard_markup.resize_keyboard == self.resize_keyboard)
        assert (reply_keyboard_markup.one_time_keyboard == self.one_time_keyboard)
        assert (reply_keyboard_markup.selective == self.selective)
        assert (reply_keyboard_markup.is_persistent == self.is_persistent)
    def test_to_dict(self, reply_keyboard_markup):
        # to_dict() must serialize every attribute, including nested buttons.
        reply_keyboard_markup_dict = reply_keyboard_markup.to_dict()
        assert isinstance(reply_keyboard_markup_dict, dict)
        assert (reply_keyboard_markup_dict['keyboard'][0][0] == reply_keyboard_markup.keyboard[0][0].to_dict())
        assert (reply_keyboard_markup_dict['keyboard'][0][1] == reply_keyboard_markup.keyboard[0][1].to_dict())
        assert (reply_keyboard_markup_dict['resize_keyboard'] == reply_keyboard_markup.resize_keyboard)
        assert (reply_keyboard_markup_dict['one_time_keyboard'] == reply_keyboard_markup.one_time_keyboard)
        assert (reply_keyboard_markup_dict['selective'] == reply_keyboard_markup.selective)
        assert (reply_keyboard_markup_dict['is_persistent'] == reply_keyboard_markup.is_persistent)
    def test_equality(self):
        # Equality depends on button layout/content, not construction style;
        # different layouts or a different markup class must not compare equal.
        a = ReplyKeyboardMarkup.from_column(['button1', 'button2', 'button3'])
        b = ReplyKeyboardMarkup.from_column([KeyboardButton(text) for text in ['button1', 'button2', 'button3']])
        c = ReplyKeyboardMarkup.from_column(['button1', 'button2'])
        d = ReplyKeyboardMarkup.from_column(['button1', 'button2', 'button3.1'])
        e = ReplyKeyboardMarkup([['button1', 'button1'], ['button2'], ['button3.1']])
        f = InlineKeyboardMarkup.from_column(['button1', 'button2', 'button3'])
        assert (a == b)
        assert (hash(a) == hash(b))
        assert (a != c)
        assert (hash(a) != hash(c))
        assert (a != d)
        assert (hash(a) != hash(d))
        assert (a != e)
        assert (hash(a) != hash(e))
        assert (a != f)
        assert (hash(a) != hash(f))
def test_wrong_keyboard_inputs(self):
with pytest.raises(ValueError, match='should be a sequence of sequences'):
ReplyKeyboardMarkup([['button1'], 1])
with pytest.raises(ValueError, match='should be a sequence of sequences'):
ReplyKeyboardMarkup('strings_are_not_allowed')
with pytest.raises(ValueError, match='should be a sequence of sequences'):
ReplyKeyboardMarkup(['strings_are_not_allowed_in_the_rows_either'])
with pytest.raises(ValueError, match='should be a sequence of sequences'):
ReplyKeyboardMarkup(KeyboardButton('button1'))
with pytest.raises(ValueError, match='should be a sequence of sequences'):
ReplyKeyboardMarkup([[['button1']]])
def test_from_button(self):
reply_keyboard_markup = ReplyKeyboardMarkup.from_button(KeyboardButton(text='button1')).keyboard
assert (len(reply_keyboard_markup) == 1)
assert (len(reply_keyboard_markup[0]) == 1)
reply_keyboard_markup = ReplyKeyboardMarkup.from_button('button1').keyboard
assert (len(reply_keyboard_markup) == 1)
assert (len(reply_keyboard_markup[0]) == 1)
def test_from_row(self):
reply_keyboard_markup = ReplyKeyboardMarkup.from_row([KeyboardButton(text='button1'), KeyboardButton(text='button2')]).keyboard
assert (len(reply_keyboard_markup) == 1)
assert (len(reply_keyboard_markup[0]) == 2)
reply_keyboard_markup = ReplyKeyboardMarkup.from_row(['button1', 'button2']).keyboard
assert (len(reply_keyboard_markup) == 1)
assert (len(reply_keyboard_markup[0]) == 2)
def test_from_column(self):
reply_keyboard_markup = ReplyKeyboardMarkup.from_column([KeyboardButton(text='button1'), KeyboardButton(text='button2')]).keyboard
assert (len(reply_keyboard_markup) == 2)
assert (len(reply_keyboard_markup[0]) == 1)
assert (len(reply_keyboard_markup[1]) == 1)
reply_keyboard_markup = ReplyKeyboardMarkup.from_column(['button1', 'button2']).keyboard
assert (len(reply_keyboard_markup) == 2)
assert (len(reply_keyboard_markup[0]) == 1)
assert (len(reply_keyboard_markup[1]) == 1) |
class CaptionEntity(MessageFilter):
    """Message filter that passes when the caption contains at least one
    entity of the given type."""

    __slots__ = ('entity_type',)

    def __init__(self, entity_type: str):
        self.entity_type: str = entity_type
        super().__init__(name=f'filters.CaptionEntity({self.entity_type})')

    def filter(self, message: Message) -> bool:
        """Return True iff any caption entity matches self.entity_type."""
        for entity in message.caption_entities:
            if entity.type == self.entity_type:
                return True
        return False
class test_metrics(unittest.TestCase):
    """Regression tests for the MOSES-style generative-model metrics
    (validity, uniqueness, FCD, SNN, fragment/scaffold similarity, etc.)."""

    def setUp(self):
        # Small hand-picked SMILES sets: reference (test), scaffold reference
        # (test_sf), and a generated set containing one invalid string.
        self.test = ['Oc1ccccc1-c1cccc2cnccc12', 'COc1cccc(NC(=O)Cc2coc3ccc(OC)cc23)c1']
        self.test_sf = ['COCc1nnc(NC(=O)COc2ccc(C(C)(C)C)cc2)s1', 'O=C(C1CC2C=CC1C2)N1CCOc2ccccc21', 'Nc1c(Br)cccc1C(=O)Nc1ccncn1']
        self.gen = ['CNC', 'Oc1ccccc1-c1cccc2cnccc12', 'INVALID', 'CCCP', 'Cc1noc(C)c1CN(C)C(=O)Nc1cc(F)cc(F)c1', 'Cc1nc(NCc2ccccc2)no1-c1ccccc1']
        # Expected metric values for the fixtures above.
        # NOTE(review): the '' key looks like a mangled metric name (possibly
        # 'unique@3' or similar) — confirm against the original test file.
        self.target = {'valid': (2 / 3), '': 1.0, 'FCD/Test': 52., 'SNN/Test': 0., 'Frag/Test': 0.3, 'Scaf/Test': 0.5, 'IntDiv': 0., 'Filters': 0.75, 'logP': 4., 'SA': 0., 'QED': 0., 'NP': 0., 'weight': 14761.}

    def test_get_all_metrics(self):
        """All metrics computed single-process match the expected targets."""
        metrics = get_all_metrics(self.test, self.gen, k=3)
        fail = set()
        for metric in self.target:
            if (not np.allclose(metrics[metric], self.target[metric])):
                warnings.warn('Metric `{}` value does not match expected value. Got {}, expected {}'.format(metric, metrics[metric], self.target[metric]))
                fail.add(metric)
        assert (len(fail) == 0), f"Some metrics didn't pass tests: {fail}"

    def test_get_all_metrics_multiprocess(self):
        """Same expectations with n_jobs=2: results must not depend on parallelism."""
        metrics = get_all_metrics(self.test, self.gen, k=3, n_jobs=2)
        fail = set()
        for metric in self.target:
            if (not np.allclose(metrics[metric], self.target[metric])):
                warnings.warn('Metric `{}` value does not match expected value. Got {}, expected {}'.format(metric, metrics[metric], self.target[metric]))
                fail.add(metric)
        assert (len(fail) == 0), f"Some metrics didn't pass tests: {fail}"

    def test_get_all_metrics_scaffold(self):
        """Smoke test: metrics computation with a scaffold split must not raise."""
        get_all_metrics(self.test, self.gen, test_scaffolds=self.test_sf, k=3, n_jobs=2)

    def test_valid_unique(self):
        """fraction_valid / fraction_unique on raw SMILES and on Mol objects."""
        disable_rdkit_log()
        mols = ['CCNC', 'CCC', 'INVALID', 'CCC']
        assert np.allclose(fraction_valid(mols), (3 / 4)), 'Failed valid'
        assert np.allclose(fraction_unique(mols, check_validity=False), (3 / 4)), 'Failed unique'
        assert np.allclose(fraction_unique(mols, k=2), 1), 'Failed unique'
        # Same checks with pre-parsed RDKit molecules ('INVALID' parses to None).
        mols = [Chem.MolFromSmiles(x) for x in mols]
        assert np.allclose(fraction_valid(mols), (3 / 4)), 'Failed valid'
        assert np.allclose(fraction_unique(mols, check_validity=False), (3 / 4)), 'Failed unique'
        assert np.allclose(fraction_unique(mols, k=2), 1), 'Failed unique'
        enable_rdkit_log()
class Bug(object):
    """A single bugzilla bug, backed by a raw attribute dict from the server.

    Attribute access falls through __getattr__ into the cached raw data; with
    autorefresh enabled, a missing attribute triggers a server round-trip.
    Mutating helpers (setstatus, close, addcc, ...) delegate to the parent
    Bugzilla connection's build_update/update_bugs.
    """

    def __init__(self, bugzilla, bug_id=None, dict=None, autorefresh=False):
        # `dict` shadows the builtin; kept for API compatibility.
        self.bugzilla = bugzilla
        self._rawdata = {}
        self.autorefresh = autorefresh
        # (new_name, old_name) pairs mapping current field names to legacy ones.
        self._aliases = self.bugzilla._get_bug_aliases()
        if (not dict):
            dict = {}
        if bug_id:
            dict['id'] = bug_id
        self._update_dict(dict)
        self.weburl = self._generate_weburl()

    def _generate_weburl(self):
        """Build the human-facing show_bug.cgi URL from the connection's base URL."""
        parsed = urlparse(self.bugzilla.url)
        return urlunparse((parsed.scheme, parsed.netloc, 'show_bug.cgi', '', ('id=%s' % self.bug_id), ''))

    def __str__(self):
        return self.__unicode__()

    def __unicode__(self):
        # One-line summary: id, status, assignee, summary.
        return ('#%-6s %-10s - %s - %s' % (self.bug_id, self.bug_status, self.assigned_to, self.summary))

    def __repr__(self):
        url = ''
        if self.bugzilla:
            url = self.bugzilla.url
        return ('<Bug #%i on %s at %#x>' % (self.bug_id, url, id(self)))

    def __getattr__(self, name):
        """Resolve unknown attributes via aliases, optionally refreshing once
        from the server when autorefresh is enabled."""
        refreshed = False
        while True:
            if (refreshed and (name in self.__dict__)):
                return self.__dict__[name]
            # Legacy name requested but the new-style field is cached.
            for (newname, oldname) in self._aliases:
                if ((name == oldname) and (newname in self.__dict__)):
                    return self.__dict__[newname]
            # Never refresh for dunder lookups (pickle, copy, etc.).
            if (name.startswith('__') and name.endswith('__')):
                break
            if (refreshed or (not self.autorefresh)):
                break
            log.info("Bug %i missing attribute '%s' - doing implicit refresh(). This will be slow, if you want to avoid this, properly use query/getbug include_fields, and set bugzilla.bug_autorefresh = False to force failure.", self.bug_id, name)
            self.refresh(extra_fields=[name])
            refreshed = True
        msg = ("Bug object has no attribute '%s'." % name)
        if (not self.autorefresh):
            msg += ("\nIf '%s' is a bugzilla attribute, it may not have been cached when the bug was fetched. You may want to adjust your include_fields for getbug/query." % name)
        raise AttributeError(msg)

    def get_raw_data(self):
        """Return a deep copy of the raw server data for this bug."""
        return copy.deepcopy(self._rawdata)

    def refresh(self, include_fields=None, exclude_fields=None, extra_fields=None):
        """Re-fetch this bug from the server, preserving already-cached fields."""
        extra_fields = (list(self._rawdata.keys()) + (extra_fields or []))
        r = self.bugzilla._getbug(self.bug_id, include_fields=include_fields, exclude_fields=exclude_fields, extra_fields=extra_fields)
        self._update_dict(r)
    # Backwards-compatible alias for refresh().
    reload = refresh

    def _translate_dict(self, newdict):
        """Normalize an incoming dict in place: apply server post-translation
        and migrate legacy field names to their new-style equivalents."""
        if self.bugzilla:
            self.bugzilla.post_translation({}, newdict)
        for (newname, oldname) in self._aliases:
            if (oldname not in newdict):
                continue
            if (newname not in newdict):
                newdict[newname] = newdict[oldname]
            elif (newdict[newname] != newdict[oldname]):
                log.debug('Update dict contained differing alias values d[%s]=%s and d[%s]=%s , dropping the value d[%s]', newname, newdict[newname], oldname, newdict[oldname], oldname)
            del newdict[oldname]

    def _update_dict(self, newdict):
        """Merge newdict into the raw cache and instance attributes."""
        self._translate_dict(newdict)
        self._rawdata.update(newdict)
        self.__dict__.update(newdict)
        if (('id' not in self.__dict__) and ('bug_id' not in self.__dict__)):
            raise TypeError('Bug object needs a bug_id')

    def __getstate__(self):
        # Pickle only raw data + aliases; the connection is not picklable.
        ret = self._rawdata.copy()
        ret['_aliases'] = self._aliases
        return ret

    def __setstate__(self, vals):
        self._rawdata = {}
        self.bugzilla = None
        self._aliases = vals.get('_aliases', [])
        self.autorefresh = False
        self._update_dict(vals)

    def setstatus(self, status, comment=None, private=False):
        """Set this bug's status, optionally adding a (private) comment."""
        vals = self.bugzilla.build_update(status=status, comment=comment, comment_private=private)
        log.debug('setstatus: update=%s', vals)
        return self.bugzilla.update_bugs(self.bug_id, vals)

    def close(self, resolution, dupeid=None, fixedin=None, comment=None, isprivate=False):
        """Close this bug with the given resolution (and optional dupe/fixedin)."""
        vals = self.bugzilla.build_update(comment=comment, comment_private=isprivate, resolution=resolution, dupe_of=dupeid, fixed_in=fixedin, status=str('CLOSED'))
        log.debug('close: update=%s', vals)
        return self.bugzilla.update_bugs(self.bug_id, vals)

    def setassignee(self, assigned_to=None, qa_contact=None, comment=None):
        """Reassign the bug and/or change its QA contact."""
        if (not (assigned_to or qa_contact)):
            raise ValueError('You must set one of assigned_to or qa_contact')
        vals = self.bugzilla.build_update(assigned_to=assigned_to, qa_contact=qa_contact, comment=comment)
        log.debug('setassignee: update=%s', vals)
        return self.bugzilla.update_bugs(self.bug_id, vals)

    def addcc(self, cclist, comment=None):
        """Add the given addresses to the CC list."""
        vals = self.bugzilla.build_update(comment=comment, cc_add=cclist)
        log.debug('addcc: update=%s', vals)
        return self.bugzilla.update_bugs(self.bug_id, vals)

    def deletecc(self, cclist, comment=None):
        """Remove the given addresses from the CC list."""
        vals = self.bugzilla.build_update(comment=comment, cc_remove=cclist)
        log.debug('deletecc: update=%s', vals)
        return self.bugzilla.update_bugs(self.bug_id, vals)

    def addcomment(self, comment, private=False):
        """Append a comment, optionally marked private."""
        vals = self.bugzilla.build_update(comment=comment, comment_private=private)
        log.debug('addcomment: update=%s', vals)
        return self.bugzilla.update_bugs(self.bug_id, vals)

    def getcomments(self):
        """Return the list of comment dicts for this bug."""
        comment_list = self.bugzilla.get_comments([self.bug_id])
        return comment_list['bugs'][str(self.bug_id)]['comments']

    def get_flag_type(self, name):
        """Return the flag dict with the given name, or None."""
        for t in self.flags:
            if (t['name'] == name):
                return t
        return None

    def get_flags(self, name):
        """Return a (single-element) list of flags matching name, or None."""
        ft = self.get_flag_type(name)
        if (not ft):
            return None
        return [ft]

    def get_flag_status(self, name):
        """Return the status of the named flag, or None if unset."""
        f = self.get_flags(name)
        if (not f):
            return None
        # get_flags returns at most one entry by construction.
        assert (len(f) <= 1)
        return f[0]['status']

    def updateflags(self, flags):
        """Set multiple flags from a {name: status} mapping in one update."""
        flaglist = []
        for (key, value) in flags.items():
            flaglist.append({'name': key, 'status': value})
        return self.bugzilla.update_bugs([self.bug_id], self.bugzilla.build_update(flags=flaglist))

    def get_attachments(self, include_fields=None, exclude_fields=None):
        """Return attachment dicts, preferring the locally cached copy."""
        if ('attachments' in self.__dict__):
            return self.attachments
        data = self.bugzilla.get_attachments([self.bug_id], None, include_fields, exclude_fields)
        return data['bugs'][str(self.bug_id)]

    def get_attachment_ids(self):
        """Return attachment ids only (data payload excluded for speed)."""
        return [a['id'] for a in self.get_attachments(exclude_fields=['data'])]

    def get_history_raw(self):
        """Return the raw change-history structure for this bug."""
        return self.bugzilla.bugs_history_raw([self.bug_id])
def test_coord_generator():
    """_coord_generator(i, j, k, l) must yield exactly the eight index
    permutations that leave a two-electron integral invariant."""
    def symmetry_set(i, j, k, l):
        # The 8-fold permutational symmetry group of (ij|kl).
        return {(i, j, k, l), (j, i, k, l), (i, j, l, k), (j, i, l, k),
                (k, l, i, j), (k, l, j, i), (l, k, i, j), (l, k, j, i)}

    assert symmetry_set(0, 1, 2, 3) == set(_coord_generator(0, 1, 2, 3))
    # Repeated indices: duplicates collapse inside the set.
    assert symmetry_set(1, 1, 2, 3) == set(_coord_generator(1, 1, 2, 3))
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a trax Reformer checkpoint (nested `weights` structure) into a
    HuggingFace ReformerModelWithLMHead.

    NOTE(review): the hard-coded indices (1, 3, 5, 7, 9) reflect the trax
    checkpoint layout — word embeddings, position embeddings, encoder layer
    stack, final layer norm, and output projection respectively; confirm
    against the trax checkpoint format being converted.
    """
    torch_model_reformer = torch_model.reformer
    # Word embeddings.
    word_embeddings = np.asarray(weights[1])
    set_param(torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))
    if isinstance(weights[3], tuple):
        # Axial position embeddings: one weight tensor per axis.
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (position_embeddings.weights[emb_idx].shape == emb_weights.shape), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    # Encoder layers: trax stores 4 weight groups per torch layer.
    trax_layer_weights = weights[5]
    assert ((len(torch_model_reformer.encoder.layers) * 4) == len(trax_layer_weights)), 'HF and trax model do not have the same number of layers'
    for (layer_idx, layer) in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[(4 * layer_idx):(4 * (layer_idx + 1))]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # Final output layer norm.
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias))
    # LM head: trax stores the projection transposed relative to torch.
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias))
def trainDataGenerator(batch_size, train_path, image_folder, mask_folder, aug_dict, image_color_mode='grayscale', mask_color_mode='grayscale', target_size=(256, 256), sal=False):
    """Yield (image_batch, mask_batch) pairs for training, with identical
    augmentation applied to images and masks.

    The two flow_from_directory generators share seed=1 so that each image
    batch lines up with its mask batch. Batches are post-processed by
    processSUIMDataRFHW (normalization / per-class mask split — confirm).

    :param batch_size: samples per yielded batch
    :param train_path: root directory containing image_folder and mask_folder
    :param aug_dict: kwargs forwarded to keras ImageDataGenerator
    :param sal: forwarded to processSUIMDataRFHW (saliency mode)
    """
    image_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(train_path, classes=[image_folder], class_mode=None, color_mode=image_color_mode, target_size=target_size, batch_size=batch_size, save_to_dir=None, save_prefix='image', seed=1)
    mask_datagen = ImageDataGenerator(**aug_dict)
    mask_generator = mask_datagen.flow_from_directory(train_path, classes=[mask_folder], class_mode=None, color_mode=mask_color_mode, target_size=target_size, batch_size=batch_size, save_to_dir=None, save_prefix='mask', seed=1)
    # Bug fix: `it.izip` is Python 2 only (itertools.izip was removed in
    # Python 3, which this file targets — it uses f-strings elsewhere).
    # The builtin zip is already lazy in Python 3.
    for (img, mask) in zip(image_generator, mask_generator):
        (img, mask_indiv) = processSUIMDataRFHW(img, mask, sal)
        (yield (img, mask_indiv))
# NOTE(review): the line below is a stripped decorator call — pycket registers
# primitives with @expose("hash-set", [...], simple=False); the leading
# "@expose" appears to have been lost in extraction and the bare line is a
# syntax error as-is. Restore from the original source.
('hash-set', [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set(table, key, val, env, cont):
    """Racket `hash-set`: return a new immutable hash with key mapped to val.

    Raises for mutable tables (hash-set is the functional/immutable variant).
    """
    from pycket.interpreter import return_value
    if (not table.immutable()):
        raise SchemeException('hash-set: not given an immutable table')
    if isinstance(table, W_ImmutableHashTable):
        # Fast path: persistent table supports functional update directly.
        new_table = table.assoc(key, val)
        return return_value(new_table, env, cont)
    # Fallback: copy the table, then set the key in the copy via continuation.
    return hash_copy(table, env, hash_set_cont(key, val, env, cont))
class SelfSubstitutionQuantifierEliminator(QuantifierEliminator, IdentityDagWalker):
    """Boolean quantifier elimination by self-substitution.

    Uses the identity: exists v. F  <->  F[v / F[v / True]] and dually
    forall v. F  <->  F[v / F[v / False]]. Only valid for purely Boolean
    quantified variables, hence LOGICS = [BOOL].
    """
    LOGICS = [pysmt.logics.BOOL]

    def __init__(self, environment, logic=None):
        IdentityDagWalker.__init__(self, env=environment)
        QuantifierEliminator.__init__(self)
        self.logic = logic

    def eliminate_quantifiers(self, formula):
        """Return a quantifier-free formula equivalent to `formula`."""
        return self.walk(formula)

    def self_substitute(self, formula, qvars, token):
        """Eliminate each variable in qvars via self-substitution with `token`
        (TRUE for exists, FALSE for forall).

        Variables are processed innermost-first (reverse order) — the order
        matters because each elimination rewrites the formula the next one
        operates on.
        """
        for v in qvars[::(- 1)]:
            inner_sub = formula.substitute({v: token})
            formula = formula.substitute({v: inner_sub})
        return formula

    def walk_forall(self, formula, args, **kwargs):
        """forall v. F  ->  F[v / F[v / FALSE]] for each quantified v."""
        qvars = formula.quantifier_vars()
        # args[0] is the already-rewritten (quantifier-free) body.
        f = args[0]
        token = self.env.formula_manager.FALSE()
        qf_f = self.self_substitute(f, qvars, token)
        return qf_f

    def walk_exists(self, formula, args, **kwargs):
        """exists v. F  ->  F[v / F[v / TRUE]] for each quantified v."""
        qvars = formula.quantifier_vars()
        f = args[0]
        token = self.env.formula_manager.TRUE()
        qf_f = self.self_substitute(f, qvars, token)
        return qf_f

    def _exit(self):
        # No external solver resources to release.
        pass
def _decode_samples(data, image_key='jpg', image_format='RGB', target_key='cls', alt_label='', handler=log_and_continue):
    """Decode an iterable of webdataset samples, yielding successful decodes.

    Decoding errors are passed to `handler`: a truthy return skips the sample,
    a falsy return stops iteration entirely. The sample's '__key__' is carried
    over onto dict results.
    """
    for sample in data:
        try:
            decoded = _decode(sample, image_key=image_key, image_format=image_format, target_key=target_key, alt_label=alt_label)
        except Exception as exn:
            if not handler(exn):
                break
            continue
        if decoded is None:
            continue
        if isinstance(sample, dict) and isinstance(decoded, dict):
            decoded['__key__'] = sample.get('__key__')
        yield decoded
class BacktestMonitorSettings():
    """Toggle bundle controlling which artifacts a backtest monitor produces
    (tearsheet, analysis sheets, logs) and live-progress display options."""

    def __init__(self, issue_tearsheet=True, issue_portfolio_analysis_sheet=True, issue_trade_analysis_sheet=True, issue_transaction_log=True, issue_signal_log=True, issue_config_log=True, issue_daily_portfolio_values_file=True, print_stats_to_console=True, generate_pnl_chart_per_ticker_in_portfolio_analysis=True, display_live_backtest_progress=True, live_backtest_chart_refresh_frequency=20, exposure_settings: ExposureSettings=None):
        self.issue_tearsheet = issue_tearsheet
        self.issue_portfolio_analysis_sheet = issue_portfolio_analysis_sheet
        self.issue_trade_analysis_sheet = issue_trade_analysis_sheet
        self.issue_transaction_log = issue_transaction_log
        self.issue_signal_log = issue_signal_log
        self.issue_config_log = issue_config_log
        # NOTE(review): attribute is singular ("value") while the parameter is
        # plural ("values") — kept as-is since external code may read it.
        self.issue_daily_portfolio_value_file = issue_daily_portfolio_values_file
        self.print_stats_to_console = print_stats_to_console
        self.generate_pnl_chart_per_ticker_in_portfolio_analysis = generate_pnl_chart_per_ticker_in_portfolio_analysis
        self.display_live_backtest_progress = display_live_backtest_progress
        # Coerce to int so a float/str refresh frequency can't leak through.
        self.live_backtest_chart_refresh_frequency = int(live_backtest_chart_refresh_frequency)
        self.exposure_settings = exposure_settings

    @staticmethod
    def no_stats() -> 'BacktestMonitorSettings':
        """Factory: settings with every output and display disabled.

        Bug fix: this was a plain method with no `self`, so calling it on an
        instance raised TypeError; @staticmethod makes both
        `BacktestMonitorSettings.no_stats()` and `instance.no_stats()` work.
        """
        return BacktestMonitorSettings(False, False, False, False, False, False, False, False, False, False, 20, None)
class CMDRegularizer(Regularizer):
    """Central Moment Discrepancy (CMD) activity regularizer.

    Adds l * CMD(output_0, output_1) to the loss when the attached shared
    layer is called on two inputs (domain adaptation between the two
    activations); a single-input layer contributes nothing.
    """

    def __init__(self, l=1.0, n_moments=5):
        # Only active during training (K.in_train_phase below).
        self.uses_learning_phase = 1
        self.l = l                    # regularization strength
        self.n_moments = n_moments    # number of central moments matched by CMD

    def set_layer(self, layer):
        """Attach the layer whose (shared) activations are regularized."""
        self.layer = layer

    def __call__(self, loss):
        if (not hasattr(self, 'layer')):
            raise Exception('Need to call `set_layer` on ActivityRegularizer instance before calling the instance.')
        regularizer_loss = loss
        sim = 0
        # CMD needs two activations of the same (shared) layer.
        if (len(self.layer.inbound_nodes) > 1):
            sim = cmd(self.layer.get_output_at(0), self.layer.get_output_at(1), self.n_moments)
        add_loss = K.switch(K.equal(len(self.layer.inbound_nodes), 2), sim, 0)
        regularizer_loss += (self.l * add_loss)
        # Regularize at training time only; plain loss at inference.
        return K.in_train_phase(regularizer_loss, loss)

    def get_config(self):
        # Bug fix: n_moments was omitted, so a config round-trip silently
        # reset it to the default. Including it preserves serialization.
        return {'name': self.__class__.__name__, 'l': float(self.l), 'n_moments': self.n_moments}
class ExpectationComputationalBasisStateTest(unittest.TestCase):
    """Tests for expectation_computational_basis_state with FermionOperators
    evaluated on computational basis states given either as a sparse column
    vector or as a list of occupied-orbital flags."""

    def test_expectation_fermion_operator_single_number_terms(self):
        # Basis state |1111> (index 15): only the 3^ 3 number term contributes.
        operator = (FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1'))
        state = csc_matrix(([1], ([15], [0])), shape=(16, 1))
        self.assertAlmostEqual(expectation_computational_basis_state(operator, state), 1.9)

    def test_expectation_fermion_operator_two_number_terms(self):
        # Basis state |0110> (index 6): 1.9 from 2^ 2 plus 1.7 from the
        # two-body number term (sign from the -1.7 coefficient and ordering).
        operator = ((FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1')) + FermionOperator('2^ 1^ 2 1', (- 1.7)))
        state = csc_matrix(([1], ([6], [0])), shape=(16, 1))
        self.assertAlmostEqual(expectation_computational_basis_state(operator, state), 3.6)

    def test_expectation_identity_fermion_operator(self):
        # Identity * 1.1 has expectation 1.1 on any normalized state.
        operator = (FermionOperator.identity() * 1.1)
        state = csc_matrix(([1], ([6], [0])), shape=(16, 1))
        self.assertAlmostEqual(expectation_computational_basis_state(operator, state), 1.1)

    def test_expectation_state_is_list_single_number_terms(self):
        # Same as the sparse-vector case, with the state as occupation list.
        operator = (FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1'))
        state = [1, 1, 1, 1]
        self.assertAlmostEqual(expectation_computational_basis_state(operator, state), 1.9)

    def test_expectation_state_is_list_fermion_operator_two_number_terms(self):
        operator = ((FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1')) + FermionOperator('2^ 1^ 2 1', (- 1.7)))
        state = [0, 1, 1]
        self.assertAlmostEqual(expectation_computational_basis_state(operator, state), 3.6)

    def test_expectation_state_is_list_identity_fermion_operator(self):
        operator = (FermionOperator.identity() * 1.1)
        state = [0, 1, 1]
        self.assertAlmostEqual(expectation_computational_basis_state(operator, state), 1.1)

    def test_expectation_bad_operator_type(self):
        # A non-operator first argument must raise TypeError.
        with self.assertRaises(TypeError):
            expectation_computational_basis_state('never', csc_matrix(([1], ([6], [0])), shape=(16, 1)))

    def test_expectation_qubit_operator_not_implemented(self):
        # QubitOperator support is explicitly not implemented.
        with self.assertRaises(NotImplementedError):
            expectation_computational_basis_state(QubitOperator(), csc_matrix(([1], ([6], [0])), shape=(16, 1)))
def quantsim_custom_grad_learned_grid(inputs: tf.Tensor, encoding_min: tf.Variable, encoding_max: tf.Variable, op_mode: tf.Variable, bitwidth: tf.Variable, is_symmetric: tf.Variable, grad: tf.Tensor) -> Tuple[(tf.Variable, List[tf.Variable])]:
    """Custom gradient for learned-grid quantization.

    Delegates the calculus to _compute_dloss_by_dmin_dmax_and_dx and repacks
    the result as (input gradient, [encoding_min grad, encoding_max grad]).
    """
    gradients = _compute_dloss_by_dmin_dmax_and_dx(inputs, encoding_min, encoding_max, op_mode, bitwidth, is_symmetric, grad)
    dloss_by_dmin, dloss_by_dmax, dloss_by_dx = gradients
    return dloss_by_dx, [dloss_by_dmin, dloss_by_dmax]
def build_dataset(args, rank=0, is_test=True):
    """Build SOON object-navigation environments for training/evaluation.

    Returns (train_env, val_envs, aug_env):
    - train_env: training split with multi start/end points enabled
    - val_envs: dict of validation environments keyed by split name
    - aug_env: optional augmentation-data environment (None when args.aug unset)

    `rank` offsets the RNG seed and selects this worker's data shard when
    args.world_size > 1. When is_test is False, validation splits are
    subsampled for faster iteration.
    """
    tok = get_tokenizer(args)
    # Pre-extracted view and object features shared by all environments.
    feat_db = ImageFeaturesDB(args.img_ft_file, args.image_feat_size)
    obj_db = ObjectFeatureDB(args.obj_ft_file, args.obj_feat_size)
    dataset_class = SoonObjectNavBatch
    if (args.aug is not None):
        # Optional speaker-augmented instructions as a separate training env.
        aug_instr_data = construct_instrs(args.anno_dir, args.dataset, [args.aug], instr_type=args.instr_type, tokenizer=args.tokenizer, max_instr_len=args.max_instr_len)
        aug_env = dataset_class(feat_db, obj_db, aug_instr_data, args.connectivity_dir, batch_size=args.batch_size, max_objects=args.max_objects, angle_feat_size=args.angle_feat_size, is_train=True, seed=(args.seed + rank), sel_data_idxs=None, name='aug', multi_endpoints=args.multi_endpoints, multi_startpoints=args.multi_startpoints)
    else:
        aug_env = None
    train_instr_data = construct_instrs(args.anno_dir, args.dataset, ['train'], instr_type=args.instr_type, tokenizer=args.tokenizer, max_instr_len=args.max_instr_len)
    train_env = dataset_class(feat_db, obj_db, train_instr_data, args.connectivity_dir, batch_size=args.batch_size, max_objects=args.max_objects, angle_feat_size=args.angle_feat_size, seed=(args.seed + rank), sel_data_idxs=None, name='train', is_train=True, multi_endpoints=args.multi_endpoints, multi_startpoints=args.multi_startpoints)
    val_env_names = ['val_train', 'val_unseen_instrs', 'val_unseen_house']
    if args.submit:
        # Include held-out test splits only when preparing a submission.
        val_env_names.append('test')
        val_env_names.append('test_v2')
    val_envs = {}
    for split in val_env_names:
        val_instr_data = construct_instrs(args.anno_dir, args.dataset, [split], instr_type=args.instr_type, tokenizer=args.tokenizer, max_instr_len=args.max_instr_len)
        if (split == 'val_train'):
            # Small fixed sample of training data for sanity-check evaluation.
            val_instr_data = val_instr_data[:100:2]
        elif (not is_test):
            # Subsample validation splits during development runs.
            val_instr_data = val_instr_data[::5]
        # Evaluation uses a doubled batch size and a fixed single start/end.
        val_env = dataset_class(feat_db, obj_db, val_instr_data, args.connectivity_dir, batch_size=(args.batch_size * 2), angle_feat_size=args.angle_feat_size, seed=(args.seed + rank), sel_data_idxs=(None if (args.world_size < 2) else (rank, args.world_size)), name=split, max_objects=None, multi_endpoints=False, multi_startpoints=False, is_train=False)
        val_envs[split] = val_env
    return (train_env, val_envs, aug_env)
# NOTE(review): the original decorator line was a bare string — the leading
# "@patch(" was stripped. Restored as unittest.mock.patch, which matches the
# mock_task parameter the test receives. Confirm against the original file.
@patch('cms.components.page.signals.revalidate_vercel_frontend_task')
def test_revalidate_vercel_frontend(mock_task):
    """Saving a site's root page triggers the Vercel revalidation task with
    that page's id (task itself is mocked out)."""
    site = SiteFactory()
    page = PageFactory()
    site.root_page = page
    site.save()
    # NOTE(review): the revalidate_url literal was mangled in the source
    # ("revalidate_url=' revalidate_secret='test'"); restored with a
    # placeholder URL — confirm the original value.
    VercelFrontendSettingsFactory(revalidate_url='http://localhost/api/revalidate', revalidate_secret='test', site=site)
    revalidate_vercel_frontend('test_revalidate_vercel_frontend', instance=page)
    mock_task.delay.assert_called_with(page_id=page.id)
def _add_validation_args(parser):
group = parser.add_argument_group(title='validation')
group.add_argument('--eval-iters', type=int, default=10, help='Number of iterations to run for evaluationvalidation/test for.')
group.add_argument('--eval-interval', type=int, default=1000, help='Interval between running evaluation on validation set.')
return parser |
def _test_sharding_ec(tables: List[EmbeddingConfig], initial_state_dict: Dict[(str, Any)], rank: int, world_size: int, kjt_input_per_rank: List[KeyedJaggedTensor], sharder: ModuleSharder[nn.Module], backend: str, constraints: Optional[Dict[(str, ParameterConstraints)]]=None, local_size: Optional[int]=None) -> None:
    """Per-process body of an EmbeddingCollection sharding test.

    Builds an unsharded EmbeddingCollection and a DistributedModelParallel
    sharded copy from the same initial state, then for 5 iterations checks
    that this rank's sharded forward output matches the unsharded reference,
    and runs backward on both (per-table SGD is fused into backward via
    apply_optimizer_in_backward).
    """
    # Keep gradients comparable between the sharded and unsharded paths.
    trec_dist.comm_ops.set_gradient_division(False)
    with MultiProcessContext(rank, world_size, backend, local_size) as ctx:
        kjt_input_per_rank = [kjt.to(ctx.device) for kjt in kjt_input_per_rank]
        initial_state_dict = {fqn: tensor.to(ctx.device) for (fqn, tensor) in initial_state_dict.items()}
        planner = EmbeddingShardingPlanner(topology=Topology(world_size, ctx.device.type, local_world_size=ctx.local_size), constraints=constraints)
        model = EmbeddingCollection(tables=tables, device=ctx.device)
        # Distinct learning rates so a fused-optimizer mixup would show up.
        apply_optimizer_in_backward(torch.optim.SGD, model.embeddings['table_0'].parameters(), {'lr': 1.0})
        apply_optimizer_in_backward(torch.optim.SGD, model.embeddings['table_1'].parameters(), {'lr': 4.0})
        unsharded_model = model
        plan: ShardingPlan = planner.collective_plan(model, [sharder], ctx.pg)
        sharded_model = DistributedModelParallel(module=model, env=ShardingEnv.from_process_group(ctx.pg), plan=plan, sharders=[sharder], device=ctx.device)
        # Load identical weights into both models before comparing outputs.
        unsharded_model.load_state_dict(copy.deepcopy(initial_state_dict))
        copy_state_dict(sharded_model.state_dict(), copy.deepcopy(initial_state_dict))
        feature_keys = []
        for table in tables:
            feature_keys.extend(table.feature_names)
        for _it in range(5):
            # Reference: run every rank's input through the unsharded model.
            unsharded_model_preds = []
            # Fix: the loop variable used to be `rank`, shadowing the function
            # parameter of the same name; renamed for clarity (behavior was
            # unaffected since later code reads ctx.rank).
            for input_rank in range(ctx.world_size):
                unsharded_pred = unsharded_model(kjt_input_per_rank[input_rank])
                unsharded_pred = torch.cat([unsharded_pred[feature].values().view((- 1)) for feature in feature_keys])
                unsharded_model_preds.append(unsharded_pred)
            sharded_model_pred_kt = sharded_model(kjt_input_per_rank[ctx.rank])
            sharded_model_pred = torch.cat([sharded_model_pred_kt[feature].values().view((- 1)) for feature in feature_keys])
            torch.testing.assert_close(sharded_model_pred.cpu(), unsharded_model_preds[ctx.rank].cpu())
            sharded_model_pred.sum().backward()
            all_unsharded_preds = torch.cat([pred.view((- 1)) for pred in unsharded_model_preds])
            all_unsharded_preds.sum().backward()
class MacroCommand(Command):
    """Composite command that executes/undoes an ordered list of sub-commands."""
    commands: List[Command]

    def __init__(self, commands: List[Command]):
        self.commands = commands

    def execute(self) -> None:
        """Execute every sub-command in list order."""
        # Idiom fix: iterate the list directly instead of `range(len(...))`.
        for command in self.commands:
            command.execute()

    def undo(self) -> None:
        """Undo every sub-command.

        NOTE(review): this undoes in the same forward order as execute();
        a classic macro undo walks the list in reverse. Kept as-is to
        preserve behavior — confirm the intended semantics.
        """
        for command in self.commands:
            command.undo()
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    """Run `module(*inputs)` once and print a per-submodule table of parameter
    counts, buffer counts, output shapes and dtypes; return the outputs.

    Forward hooks record each submodule's outputs up to `max_nesting` levels
    deep. With skip_redundant, rows whose params/buffers/outputs were all
    already attributed to an earlier (deeper) row are dropped.
    """
    assert isinstance(module, torch.nn.Module)
    # Hooks don't fire reliably on scripted modules.
    assert (not isinstance(module, torch.jit.ScriptModule))
    assert isinstance(inputs, (tuple, list))
    entries = []
    nesting = [0]  # current call depth, shared mutably with the hooks

    def pre_hook(_mod, _inputs):
        nesting[0] += 1

    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if (nesting[0] <= max_nesting):
            outputs = (list(outputs) if isinstance(outputs, (tuple, list)) else [outputs])
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(easydict.EasyDict(mod=mod, outputs=outputs))

    # Register hooks on every submodule, run one forward pass, then clean up.
    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()
    # Attribute each tensor to the first (innermost-recorded) entry that owns
    # it, so shared params/buffers/outputs are not double counted.
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if (id(t) not in tensors_seen)]
        e.unique_buffers = [t for t in e.mod.buffers() if (id(t) not in tensors_seen)]
        e.unique_outputs = [t for t in e.outputs if (id(t) not in tensors_seen)]
        tensors_seen |= {id(t) for t in ((e.unique_params + e.unique_buffers) + e.unique_outputs)}
    if skip_redundant:
        entries = [e for e in entries if (len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs))]
    # Assemble the table, one row per entry plus one extra row per additional
    # output tensor of the same module.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [(['---'] * len(rows[0]))]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for (name, mod) in module.named_modules()}
    for e in entries:
        name = ('<top-level>' if (e.mod is module) else submodule_names[e.mod])
        param_size = sum((t.numel() for t in e.unique_params))
        buffer_size = sum((t.numel() for t in e.unique_buffers))
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[(- 1)] for t in e.outputs]
        rows += [[(name + (':0' if (len(e.outputs) >= 2) else '')), (str(param_size) if param_size else '-'), (str(buffer_size) if buffer_size else '-'), (output_shapes + ['-'])[0], (output_dtypes + ['-'])[0]]]
        for idx in range(1, len(e.outputs)):
            rows += [[(name + f':{idx}'), '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [(['---'] * len(rows[0]))]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
    # Left-align each column to its widest cell and print.
    widths = [max((len(cell) for cell in column)) for column in zip(*rows)]
    print()
    for row in rows:
        print('  '.join(((cell + (' ' * (width - len(cell)))) for (cell, width) in zip(row, widths))))
    print()
    return outputs
class CSVOutput(object):
    """Append/overwrite CSV writer with a fixed column set.

    Rows are written via csv.DictWriter with extrasaction='ignore', so extra
    keys in a row dict are silently dropped. Every write is flushed so the
    file stays readable while a long run is in progress.
    """

    def __init__(self, config: Config, fieldnames: List, abs_filename: str, overwrite_file: bool=True, delimiter: str=';'):
        self.config = config
        mode = ('w' if overwrite_file else 'a')
        # Fixes: `os.path.join` with a single argument was a no-op and is
        # dropped; newline='' is required by the csv module so that row
        # terminators are not doubled on Windows.
        self.file_handler = open((abs_filename + '.csv'), mode, newline='')
        self.csv_writer = csv.DictWriter(self.file_handler, delimiter=delimiter, fieldnames=fieldnames, extrasaction='ignore')
        if overwrite_file:
            self.csv_writer.writeheader()
            self.file_handler.flush()

    def write(self, row: Dict[(str, Union[(str, Tuple[(str, ...)])])]) -> None:
        """Write one row dict and flush it to disk immediately."""
        self.csv_writer.writerow(row)
        self.file_handler.flush()

    def close(self) -> None:
        """Close the underlying file (flushes any buffered data)."""
        self.file_handler.close()
def ss_windowname(screenshot_manager):
    """Screenshot scenario for a window titled 'One': capture it in its
    normal, maximized, and floating states (presumably for the qtile widget
    docs gallery — confirm).
    """
    screenshot_manager.test_window('One')
    screenshot_manager.take_screenshot()
    # Maximized state.
    screenshot_manager.c.window.toggle_maximize()
    screenshot_manager.take_screenshot()
    # Minimize, capture, then restore (the restored state is not captured).
    screenshot_manager.c.window.toggle_minimize()
    screenshot_manager.take_screenshot()
    screenshot_manager.c.window.toggle_minimize()
    # Floating state.
    screenshot_manager.c.window.toggle_floating()
    screenshot_manager.take_screenshot()
class TestDirectoryRecursion():
.requires_unix
def test_infinite_loop_prevention(self, temp_dir):
project_dir = (temp_dir / 'project')
project_dir.ensure_dir_exists()
with project_dir.as_cwd():
config = {'tool': {'hatch': {'build': {'include': ['foo', 'README.md']}}}}
builder = MockBuilder(str(project_dir), config=config)
(project_dir / 'README.md').touch()
foo = (project_dir / 'foo')
foo.ensure_dir_exists()
(foo / 'bar.txt').touch()
(foo / 'baz').symlink_to(project_dir)
assert ([f.path for f in builder.recurse_included_files()] == [str((project_dir / 'README.md')), str(((project_dir / 'foo') / 'bar.txt'))])
    def test_only_include(self, temp_dir):
        """only-include restricts selection to the listed paths: README.md is
        skipped even though it is listed under `artifacts`."""
        project_dir = (temp_dir / 'project')
        project_dir.ensure_dir_exists()
        with project_dir.as_cwd():
            config = {'tool': {'hatch': {'build': {'only-include': ['foo'], 'artifacts': ['README.md']}}}}
            builder = MockBuilder(str(project_dir), config=config)
            (project_dir / 'README.md').touch()
            foo = (project_dir / 'foo')
            foo.ensure_dir_exists()
            (foo / 'bar.txt').touch()
            assert ([f.path for f in builder.recurse_included_files()] == [str(((project_dir / 'foo') / 'bar.txt'))])
    def test_no_duplication_force_include_only(self, temp_dir):
        """force-include maps external files/dirs into the distribution; when
        a forced target path collides with one produced by a forced directory
        (new/target2.txt), the explicit file mapping wins and nothing is
        emitted twice."""
        project_dir = (temp_dir / 'project')
        project_dir.ensure_dir_exists()
        with project_dir.as_cwd():
            config = {'tool': {'hatch': {'build': {'force-include': {'../external.txt': 'new/target2.txt', 'old': 'new'}}}}}
            builder = MockBuilder(str(project_dir), config=config)
            (project_dir / 'foo.txt').touch()
            old = (project_dir / 'old')
            old.ensure_dir_exists()
            (old / 'target1.txt').touch()
            (old / 'target2.txt').touch()
            (temp_dir / 'external.txt').touch()
            build_data = builder.get_default_build_data()
            builder.set_build_data_defaults(build_data)
            with builder.config.set_build_data(build_data):
                assert ([(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [(str((project_dir / 'foo.txt')), 'foo.txt'), (str(((project_dir / 'old') / 'target1.txt')), f'new{path_sep}target1.txt'), (str((temp_dir / 'external.txt')), f'new{path_sep}target2.txt')])
def test_no_duplication_force_include_and_selection(self, temp_dir):
    """A file selected by `include` that is also listed in the build-data
    force-include map is emitted once, after the regular selection, at the
    forced target path."""
    project_dir = (temp_dir / 'project')
    project_dir.ensure_dir_exists()
    with project_dir.as_cwd():
        config = {'tool': {'hatch': {'build': {'include': ['foo.txt', 'bar.txt', 'baz.txt'], 'force-include': {'../external.txt': 'new/file.txt'}}}}}
        builder = MockBuilder(str(project_dir), config=config)
        (project_dir / 'foo.txt').touch()
        (project_dir / 'bar.txt').touch()
        (project_dir / 'baz.txt').touch()
        (temp_dir / 'external.txt').touch()
        build_data = builder.get_default_build_data()
        builder.set_build_data_defaults(build_data)
        # Force-include bar.txt via build data on top of the config selection.
        build_data['force_include']['bar.txt'] = 'bar.txt'
        with builder.config.set_build_data(build_data):
            # bar.txt is not duplicated: it only appears once, last, from the
            # force-include pass.
            assert ([(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [(str((project_dir / 'baz.txt')), 'baz.txt'), (str((project_dir / 'foo.txt')), 'foo.txt'), (str((temp_dir / 'external.txt')), f'new{path_sep}file.txt'), (str((project_dir / 'bar.txt')), 'bar.txt')])
def test_no_duplication_force_include_with_sources(self, temp_dir):
    """Same deduplication as above, but with a `sources` rewrite in effect:
    the force-included ``src/bar.txt`` is emitted once at its forced path."""
    project_dir = (temp_dir / 'project')
    project_dir.ensure_dir_exists()
    with project_dir.as_cwd():
        config = {'tool': {'hatch': {'build': {'include': ['src'], 'sources': ['src'], 'force-include': {'../external.txt': 'new/file.txt'}}}}}
        builder = MockBuilder(str(project_dir), config=config)
        src_dir = (project_dir / 'src')
        src_dir.mkdir()
        (src_dir / 'foo.txt').touch()
        (src_dir / 'bar.txt').touch()
        (src_dir / 'baz.txt').touch()
        (temp_dir / 'external.txt').touch()
        build_data = builder.get_default_build_data()
        builder.set_build_data_defaults(build_data)
        build_data['force_include']['src/bar.txt'] = 'bar.txt'
        with builder.config.set_build_data(build_data):
            # Distribution paths have the `src` prefix stripped by `sources`.
            assert ([(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [(str((src_dir / 'baz.txt')), 'baz.txt'), (str((src_dir / 'foo.txt')), 'foo.txt'), (str((temp_dir / 'external.txt')), f'new{path_sep}file.txt'), (str((src_dir / 'bar.txt')), 'bar.txt')])
def test_exists(self, temp_dir):
    """A force-include source that does not exist on disk raises
    FileNotFoundError during traversal."""
    project_dir = (temp_dir / 'project')
    project_dir.ensure_dir_exists()
    with project_dir.as_cwd():
        config = {'tool': {'hatch': {'build': {'force-include': {'../notfound': 'target.txt'}}}}}
        builder = MockBuilder(str(project_dir), config=config)
        build_data = builder.get_default_build_data()
        builder.set_build_data_defaults(build_data)
        # The generator is lazy — errors surface only when it is consumed.
        with builder.config.set_build_data(build_data), pytest.raises(FileNotFoundError, match='Forced include not found'):
            list(builder.recurse_included_files())
def test_order(self, temp_dir):
    """Output ordering: regular selections come first (sorted by
    distribution path, with `sources` rewriting applied), then force-included
    entries sorted by target path. Excluded directories are never entered,
    for both selected and force-included trees."""
    project_dir = (temp_dir / 'project')
    project_dir.ensure_dir_exists()
    with project_dir.as_cwd():
        config = {'tool': {'hatch': {'build': {'sources': ['src'], 'include': ['src/foo', 'bar', 'README.md', 'tox.ini'], 'exclude': ['**/foo/baz.txt'], 'force-include': {'../external1.txt': 'nested/target2.txt', '../external2.txt': 'nested/target1.txt', '../external': 'nested'}}}}}
        builder = MockBuilder(str(project_dir), config=config)
        foo = ((project_dir / 'src') / 'foo')
        foo.ensure_dir_exists()
        (foo / 'bar.txt').touch()
        # baz.txt matches the exclude pattern and must not appear.
        (foo / 'baz.txt').touch()
        bar = (project_dir / 'bar')
        bar.ensure_dir_exists()
        (bar / 'foo.txt').touch()
        # Seed well-known excluded directories (e.g. VCS dirs) with content.
        for name in EXCLUDED_DIRECTORIES:
            excluded_dir = (bar / name)
            excluded_dir.ensure_dir_exists()
            (excluded_dir / 'file.ext').touch()
        (project_dir / 'README.md').touch()
        (project_dir / 'tox.ini').touch()
        (temp_dir / 'external1.txt').touch()
        (temp_dir / 'external2.txt').touch()
        external = (temp_dir / 'external')
        external.ensure_dir_exists()
        (external / 'external1.txt').touch()
        (external / 'external2.txt').touch()
        # Excluded directories are skipped inside force-included trees too.
        for name in EXCLUDED_DIRECTORIES:
            excluded_dir = (external / name)
            excluded_dir.ensure_dir_exists()
            (excluded_dir / 'file.ext').touch()
        assert ([(f.path, f.distribution_path) for f in builder.recurse_included_files()] == [(str((project_dir / 'README.md')), 'README.md'), (str((project_dir / 'tox.ini')), 'tox.ini'), (str(((project_dir / 'bar') / 'foo.txt')), f'bar{path_sep}foo.txt'), (str((((project_dir / 'src') / 'foo') / 'bar.txt')), f'foo{path_sep}bar.txt'), (str(((temp_dir / 'external') / 'external1.txt')), f'nested{path_sep}external1.txt'), (str(((temp_dir / 'external') / 'external2.txt')), f'nested{path_sep}external2.txt'), (str((temp_dir / 'external2.txt')), f'nested{path_sep}target1.txt'), (str((temp_dir / 'external1.txt')), f'nested{path_sep}target2.txt')])
.parametrize(('expr', 'expected_passed'), [('None', ['test_func[None]']), ('[1.3]', ['test_func[1.3]']), ('2-3', ['test_func[2-3]'])])
def test_keyword_option_parametrize(expr: str, expected_passed: List[str], pytester: Pytester) -> None:
    """-k expressions can select individual parametrized ids, including ids
    containing brackets and dashes."""
    pytester.makepyfile('\n import pytest\n .parametrize("arg", [None, 1.3, "2-3"])\n def test_func(arg):\n pass\n ')
    rec = pytester.inline_run('-k', expr)
    (passed, skipped, fail) = rec.listoutcomes()
    # Compare only the final node-id component (the parametrized test name).
    passed_str = [x.nodeid.split('::')[(- 1)] for x in passed]
    assert (passed_str == expected_passed)
class DocstringParamHintingTest(AbstractHintingTest):
    """Completion must honor ``:type param:`` hints found in docstrings."""

    def test_hint_param(self):
        # `a_arg` is hinted as threading.Thread via the method docstring.
        code = dedent(' class Sample(object):\n def a_method(self, a_arg):\n """:type a_arg: threading.Thread"""\n a_arg.is_a')
        result = self._assist(code)
        self.assert_completion_in_result('is_alive', 'attribute', result)

    def test_hierarchical_hint_param(self):
        # The hint on the base-class method applies to the override too.
        code = dedent(' class ISample(object):\n def a_method(self, a_arg):\n """:type a_arg: threading.Thread"""\n\n\n class Sample(ISample):\n def a_method(self, a_arg):\n a_arg.is_a')
        result = self._assist(code)
        self.assert_completion_in_result('is_alive', 'attribute', result)
class Time2ShieldRegenGetter(SmoothPointGetter):
    """Point getter mapping time (x axis) to passive shield regen rate."""

    def _getCommonData(self, miscParams, src, tgt):
        # Ship attributes are constant across all sampled points.
        ship = src.item.ship
        return {
            'maxShieldAmount': ship.getModifiedItemAttr('shieldCapacity'),
            # Recharge rate is exposed in milliseconds; convert to seconds.
            'shieldRegenTime': ship.getModifiedItemAttr('shieldRechargeRate') / 1000,
        }

    def _calculatePoint(self, x, miscParams, src, tgt, commonData):
        # First recover the shield amount at time x, then derive the regen
        # rate from that amount.
        amount = calculateShieldAmount(
            maxShieldAmount=commonData['maxShieldAmount'],
            shieldRegenTime=commonData['shieldRegenTime'],
            shieldAmountT0=miscParams['shieldAmountT0'] or 0,
            time=x)
        return calculateShieldRegen(
            maxShieldAmount=commonData['maxShieldAmount'],
            shieldRegenTime=commonData['shieldRegenTime'],
            currentShieldAmount=amount)
def get_deps(factory_class: FactoryType, parent_factory_class: (FactoryType | None)=None, model_name: (str | None)=None) -> list[str]:
    """Return fixture dependency names ("<model><SEPARATOR><attr>") for a factory.

    :param factory_class: Factory whose declarations are inspected.
    :param parent_factory_class: Optional parent factory; SubFactory
        declarations pointing back at the parent's model are excluded to
        avoid circular dependencies.
    :param model_name: Explicit model name override; derived from
        *factory_class* when not given.
    """
    model_name = get_model_name(factory_class) if model_name is None else model_name
    parent_model_name = (get_model_name(parent_factory_class) if parent_factory_class is not None else None)

    def is_dep(value: Any) -> bool:
        # RelatedFactory declarations are resolved after the model exists,
        # so they are not dependencies of this fixture.
        if isinstance(value, factory.RelatedFactory):
            return False
        # A SubFactory back to the parent model would create a cycle.
        if isinstance(value, factory.SubFactory) and (get_model_name(value.get_factory()) == parent_model_name):
            return False
        # NOTE(review): the original special-cased PostGenerationDeclaration
        # with `return True`, identical to the fall-through below — that
        # branch was dead code and has been removed (behavior unchanged).
        return True

    return [SEPARATOR.join((model_name, attr)) for (attr, value) in factory_class._meta.declarations.items() if is_dep(value)]
def _optimizer(args: SharedArgs, steps_per_epoch: int) -> Optimizer:
    """Build the optimizer selected by ``args.optimizer``.

    With ``args.decoupled_weight_decay`` set, the tensorflow-addons
    decoupled variants (AdamW/SGDW) are used; otherwise the plain Keras
    optimizers. Raises ValueError for an unknown optimizer name.
    """
    learning_rate = _create_learning_rate(args, steps_per_epoch)
    if args.decoupled_weight_decay:
        weight_decay = _create_weight_decay(args, steps_per_epoch)
        if args.optimizer == OPTIMIZER_ADAM:
            return tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=weight_decay, beta_1=0.9, beta_2=0.999, amsgrad=False, epsilon=1e-07)
        if args.optimizer == OPTIMIZER_SGD:
            return tfa.optimizers.SGDW(learning_rate=learning_rate, weight_decay=weight_decay, momentum=0.9)
        raise ValueError(f'Unrecognized optimizer: {args.optimizer}')
    if args.optimizer == OPTIMIZER_ADAM:
        return tensorflow.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False, epsilon=1e-07)
    if args.optimizer == OPTIMIZER_SGD:
        return tensorflow.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
    raise ValueError(f'Unrecognized optimizer: {args.optimizer}')
class Honest(Policy):
    """Hand-coded "honest" policy: actions are derived directly from the
    observation, no learning takes place (learn/get/set weights are no-ops)."""

    def __init__(self, observation_space, action_space, config):
        Policy.__init__(self, observation_space, action_space, config)
        self.blocks = config['blocks']
        self.fiftyone = config['fiftyone']
        self.extended = config['extended']

    def _action_for(self, obs):
        """Map a single observation to an integer action."""
        # Observations arrive as floats; round them back to ints.
        a = int(round(obs[0]))
        h = int(round(obs[1]))
        o = int(round(obs[2]))  # decoded for parity with the layout (unused)
        f = int(round(obs[3]))  # decoded for parity with the layout (unused)
        if self.extended:
            return 5 if a > h else 0
        if a >= self.blocks or h >= self.blocks:
            return 1 if a > h else 0
        if self.fiftyone:
            maxlen = max(list(obs)[4:(- 1)])
            return 5 if a > maxlen else 4
        return 1 if a > h else 0

    def compute_actions(self, obs_batch, state_batches, prev_action_batch=None, prev_reward_batch=None, info_batch=None, episodes=None, **kwargs):
        actions = [self._action_for(obs) for obs in obs_batch]
        return (actions, [], {})

    def learn_on_batch(self, samples):
        pass

    def get_weights(self):
        pass

    def set_weights(self, weights):
        pass
class IhexParser():
    """Minimal Intel-HEX reader.

    Parses the file at *path* into ``self.mem`` (a list of
    ``(absolute_address, bytes)`` records) and coalesces adjacent records
    into ``self.segments``. Only record types 00 (data), 02 (extended
    segment address) and 04 (extended linear address) are handled;
    checksums are not verified.
    """

    def __init__(self, path):
        self.mem = []
        self.segments = []
        self.base = 0
        with open(path, 'r') as f:
            for line in f.read().splitlines():
                self.parse_line(line.strip())
        # Merge records that are contiguous in memory into segments.
        begin, stream = 0, b''
        for addr, data in self.mem:
            if addr == begin + len(stream):
                stream += data
            else:
                self.segments.append((begin, stream))
                begin, stream = addr, data
        self.segments.append((begin, stream))

    def parse_line(self, line):
        """Parse one record of the form ':llaaaatt<data>cc'."""
        # Shortest valid record (no payload) is 9 characters.
        if len(line) < 9:
            return
        record_type = line[7:9]
        length = int(line[1:3], 16)
        addr_field = bytes.fromhex(line[3:7])
        payload = bytes.fromhex(line[9:9 + length * 2])
        if record_type == '00':
            # Data record: address is relative to the current base.
            offset = int.from_bytes(addr_field, byteorder='big')
            self.mem.append((self.base + offset, payload))
        elif record_type == '02':
            # Extended segment address: payload is a paragraph (16-byte) base.
            self.base = int.from_bytes(payload, byteorder='big') * 16
        elif record_type == '04':
            # Extended linear address: payload is the upper 16 address bits.
            self.base = int.from_bytes(payload, byteorder='big') * 65536
class FastCronTab(CronTab):
    """CronTab subclass that caches the result of ``next()``.

    The every-minute expression is special-cased to pure arithmetic; for
    everything else a previously computed delay is shortened by the elapsed
    time instead of re-evaluating the cron expression on each call.
    """

    def __init__(self, *args, **kwargs):
        super(FastCronTab, self).__init__(*args, **kwargs)
        # True when the expression fires every minute.
        self.every_minute = (args[0] == '* * * * *')
        # cached_now: timestamp at which cached_next was last computed;
        # cached_next: timedelta until the next firing as of cached_now.
        self.cached_now = None
        self.cached_next = None

    def next(self, now=None, *args, **kwargs):
        """Return seconds until the next firing after *now* (default: now)."""
        if (now is None):
            now = datetime.now()
        if self.every_minute:
            # Seconds remaining until the top of the next minute.
            return ((60.0 - now.second) - (now.microsecond / 1000000))
        if ((self.cached_now is not None) and (now > self.cached_now) and (now < (self.cached_now + self.cached_next))):
            # Still before the cached firing time: just shrink the cached
            # delta by the time elapsed since it was computed.
            self.cached_next -= (now - self.cached_now)
            self.cached_now = now
        else:
            # Cache miss (first call, time went backwards, or the firing
            # passed): fall back to the full cron evaluation.
            self.cached_now = now
            if ('default_utc' not in kwargs):
                kwargs = kwargs.copy()
                kwargs['default_utc'] = False
            self.cached_next = timedelta(seconds=super(FastCronTab, self).next(now, *args, **kwargs))
        return self.cached_next.total_seconds()
class ChangeStream(Scaffold):
    async def change_stream(self, chat_id: Union[(int, str)], stream: Optional[Stream]=None):
        """Switch the stream being played in *chat_id*'s group call.

        :raises NoMTProtoClientSet: when no MTProto client is configured.
        :raises ClientNotStarted: when the client is not running.
        :raises FileNotFoundError: when the stream's source file is missing.
        :raises NotInGroupCallError: for any other failure from the binding.
        """
        if (self._app is None):
            raise NoMTProtoClientSet()
        if (not self._is_running):
            raise ClientNotStarted()
        chat_id = (await self._resolve_chat_id(chat_id))
        try:
            # The binding call is synchronous; ToAsync keeps the loop free.
            (await ToAsync(self._binding.change_stream, chat_id, (await StreamParams.get_stream_params(stream))))
        except FileError:
            raise FileNotFoundError()
        except Exception:
            # NOTE(review): every non-file failure is mapped to
            # NotInGroupCallError — presumably the common cause; confirm.
            raise NotInGroupCallError()
def test_local_filename_dictionary_installed(tmpdir, monkeypatch):
    """local_filename() picks the highest-versioned installed .bdic file
    for a language code."""
    monkeypatch.setattr(spell, 'dictionary_dir', (lambda : str(tmpdir)))
    for lang_file in ['en-US-11-0.bdic', 'en-US-7-1.bdic', 'pl-PL-3-0.bdic']:
        (tmpdir / lang_file).ensure()
    # en-US has two candidates; the newer 11-0 must win over 7-1.
    assert (spell.local_filename('en-US') == 'en-US-11-0.bdic')
    assert (spell.local_filename('pl-PL') == 'pl-PL-3-0.bdic')
def _pil_loader(path, cropArea=None, resizeDim=None, frameFlip=None):
    """Load an image as RGB, optionally resizing, cropping and flipping it.

    :param path: Path of the image file to open.
    :param cropArea: Optional 4-tuple (left, upper, right, lower) crop box,
        applied after the resize.
    :param resizeDim: Optional (width, height) to resize to first.
    :param frameFlip: If truthy, mirror the image horizontally.
    :return: A PIL ``Image`` in RGB mode.
    """
    # Context-managed handle so the descriptor is closed even on decode errors.
    with open(path, 'rb') as f:
        img = Image.open(f)
        # Fixed: compare against None with identity (`is not None`), not `!=`,
        # which can invoke arbitrary __ne__ on the argument.
        resized_img = (img.resize(resizeDim, Image.ANTIALIAS) if resizeDim is not None else img)
        cropped_img = (resized_img.crop(cropArea) if cropArea is not None else resized_img)
        flipped_img = (cropped_img.transpose(Image.FLIP_LEFT_RIGHT) if frameFlip else cropped_img)
        return flipped_img.convert('RGB')
def seek_sequential(hashes, outdir):
    """Compare each 64-bit boolean hash against all later ones, sequentially.

    For every index i, computes the Hamming distance from ``hashes[i]`` to
    ``hashes[i+1:]`` in a single TF op, updating a progress bar as it goes.

    :param hashes: Sequence of boolean 64-element hash vectors.
    :param outdir: Output directory (unused in the visible loop).
    """
    len_hashes = len(hashes)
    pbar = tf.contrib.keras.utils.Progbar(len_hashes)
    # Fixed: removed unused local `cprogress = tf.constant(0)` (dead code).
    # Feed one index per session step from a range dataset.
    dataset_i = tf.data.Dataset.range(len_hashes)
    iterator_i = dataset_i.make_one_shot_iterator()
    next_element_i = iterator_i.get_next()
    hash_i = tf.placeholder(tf.bool, shape=[64])
    hashes_j = tf.placeholder(tf.bool, shape=[None, 64])
    # Hamming distance: count of differing bits per row.
    diff_op = tf.count_nonzero(tf.not_equal(hash_i, hashes_j), 1)
    pbar.update(0)
    with tf.train.MonitoredSession() as sess:
        for _ in range((len_hashes - 1)):
            i = sess.run(next_element_i)
            # NOTE(review): `diff` is computed but never stored or written —
            # presumably downstream handling was removed; confirm before
            # relying on this function for output.
            diff = sess.run(diff_op, feed_dict={hash_i: hashes[i], hashes_j: hashes[(i + 1):]})
            pbar.update(i)
class WorkTask(Task):
    """Richards-benchmark worker task: fills each packet with letters and
    bounces it between the two handler tasks."""

    def __init__(self, i, p, w, s, r):
        super().__init__(i, p, w, s, r)

    def fn(self, pkt, r):
        rec = r
        assert isinstance(rec, WorkerTaskRec)
        super().fn(pkt, r)
        if pkt is None:
            return self.waitTask()
        # Alternate the destination handler on every packet.
        dest = I_HANDLERB if rec.destination == I_HANDLERA else I_HANDLERA
        rec.destination = dest
        pkt.ident = dest
        pkt.datum = 0
        for idx in BUFSIZE_RANGE:
            # Cycle rec.count through 1..26 and store the matching letter
            # code (A..Z) into the packet buffer.
            rec.count += 1
            if rec.count > 26:
                rec.count = 1
            pkt.data[idx] = (A + rec.count) - 1
        return self.qpkt(pkt)
class TestAutouseManagement():
    """Integration tests for autouse fixtures: discovery through conftest
    files, interaction with parametrization, and setup/teardown ordering."""

    def test_autouse_conftest_mid_directory(self, pytester: Pytester) -> None:
        # An autouse fixture in a package conftest applies to tests in a
        # subdirectory of that package.
        pkgdir = pytester.mkpydir('xyz123')
        pkgdir.joinpath('conftest.py').write_text(textwrap.dedent(' import pytest\n (autouse=True)\n def app():\n import sys\n sys._myapp = "hello"\n '), encoding='utf-8')
        sub = pkgdir.joinpath('tests')
        sub.mkdir()
        t = sub.joinpath('test_app.py')
        t.touch()
        t.write_text(textwrap.dedent(' import sys\n def test_app():\n assert sys._myapp == "hello"\n '), encoding='utf-8')
        reprec = pytester.inline_run('-s')
        reprec.assertoutcome(passed=1)

    def test_funcarg_and_setup(self, pytester: Pytester) -> None:
        # Module-scoped autouse fixture runs once, after the fixture it
        # depends on, for all tests in the module.
        pytester.makepyfile('\n import pytest\n values = []\n (scope="module")\n def arg():\n values.append(1)\n return 0\n (scope="module", autouse=True)\n def something(arg):\n values.append(2)\n\n def test_hello(arg):\n assert len(values) == 2\n assert values == [1,2]\n assert arg == 0\n\n def test_hello2(arg):\n assert len(values) == 2\n assert values == [1,2]\n assert arg == 0\n ')
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2)

    def test_uses_parametrized_resource(self, pytester: Pytester) -> None:
        # An autouse fixture depending on a parametrized fixture runs once
        # per parameter.
        pytester.makepyfile('\n import pytest\n values = []\n (params=[1,2])\n def arg(request):\n return request.param\n\n (autouse=True)\n def something(arg):\n values.append(arg)\n\n def test_hello():\n if len(values) == 1:\n assert values == [1]\n elif len(values) == 2:\n assert values == [1, 2]\n else:\n 0/0\n\n ')
        reprec = pytester.inline_run('-s')
        reprec.assertoutcome(passed=2)

    def test_session_parametrized_function(self, pytester: Pytester) -> None:
        # Function-scoped autouse fixture sees each session-scoped parameter.
        pytester.makepyfile('\n import pytest\n\n values = []\n\n (scope="session", params=[1,2])\n def arg(request):\n return request.param\n\n (scope="function", autouse=True)\n def append(request, arg):\n if request.function.__name__ == "test_some":\n values.append(arg)\n\n def test_some():\n pass\n\n def test_result(arg):\n assert len(values) == arg\n assert values[:arg] == [1,2][:arg]\n ')
        reprec = pytester.inline_run('-v', '-s')
        reprec.assertoutcome(passed=4)

    def test_class_function_parametrization_finalization(self, pytester: Pytester) -> None:
        # Finalizers run in the expected class x function parameter order,
        # and the whole sequence repeats for the second test class.
        p = pytester.makeconftest('\n import pytest\n import pprint\n\n values = []\n\n (scope="function", params=[1,2])\n def farg(request):\n return request.param\n\n (scope="class", params=list("ab"))\n def carg(request):\n return request.param\n\n (scope="function", autouse=True)\n def append(request, farg, carg):\n def fin():\n values.append("fin_%s%s" % (carg, farg))\n request.addfinalizer(fin)\n ')
        pytester.makepyfile('\n import pytest\n\n class TestClass(object):\n def test_1(self):\n pass\n class TestClass2(object):\n def test_2(self):\n pass\n ')
        reprec = pytester.inline_run('-v', '-s', '--confcutdir', pytester.path)
        reprec.assertoutcome(passed=8)
        config = reprec.getcalls('pytest_unconfigure')[0].config
        values = config.pluginmanager._getconftestmodules(p)[0].values
        assert (values == (['fin_a1', 'fin_a2', 'fin_b1', 'fin_b2'] * 2))

    def test_scope_ordering(self, pytester: Pytester) -> None:
        # Autouse fixtures execute widest scope first: module, class, function.
        pytester.makepyfile('\n import pytest\n values = []\n (scope="function", autouse=True)\n def fappend2():\n values.append(2)\n (scope="class", autouse=True)\n def classappend3():\n values.append(3)\n (scope="module", autouse=True)\n def mappend():\n values.append(1)\n\n class TestHallo(object):\n def test_method(self):\n assert values == [1,3,2]\n ')
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)

    def test_parametrization_setup_teardown_ordering(self, pytester: Pytester) -> None:
        # Class-scoped parametrized setup/teardown fully completes for one
        # parameter before the next begins.
        pytester.makepyfile('\n import pytest\n values = []\n def pytest_generate_tests(metafunc):\n if metafunc.cls is None:\n assert metafunc.function is test_finish\n if metafunc.cls is not None:\n metafunc.parametrize("item", [1,2], scope="class")\n class TestClass(object):\n (scope="class", autouse=True)\n def addteardown(self, item, request):\n values.append("setup-%d" % item)\n request.addfinalizer(lambda: values.append("teardown-%d" % item))\n def test_step1(self, item):\n values.append("step1-%d" % item)\n def test_step2(self, item):\n values.append("step2-%d" % item)\n\n def test_finish():\n print(values)\n assert values == ["setup-1", "step1-1", "step2-1", "teardown-1",\n "setup-2", "step1-2", "step2-2", "teardown-2",]\n ')
        reprec = pytester.inline_run('-s')
        reprec.assertoutcome(passed=5)

    def test_ordering_autouse_before_explicit(self, pytester: Pytester) -> None:
        # Autouse fixtures are instantiated before explicitly requested ones.
        pytester.makepyfile('\n import pytest\n\n values = []\n (autouse=True)\n def fix1():\n values.append(1)\n ()\n def arg1():\n values.append(2)\n def test_hello(arg1):\n assert values == [1,2]\n ')
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)

    .parametrize('param1', ['', 'params=[1]'], ids=['p00', 'p01'])
    .parametrize('param2', ['', 'params=[1]'], ids=['p10', 'p11'])
    def test_ordering_dependencies_torndown_first(self, pytester: Pytester, param1, param2) -> None:
        # A fixture's dependents are torn down before the fixture itself,
        # regardless of whether either side is parametrized.
        pytester.makepyfile(('\n import pytest\n values = []\n (%(param1)s)\n def arg1(request):\n request.addfinalizer(lambda: values.append("fin1"))\n values.append("new1")\n (%(param2)s)\n def arg2(request, arg1):\n request.addfinalizer(lambda: values.append("fin2"))\n values.append("new2")\n\n def test_arg(arg2):\n pass\n def test_check():\n assert values == ["new1", "new2", "fin2", "fin1"]\n ' % locals()))
        reprec = pytester.inline_run('-s')
        reprec.assertoutcome(passed=2)
def downsample_block(input, num_channel, kernel_size):
    """Pre-activation residual block that halves the temporal dimension.

    Applies layer-norm + ReLU, then sums a two-conv strided main path with a
    1x1 strided shortcut projection.
    """
    normed = tf.nn.relu(tf.contrib.layers.layer_norm(input, scale=True))
    # Main path: strided conv, norm + ReLU, then a second (unstrided) conv.
    residual = slim.conv2d(activation_fn=None, inputs=normed, num_outputs=num_channel, biases_initializer=None, kernel_size=[1, kernel_size], stride=[1, 2], padding='SAME')
    residual = tf.nn.relu(tf.contrib.layers.layer_norm(residual, scale=True))
    residual = slim.conv2d(activation_fn=None, inputs=residual, num_outputs=num_channel, biases_initializer=None, kernel_size=[1, kernel_size], stride=[1, 1], padding='SAME')
    # Shortcut: 1x1 strided projection so shapes match for the addition.
    shortcut = slim.conv2d(activation_fn=None, inputs=normed, num_outputs=num_channel, biases_initializer=None, kernel_size=[1, 1], stride=[1, 2], padding='SAME')
    return residual + shortcut
def catchSignals():
    """Install a handler on (nearly) every catchable signal that forwards the
    signal to the whole process group, then raises KeyboardInterrupt.

    Idempotent via the module-level ``catchingSigs`` flag. (Python 2 code —
    note the use of ``xrange``.)
    """
    global catchingSigs
    if catchingSigs:
        return
    catchingSigs = True
    import signal

    def f(sigNo, *args):
        # Re-entrancy guard: the killpg below re-delivers the signal to us.
        global inSigHandler
        if inSigHandler:
            return
        inSigHandler = True
        # Forward the signal to the whole process group so children see it.
        os.killpg(os.getpgrp(), sigNo)
        sys.stderr.write(('\nCaught signal %d\n' % sigNo))
        raise KeyboardInterrupt
    for n in xrange(1, signal.NSIG):
        # Skip job-control/child signals and anything explicitly ignored.
        if ((not (n in [signal.SIGCHLD, signal.SIGTSTP, signal.SIGCONT, signal.SIGWINCH])) and (not (signal.getsignal(n) == signal.SIG_IGN))):
            try:
                signal.signal(n, f)
            except:
                # Some signals (e.g. SIGKILL/SIGSTOP) cannot be caught.
                pass
def test_new_end_state():
    """A freshly created channel end state is empty: no lock activity, nonce
    starting at 1, zero locked amount, and the empty-locks locksroot."""
    initial_balance = 101
    our_address = make_address()
    end_state = NettingChannelEndState(our_address, initial_balance)

    secret = keccak(b'test_end_state')
    secrethash = sha256(secret).digest()

    # No lock activity yet for an arbitrary secrethash.
    assert channel.is_lock_pending(end_state, secrethash) is False
    assert channel.is_lock_locked(end_state, secrethash) is False

    assert channel.get_next_nonce(end_state) == 1
    assert channel.get_amount_locked(end_state) == 0
    assert compute_locksroot(end_state.pending_locks) == LOCKSROOT_OF_NO_LOCKS

    # All secrethash bookkeeping mappings start out empty.
    assert not end_state.secrethashes_to_lockedlocks
    assert not end_state.secrethashes_to_unlockedlocks
    assert not end_state.secrethashes_to_onchain_unlockedlocks
def normalize_index_name(index_name, legacy_index_map):
    """Shorten *index_name* to at most MAXIMUM_INDEX_NAME_LENGTH characters.

    Short enough names are returned unchanged; known legacy names use their
    mapped replacement; otherwise the name is truncated and suffixed with the
    first 8 hex digits of its SHA-256 to keep it unique.

    :param index_name: The candidate index name (str).
    :param legacy_index_map: Mapping of historical long names to their
        previously assigned short names.
    """
    if (len(index_name) <= MAXIMUM_INDEX_NAME_LENGTH):
        return index_name
    if (index_name in legacy_index_map):
        return legacy_index_map[index_name]
    # Fixed: hashlib requires bytes — hashing the str directly raises
    # TypeError on Python 3, so encode first.
    hashed = hashlib.sha256(index_name.encode('utf-8')).hexdigest()
    updated = ('%s_%s' % (index_name[0:MAXIMUM_INDEX_NAME_ALLOWANCE], hashed[0:8]))
    assert (len(updated) <= MAXIMUM_INDEX_NAME_LENGTH)
    return updated
_config
def test_floating_focus(manager):
    """Toggling a window to floating keeps its focus and tile geometry
    bookkeeping consistent while cycling and stack navigation continue to
    work on the remaining tiled windows."""
    # Change to 2-column layout and open two windows.
    manager.c.next_layout()
    assert (len(manager.c.layout.info()['stacks']) == 2)
    manager.test_window('two')
    manager.test_window('one')
    assert (manager.c.window.info()['width'] == 398)
    assert (manager.c.window.info()['height'] == 578)
    # Float the focused window and move it; focus must stay on 'one'.
    manager.c.window.toggle_floating()
    manager.c.window.move_floating(10, 20)
    assert (manager.c.window.info()['name'] == 'one')
    assert (manager.c.group.info()['focus'] == 'one')
    assert ([x['current'] for x in manager.c.layout.info()['stacks']] == [0, 0])
    # Cycling moves focus to the tiled window, whose geometry is unchanged.
    manager.c.group.next_window()
    assert (manager.c.window.info()['width'] == 398)
    assert (manager.c.window.info()['height'] == 578)
    assert (manager.c.window.info()['name'] != 'one')
    assert (manager.c.group.info()['focus'] != 'one')
    assert ([x['current'] for x in manager.c.layout.info()['stacks']] == [0, 0])
    # Cycling again returns to the floating window.
    manager.c.group.next_window()
    assert (manager.c.window.info()['name'] == 'one')
    assert ([x['current'] for x in manager.c.layout.info()['stacks']] == [0, 0])
    # Stack navigation skips the floating window entirely.
    manager.c.layout.up()
    assert (manager.c.window.info()['name'] != 'one')
    manager.c.layout.up()
    assert (manager.c.window.info()['name'] != 'one')
    assert ([x['current'] for x in manager.c.layout.info()['stacks']] == [0, 0])
    # But group-level cycling still reaches it.
    manager.c.group.next_window()
    assert (manager.c.window.info()['name'] == 'one')
    assert ([x['current'] for x in manager.c.layout.info()['stacks']] == [0, 0])
def test_direct_junction_offsets_pre_suc_2_right(direct_junction_right_lane_fixture):
    """Direct-junction lane offsets: connecting main-road lane -3 to
    small-road lane -1 records offsets +2 / -2 on the respective roads."""
    (main_road, small_road, junction_creator) = direct_junction_right_lane_fixture
    main_road.add_predecessor(xodr.ElementType.junction, junction_creator.id)
    small_road.add_successor(xodr.ElementType.junction, junction_creator.id)
    junction_creator.add_connection(main_road, small_road, (- 3), (- 1))
    # Offset bookkeeping on both roads, plus the recorded lane link itself.
    assert (main_road.pred_direct_junction == {small_road.id: 2})
    assert (small_road.succ_direct_junction == {main_road.id: (- 2)})
    assert (junction_creator.junction.connections[0].links[0] == ((- 3), (- 1)))
def _resolve_from_appdata(criteria_, app, timeout=None, retry_interval=None):
    """Resolve a (dialog[, control]) pair from recorded app-data history.

    Uses the class names recorded for the current history item (advancing the
    module-level ``cur_item`` cursor) and re-finds the elements, dropping
    locale-dependent attributes from the caller's criteria first.

    Returns ``(dialog, ctrl)`` when two criteria were given, else ``(dialog,)``.
    Raises ``ElementNotFoundError`` when nothing matches.
    """
    # NOTE(review): timeout/retry_interval are normalized here but never used
    # below — presumably kept for signature compatibility; confirm.
    if (timeout is None):
        timeout = Timings.window_find_timeout
    if (retry_interval is None):
        retry_interval = Timings.window_find_retry
    # Advance through the recorded match history.
    global cur_item
    matched_control = app.GetMatchHistoryItem(cur_item)
    cur_item += 1
    # Work on copies so the caller's criteria dicts are not mutated.
    criteria = [crit.copy() for crit in criteria_]
    # Strip attributes that vary with locale/translation; matching relies on
    # the recorded class names instead.
    for unloc_attrib in ['title_re', 'title', 'name', 'name_re', 'best_match']:
        for c in criteria:
            if (unloc_attrib in c.keys()):
                del c[unloc_attrib]
    dialog_criterion = criteria[0]
    dialog_criterion['class_name'] = matched_control[1]['class_name']
    process_elems = findwindows.find_elements(**dialog_criterion)
    dialog = None
    ctrl = None
    if process_elems:
        for e in process_elems:
            dialog = registry.wrapper_class(e)
            if (len(criteria_) > 1):
                # Second criterion: find the child control within the dialog.
                ctrl_criterion = criteria[1]
                ctrl_criterion['class_name'] = matched_control[2]['class_name']
                ctrl_criterion['parent'] = dialog.handle
                ctrl_criterion['top_level_only'] = False
                ctrl_elems = findwindows.find_elements(**ctrl_criterion)
                if (len(ctrl_elems) > 1):
                    # Prefer controls whose id matches the recorded one.
                    same_ids = [elem for elem in ctrl_elems if (elem.control_id == matched_control[2]['control_id'])]
                    if same_ids:
                        ctrl_elems = same_ids
                try:
                    ctrl = registry.wrapper_class(ctrl_elems[0])
                except IndexError:
                    # NOTE(review): leftover debug marker before re-raising.
                    print(('-+-+=_' * 20))
                    raise
            # Only the first matching process element is used.
            break
    if (dialog is None):
        raise findwindows.ElementNotFoundError()
    if ((len(criteria_) == 2) and (ctrl is None)):
        raise findwindows.ElementNotFoundError()
    if ctrl:
        return (dialog, ctrl)
    else:
        return (dialog,)
def test_no_ordering_with_shorter_marker_prefix(marker_test):
    """An --order-marker-prefix shorter than the markers' real prefix must
    not reorder anything: the tests run in file order."""
    result = marker_test.runpytest('-v', '--order-marker-prefix=m')
    result.assert_outcomes(passed=3, skipped=0)
    result.stdout.fnmatch_lines(['test_marker.py::test_a PASSED', 'test_marker.py::test_b PASSED', 'test_marker.py::test_c PASSED'])
class Optim(object):
    """Wrapper around torch optimizers adding learning-rate scheduling
    (noam / cosine / fixed), optional ZeroRedundancyOptimizer sharding, and
    checkpointable step state."""

    def set_parameters(self, params):
        """Bind model parameters and construct the underlying optimizer
        according to ``self.method`` ('sgd', 'adam' or 'fused_adam')."""
        params_ = params
        self.params = list(params_)
        if (self.method == 'sgd'):
            if (not self.zeror):
                self.optimizer = optim.SGD(self.params, lr=self.lr, weight_decay=self.weight_decay, momentum=0.0)
            else:
                # Sharded optimizer state across distributed ranks.
                from torch.distributed.optim import ZeroRedundancyOptimizer
                optimizer = ZeroRedundancyOptimizer(self.params, optimizer_class=optim.SGD, lr=self.lr, weight_decay=self.weight_decay, momentum=0.0)
                self.optimizer = optimizer
        elif (self.method == 'adam'):
            if (not self.zeror):
                # AdamW semantics only when weight decay is actually in use.
                if (self.weight_decay > 0):
                    self.optimizer = AdamWWrapper(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-09, weight_decay=self.weight_decay, amsgrad=self.amsgrad)
                else:
                    self.optimizer = AdamWrapper(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-09, weight_decay=0.0, amsgrad=self.amsgrad)
            else:
                from torch.distributed.optim import ZeroRedundancyOptimizer
                # NOTE(review): lr is hard-coded to 1e-05 here rather than
                # self.lr — the scheduler overwrites it on the first step,
                # but confirm this is intentional.
                optimizer = ZeroRedundancyOptimizer(self.params, optimizer_class=(optim.AdamW if (self.weight_decay > 0) else optim.Adam), lr=1e-05, betas=(self.beta1, self.beta2), eps=1e-09, weight_decay=self.weight_decay)
                self.optimizer = optimizer
        elif (self.method in ['fused_adam']):
            # Try the CUDA fused kernel; fall back to plain Adam when the
            # extension is missing or fails to load.
            fast_adam = True
            try:
                import fused_optim
                if self.amsgrad:
                    print('Note: AMSGRAD is not compatible with Fused Adam')
                from onmt.modules.optimized.fused_adam import FusedAdam
                self.optimizer = FusedAdam(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-09, weight_decay=self.weight_decay, amsgrad=False, set_grad_none=False)
            except (RuntimeError, ModuleNotFoundError):
                fast_adam = False
            if (not fast_adam):
                self.optimizer = optim.Adam(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-09, weight_decay=self.weight_decay, amsgrad=self.amsgrad)
        else:
            raise RuntimeError(('Invalid optim method: ' + self.method))
        # _optim always refers to the inner torch optimizer, unwrapping the
        # ZeroRedundancy wrapper when present.
        if self.zeror:
            self._optim = self.optimizer.optim
        else:
            self._optim = self.optimizer

    def __init__(self, opt):
        """Read scheduling/optimizer hyper-parameters from *opt*; call
        ``set_parameters`` afterwards to build the actual optimizer."""
        self.optimizer = None
        self._optim = None
        self.params = None
        self.lr = opt.learning_rate
        self.model_size = opt.model_size
        self.max_grad_norm = opt.max_grad_norm
        self.update_method = opt.update_method
        self.method = opt.optim
        self.zeror = opt.zeror_optim
        if (self.lr > 0):
            if ('noam' in self.update_method):
                # Noam schedule scales the base lr by model_size^-0.5.
                self.init_lr = ((self.model_size ** (- 0.5)) * self.lr)
            elif ('cosine' in self.update_method):
                print('* Using Cosine learning rate schedule')
                self.scheduler = None
                self.eta_min = 0.0
                self.max_step = (opt.max_step if hasattr(opt, 'max_step') else 33333)
                self.init_lr = self.lr
            else:
                self.init_lr = self.lr
            self.lr = self.init_lr
        self._step = 0
        self._first_step = 0
        if (self.update_method == 'noam_nowarmup'):
            # Start past the warmup so the decay branch is used immediately.
            self._step = opt.warmup_steps
        if (self.update_method == 'cosine'):
            self.min_lr = 0.0
        self.warmup_steps = opt.warmup_steps
        self.beta1 = opt.beta1
        self.beta2 = opt.beta2
        self.weight_decay = opt.weight_decay
        self.amsgrad = opt.amsgrad
        self.max_steps = opt.max_steps

    def step(self, scaler=None, grad_denom=None, warmup=False):
        """Advance one optimization step, updating the scheduled lr first.
        When *scaler* is given, stepping goes through torch.amp's GradScaler."""
        overflow = False
        if (not overflow):
            self._step += 1
            if (('noam' in self.update_method) or ('cosine' in self.update_method)):
                self.update_learning_rate()
            if (scaler is not None):
                result = scaler.step(self.optimizer)
            else:
                self.optimizer.step()
    'Reset the denom for normalization'

    def normalize_grad(self, denom=None):
        """Divide all gradients by *denom* (defaults to 1)."""
        if (denom is None):
            denom = 1
        normalize_gradients(self.params, denom)

    def update_learning_rate(self):
        """Recompute ``self.lr`` from ``self._step`` per the update method
        and push it into the optimizer's first param group."""
        if (self.lr < 0):
            return
        if (self.update_method in ['noam', 'noam_nowarmup', 'noam_half']):
            # Linear warmup, then inverse-sqrt decay.
            if (self._step <= self.warmup_steps):
                self.lr = ((self.init_lr * self._step) * (self.warmup_steps ** (- 1.5)))
            else:
                self.lr = (self.init_lr * (self._step ** (- 0.5)))
            if (self.update_method == 'noam_half'):
                self.lr = (self.lr / 2)
            # NOTE(review): this branch writes via self.optimizer while the
            # cosine branch writes via self._optim — confirm both reach the
            # same param group when zeror is enabled.
            self.optimizer.param_groups[0]['lr'] = self.lr
        elif (self.update_method in ['cosine']):
            # Standard cosine annealing between init_lr and min_lr.
            self.lr = (self.min_lr + ((0.5 * (self.init_lr - self.min_lr)) * (1 + math.cos(((self._step / self.max_step) * math.pi)))))
            self._optim.param_groups[0]['lr'] = self.lr
        elif (self.update_method in ['regular', 'basic', 'none']):
            pass

    def set_learning_rate(self, lr):
        """Force the learning rate to *lr*."""
        self._optim.param_groups[0]['lr'] = lr
        self.lr = lr

    def get_learning_rate(self):
        """Return the optimizer's current lr, falling back to ``self.lr``."""
        if (self._optim.param_groups[0]['lr'] is None):
            return self.lr
        else:
            return self._optim.param_groups[0]['lr']

    def reset(self):
        """Rewind the step counter (and any per-group 'step') to _first_step."""
        self._step = self._first_step
        for group in self._optim.param_groups:
            if ('step' in group):
                group['step'] = self._first_step

    def state_dict(self):
        """Checkpoint: optimizer state plus our scheduler step counter."""
        state_dict = self.optimizer.state_dict()
        state_dict['_step'] = self._step
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore a checkpoint produced by ``state_dict``."""
        self._step = state_dict['_step']
        state_dict['step'] = self._step
        self._first_step = self._step
        print(('* Loading from step %d ' % self._step))
        # Remove our private key before handing the dict to torch.
        state_dict.pop('_step', None)
        self.optimizer.load_state_dict(state_dict)

    def zero_grad(self, set_to_none=False):
        self.optimizer.zero_grad(set_to_none=set_to_none)

    def set_starting_step(self, step):
        """Override both the current and first step (used when resuming)."""
        self._step = step
        self._first_step = step
()
def repositories_hg_git(tmp_path: Path) -> tuple[(WorkDir, WorkDir)]:
    """Fixture providing a paired (mercurial, git) working directory: a git
    repo plus an hg-git clone of it, each pre-configured with add/commit
    command templates."""
    tmp_path = tmp_path.resolve()
    path_git = (tmp_path / 'repo_git')
    path_git.mkdir()
    wd = WorkDir(path_git)
    wd('git init')
    # NOTE(review): the email value appears to have been stripped from this
    # line — it should carry an address argument; confirm against upstream.
    wd('git config user.email ')
    wd('git config user.name "a test"')
    wd.add_command = 'git add .'
    wd.commit_command = 'git commit -m test-{reason}'
    # Clone the git repo through the hggit extension.
    path_hg = (tmp_path / 'repo_hg')
    run(['hg', 'clone', path_git, path_hg, '--config', 'extensions.hggit='], tmp_path)
    assert path_hg.exists()
    # Enable hggit permanently in the clone's hgrc.
    with open((path_hg / '.hg/hgrc'), 'a') as file:
        file.write('[extensions]\nhggit =\n')
    wd_hg = WorkDir(path_hg)
    wd_hg.add_command = 'hg add .'
    wd_hg.commit_command = 'hg commit -m test-{reason} -u test -d "0 0"'
    return (wd_hg, wd)
class QNTPServer():
def __init__(self, **kwargs):
self.auto_disabled = None
self.process = None
self.uuid = (((('honeypotslogger' + '_') + __class__.__name__) + '_') + str(uuid4())[:8])
self.config = kwargs.get('config', '')
if self.config:
self.logs = setup_logger(__class__.__name__, self.uuid, self.config)
set_local_vars(self, self.config)
else:
self.logs = setup_logger(__class__.__name__, self.uuid, None)
self.ip = (kwargs.get('ip', None) or (hasattr(self, 'ip') and self.ip) or '0.0.0.0')
self.port = ((kwargs.get('port', None) and int(kwargs.get('port', None))) or (hasattr(self, 'port') and self.port) or 123)
self.username = (kwargs.get('username', None) or (hasattr(self, 'username') and self.username) or 'test')
self.password = (kwargs.get('password', None) or (hasattr(self, 'password') and self.password) or 'test')
self.options = (kwargs.get('options', '') or (hasattr(self, 'options') and self.options) or getenv('HONEYPOTS_OPTIONS', '') or '')
disable_logger(1, tlog)
def ntp_server_main(self):
_q_s = self
class CustomDatagramProtocolProtocol(DatagramProtocol):
def system_time_to_ntp(self, time_):
i = (int((time_ + .0)) << 32)
f = int((((time_ + .0) - int((time_ + .0))) * ))
return (i, f)
def ntp_to_system_time(self, time_):
i = (float((time_ >> 32)) - .0)
f = (float((int(i) & )) / )
return (i, f)
def datagramReceived(self, data, addr):
version = 'UnKnown'
mode = 'UnKnown'
success = 'failed'
unpacked = None
_q_s.logs.info({'server': 'ntp_server', 'action': 'connection', 'src_ip': addr[0], 'src_port': addr[1]})
if (len(data) == calcsize('!B B B b I I I Q Q Q Q')):
version = ((data[0] >> 3) & 7)
mode = (data[0] & 7)
unpacked = unpack('!B B B b I I I Q Q Q Q', data)
if (unpacked is not None):
(i, f) = self.system_time_to_ntp(time())
response = pack('!B B B b I I I Q Q Q Q', (((0 << 6) | (3 << 3)) | 2), data[1], data[2], data[3], 0, 0, 0, 0, data[10], 0, (i + f))
self.transport.write(response, addr)
success = 'success'
_q_s.logs.info({'server': 'ntp_server', 'action': 'query', 'status': 'success', 'src_ip': addr[0], 'src_port': addr[1], 'dest_ip': _q_s.ip, 'dest_port': _q_s.port, 'data': {'version': version, 'mode': mode}})
self.transport.loseConnection()
reactor.listenUDP(port=self.port, protocol=CustomDatagramProtocolProtocol(), interface=self.ip)
reactor.run()
def run_server(self, process=False, auto=False):
    """Start the NTP honeypot.

    Foreground mode (``process`` false) blocks in the reactor loop.
    Otherwise a child process is spawned, optionally on an automatically
    chosen free port, and its health is verified before reporting success.
    """
    if not process:
        # Run in the current process; this call blocks.
        self.ntp_server_main()
        return None
    status = 'error'
    should_spawn = False
    if auto and not self.auto_disabled:
        candidate = get_free_port()
        if candidate > 0:
            self.port = candidate
            should_spawn = True
    elif self.close_port() and self.kill_server():
        should_spawn = True
    if should_spawn:
        self.process = Popen([
            'python3', path.realpath(__file__), '--custom',
            '--ip', str(self.ip), '--port', str(self.port),
            '--options', str(self.options), '--config', str(self.config),
            '--uuid', str(self.uuid),
        ])
        if self.process.poll() is None and check_if_server_is_running(self.uuid):
            status = 'success'
    self.logs.info({'server': 'ntp_server', 'action': 'process', 'status': status, 'src_ip': self.ip, 'src_port': self.port, 'dest_ip': self.ip, 'dest_port': self.port})
    if status == 'success':
        return True
    # Spawn failed (or child died immediately): clean up and report failure.
    self.kill_server()
    return False
def close_port(self):
    """Free the configured port via the shared wrapper; returns its status."""
    return close_port_wrapper('ntp_server', self.ip, self.port, self.logs)
def kill_server(self):
    """Terminate the spawned server process via the shared wrapper."""
    return kill_server_wrapper('ntp_server', self.uuid, self.process)
def test_server(self, ip=None, port=None, username=None, password=None):
    """Fire one SNTP client request at the server to verify it responds.

    Best-effort: every exception is suppressed.  ``username``/``password``
    are accepted only for interface parity with the other honeypot servers.
    """
    with suppress(Exception):
        from warnings import filterwarnings
        filterwarnings(action='ignore', module='.*socket.*')
        from socket import socket, AF_INET, SOCK_DGRAM
        _ip = ip or self.ip
        _port = port or self.port
        c = socket(AF_INET, SOCK_DGRAM)
        # Minimal mode-3 (client) request: LI=0, VN=3, mode=3 -> 0x1b.
        c.sendto(b'\x1b' + 47 * b'\x00', (_ip, _port))
        data, address = c.recvfrom(256)
        # Transmit timestamp is 32-bit word 10; subtracting the NTP->Unix
        # epoch offset (2208988800 s) yields system time.  The literal had
        # been stripped by extraction, leaving `- )`.
        ret_time = unpack('!12I', data)[10] - 2208988800
        c.close()
class Effect4812(BaseEffect):
    """Passive effect: boosts scan radar strength of every fitted ECM module."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        def is_ecm_module(mod):
            return mod.item.group.name == 'ECM'

        bonus = module.getModifiedItemAttr('ecmStrengthBonusPercent')
        fit.modules.filteredItemBoost(
            is_ecm_module,
            'scanRadarStrengthBonus',
            bonus,
            stackingPenalties=True,
            **kwargs,
        )
def test_expand_line():
    """A URL embedded mid-line is rewritten to its final redirect target."""
    redirected = '/redirected'
    link = '/resource'
    responses = (
        Response(link, 301, {'Location': redirected}),
        Response(redirected, 200, {}),
    )
    with start_server(*responses) as url:
        template = 'before %s after'
        result = expand_line(template % url(link), None)
        assert result == (template % url(redirected))
def _find_all_unkown_paths_per_recursive_node(node: _RecursivePathNode, include_directories: bool) -> Generator[(Path, None, None)]:
    """Depth-first walk yielding the paths of unknown files (and, when
    ``include_directories`` is set, unknown directories).

    An emitted node's children are not visited, matching the original
    short-circuit behaviour.  (Name keeps the historical 'unkown' spelling:
    callers reference it.)
    """
    emit_this_node = node.is_unknown and (node.is_file or (node.is_dir and include_directories))
    if emit_this_node:
        yield node.path
        return
    for child in node.sub_nodes:
        yield from _find_all_unkown_paths_per_recursive_node(child, include_directories)
class SAC(object):
    """Soft Actor-Critic agent with twin critics and a Polyak-averaged target.

    NOTE(review): `alpha` is fixed here — `temp_lr` is stored but no
    temperature optimizer is created, and `alpha_loss` in train() stays a
    zero placeholder.  `policy_noise`/`noise_clip` are stored but unused in
    this block.  Confirm against the rest of the file before relying on them.
    """

    def __init__(self, state_dim, action_dim, max_action, batch_size=256, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=1, actor_lr=0.0003, critic_lr=0.0003, temp_lr=0.0003, alpha=0.2, target_entropy=None, device=torch.device('cuda')):
        self.device = device
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.temp_lr = temp_lr
        self.discount = discount
        self.tau = tau
        self.alpha = alpha  # fixed entropy coefficient (not learned here)
        self.policy_noise = policy_noise
        self.noise_clip = noise_clip
        self.policy_freq = policy_freq
        # Standard SAC heuristic: target entropy defaults to -|A|.
        self.target_entropy = (target_entropy if target_entropy else (- action_dim))
        self.total_it = 0
        self.actor = Actor(state_dim, action_dim).to(self.device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr)
        self.critic = Critic(state_dim, action_dim).to(self.device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr)
        # Target critic is frozen; it is only updated via Polyak averaging.
        self.critic_target.eval()
        for p in self.critic_target.parameters():
            p.requires_grad = False

    def select_action(self, state, sample_noise=None):
        """Return the deterministic mean action, or a stochastic sample when
        ``sample_noise`` is given (the value itself is only used as a flag)."""
        with torch.no_grad():
            (action, _, mean) = self.actor(torch.Tensor(state).view(1, (- 1)).to(self.device))
            if (sample_noise is None):
                return mean.squeeze().cpu().numpy()
            else:
                return np.atleast_1d(action.squeeze().cpu().numpy())

    def train(self, train_tuple, state_filter=None):
        """One SAC update step on a batch: critic, then actor, then target nets.

        NOTE(review): `self.total_it` is never incremented, so the
        `total_it % policy_freq == 0` gate below is always true; harmless at
        the default policy_freq=1 but confirm for other frequencies.
        """
        (q1_loss, q2_loss, pi_loss, a_loss) = (0, 0, 0, 0)
        (state, action, next_state, reward, not_done) = train_tuple
        done = (1 - not_done)
        # --- Critic update: entropy-regularized Bellman target (no grad). ---
        with torch.no_grad():
            (next_action, logprobs, _) = self.actor(next_state)
            (q_t1, q_t2) = self.critic_target(next_state, next_action)
            q_target = (torch.min(q_t1, q_t2) - (self.alpha * logprobs))
            next_q = (reward + (((1.0 - done) * self.discount) * q_target))
        (q1, q2) = self.critic(state, action)
        q1_loss = F.mse_loss(q1, next_q)
        q2_loss = F.mse_loss(q2, next_q)
        q_loss = (q1_loss + q2_loss)
        self.critic_optimizer.zero_grad()
        q_loss.backward()
        self.critic_optimizer.step()
        # --- Actor update: maximize min-Q minus entropy penalty. ---
        (pi, logprobs, _) = self.actor(state)
        (q_1, q_2) = self.critic(state, pi)
        q_val = torch.min(q_1, q_2)
        policy_loss = ((self.alpha * logprobs) - q_val).mean()
        self.actor_optimizer.zero_grad()
        policy_loss.backward()
        self.actor_optimizer.step()
        # Placeholder: temperature is not learned in this implementation.
        alpha_loss = torch.tensor(0.0).to(self.device)
        # --- Delayed Polyak update of the target critic. ---
        if ((self.total_it % self.policy_freq) == 0):
            with torch.no_grad():
                for (target_param, param) in zip(self.critic_target.parameters(), self.critic.parameters()):
                    target_param.data.copy_(((target_param.data * (1.0 - self.tau)) + (param.data * self.tau)))

    def save(self, filename):
        """Persist critic/actor weights and optimizer states under ``filename`` prefixes."""
        torch.save(self.critic.state_dict(), (filename + '_critic'))
        torch.save(self.critic_optimizer.state_dict(), (filename + '_critic_optimizer'))
        torch.save(self.actor.state_dict(), (filename + '_actor'))
        torch.save(self.actor_optimizer.state_dict(), (filename + '_actor_optimizer'))

    def load(self, filename, load_optim=True):
        """Restore weights (and optionally optimizer states); target nets are
        rebuilt as deep copies of the loaded networks."""
        self.critic.load_state_dict(torch.load((filename + '_critic')))
        if load_optim:
            self.critic_optimizer.load_state_dict(torch.load((filename + '_critic_optimizer')))
        self.critic_target = copy.deepcopy(self.critic)
        self.actor.load_state_dict(torch.load((filename + '_actor')))
        if load_optim:
            self.actor_optimizer.load_state_dict(torch.load((filename + '_actor_optimizer')))
        self.actor_target = copy.deepcopy(self.actor)
class _PickleCore(_BaseCore):
    """Pickle-file-backed cache core.

    Entries live either in one pickle file per function (default) or, with
    ``separate_files``, in one file per cache key.  Cross-process
    coordination uses portalocker file locks plus a 'being_calculated' flag
    watched via a watchdog observer.
    """

    class CacheChangeHandler(PatternMatchingEventHandler):
        """Watchdog handler that waits for another process to finish computing
        an entry, capturing its value once 'being_calculated' clears."""

        def __init__(self, filename, core, key):
            PatternMatchingEventHandler.__init__(self, patterns=[('*' + filename)], ignore_patterns=None, ignore_directories=True, case_sensitive=False)
            self.core = core
            self.key = key
            self.observer = None  # set later via inject_observer()
            self.value = None  # result captured when the entry is ready

        def inject_observer(self, observer):
            # Kept so the event callbacks below can stop the observer.
            self.observer = observer

        def _check_calculation(self):
            # Force a reload; entry is None when the key vanished, which
            # makes the subscript raise TypeError below.
            entry = self.core.get_entry_by_key(self.key, True)[1]
            try:
                if (not entry['being_calculated']):
                    self.value = entry['value']
                    self.observer.stop()
            except TypeError:
                # Entry disappeared (e.g. cache cleared): stop waiting.
                self.value = None
                self.observer.stop()

        def on_created(self, event):
            self._check_calculation()

        def on_modified(self, event):
            self._check_calculation()

    def __init__(self, hash_func, pickle_reload, cache_dir, separate_files, wait_for_calc_timeout, default_params):
        """Resolve per-instance settings, each falling back to ``default_params``."""
        super().__init__(hash_func, default_params)
        self.cache = None
        if (pickle_reload is not None):
            self.reload = pickle_reload
        else:
            self.reload = self.default_params['pickle_reload']
        if (cache_dir is not None):
            self.cache_dir = os.path.expanduser(cache_dir)
        else:
            self.cache_dir = os.path.expanduser(self.default_params['cache_dir'])
        if (separate_files is not None):
            self.separate_files = separate_files
        else:
            self.separate_files = self.default_params['separate_files']
        self.wait_for_calc_timeout = wait_for_calc_timeout
        self.cache_fname = None  # lazily computed by _cache_fname()
        self.cache_fpath = None  # lazily computed by _cache_fpath()
        self.lock = threading.RLock()

    def _cache_fname(self):
        """Return (and memoize) the cache file name derived from the wrapped
        function's module and qualname; '<'/'>' are sanitized for lambdas."""
        if (self.cache_fname is None):
            self.cache_fname = '.{}.{}'.format(self.func.__module__, self.func.__qualname__)
            self.cache_fname = self.cache_fname.replace('<', '_').replace('>', '_')
        return self.cache_fname

    def _cache_fpath(self):
        """Return (and memoize) the absolute cache file path, creating the
        cache directory on first use."""
        if (self.cache_fpath is None):
            if (not os.path.exists(self.cache_dir)):
                os.makedirs(self.cache_dir)
            self.cache_fpath = os.path.abspath(os.path.join(os.path.realpath(self.cache_dir), self._cache_fname()))
        return self.cache_fpath

    def _reload_cache(self):
        """Re-read the single-file cache from disk under a file lock; a
        missing or empty file yields an empty dict."""
        with self.lock:
            fpath = self._cache_fpath()
            try:
                with portalocker.Lock(fpath, mode='rb') as cache_file:
                    try:
                        self.cache = pickle.load(cache_file)
                    except EOFError:
                        self.cache = {}
            except FileNotFoundError:
                self.cache = {}

    def _get_cache(self):
        """Return the in-memory cache dict, loading it from disk when unset."""
        with self.lock:
            if (not self.cache):
                self._reload_cache()
            return self.cache

    def _get_cache_by_key(self, key=None, hash=None):
        """Separate-files mode: load one entry file, addressed by key or hash.
        Returns None when the file is missing or empty."""
        fpath = self._cache_fpath()
        if (hash is None):
            fpath += f'_{key}'
        else:
            fpath += f'_{hash}'
        try:
            with portalocker.Lock(fpath, mode='rb') as cache_file:
                try:
                    res = pickle.load(cache_file)
                except EOFError:
                    res = None
        except FileNotFoundError:
            res = None
        return res

    def _clear_all_cache_files(self):
        """Separate-files mode: delete every per-key entry file."""
        fpath = self._cache_fpath()
        (path, name) = os.path.split(fpath)
        for subpath in os.listdir(path):
            if subpath.startswith(f'{name}_'):
                os.remove(os.path.join(path, subpath))

    def _clear_being_calculated_all_cache_files(self):
        """Separate-files mode: reset the 'being_calculated' flag in every
        per-key entry file (e.g. after a crash left stale flags)."""
        fpath = self._cache_fpath()
        (path, name) = os.path.split(fpath)
        for subpath in os.listdir(path):
            if subpath.startswith(name):
                # The hash is the suffix after the last underscore.
                entry = self._get_cache_by_key(hash=subpath.split('_')[(- 1)])
                if (entry is not None):
                    entry['being_calculated'] = False
                    self._save_cache(entry, hash=subpath.split('_')[(- 1)])

    def _save_cache(self, cache, key=None, hash=None):
        """Atomically (via file lock) pickle ``cache`` to disk.

        With ``key``/``hash`` the per-key file is written (separate-files
        mode); otherwise the whole dict is written and then reloaded so the
        in-memory copy matches disk.
        """
        with self.lock:
            self.cache = cache
            fpath = self._cache_fpath()
            if (key is not None):
                fpath += f'_{key}'
            elif (hash is not None):
                fpath += f'_{hash}'
            with portalocker.Lock(fpath, mode='wb') as cache_file:
                pickle.dump(cache, cache_file, protocol=4)
            if (key is None):
                self._reload_cache()

    def get_entry_by_key(self, key, reload=False):
        """Return (key, entry-or-None), honoring the reload policy."""
        with self.lock:
            if self.separate_files:
                return (key, self._get_cache_by_key(key))
            if (self.reload or reload):
                self._reload_cache()
            return (key, self._get_cache().get(key, None))

    def set_entry(self, key, func_res):
        """Store a freshly computed result under ``key``."""
        key_data = {'value': func_res, 'time': datetime.now(), 'stale': False, 'being_calculated': False}
        if self.separate_files:
            self._save_cache(key_data, key)
        else:
            with self.lock:
                cache = self._get_cache()
                cache[key] = key_data
                self._save_cache(cache)

    def mark_entry_being_calculated_separate_files(self, key):
        """Separate-files mode: write a placeholder entry flagged as in-progress."""
        self._save_cache({'value': None, 'time': datetime.now(), 'stale': False, 'being_calculated': True}, key=key)

    def mark_entry_not_calculated_separate_files(self, key):
        """Separate-files mode: clear the in-progress flag on an entry."""
        (_, entry) = self.get_entry_by_key(key)
        entry['being_calculated'] = False
        self._save_cache(entry, key=key)

    def mark_entry_being_calculated(self, key):
        """Flag ``key`` as in-progress, creating a placeholder entry if needed."""
        if self.separate_files:
            self.mark_entry_being_calculated_separate_files(key)
        else:
            with self.lock:
                cache = self._get_cache()
                try:
                    cache[key]['being_calculated'] = True
                except KeyError:
                    cache[key] = {'value': None, 'time': datetime.now(), 'stale': False, 'being_calculated': True}
                self._save_cache(cache)

    def mark_entry_not_calculated(self, key):
        """Clear the in-progress flag for ``key``.

        NOTE(review): in separate-files mode this also falls through to the
        dict-based path below (no early return/else) — likely unintended;
        confirm against upstream before relying on it.
        """
        if self.separate_files:
            self.mark_entry_not_calculated_separate_files(key)
        with self.lock:
            cache = self._get_cache()
            try:
                cache[key]['being_calculated'] = False
                self._save_cache(cache)
            except KeyError:
                pass

    def wait_on_entry_calc(self, key):
        """Block until another process finishes computing ``key``.

        Uses a watchdog observer on the cache file; polls once per second so
        check_calc_timeout() can abort long waits.
        """
        if self.separate_files:
            entry = self._get_cache_by_key(key)
            filename = f'{self._cache_fname()}_{key}'
        else:
            with self.lock:
                self._reload_cache()
                entry = self._get_cache()[key]
            filename = self._cache_fname()
        if (not entry['being_calculated']):
            return entry['value']
        event_handler = _PickleCore.CacheChangeHandler(filename=filename, core=self, key=key)
        observer = Observer()
        event_handler.inject_observer(observer)
        observer.schedule(event_handler, path=self.cache_dir, recursive=True)
        observer.start()
        time_spent = 0
        while observer.is_alive():
            observer.join(timeout=1.0)
            time_spent += 1
            self.check_calc_timeout(time_spent)
        return event_handler.value

    def clear_cache(self):
        """Drop every cached entry."""
        if self.separate_files:
            self._clear_all_cache_files()
        else:
            self._save_cache({})

    def clear_being_calculated(self):
        """Reset every entry's in-progress flag (recovery after a crash)."""
        if self.separate_files:
            self._clear_being_calculated_all_cache_files()
        else:
            with self.lock:
                cache = self._get_cache()
                for key in cache:
                    cache[key]['being_calculated'] = False
                self._save_cache(cache)
class CharVocab():
    """Character-level vocabulary mapping chars <-> integer ids.

    The special symbols from ``ss`` (BOS/EOS/PAD/UNK) are appended after the
    sorted character set, so their ids are stable for a given charset.

    The ``@classmethod``/``@property`` decorators below were stripped by the
    extraction of this file and are restored: the code itself proves they
    belong here (``char2id`` returns ``self.unk`` as an id; ``string2ids``
    prepends ``self.bos`` to a list of ints).
    """

    @classmethod
    def from_data(cls, data, *args, **kwargs):
        """Build a vocabulary from an iterable of strings (union of their chars)."""
        chars = set()
        for string in data:
            chars.update(string)
        return cls(chars, *args, **kwargs)

    def __init__(self, chars, ss=SS):
        # Special symbols must not collide with ordinary characters.
        if (ss.bos in chars) or (ss.eos in chars) or (ss.pad in chars) or (ss.unk in chars):
            raise ValueError('SS in chars')
        all_syms = sorted(list(chars)) + [ss.bos, ss.eos, ss.pad, ss.unk]
        self.ss = ss
        self.c2i = {c: i for i, c in enumerate(all_syms)}
        self.i2c = {i: c for i, c in enumerate(all_syms)}

    def __len__(self):
        return len(self.c2i)

    @property
    def bos(self):
        """Id of the begin-of-sequence symbol."""
        return self.c2i[self.ss.bos]

    @property
    def eos(self):
        """Id of the end-of-sequence symbol."""
        return self.c2i[self.ss.eos]

    @property
    def pad(self):
        """Id of the padding symbol."""
        return self.c2i[self.ss.pad]

    @property
    def unk(self):
        """Id of the unknown-character symbol."""
        return self.c2i[self.ss.unk]

    def char2id(self, char):
        """Return the id for ``char``, falling back to the UNK id."""
        if char not in self.c2i:
            return self.unk
        return self.c2i[char]

    def id2char(self, id):
        """Return the char for ``id``, falling back to the UNK symbol."""
        if id not in self.i2c:
            return self.ss.unk
        return self.i2c[id]

    def string2ids(self, string, add_bos=False, add_eos=False):
        """Encode ``string`` as a list of ids, optionally framed by BOS/EOS."""
        ids = [self.char2id(c) for c in string]
        if add_bos:
            ids = [self.bos] + ids
        if add_eos:
            ids = ids + [self.eos]
        return ids

    def ids2string(self, ids, rem_bos=True, rem_eos=True):
        """Decode ids back to a string, optionally stripping BOS/EOS framing."""
        if len(ids) == 0:
            return ''
        if rem_bos and (ids[0] == self.bos):
            ids = ids[1:]
        if rem_eos and (ids[-1] == self.eos):
            ids = ids[:-1]
        string = ''.join([self.id2char(id) for id in ids])
        return string
def sz_operator(n_spatial_orbitals: int) -> FermionOperator:
    """Return the Sz spin operator for ``n_spatial_orbitals`` spatial orbitals.

    Built as (1/2) * sum_i (n_{i,up} - n_{i,down}) over number operators on
    the 2 * n_spatial_orbitals spin-orbitals.

    Raises:
        TypeError: if ``n_spatial_orbitals`` is not an int.
    """
    if not isinstance(n_spatial_orbitals, int):
        raise TypeError('n_orbitals must be specified as an integer')
    n_spinless_orbitals = 2 * n_spatial_orbitals
    total = FermionOperator()
    for orbital in range(n_spatial_orbitals):
        total += number_operator(n_spinless_orbitals, up_index(orbital), 0.5)
        total += number_operator(n_spinless_orbitals, down_index(orbital), -0.5)
    return total
class FileType(DictMixin):
    """An abstract object wrapping tags and audio stream information.

    Subclasses implement ``load``/``add_tags``/``score``; dict-style access
    is delegated to ``self.tags``.

    The decorators below were stripped by extraction (the original left bare
    ``()`` / ``(writable=True)`` residue lines, the latter being syntax
    errors).  ``@property`` on ``mime`` is proven by ``self.mime[0]`` in
    ``pprint``; ``@staticmethod`` on ``score`` by its self-less signature.
    NOTE(review): ``@loadfile`` is inferred from mutagen upstream — confirm
    the import exists at file top.
    """

    __module__ = 'mutagen'

    # Stream information object (set by load()).
    info = None
    # Metadata tags, or None when absent / not yet added.
    tags = None
    filename = None
    _mimes = ['application/octet-stream']

    def __init__(self, *args, **kwargs):
        if not args and not kwargs:
            warnings.warn('FileType constructor requires a filename', DeprecationWarning)
        else:
            self.load(*args, **kwargs)

    @loadfile()
    def load(self, filething, *args, **kwargs):
        """Populate info/tags from a file; implemented by subclasses."""
        raise NotImplementedError

    def __getitem__(self, key):
        if self.tags is None:
            raise KeyError(key)
        else:
            return self.tags[key]

    def __setitem__(self, key, value):
        # Lazily create a tag container on first write.
        if self.tags is None:
            self.add_tags()
        self.tags[key] = value

    def __delitem__(self, key):
        if self.tags is None:
            raise KeyError(key)
        else:
            del self.tags[key]

    def keys(self) -> list:
        """Return the tag keys, or an empty list when there are no tags."""
        if self.tags is None:
            return []
        else:
            return self.tags.keys()

    @loadfile(writable=True)
    def delete(self, filething=None):
        """Remove tags from the file, if any are present."""
        if self.tags is not None:
            return self.tags.delete(filething)

    @loadfile(writable=True)
    def save(self, filething=None, **kwargs):
        """Save metadata changes back to the file, if any tags exist."""
        if self.tags is not None:
            return self.tags.save(filething, **kwargs)

    def pprint(self) -> str:
        """Return a human-readable dump of stream info and tags."""
        assert self.info is not None
        stream = '%s (%s)' % (self.info.pprint(), self.mime[0])
        try:
            tags = self.tags.pprint()
        except AttributeError:
            # No tags: show stream info alone.
            return stream
        else:
            return stream + ((tags and ('\n' + tags)) or '')

    def add_tags(self) -> None:
        """Create an empty tag container; implemented by subclasses."""
        raise NotImplementedError

    @property
    def mime(self) -> List[str]:
        """MIME types this file may have, most specific first (walks the MRO)."""
        mimes = []
        for Kind in type(self).__mro__:
            for mime in getattr(Kind, '_mimes', []):
                if mime not in mimes:
                    mimes.append(mime)
        return mimes

    @staticmethod
    def score(filename, fileobj, header) -> int:
        """Return a likelihood score that this type can read the file;
        implemented by subclasses."""
        raise NotImplementedError
class TopologicalCircuit():
    """Circuit of logical (topological) qubits layered over a Qiskit circuit.

    Wraps a TopologicalRegister and forwards logical gates to the individual
    TopologicalQubit objects, managing auxiliary classical/quantum registers
    as needed (notably for the lattice-surgery-style CX below).
    """

    def __init__(self, treg: TopologicalRegister):
        self.treg = treg
        # Named registers added lazily by add_qreg/add_creg.
        self.qreg: Dict[(str, QuantumRegister)] = {}
        self.creg: Dict[(str, ClassicalRegister)] = {}
        self.circ = treg.circ

    def add_creg(self, size=None, name=None, bits=None, override: bool=False) -> None:
        """Add a named classical register, no-op when it already exists
        unless ``override`` is set."""
        if (name in self.creg) and (not override):
            return
        creg = ClassicalRegister(size=size, name=name, bits=bits)
        self.creg[name] = creg
        self.circ.add_register(creg)

    def add_qreg(self, size=None, name=None, bits=None, override: bool=False) -> None:
        """Add a named quantum register, no-op when it already exists
        unless ``override`` is set."""
        if (name in self.qreg) and (not override):
            return
        qreg = QuantumRegister(size=size, name=name, bits=bits)
        self.qreg[name] = qreg
        self.circ.add_register(qreg)

    def _get_index(self, tqubit: Union[(TopologicalQubit, int)]) -> TopologicalQubit:
        """Resolve an int index into the register's TopologicalQubit;
        pass TopologicalQubit instances through unchanged."""
        if isinstance(tqubit, int):
            tqubit = cast(int, tqubit)
            tqubit = self.treg[tqubit]
        tqubit = cast(TopologicalQubit, tqubit)
        return tqubit

    def stabilize(self, tqubit: Union[(TopologicalQubit, int)]):
        """Run a stabilizer measurement round on the logical qubit."""
        tqubit = self._get_index(tqubit)
        tqubit.stabilize()

    def id(self, tqubit: Union[(TopologicalQubit, int)]) -> None:
        """Logical identity on the qubit."""
        tqubit = self._get_index(tqubit)
        tqubit.id()

    def id_data(self, tqubit: Union[(TopologicalQubit, int)]) -> None:
        """Identity applied to the data qubits only."""
        tqubit = self._get_index(tqubit)
        tqubit.id_data()

    def reset_x(self, tqubit: Union[(TopologicalQubit, int)]) -> None:
        """Reset the logical qubit into the X basis (|+>)."""
        tqubit = self._get_index(tqubit)
        tqubit.reset_x()

    def reset_z(self, tqubit: Union[(TopologicalQubit, int)]):
        """Reset the logical qubit into the Z basis (|0>)."""
        tqubit = self._get_index(tqubit)
        tqubit.reset_z()

    def x(self, tqubit: Union[(TopologicalQubit, int)]):
        """Logical X gate."""
        tqubit = self._get_index(tqubit)
        tqubit.x()

    def z(self, tqubit: Union[(TopologicalQubit, int)]):
        """Logical Z gate."""
        tqubit = self._get_index(tqubit)
        tqubit.z()

    def cx(self, control: Union[(TopologicalQubit, int)], target: Union[(TopologicalQubit, int)], ancilla_ctype: Optional[str]=None, ancilla_params: Optional[Dict[(str, int)]]=None):
        """Logical CX between two topological qubits via an ancilla qubit.

        Implements the measurement-based CNOT: an ancilla prepared in |+>,
        two joint parities read through a physical readout qubit, a final
        Z readout of the ancilla, and classically-controlled corrections.
        ``ancilla_ctype``/``ancilla_params`` default to the control qubit's
        type and lattice parameters; supplying exactly one of them is an
        error.
        """
        if (ancilla_ctype is not None) ^ (ancilla_params is not None):
            raise ValueError('Please provide both a ctype and params or neither to use the control qubit ctype and params by default.')
        elif ancilla_ctype is None:
            # Default the ancilla to mirror the control qubit's code type.
            control_q = self._get_index(control)
            ancilla_ctype = type(control_q).__name__.replace('Qubit', '')
            ancilla_params = control_q.lattice.params
        control = self._get_index(control)
        target = self._get_index(target)
        if 'ancilla' not in self.treg.tqubits:
            self.treg.add_tqubit('ancilla', ancilla_ctype, ancilla_params)
        ancilla = cast(TopologicalQubit, list(self.treg['ancilla'].values())[(- 1)])
        # Three classical bits hold the parity/readout outcomes m1, m2, m3.
        self.add_creg(1, 'm1')
        self.add_creg(1, 'm2')
        self.add_creg(1, 'm3')
        ancilla.reset_x()
        # Single physical qubit used to extract joint parities.
        self.add_qreg(1, 'cnot_readout')
        readout = self.qreg['cnot_readout'][0]
        # m1: ZZ parity of control and ancilla.
        self.circ.reset(readout)
        control.cx(target=readout)
        ancilla.cx(target=readout)
        self.circ.measure(readout, self.creg['m1'][0])
        # m2: XX parity of target and ancilla (readout in the X basis).
        self.circ.reset(readout)
        self.circ.h(readout)
        target.cx(control=readout)
        ancilla.cx(control=readout)
        self.circ.h(readout)
        self.circ.measure(readout, self.creg['m2'][0])
        # m3: final Z readout of the ancilla.
        ancilla.readout_z(readout_creg=self.creg['m3'])
        # Pauli corrections conditioned on the measurement outcomes.
        control.z_c_if(self.creg['m2'], 1)
        target.x_c_if(self.creg['m1'], 1)
        target.x_c_if(self.creg['m3'], 1)

    def measure_x(self, tqubit: Union[(TopologicalQubit, int)], readout_creg: Optional[ClassicalRegister]=None):
        """Logical X-basis readout of the qubit."""
        tqubit = self._get_index(tqubit)
        tqubit.readout_x(readout_creg=readout_creg)

    def measure_z(self, tqubit: Union[(TopologicalQubit, int)], readout_creg: Optional[ClassicalRegister]=None):
        """Logical Z-basis readout of the qubit."""
        tqubit = self._get_index(tqubit)
        tqubit.readout_z(readout_creg=readout_creg)

    def measure_lattice_x(self, tqubit: Union[(TopologicalQubit, int)]):
        """Measure every lattice qubit in the X basis."""
        tqubit = self._get_index(tqubit)
        tqubit.lattice_readout_x()

    def measure_lattice_z(self, tqubit: Union[(TopologicalQubit, int)]):
        """Measure every lattice qubit in the Z basis."""
        tqubit = self._get_index(tqubit)
        tqubit.lattice_readout_z()

    def parse_readout(self, tqubit: Union[(TopologicalQubit, int)], readout_string: str, readout_type: Optional[str]='Z') -> Tuple[(int, Dict[(str, List[Any])])]:
        """Delegate decoding of a measurement bitstring to the qubit."""
        tqubit = self._get_index(tqubit)
        return tqubit.parse_readout(readout_string, readout_type)

    def draw(self, **kwargs):
        """Draw the underlying Qiskit circuit."""
        return self.circ.draw(**kwargs)

    def __str__(self):
        return self.circ.__str__()
def find_version(*file_paths):
    """Extract ``__version__`` from a file under PROJECTDIR, or None on any failure."""
    version_pattern = '^__version__ = version = [\'\\"]([^\'\\"]*)[\'\\"]'
    try:
        target = os.path.join(PROJECTDIR, *file_paths)
        with io.open(target, encoding='utf8') as fp:
            contents = fp.read()
        match = re.search(version_pattern, contents, re.M)
        # A non-match raises AttributeError here, caught below -> None.
        return match.group(1)
    except Exception:
        # Best-effort lookup: missing file or no version line yields None.
        return None
def test_greater_than():
    """GreaterThan composes into a bloq and reports a T-complexity without error."""
    builder = BloqBuilder()
    nbits = 5
    reg_a = builder.add_register('a', nbits)
    reg_b = builder.add_register('b', nbits)
    reg_res = builder.add_register('result', 1)
    reg_a, reg_b, reg_res = builder.add(GreaterThan(nbits, nbits), a=reg_a, b=reg_b, target=reg_res)
    composite = builder.finalize(a=reg_a, b=reg_b, result=reg_res)
    composite.t_complexity()
# Extraction stripped the `@pytest.mark` prefixes, leaving bare
# `.parametrize(...)` lines (syntax errors); restored here — the use of
# pytest.param / pytest.mark.flaky in the argument list confirms pytest.mark.
@pytest.mark.parametrize('file_name, elem_id, source, input_text', [('textarea.html', 'qute-textarea', 'clipboard', 'qutebrowser'), ('textarea.html', 'qute-textarea', 'keypress', 'superqutebrowser'), ('input.html', 'qute-input', 'clipboard', 'amazingqutebrowser'), ('input.html', 'qute-input', 'keypress', 'awesomequtebrowser'), pytest.param('autofocus.html', 'qute-input-autofocus', 'keypress', 'cutebrowser', marks=pytest.mark.flaky)])
@pytest.mark.parametrize('zoom', [100, 125, 250])
def test_insert_mode(file_name, elem_id, source, input_text, zoom, quteproc, request):
    """Entering insert mode and typing/pasting reaches the focused element
    at several zoom levels."""
    url_path = 'data/insert_mode_settings/html/{}'.format(file_name)
    quteproc.open_path(url_path)
    quteproc.send_cmd(':zoom {}'.format(zoom))
    quteproc.send_cmd(':click-element --force-event id {}'.format(elem_id))
    quteproc.wait_for(message='Entering mode KeyMode.insert (reason: *)')
    quteproc.send_cmd(':debug-set-fake-clipboard')
    if source == 'keypress':
        quteproc.press_keys(input_text)
    elif source == 'clipboard':
        quteproc.send_cmd(':debug-set-fake-clipboard "{}"'.format(input_text))
        quteproc.send_cmd(':insert-text {clipboard}')
    else:
        raise ValueError('Invalid source {!r}'.format(source))
    quteproc.wait_for_js('contents: {}'.format(input_text))
    quteproc.send_cmd(':mode-leave')
# Extraction stripped the decorator name, leaving `(frozen=True, eq=False)`
# (a syntax error); the use of dataclasses.field below shows this is a
# dataclass — restored accordingly.
@dataclasses.dataclass(frozen=True, eq=False)
class SubscribingAtomicList(AtomicList):
    """AtomicList whose filters run only for the events they subscribed to."""

    # Maps each event to the ids of the filters subscribed to it.
    subscriptions: defaultdict[(Event, list[int])] = dataclasses.field(default_factory=(lambda: defaultdict(list)))

    def subscribe(self, filter_: UniqueFilter, *events: Event) -> None:
        """Register ``filter_`` for ``events``; idempotent per event.

        Fix: the membership test now compares ids — the list stores ints,
        so the original ``filter_ not in ...`` was always true and appended
        duplicates on repeated subscription.
        """
        for event in events:
            if filter_.id not in self.subscriptions[event]:
                self.subscriptions[event].append(filter_.id)

    async def filter_list_result(self, ctx: FilterContext) -> list[Filter]:
        """Run only the filters subscribed to ``ctx.event`` and collect results."""
        event_filters = [self.filters[id_] for id_ in self.subscriptions[ctx.event]]
        return await self._create_filter_list_result(ctx, self.defaults, event_filters)
def first_run(save_path):
    """Return the saved epoch recorded in ``first_run.txt`` under ``save_path``.

    On the first call the marker file is created empty and '' is returned;
    later calls return its contents.  An empty marker triggers the warning
    (the original guarded this with ``saved_epoch is None``, which is dead
    code since file.read() never returns None) and also returns ''.
    """
    txt_file = os.path.join(save_path, 'first_run.txt')
    if not os.path.exists(txt_file):
        # First run: create an empty marker file.
        open(txt_file, 'w').close()
        return ''
    # Context manager closes the handle (the original leaked it).
    with open(txt_file) as marker:
        saved_epoch = marker.read()
    if not saved_epoch:
        print('You forgot to delete [first run file]')
        return ''
    return saved_epoch
class F12_Partition(F11_Partition):
    """Fedora 12 kickstart ``part``/``partition`` command handler.

    Extends the F11 parser with the LUKS key-escrow options; the help texts
    below are emitted verbatim in generated documentation, so they are kept
    byte-for-byte.
    """

    removedKeywords = F11_Partition.removedKeywords
    removedAttrs = F11_Partition.removedAttrs

    def _getParser(self):
        # Start from the F11 argument parser and add the F12-only options.
        op = F11_Partition._getParser(self)
        op.add_argument('--escrowcert', metavar='<url>', version=F12, help='\n                            Load an X.509 certificate from ``<url>``. Store the\n                            data encryption key of this partition, encrypted using\n                            the certificate, as a file in ``/root``. Only relevant\n                            if ``--encrypted`` is specified as well.')
        op.add_argument('--backuppassphrase', action='store_true', version=F12, default=False, help='\n                            Only relevant if ``--escrowcert`` is specified as well.\n                            In addition to storing the data encryption key, generate\n                            a random passphrase and add it to this partition. Then\n                            store the passphrase, encrypted using the certificate\n                            specified by ``--escrowcert``, as a file in ``/root``.\n                            If more than one LUKS volume uses ``--backuppassphrase``,\n                            the same passphrase will be used for all such volumes.\n                            ')
        return op
class Job(CPIBase, Async):
    """CPI (adaptor-interface) base for job implementations.

    Every method below is an intentional no-op stub: concrete adaptors
    override the subset they support, and unimplemented operations silently
    do nothing at this layer.  Each sync method has an ``*_async`` twin per
    the Async mixin convention; ``ttype`` selects the call's task type.
    """

    def __init__(self, api, adaptor):
        # Route construction through the CPI base (binds api and adaptor).
        _cpi_base = super(Job, self)
        _cpi_base.__init__(api, adaptor)

    # --- lifecycle / identity stubs ---
    def init_instance(self, info, ttype):
        pass

    def init_instance_async(self, info, ttype):
        pass

    def get_id(self, ttype):
        pass

    def get_id_async(self, ttype):
        pass

    def get_name(self, ttype):
        pass

    def get_name_async(self, ttype):
        pass

    def get_description(self, ttype):
        pass

    def get_description_async(self, ttype):
        pass

    # --- standard I/O stream stubs ---
    def get_stdin(self, ttype):
        pass

    def get_stdin_async(self, ttype):
        pass

    def get_stdout(self, ttype):
        pass

    def get_stdout_async(self, ttype):
        pass

    def get_stderr(self, ttype):
        pass

    def get_stderr_async(self, ttype):
        pass

    def get_log(self, ttype):
        pass

    def get_log_async(self, ttype):
        pass

    # --- job control stubs ---
    def suspend(self, ttype):
        pass

    def suspend_async(self, ttype):
        pass

    def resume(self, ttype):
        pass

    def resume_async(self, ttype):
        pass

    def checkpoint(self, ttype):
        pass

    def checkpoint_async(self, ttype):
        pass

    def migrate(self, jd, ttype):
        pass

    def migrate_async(self, jd, ttype):
        pass

    def signal(self, signum, ttype):
        pass

    def signal_async(self, signum, ttype):
        pass

    def run(self, ttype):
        pass

    def run_async(self, ttype):
        pass

    def cancel(self, timeout, ttype):
        pass

    def cancel_async(self, timeout, ttype):
        pass

    def wait(self, timeout, ttype):
        pass

    def wait_async(self, timeout, ttype):
        pass

    # --- state / result inspection stubs ---
    def get_state(self, ttype):
        pass

    def get_state_async(self, ttype):
        pass

    def get_result(self, ttype):
        pass

    def get_result_async(self, ttype):
        pass

    def get_object(self, ttype):
        pass

    def get_object_async(self, ttype):
        pass

    def re_raise(self, ttype):
        pass

    def re_raise_async(self, ttype):
        pass

    def get_exit_code(self, ttype):
        pass

    def get_exit_code_async(self, ttype):
        pass

    # --- timing / placement metadata stubs ---
    def get_created(self, ttype):
        pass

    def get_created_async(self, ttype):
        pass

    def get_started(self, ttype):
        pass

    def get_started_async(self, ttype):
        pass

    def get_finished(self, ttype):
        pass

    def get_finished_async(self, ttype):
        pass

    def get_execution_hosts(self, ttype):
        pass

    def get_execution_hosts_async(self, ttype):
        pass
class PoolFormerPreTrainedModel(PreTrainedModel):
    """Base class wiring PoolFormer models into the HF pretrained-model
    machinery: config class, prefix, and weight initialization."""

    config_class = PoolFormerConfig
    base_model_prefix = 'poolformer'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize a single submodule: normal init for linear/conv weights
        (zero bias), unit weight / zero bias for LayerNorm."""
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        """Enable/disable gradient checkpointing on PoolFormer encoders."""
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
def main():
    """Entry point: parse HF arguments, pick the dataset by CSV path, and
    fine-tune the QA model."""
    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    print('Setup Data')
    train_csv = data_args.Train_csv_path
    # Dataset selection is keyed off the training CSV path.
    if 'VQA_RAD' in train_csv:
        training_args.run_name += '_VQA_RAD'
        training_args.output_dir += '/VQA_RAD'
        Train_dataset = VQA_RAD_Dataset(train_csv, data_args.tokenizer_path, text_type='blank')
        Eval_dataset = VQA_RAD_Dataset(data_args.Eval_csv_path, data_args.tokenizer_path, text_type='blank')
    if 'Slake1.0' in train_csv:
        training_args.run_name += '_Slake'
        training_args.output_dir += '/Slake'
        Train_dataset = Slake_Dataset(train_csv, data_args.tokenizer_path, text_type='blank')
        Eval_dataset = Slake_Dataset(data_args.Eval_csv_path, data_args.tokenizer_path, text_type='blank')
    print('Setup Model')
    ckp = model_args.ckp + '/pytorch_model.bin'
    print(ckp)
    model = QA_model(model_args)
    print('Start training')
    trainer = Trainer(model=model, train_dataset=Train_dataset, eval_dataset=Eval_dataset, args=training_args)
    trainer.train()
    trainer.save_state()
def generateVideo(df, df_complete, numFrame):
    """Render an annotated 'virtual umpire' video overlaying scores, hit-points
    and ball positions onto the match footage.

    Marks a +/-5 frame window around each detected hit-point and rally end,
    aligns real scores from the rally spreadsheet, then writes the overlay
    video frame by frame (up to ``numFrame`` frames).

    NOTE(review): input/output paths and the spreadsheet columns are
    hard-coded below; frame numbers in ``df`` are assumed 1-based (hence the
    ubiquitous ``x - 1`` indexing) — confirm against the data producer.
    """
    # --- mark a +/-coverage window around every hit-point frame ---
    hitpointFrame = df[(df.hitpoint == 1)].reset_index(drop=True)['Frame']
    actual = [0 for _ in range(len(df_complete))]
    marked = [0 for _ in range(len(df_complete))]
    coverage = 5
    for x in hitpointFrame:
        actual[(x - 1)] = 1
        if ((x > coverage) and (x < (len(df_complete) - coverage))):
            # Interior frame: symmetric window.
            marked[(x - 1)] = 1
            for i in range(1, (coverage + 1)):
                marked[((x - 1) - i)] = 1
                marked[((x - 1) + i)] = 1
        elif (x < coverage):
            # Near the start: clip the left side of the window.
            marked[(x - 1)] = 1
            for i in range(1, x):
                marked[((x - 1) - i)] = 1
            for i in range(1, (coverage + 1)):
                marked[((x - 1) + i)] = 1
        elif (x >= (len(df_complete) - coverage)):
            # Near the end: only the left side fits.
            marked[(x - 1)] = 1
            for i in range(1, (coverage + 1)):
                marked[((x - 1) - i)] = 1
    df_complete['marked'] = marked
    df_complete['actual'] = actual
    markedFrame = df_complete[(df_complete.marked == 1)].reset_index(drop=True)
    # --- same windowing for rally-end frames (reuses 'marked') ---
    endFrame = df[(df.end == 1)].reset_index(drop=True)['Frame']
    end = [0 for _ in range(len(df_complete))]
    marked = [0 for _ in range(len(df_complete))]
    coverage = 5
    for x in endFrame:
        end[(x - 1)] = 1
        if ((x > coverage) and (x < (len(df_complete) - coverage))):
            marked[(x - 1)] = 1
            for i in range(1, (coverage + 1)):
                marked[((x - 1) - i)] = 1
                marked[((x - 1) + i)] = 1
        elif (x < coverage):
            marked[(x - 1)] = 1
            for i in range(1, x):
                marked[((x - 1) - i)] = 1
            for i in range(1, (coverage + 1)):
                marked[((x - 1) + i)] = 1
        elif (x >= (len(df_complete) - coverage)):
            marked[(x - 1)] = 1
            for i in range(1, (coverage + 1)):
                marked[((x - 1) - i)] = 1
    # Replace the hit-point window column with the end-frame window.
    df_complete = df_complete.drop(columns=['marked'], axis=1)
    df_complete['marked'] = marked
    df_complete['end'] = end
    markedEndFrame = df_complete[(df_complete.marked == 1)].reset_index(drop=True)
    # --- external data: predicted ball positions and ground-truth rallies ---
    position = pd.read_csv('../Data/AccuracyResult/record_circle_ballsize_predict_heatmap_new_on_new.csv')
    rally = pd.read_excel('../Data/TrainTest/clip_info_18IND_TC.xlsx')
    rally = rally[['frame_num', 'getpoint_player']]
    # --- accumulate the "real" (ground-truth) scores per frame ---
    realscoreA = [0 for _ in range(len(df_complete))]
    realscoreB = [0 for _ in range(len(df_complete))]
    index = 0
    realscoreAtmp = 0
    realscoreBtmp = 0
    cntA = 0
    cntB = 0
    for i in range(len(df_complete)):
        # Score reset after the 25th point (set boundary in this match).
        if (index == 25):
            realscoreAtmp = 0
            realscoreBtmp = 0
        if (df_complete['Frame'][i] in list(rally['frame_num'])):
            idx = rally['frame_num'][(rally['frame_num'] == df_complete['Frame'][i])].index[0]
            if (rally['getpoint_player'][idx] == 'A'):
                cntA += 1
                realscoreAtmp += 1
                index += 1
            if (rally['getpoint_player'][idx] == 'B'):
                cntB += 1
                realscoreBtmp += 1
                index += 1
        realscoreA[i] = realscoreAtmp
        realscoreB[i] = realscoreBtmp
    df_complete['realscoreA'] = realscoreA
    df_complete['realscoreB'] = realscoreB
    # --- open input footage and mirror its geometry/fps on the writer ---
    input_video_path = '../Data/PredictVideo/TAI Tzu Ying vs CHEN Yufei 2018 Indonesia Open Final'
    video = cv2.VideoCapture((input_video_path + '.mp4'))
    fps = int(video.get(cv2.CAP_PROP_FPS))
    output_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    output_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print('Frame dimension')
    print(('Width = %d' % output_width))
    print(('Height = %d' % output_height))
    output_video_path = (input_video_path + '_virtual_umpire.mp4')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    output_video = cv2.VideoWriter(output_video_path, fourcc, fps, (output_width, output_height))
    count = 0
    # --- frame loop: draw score boxes, hit-point markers and ball dots ---
    while video.isOpened():
        if ((count % 500) == 0):
            print('status : ', count)
        count += 1
        if (count > numFrame):
            break
        (ret, frame) = video.read()
        if (ret == True):
            # Static scoreboard frames (predicted on the right, real on the left).
            cv2.putText(frame, 'Our Score', (800, 30), cv2.FONT_HERSHEY_TRIPLEX, 1, (30, 144, 25), 1)
            cv2.rectangle(frame, (800, 50), (1000, 130), (255, 255, 255), 3)
            cv2.putText(frame, 'Real Score', (100, 30), cv2.FONT_HERSHEY_TRIPLEX, 1, (138, 43, 226), 1)
            cv2.rectangle(frame, (100, 50), (300, 130), (255, 255, 255), 3)
            tmp = df['Frame']
            if (count in list(df['Frame'])):
                idx = tmp[(tmp == count)].index[0]
                cv2.putText(frame, str(df['scoreA'][idx]), (830, 110), cv2.FONT_HERSHEY_TRIPLEX, 2, (30, 144, 25), 1, cv2.LINE_AA)
                cv2.putText(frame, str(df['scoreB'][idx]), (930, 110), cv2.FONT_HERSHEY_TRIPLEX, 2, (138, 43, 226), 1, cv2.LINE_AA)
            if (count in list(df_complete['Frame'])):
                idx = df_complete['Frame'][(df_complete['Frame'] == count)].index[0]
                cv2.putText(frame, str(df_complete['realscoreA'][idx]), (130, 110), cv2.FONT_HERSHEY_TRIPLEX, 2, (30, 144, 25), 1, cv2.LINE_AA)
                cv2.putText(frame, str(df_complete['realscoreB'][idx]), (230, 110), cv2.FONT_HERSHEY_TRIPLEX, 2, (138, 43, 226), 1, cv2.LINE_AA)
            if (count in list(markedFrame['Frame'])):
                # Within a hit-point window: flash label and circle the hit.
                cv2.putText(frame, 'Hit-point', (100, 120), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 1, cv2.LINE_AA)
                tmp = markedFrame['Frame']
                idx = tmp[(tmp == count)].index[0]
                cv2.circle(frame, (markedFrame['X'][idx], markedFrame['Y'][idx]), 5, (0, 0, 255), 2)
            if (count in list(position['Frame'])):
                # Predicted ball location (note the leading-space column names).
                tmp = position['Frame']
                idx = tmp[(tmp == count)].index[0]
                if (position[' visibility'][idx] == 1):
                    cv2.circle(frame, (int(position[' x'][idx]), int(position[' y'][idx])), 4, (0, 255, 255), 2)
            output_video.write(frame)
        else:
            break
    video.release()
    output_video.release()
    cv2.destroyAllWindows()
# Extraction stripped the `@pytest.mark` prefix, leaving a bare
# `.parametrize(...)` line (a syntax error); restored here.
@pytest.mark.parametrize('device', get_available_devices())
def test_memmap_same_device_as_tensor(device):
    """A MemmapTensor inherits its source tensor's device, and mixed-device
    arithmetic raises until it is moved."""
    t = torch.tensor([1], device=device)
    m = MemmapTensor.from_tensor(t)
    assert t.device == torch.device(device)
    assert m.device == torch.device(device)
    for other_device in get_available_devices():
        if other_device != device:
            with pytest.raises(RuntimeError, match=('Expected all tensors to be on the same device, ' + 'but found at least two devices')):
                assert torch.all(((m + torch.ones([3, 4], device=other_device)) == 1))
            m = m.to(other_device)
            assert m.device == torch.device(other_device)
class uvm_reg_field(uvm_object):
    """Model of a single field within a register (UVM RAL style).

    A field is described by its bit width, LSB position, access policy,
    volatility flag and reset value.  All of these start unset and are
    filled in by :meth:`configure`, which also registers the field with
    its parent register.
    """

    def __init__(self, name='uvm_reg_field'):
        super().__init__(name)
        # All field properties are unset until configure() is called.
        self._parent = None
        self._size = None
        self._lsb_pos = None
        self._access = None
        self._is_volatile = None
        self._reset = None

    def configure(self, parent, size, lsb_pos, access, is_volatile, reset):
        """Define the field's geometry/behaviour and attach it to *parent*."""
        self._parent = parent
        # The parent register keeps its own list of fields.
        parent._add_field(self)
        self._size = size
        self._lsb_pos = lsb_pos
        self._access = access
        self._is_volatile = is_volatile
        self._reset = reset

    def get_parent(self):
        """Return the register this field was configured into."""
        return self._parent

    def get_lsb_pos(self):
        """Return the field's least-significant-bit position."""
        return self._lsb_pos

    def get_n_bits(self):
        """Return the field width in bits."""
        return self._size

    def get_access(self):
        """Return the access policy string (e.g. "RW")."""
        return self._access

    def is_volatile(self):
        """Return the volatility flag set by configure()."""
        return self._is_volatile

    def get_reset(self):
        """Return the configured reset value."""
        return self._reset
def prune_small_rho_grids_(ks, cell, dm, grids, kpts):
    """Drop integration grid points whose density contribution is negligible.

    The grids object is pruned in place and returned.  Pruning only happens
    when the numerically integrated electron count matches cell.nelectron to
    within a relative NELEC_ERROR_TOL, so integration accuracy is preserved.
    """
    rho = ks.get_rho(dm, grids, kpts)
    nelec = numpy.dot(rho, grids.weights)
    # Only prune if the current grid already reproduces the electron count.
    if abs(nelec - cell.nelectron) < NELEC_ERROR_TOL * nelec:
        rho *= grids.weights
        keep = abs(rho) > ks.small_rho_cutoff / grids.weights.size
        logger.debug(ks, 'Drop grids %d', grids.weights.size - numpy.count_nonzero(keep))
        grids.coords = numpy.asarray(grids.coords[keep], order='C')
        grids.weights = numpy.asarray(grids.weights[keep], order='C')
        # Rebuild the non-zero screening table for the reduced point set.
        grids.non0tab = grids.make_mask(cell, grids.coords)
    return grids
class DrawBoxTensor(object):
    """TF-graph wrappers around the numpy ``DrawBox`` drawer.

    Each ``draw_*`` method takes a batched image tensor (batch size 1),
    renders boxes — optionally with labels, scores and head angles — via
    ``tf.py_func`` and returns a uint8 tensor reshaped back to the input
    batch shape.  The four public methods previously duplicated the head
    sentinel and the squeeze/cast/py_func/reshape tail; that is factored
    into ``_default_head`` and ``_render`` with identical behaviour.
    """

    def __init__(self, cfgs):
        self.cfgs = cfgs
        self.drawer = DrawBox(cfgs)

    @staticmethod
    def _default_head(ref):
        """Sentinel head tensor (-1 everywhere, same shape/dtype as *ref*),
        meaning "no head direction supplied"."""
        return tf.ones_like(ref) * (-1)

    def _render(self, img_batch, boxes, labels, scores, method, head, is_csl, tout):
        """Common tail of every draw_* method: unbatch, draw, rebatch.

        ``tout`` is forwarded verbatim to ``tf.py_func``: the original code
        passed a bare dtype in ``only_draw_boxes`` and a one-element list in
        the other call sites; both forms are preserved per call site to keep
        graph construction identical.
        """
        img_tensor = tf.squeeze(img_batch, 0)
        img_tensor = tf.cast(img_tensor, tf.float32)
        img_tensor_with_boxes = tf.py_func(self.drawer.draw_boxes_with_label_and_scores,
                                           inp=[img_tensor, boxes, labels, scores, method, head, is_csl],
                                           Tout=tout)
        return tf.reshape(img_tensor_with_boxes, tf.shape(img_batch))

    def only_draw_boxes(self, img_batch, boxes, method, head=None, is_csl=False):
        """Draw bare boxes with neither labels nor scores."""
        boxes = tf.stop_gradient(boxes)
        labels = tf.ones(shape=(tf.shape(boxes)[0],), dtype=tf.int32) * self.drawer.ONLY_DRAW_BOXES
        scores = tf.zeros_like(labels, dtype=tf.float32)
        if head is None:
            head = self._default_head(scores)
        return self._render(img_batch, boxes, labels, scores, method, head, is_csl, tf.uint8)

    def draw_boxes_with_scores(self, img_batch, boxes, scores, method, head, is_csl=False):
        """Draw boxes annotated with their detection scores."""
        if head is None:
            head = self._default_head(scores)
        boxes = tf.stop_gradient(boxes)
        scores = tf.stop_gradient(scores)
        labels = tf.ones(shape=(tf.shape(boxes)[0],), dtype=tf.int32) * self.drawer.ONLY_DRAW_BOXES_WITH_SCORES
        return self._render(img_batch, boxes, labels, scores, method, head, is_csl, [tf.uint8])

    def draw_boxes_with_categories(self, img_batch, boxes, labels, method, head=None, is_csl=False):
        """Draw boxes annotated with category labels (scores forced to 1)."""
        if head is None:
            head = self._default_head(labels)
        boxes = tf.stop_gradient(boxes)
        scores = tf.ones(shape=(tf.shape(boxes)[0],), dtype=tf.float32)
        return self._render(img_batch, boxes, labels, scores, method, head, is_csl, [tf.uint8])

    def draw_boxes_with_categories_and_scores(self, img_batch, boxes, labels, scores, method, head=None, is_csl=False):
        """Draw boxes annotated with both category labels and scores."""
        if head is None:
            head = self._default_head(labels)
        boxes = tf.stop_gradient(boxes)
        scores = tf.stop_gradient(scores)
        return self._render(img_batch, boxes, labels, scores, method, head, is_csl, [tf.uint8])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.