code stringlengths 281 23.7M |
|---|
@pytest.mark.parametrize('args', [{}, {'options': {'zticks': [1]}}, {'x_basis': [1, 2, 3, 4, 5]}, {'y_basis': [1, 2, 3, 4, 5]}, {'limits': [0, 1]}, {'color_limits': [0, 1]}, {'color_style': 'phase'}, {'options': {'threshold': 0.1}}, {'color_style': 'real', 'colorbar': True}, {'color_style': 'img', 'colorbar': True}, {'color_style': 'abs', 'colorbar': True}, {'color_style': 'phase', 'colorbar': True}, {'color_limits': [0, 1], 'color_style': 'phase', 'colorbar': True}])
def test_matrix_histogram(args):
    """Smoke-test qutip.matrix_histogram with a variety of keyword options.

    Only checks that each call succeeds and returns matplotlib
    Figure/Axes objects; no rendered output is inspected.
    (The decorator prefix was truncated to ``.parametrize`` in the
    original source; ``@pytest.mark`` is restored here.)
    """
    rho = qutip.rand_dm(5)
    (fig, ax) = qutip.matrix_histogram(rho, **args)
    # Close immediately so parametrized runs do not accumulate open figures.
    plt.close()
    assert isinstance(fig, mpl.figure.Figure)
    assert isinstance(ax, mpl.axes.Axes)
def test_pyproject_toml_invalid_priority() -> None:
    """An out-of-range source priority must surface as a validation error."""
    pyproject_path = FIXTURE_DIR / 'complete_invalid_priority.toml'
    toml: dict[str, Any] = TOMLFile(pyproject_path).read()
    poetry_section = toml['tool']['poetry']
    expected = {
        'errors': [
            "data.source[0].priority must be one of ['primary', 'default', 'secondary', 'supplemental', 'explicit']"
        ],
        'warnings': [],
    }
    assert Factory.validate(poetry_section) == expected
@winsdkapi(cc=STDCALL, params={'hDlg': HWND, 'nIDDlgItem': INT, 'lpString': LPSTR, 'cchMax': INT})
def hook_GetDlgItemTextA(ql: Qiling, address: int, params):
    """Emulated GetDlgItemTextA: prompt on the emulated stdout, read one
    line from the emulated stdin, and copy at most ``cchMax`` bytes of it
    into the guest buffer at ``lpString``.

    Returns the number of bytes written (the Win32 return value).
    (The decorator name was stripped to a bare argument list in the
    original source; ``@winsdkapi`` is restored per the Qiling hook
    convention used by sibling hooks.)
    """
    lpString = params['lpString']
    cchMax = params['cchMax']
    ql.os.stdout.write(b'Input DlgItemText :\n')
    # Truncate to the caller-provided buffer capacity before writing.
    string = ql.os.stdin.readline().strip()[:cchMax]
    ql.mem.write(lpString, string)
    return len(string)
def raw_checkboard(quality, metric='mse', pretrained=False, progress=True, **kwargs):
    """Build the ``raw_checkboard`` model for a given quality level.

    quality: integer in [1, 8].
    metric: 'mse' or 'ms-ssim'.
    pretrained/progress and extra kwargs are forwarded to ``_load_model``.
    Raises ValueError for an unknown metric or out-of-range quality.
    """
    if metric not in ('mse', 'ms-ssim'):
        raise ValueError(f'Invalid metric "{metric}"')
    if not (1 <= quality <= 8):
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
    return _load_model('raw_checkboard', metric, quality, pretrained, progress, **kwargs)
class TxsETHAPPRSpider(TxsETHSpider):
    """Ethereum transaction spider that expands a subgraph around seed
    addresses using the APPR (approximate personalized PageRank) push
    strategy: neighbours are crawled while their residual importance
    stays above ``epsilon``.
    """
    # Scrapy spider identifier.
    name = 'txs.eth.appr'
    def __init__(self, **kwargs):
        """Read APPR hyper-parameters from the crawler kwargs.

        alpha: teleport probability for APPR (default 0.15).
        epsilon: residual threshold below which expansion stops (default 0.001).
        """
        super().__init__(**kwargs)
        # Maps task id -> SyncSubgraphTask; task ids are also threaded through
        # request kwargs so responses can be routed back to their task.
        self.task_map = dict()
        self.alpha = float(kwargs.get('alpha', 0.15))
        self.epsilon = float(kwargs.get('epsilon', 0.001))
    def start_requests(self):
        """Create one task per seed (from a JSON file or a single source)
        and emit the initial transaction-list requests for each."""
        if (self.filename is not None):
            # Batch mode: one task per entry in the JSON task file; per-task
            # alpha/epsilon override the spider-level defaults.
            infos = self.load_task_info_from_json(self.filename)
            for (i, info) in enumerate(infos):
                self.task_map[i] = SyncSubgraphTask(strategy=APPR(source=info['source'], alpha=float(info.get('alpha', 0.15)), epsilon=float(info.get('epsilon', 0.001))), **info)
        elif (self.source is not None):
            # Single-seed mode.
            self.task_map[0] = SyncSubgraphTask(strategy=APPR(source=self.source, alpha=self.alpha, epsilon=self.epsilon), **self.info)
        for tid in self.task_map.keys():
            task = self.task_map[tid]
            for txs_type in task.info['txs_types']:
                task.wait()
                # The seed starts with the full residual mass of 1.0.
                (yield self.txs_req_getter[txs_type](address=task.info['source'], **{'residual': 1.0, 'startblock': task.info['start_blk'], 'endblock': task.info['end_blk'], 'task_id': tid}))
    def _process_response(self, response, func_txs_type_request, **kwargs):
        """Shared response handler for all transaction types.

        Retries failed fetches up to ``self.max_retry``; on permanent
        failure fuses the address out of the task. Otherwise pushes the
        fetched edges into the APPR strategy, yields items for the new
        transactions, and either pops the next node to expand or pages
        through the remaining transactions of the current address.
        """
        tid = kwargs['task_id']
        task = self.task_map[tid]
        txs = self.load_txs_from_response(response)
        if (txs is None):
            # Fetch failed: retry with an incremented counter, or give up
            # and fuse this address so the task can continue.
            kwargs['retry'] = (kwargs.get('retry', 0) + 1)
            if (kwargs['retry'] < self.max_retry):
                self.log(message=('On parse: Get error status from %s, retrying' % response.url), level=logging.WARNING)
                (yield func_txs_type_request(address=kwargs['address'], **{k: v for (k, v) in kwargs.items() if (k != 'address')}))
                return
            self.log(message=('On parse: failed on %s' % response.url), level=logging.ERROR)
            item = task.fuse(kwargs['address'])
            if (item is not None):
                # Fusing released a next node to expand; request its txs.
                for txs_type in task.info['txs_types']:
                    task.wait()
                    (yield self.txs_req_getter[txs_type](address=item['node'], **{'startblock': task.info['start_blk'], 'endblock': task.info['end_blk'], 'residual': item['residual'], 'task_id': kwargs['task_id']}))
            return
        self.log(message='On parse: Extend {} from seed of {}, residual {}'.format(kwargs['address'], task.info['source'], kwargs['residual']), level=logging.INFO)
        # Emit the current importance vector, then the newly pushed edges.
        (yield ImportanceItem(source=task.info['source'], importance=task.strategy.p, task_info=task.info))
        for tx in task.push(node=kwargs['address'], edges=txs):
            (yield SubgraphTxItem(source=task.info['source'], tx=tx, task_info=task.info))
        # Fewer than 10000 rows means the API returned the last page
        # (or auto-paging is disabled): move on to the next node.
        if ((len(txs) < 10000) or (task.info['auto_page'] is False)):
            if task.is_locked():
                return
            item = task.pop()
            if (item is None):
                # Frontier exhausted: the task is finished.
                return
            for txs_type in task.info['txs_types']:
                task.wait()
                (yield self.txs_req_getter[txs_type](address=item['node'], **{'startblock': task.info['start_blk'], 'endblock': task.info['end_blk'], 'residual': item['residual'], 'task_id': kwargs['task_id']}))
        else:
            # Page forward: continue from the highest block seen so far.
            (yield func_txs_type_request(address=kwargs['address'], **{'startblock': self.get_max_blk(txs), 'endblock': task.info['end_blk'], 'residual': kwargs['residual'], 'task_id': kwargs['task_id']}))
    def parse_external_txs(self, response, **kwargs):
        """Handle external (normal) transaction responses."""
        (yield from self._process_response(response, self.get_external_txs_request, **kwargs))
    def parse_internal_txs(self, response, **kwargs):
        """Handle internal transaction responses."""
        (yield from self._process_response(response, self.get_internal_txs_request, **kwargs))
    def parse_erc20_txs(self, response, **kwargs):
        """Handle ERC-20 token transfer responses."""
        (yield from self._process_response(response, self.get_erc20_txs_request, **kwargs))
    def parse_erc721_txs(self, response, **kwargs):
        """Handle ERC-721 token transfer responses."""
        (yield from self._process_response(response, self.get_erc721_txs_request, **kwargs))
def test_hook_tracing(he_pm: PluginManager) -> None:
    """Tracing writes call/finish lines and restores indentation, even
    when a hook implementation raises."""
    recorded_indents = []

    class api1:
        def he_method1(self):
            recorded_indents.append(he_pm.trace.root.indent)

    class api2:
        def he_method1(self):
            recorded_indents.append(he_pm.trace.root.indent)
            raise ValueError()

    he_pm.register(api1())
    lines: List[Any] = []
    he_pm.trace.root.setwriter(lines.append)
    undo = he_pm.enable_tracing()
    try:
        indent = he_pm.trace.root.indent
        he_pm.hook.he_method1(arg=1)
        # A successful call leaves indentation unchanged and writes
        # exactly one entry line and one finish line.
        assert indent == he_pm.trace.root.indent
        assert len(lines) == 2
        assert 'he_method1' in lines[0]
        assert 'finish' in lines[1]
        del lines[:]
        he_pm.register(api2())
        with pytest.raises(ValueError):
            he_pm.hook.he_method1(arg=1)
        # Indentation must be restored after the raising implementation,
        # and the hook body ran at a deeper indent than the caller.
        assert he_pm.trace.root.indent == indent
        assert recorded_indents[0] > indent
    finally:
        undo()
@require_tokenizers
class TokenizerVersioningTest(unittest.TestCase):
    """Tests that tokenizer files are selected by library version, both
    for local directories and for Hub repos.

    (The class decorator was stripped to the bare fragment
    ``_tokenizers`` in the original source; ``@require_tokenizers`` is
    restored here — confirm against upstream.)
    """

    def test_local_versioning(self):
        """A versioned fast-tokenizer file is used only when the running
        library version is >= the version encoded in its filename."""
        tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
        json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
        # Inject a marker token so the two serialized tokenizers differ.
        json_tokenizer['model']['vocab']['huggingface'] = len(tokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Hide a "v4.0.0" tokenizer file at a version older than the
            # current one; it should be picked up on reload.
            tokenizer.init_kwargs['fast_tokenizer_files'] = ['tokenizer.4.0.0.json']
            tokenizer.save_pretrained(tmp_dir)
            # Use a context manager so the file handle is not leaked.
            with open(os.path.join(tmp_dir, 'tokenizer.4.0.0.json'), 'w') as f:
                json.dump(json_tokenizer, f)
            new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
            self.assertEqual(len(new_tokenizer), (len(tokenizer) + 1))
            json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
            self.assertIn('huggingface', json_tokenizer['model']['vocab'])
            # Re-label the file as requiring v42.0.0: it must now be
            # ignored in favour of the default tokenizer.
            shutil.move(os.path.join(tmp_dir, 'tokenizer.4.0.0.json'), os.path.join(tmp_dir, 'tokenizer.42.0.0.json'))
            tokenizer.init_kwargs['fast_tokenizer_files'] = ['tokenizer.42.0.0.json']
            tokenizer.save_pretrained(tmp_dir)
            new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
            self.assertEqual(len(new_tokenizer), len(tokenizer))
            json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
            self.assertNotIn('huggingface', json_tokenizer['model']['vocab'])

    def test_repo_versioning(self):
        """A Hub repo holding two tokenizers serves the one matching the
        (spoofed) library version."""
        repo = 'hf-internal-testing/test-two-tokenizers'
        tokenizer = AutoTokenizer.from_pretrained(repo)
        self.assertEqual(len(tokenizer), 28997)
        json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
        self.assertIn('huggingface', json_tokenizer['model']['vocab'])
        # Pretend to be transformers v3.0.0 and reload: the older
        # tokenizer (without the marker token) must be served.
        import transformers as old_transformers
        old_transformers.tokenization_utils_base.__version__ = '3.0.0'
        old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo)
        self.assertEqual(len(old_tokenizer), 28996)
        json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str())
        self.assertNotIn('huggingface', json_tokenizer['model']['vocab'])
class TorchvisionDataset(Dataset):
    """Wraps a torchvision dataset (CIFAR/STL10/MNIST/SVHN) behind the
    project's Dataset interface.

    cfg and data_source are accepted for interface compatibility but are
    not used by this wrapper.
    """

    def __init__(self, cfg: AttrDict, data_source: str, path: str, split: str, dataset_name: str):
        """Load the named torchvision dataset rooted at ``path`` for the
        given split ('train' or anything else -> test)."""
        super().__init__()
        assert PathManager.isdir(path), f'Directory {path} does not exist'
        self.dataset_name = dataset_name
        self.path = path
        self.split = split.lower()
        self.dataset = self._load_dataset()

    def _load_dataset(self):
        """Instantiate the underlying torchvision dataset.

        Raises ValueError for an unsupported dataset name.
        """
        is_train_split = (self.split == 'train')
        if (self.dataset_name == TorchvisionDatasetName.CIFAR10):
            return CIFAR10(self.path, train=is_train_split)
        elif (self.dataset_name == TorchvisionDatasetName.CIFAR100):
            return CIFAR100(self.path, train=is_train_split)
        elif (self.dataset_name == TorchvisionDatasetName.STL10):
            # STL10/SVHN take a split string instead of a train flag.
            stl_split = ('train' if is_train_split else 'test')
            return STL10(self.path, split=stl_split)
        elif (self.dataset_name == TorchvisionDatasetName.MNIST):
            return MNIST(root=self.path, train=is_train_split)
        elif (self.dataset_name == TorchvisionDatasetName.SVHN):
            stl_split = ('train' if is_train_split else 'test')
            return SVHN(root=self.path, split=stl_split)
        else:
            # Bug fix: the original f-string used the invalid format spec
            # "{self.dataset_name: str}", which itself raised at runtime.
            raise ValueError(f'Unsupported dataset {self.dataset_name}')

    def num_samples(self) -> int:
        """Total number of samples in the wrapped dataset."""
        return len(self.dataset)

    def __len__(self) -> int:
        return self.num_samples()

    def __getitem__(self, idx: int) -> Tuple[(Image.Image, bool)]:
        """Return (image, is_success); labels are intentionally dropped."""
        image = self.dataset[idx][0]
        is_success = True
        return (image, is_success)

    def get_labels(self) -> List[int]:
        """Return all labels in dataset order."""
        return [self.dataset[i][1] for i in range(self.num_samples())]
def _find_foldable_bn_pair_and_bn_picked_for_folding(connected_graph: ConnectedGraph) -> Tuple[(List[Tuple[(LayerType, BatchNormType)]], List[Tuple[(BatchNormType, LayerType)]], Set)]:
    """Find batch-norm layers that can be folded into adjacent conv/linear layers.

    Returns a 3-tuple:
      - conv_bn_pairs: (conv/linear, following BN) pairs to fold forward,
      - bn_conv_pairs: (preceding BN, conv/linear) pairs to fold backward,
      - bn_picked_for_folding: the set of BN ops already claimed, so a BN
        is never folded into two layers.

    Forward folds are collected first, so a BN shared between a preceding
    and a following layer is preferentially folded forward.
    """
    conv_linear_bn_activation_info_dict = find_all_conv_bn_with_activation_in_graph(connected_graph)
    bn_picked_for_folding = set()
    _conv_linear_optypes = (CONV_OP_TYPES + LINEAR_OP_TYPES)
    # Walk ops in graph order so folding decisions are deterministic.
    ordered_conv_fc_modules = [op.get_module() for op in connected_graph.ordered_ops if (op.type in _conv_linear_optypes)]
    conv_bn_pairs = []
    for module in ordered_conv_fc_modules:
        # fold_backward=True: BN that follows the conv/linear output.
        if ((module in conv_linear_bn_activation_info_dict.keys()) and _is_valid_bn_fold(module, True)):
            bn_info = conv_linear_bn_activation_info_dict[module]
            if (bn_info.output_bn and (bn_info.output_bn not in bn_picked_for_folding)):
                conv_bn_pairs.append((module, bn_info.output_bn.get_module()))
                bn_picked_for_folding.add(bn_info.output_bn)
    bn_conv_pairs = []
    for module in ordered_conv_fc_modules:
        # fold_backward=False: BN that feeds the conv/linear input.
        if ((module in conv_linear_bn_activation_info_dict.keys()) and _is_valid_bn_fold(module, False)):
            bn_info = conv_linear_bn_activation_info_dict[module]
            if (bn_info.input_bn and (bn_info.input_bn not in bn_picked_for_folding)):
                bn_conv_pairs.append((bn_info.input_bn.get_module(), module))
                bn_picked_for_folding.add(bn_info.input_bn)
    return (conv_bn_pairs, bn_conv_pairs, bn_picked_for_folding)
class TestRequestRepoBuild(ApiTestCase):
    """API tests for requesting repository builds (RepositoryBuildList).

    NOTE(review): two URL string literals on the archive-URL test were
    stripped from the original source, leaving unbalanced quotes; a
    placeholder URL restores valid syntax — confirm the real URL against
    upstream before relying on those assertions.
    """

    def test_requestbuild_noidurl(self):
        """A build request with neither archive_url nor file_id is a 400."""
        self.login(ADMIN_ACCESS_USER)
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(), expected_code=400)

    def test_requestbuild_invalidurls(self):
        """Non-http(s) and malformed archive URLs are rejected."""
        self.login(ADMIN_ACCESS_USER)
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(archive_url='foobarbaz'), expected_code=400)
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(archive_url='file://foobarbaz'), expected_code=400)

    def test_requestrepobuild_withurl(self):
        """A valid archive URL creates a build and is echoed back."""
        self.login(ADMIN_ACCESS_USER)
        json = self.getJsonResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')))
        assert (len(json['builds']) == 0)
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(archive_url='http://example.com/archive.tar.gz'), expected_code=201)
        json = self.getJsonResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')))
        assert (len(json['builds']) > 0)
        self.assertEqual('http://example.com/archive.tar.gz', json['builds'][0]['archive_url'])

    def test_requestrepobuild_withfile(self):
        """A build can also be requested from an uploaded file id."""
        self.login(ADMIN_ACCESS_USER)
        json = self.getJsonResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')))
        assert (len(json['builds']) == 0)
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(file_id='foobarbaz'), expected_code=201)
        json = self.getJsonResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')))
        assert (len(json['builds']) > 0)

    def test_requestrepobuild_with_robot(self):
        """A build may specify an authorized pull robot."""
        self.login(ADMIN_ACCESS_USER)
        json = self.getJsonResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')))
        assert (len(json['builds']) == 0)
        pull_robot = (ADMIN_ACCESS_USER + '+dtrobot')
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(file_id='foobarbaz', pull_robot=pull_robot), expected_code=201)
        # NOTE(review): queries the '/building' repo after posting to
        # '/simple' — looks intentional in the fixture data, but confirm.
        json = self.getJsonResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/building')))
        assert (len(json['builds']) > 0)

    def test_requestrepobuild_with_invalid_robot(self):
        """An unknown pull robot yields a 404."""
        self.login(ADMIN_ACCESS_USER)
        pull_robot = (ADMIN_ACCESS_USER + '+invalidrobot')
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(file_id='foobarbaz', pull_robot=pull_robot), expected_code=404)

    def test_requestrepobuild_with_unauthorized_robot(self):
        """A robot owned by another user yields a 403."""
        self.login(ADMIN_ACCESS_USER)
        pull_robot = 'freshuser+anotherrobot'
        self.postResponse(RepositoryBuildList, params=dict(repository=(ADMIN_ACCESS_USER + '/simple')), data=dict(file_id='foobarbaz', pull_robot=pull_robot), expected_code=403)
class DistanceEmbed(nn.Module):
    """Embed interatomic distances: radial-basis expansion followed by a
    dense projection, modulated by a cosine cutoff envelope."""

    def __init__(self, n_rbf, cutoff, feat_dim, dropout):
        super().__init__()
        # RBF expansion then linear projection to feat_dim features.
        self.block = nn.Sequential(
            PainnRadialBasis(n_rbf=n_rbf, cutoff=cutoff),
            Dense(in_features=n_rbf, out_features=feat_dim, bias=True, dropout_rate=dropout),
        )
        self.f_cut = CosineEnvelope(cutoff=cutoff)

    def forward(self, dist):
        """Return envelope-weighted distance features for each distance."""
        envelope = self.f_cut(dist).reshape(-1, 1)
        return self.block(dist) * envelope
def mainopt_festival_dictionary_to_espeak(i):
    """Handle the --festival-dictionary-to-espeak command-line option.

    i: index of the option in sys.argv; the next argument must be the
    path to the festival OALD file.
    Returns an error-message string on failure, or None after running
    the conversion.
    """
    try:
        festival_location = sys.argv[(i + 1)]
    except IndexError:
        return 'Error: --festival-dictionary-to-espeak must be followed by the location of the festival OALD file (see help text)'
    try:
        # Probe readability only; close immediately so the handle is not leaked.
        open(festival_location).close()
    except OSError:
        return (("Error: The specified OALD location '" + festival_location) + "' could not be opened")
    try:
        open('en_list').close()
    except OSError:
        # (Original message was missing its closing parenthesis.)
        return 'Error: en_list could not be opened (did you remember to cd to the eSpeak dictsource directory first?)'
    convert_system_festival_dictionary_to_espeak(festival_location, (not ('--without-check' in sys.argv)), (not os.system('test -e ~/.festivalrc')))
class IfElseIfElseIf(Op):
    """Lazy three-way conditional Op: evaluates like
    ``t1 if c1 else (t2 if c2 else (t3 if c3 else f3))`` but requests
    inputs from the VM only as needed (a lazy thunk).
    """
    def __init__(self, inplace=False):
        # inplace is accepted for Op-interface compatibility but must be False.
        self.inplace = inplace
        assert (not self.inplace)
    def make_node(self, c1, t1, c2, t2, c3, t3, f3):
        """Build the Apply node; all branch values must share one type,
        which is also the output type."""
        assert (t1.type == f3.type)
        assert (t2.type == t3.type)
        assert (t3.type == f3.type)
        return Apply(self, [c1, t1, c2, t2, c3, t3, f3], [t1.type()])
    def make_thunk(self, node, storage_map, compute_map, no_recycling, impl):
        """Return a lazy thunk for the VM.

        The thunk returns a list of input indices it still needs (so the
        VM computes them and calls back), or [] once the output has been
        written to output_registers.
        """
        input_computed = [compute_map[v] for v in node.inputs]
        output_computed = [compute_map[v] for v in node.outputs]
        input_registers = [storage_map[v] for v in node.inputs]
        output_registers = [storage_map[v] for v in node.outputs]
        outtype = node.outputs[0].type
        def thunk():
            # Inputs are laid out as [c1, t1, c2, t2, c3, t3, f3]
            # (indices 0..6); each branch first requests its condition,
            # then the corresponding value.
            if (not input_computed[0][0]):
                return [0]
            else:
                truthval = input_registers[0][0]
                if truthval:
                    if (not input_computed[1][0]):
                        return [1]
                    else:
                        output_computed[0][0] = 1
                        # deepcopy so the output does not alias the input storage.
                        output_registers[0][0] = outtype.filter(deepcopy(input_registers[1][0]))
                        return []
                elif (not input_computed[2][0]):
                    return [2]
                else:
                    truthval = input_registers[2][0]
                    if truthval:
                        if (not input_computed[3][0]):
                            return [3]
                        else:
                            output_computed[0][0] = 1
                            output_registers[0][0] = outtype.filter(deepcopy(input_registers[3][0]))
                            return []
                    elif (not input_computed[4][0]):
                        return [4]
                    else:
                        truthval = input_registers[4][0]
                        if truthval:
                            if (not input_computed[5][0]):
                                return [5]
                            else:
                                output_computed[0][0] = 1
                                output_registers[0][0] = outtype.filter(deepcopy(input_registers[5][0]))
                                return []
                        elif (not input_computed[6][0]):
                            return [6]
                        else:
                            # All conditions false: use the final fallback f3.
                            output_computed[0][0] = 1
                            output_registers[0][0] = outtype.filter(deepcopy(input_registers[6][0]))
                            return []
        # Mark the thunk as lazy so the VM uses the request/callback protocol.
        thunk.lazy = True
        return thunk
    def perform(self, *args, **kwargs):
        # Never called: execution goes through the lazy thunk above.
        raise NotImplementedError()
class PreActResNet(nn.Module):
    """Pre-activation ResNet backbone for 32x32 inputs (CIFAR-style).

    block: residual block class exposing an ``expansion`` attribute and
        a ``(in_planes, planes, stride)`` constructor.
    num_blocks: number of blocks in each of the four stages.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # Final BN/ReLU before pooling (pre-activation style).
        self.bn = nn.BatchNorm2d(512 * block.expansion)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one may downsample."""
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        h = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.relu(self.bn(h))
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
class NC_ABI_BASE(BaseFileHandler):
    """Base file handler for GOES-R ABI NetCDF files: opens the dataset,
    unscales variables, and builds area definitions.
    """

    def __init__(self, filename, filename_info, filetype_info):
        """Cache platform name and grid dimensions from the file."""
        super(NC_ABI_BASE, self).__init__(filename, filename_info, filetype_info)
        platform_shortname = filename_info['platform_shortname']
        self.platform_name = PLATFORM_NAMES.get(platform_shortname.lower())
        # These reads go through the ``nc`` property below.
        self.nlines = self.nc['y'].size
        self.ncols = self.nc['x'].size
        self.coords = {}

    # Bug fix: the original had a bare ``_property`` line here — a
    # stripped ``@property`` decorator. Without it, ``self.nc['y']`` in
    # __init__ would index the bound method and fail.
    @property
    def nc(self):
        """The file as an xarray Dataset (opened on each access)."""
        chunk_bytes = self._chunk_bytes_for_resolution()
        with dask.config.set({'array.chunk-size': chunk_bytes}):
            f_obj = open_file_or_filename(self.filename)
            nc = xr.open_dataset(f_obj, decode_cf=True, mask_and_scale=False, chunks='auto')
        nc = self._rename_dims(nc)
        return nc

    def _chunk_bytes_for_resolution(self) -> int:
        """Pick a dask chunk size (bytes) so chunks align across ABI
        resolutions (500m/1km/2km share disk-aligned boundaries)."""
        num_high_res_elems_per_dim = math.sqrt((get_dask_chunk_size_in_bytes() / 4))
        # Round to multiples of 4*226, the on-disk chunking of 500m files.
        high_res_elems_disk_aligned = (round(max((num_high_res_elems_per_dim / (4 * 226)), 1)) * (4 * 226))
        low_res_factor = int((self.filetype_info.get('resolution', 2000) // 500))
        res_elems_per_dim = int((high_res_elems_disk_aligned / low_res_factor))
        return ((res_elems_per_dim ** 2) * 2)

    # Bug fix: defined without ``self`` but called as ``self._rename_dims(nc)``;
    # it must be a staticmethod for that call to work.
    @staticmethod
    def _rename_dims(nc):
        """Normalize dimension names ('t'->'time', lon/lat->x/y)."""
        if (('t' in nc.dims) or ('t' in nc.coords)):
            nc = nc.rename({'t': 'time'})
        if ('goes_lat_lon_projection' in nc):
            with suppress(ValueError):
                nc = nc.rename({'lon': 'x', 'lat': 'y'})
        return nc

    # NOTE(review): upstream exposes sensor/start_time/end_time as
    # properties; no stripped decorator markers are visible here, so they
    # are kept as plain methods — confirm against callers.
    def sensor(self):
        """Instrument name."""
        return 'abi'

    def __getitem__(self, item):
        """Load a variable with scaling/fill handling and shared coords."""
        data = self.nc[item]
        attrs = data.attrs
        data = self._adjust_data(data, item)
        # Re-attach original attrs (scaling attrs get consumed above).
        data.attrs = attrs
        data = self._adjust_coords(data, item)
        return data

    def _adjust_data(self, data, item):
        """Apply _Unsigned/_FillValue/scale_factor/add_offset semantics."""
        factor = data.attrs.get('scale_factor', 1)
        offset = data.attrs.get('add_offset', 0)
        fill = data.attrs.get('_FillValue')
        unsigned = data.attrs.get('_Unsigned', None)
        def is_int(val):
            return (np.issubdtype(val.dtype, np.integer) if hasattr(val, 'dtype') else isinstance(val, int))
        # "_Unsigned" means the signed storage type must be reinterpreted.
        if ((unsigned is not None) and (unsigned.lower() == 'true')):
            data = data.astype(('u%s' % data.dtype.itemsize))
            if (fill is not None):
                fill = fill.astype(('u%s' % fill.dtype.itemsize))
        if (fill is not None):
            if (np.ndim(fill) > 0):
                fill = fill.item()
            # Keep integer fill only when the result stays integer;
            # otherwise replace fills with NaN.
            if (is_int(data) and is_int(factor) and is_int(offset)):
                new_fill = fill
            else:
                new_fill = np.float32(np.nan)
            data = data.where((data != fill), new_fill)
        if ((factor != 1) and (item in ('x', 'y'))):
            # Coordinates need float64 precision for accurate area extents.
            data = ((data * np.round(float(factor), 6)) + np.round(float(offset), 6))
        elif (factor != 1):
            data = ((data * np.float32(factor)) + np.float32(offset))
        return data

    def _adjust_coords(self, data, item):
        """Strip per-variable coords and share one coord instance per file."""
        new_coords = {}
        for coord_name in ('x_image', 'y_image', 'time', 'x', 'y'):
            if (coord_name in data.coords):
                data = data.drop_vars(coord_name)
        if (item in data.coords):
            self.coords[item] = data
        for coord_name in data.coords.keys():
            if (coord_name not in self.coords):
                self.coords[coord_name] = self[coord_name]
            new_coords[coord_name] = self.coords[coord_name]
        data.coords.update(new_coords)
        return data

    def get_dataset(self, key, info):
        """Subclasses must implement dataset loading."""
        raise NotImplementedError('Reader {} has not implemented get_dataset'.format(self.name))

    def get_area_def(self, key):
        """Return the area definition for the file's projection."""
        if ('goes_imager_projection' in self.nc):
            return self._get_areadef_fixedgrid(key)
        if ('goes_lat_lon_projection' in self.nc):
            return self._get_areadef_latlon(key)
        raise ValueError('Unsupported projection found in the dataset')

    def _get_areadef_latlon(self, key):
        """Build an AreaDefinition for lat/lon-projected products."""
        projection = self.nc['goes_lat_lon_projection']
        a = projection.attrs['semi_major_axis']
        b = projection.attrs['semi_minor_axis']
        fi = projection.attrs['inverse_flattening']
        pm = projection.attrs['longitude_of_prime_meridian']
        proj_ext = self.nc['geospatial_lat_lon_extent']
        w_lon = proj_ext.attrs['geospatial_westbound_longitude']
        e_lon = proj_ext.attrs['geospatial_eastbound_longitude']
        n_lat = proj_ext.attrs['geospatial_northbound_latitude']
        s_lat = proj_ext.attrs['geospatial_southbound_latitude']
        lat_0 = proj_ext.attrs['geospatial_lat_center']
        lon_0 = proj_ext.attrs['geospatial_lon_center']
        area_extent = (w_lon, s_lat, e_lon, n_lat)
        proj_dict = {'proj': 'latlong', 'lon_0': float(lon_0), 'lat_0': float(lat_0), 'a': float(a), 'b': float(b), 'fi': float(fi), 'pm': float(pm)}
        ll_area_def = geometry.AreaDefinition(self.nc.attrs.get('orbital_slot', 'abi_geos'), self.nc.attrs.get('spatial_resolution', 'ABI file area'), 'abi_latlon', proj_dict, self.ncols, self.nlines, np.asarray(area_extent))
        return ll_area_def

    def _get_areadef_fixedgrid(self, key):
        """Build an AreaDefinition for the geostationary fixed grid."""
        projection = self.nc['goes_imager_projection']
        a = projection.attrs['semi_major_axis']
        b = projection.attrs['semi_minor_axis']
        h = projection.attrs['perspective_point_height']
        lon_0 = projection.attrs['longitude_of_projection_origin']
        sweep_axis = projection.attrs['sweep_angle_axis'][0]
        h = np.float64(h)
        x = self['x']
        y = self['y']
        # x/y are pixel centers in radians; extend by half a pixel to get
        # the outer edges, then scale by satellite height to meters.
        x_l = x[0].values
        x_r = x[(- 1)].values
        y_l = y[(- 1)].values
        y_u = y[0].values
        x_half = (((x_r - x_l) / (self.ncols - 1)) / 2.0)
        y_half = (((y_u - y_l) / (self.nlines - 1)) / 2.0)
        area_extent = ((x_l - x_half), (y_l - y_half), (x_r + x_half), (y_u + y_half))
        area_extent = tuple((np.round((h * val), 6) for val in area_extent))
        proj_dict = {'proj': 'geos', 'lon_0': float(lon_0), 'a': float(a), 'b': float(b), 'h': h, 'units': 'm', 'sweep': sweep_axis}
        fg_area_def = geometry.AreaDefinition(self.nc.attrs.get('orbital_slot', 'abi_geos'), self.nc.attrs.get('spatial_resolution', 'ABI file area'), 'abi_fixed_grid', proj_dict, self.ncols, self.nlines, np.asarray(area_extent))
        return fg_area_def

    def start_time(self):
        """Observation start time (UTC, from file attributes)."""
        return datetime.strptime(self.nc.attrs['time_coverage_start'], '%Y-%m-%dT%H:%M:%S.%fZ')

    def end_time(self):
        """Observation end time (UTC, from file attributes)."""
        return datetime.strptime(self.nc.attrs['time_coverage_end'], '%Y-%m-%dT%H:%M:%S.%fZ')

    def spatial_resolution_to_number(self):
        """Spatial resolution in meters parsed from the
        'spatial_resolution' attribute (e.g. '2km at nadir' -> 2000)."""
        res = self.nc.attrs['spatial_resolution'].split(' ')[0]
        if res.endswith('km'):
            res = int((float(res[:(- 2)]) * 1000))
        elif res.endswith('m'):
            res = int(res[:(- 1)])
        else:
            raise ValueError("Unexpected 'spatial_resolution' attribute '{}'".format(res))
        return res
def update_LD_LIBRARY_PATH(install_dir):
    """Persist ``<install_dir>/lib`` on LD_LIBRARY_PATH by appending an
    export line to the venv activate script (or ~/.bashrc).

    Skips the write when the path is already in the live LD_LIBRARY_PATH
    or the export line is already present in the target script.
    """
    export_statement = f'export LD_LIBRARY_PATH={install_dir}/lib:$LD_LIBRARY_PATH'
    venv_path = os.environ.get('VIRTUAL_ENV')
    if venv_path:
        script_path = os.path.join(venv_path, 'bin/activate')
    else:
        script_path = os.path.join(os.environ.get('HOME'), '.bashrc')
    if (os.getenv('LD_LIBRARY_PATH') and (f'{install_dir}/lib' in os.getenv('LD_LIBRARY_PATH'))):
        print(f'{install_dir}/lib was found in LD_LIBRARY_PATH.')
        print('--> Not updating venv activate or .bashrc scripts')
    else:
        with open(script_path, 'a+') as fh:
            # Bug fix: 'a+' opens positioned at EOF, so the original
            # fh.read() always returned '' and the duplicate check never
            # fired. Rewind before reading.
            fh.seek(0)
            if (export_statement not in fh.read()):
                fh.write(export_statement)
                print(f'Adding {install_dir}/lib to LD_LIBRARY_PATH in {script_path}')
def _encode_codepage(codepage, text):
    """Encode ``text`` to bytes using the given Windows codepage via
    WideCharToMultiByte.

    Returns b'' for empty input. Raises ctypes.WinError on API failure.
    """
    assert isinstance(text, text_type)
    if (not text):
        return b''
    # Number of UTF-16 code units (WCHARs) in text, surrogates included.
    size = (len(text.encode('utf-16-le', _surrogatepass)) // ctypes.sizeof(winapi.WCHAR))
    # First call with a NULL buffer: query the required byte length.
    length = winapi.WideCharToMultiByte(codepage, 0, text, size, None, 0, None, None)
    if (length == 0):
        raise ctypes.WinError()
    buf = ctypes.create_string_buffer(length)
    # Second call performs the actual conversion into buf.
    length = winapi.WideCharToMultiByte(codepage, 0, text, size, buf, length, None, None)
    if (length == 0):
        raise ctypes.WinError()
    return buf[:length]
class ConditionViewSet(ModelViewSet):
    """CRUD API for Condition objects with extra index and XML/format
    export routes.

    (The bare ``(detail=...)`` lines in the original source were stripped
    decorators; the DRF ``@action`` decorator is restored so these extra
    routes are actually registered.)
    """
    permission_classes = ((HasModelPermission | HasObjectPermission),)
    serializer_class = ConditionSerializer
    queryset = Condition.objects.select_related('source', 'target_option').prefetch_related('optionsets', 'pages', 'questionsets', 'questions', 'tasks', 'editors')
    filter_backends = (SearchFilter, DjangoFilterBackend)
    search_fields = ('uri',)
    filterset_fields = ('uri', 'uri_prefix', 'uri_path', 'source', 'relation', 'target_text', 'target_option')

    @action(detail=False)
    def index(self, request):
        """Compact listing of all (filtered) conditions."""
        queryset = self.filter_queryset(self.get_queryset())
        serializer = ConditionIndexSerializer(queryset, many=True)
        return Response(serializer.data)

    @action(detail=False, url_path='export(/(?P<export_format>[a-z]+))?')
    def export(self, request, export_format='xml'):
        """Export all (filtered) conditions as XML or a rendered format."""
        queryset = self.filter_queryset(self.get_queryset())
        if (export_format == 'xml'):
            serializer = ConditionExportSerializer(queryset, many=True)
            xml = ConditionRenderer().render(serializer.data, context=self.get_export_renderer_context(request))
            return XMLResponse(xml, name='conditions')
        else:
            # NOTE(review): the title argument is 'tasks' although this
            # exports conditions — looks like a copy-paste slip; confirm
            # before changing since it is user-visible output.
            return render_to_format(self.request, export_format, 'tasks', 'conditions/export/conditions.html', {'conditions': queryset})

    @action(detail=True, url_path='export(/(?P<export_format>[a-z]+))?')
    def detail_export(self, request, pk=None, export_format='xml'):
        """Export a single condition as XML or a rendered format."""
        if (export_format == 'xml'):
            serializer = ConditionExportSerializer(self.get_object())
            xml = ConditionRenderer().render([serializer.data], context=self.get_export_renderer_context(request))
            return XMLResponse(xml, name=self.get_object().uri_path)
        else:
            return render_to_format(self.request, export_format, self.get_object().uri_path, 'conditions/export/conditions.html', {'conditions': [self.get_object()]})

    def get_export_renderer_context(self, request):
        """Build renderer flags from query params; 'full' implies both."""
        full = is_truthy(request.GET.get('full'))
        return {'attributes': (full or is_truthy(request.GET.get('attributes'))), 'options': (full or is_truthy(request.GET.get('options')))}
def test_read_header():
    """recarray2dict flattens a one-record numpy structured array into a
    plain field-name -> value dict."""
    fields = (
        ('SatelliteId', np.uint16, 324),
        ('NominalLongitude', np.float32, 0.0),
        ('SatelliteStatus', np.uint8, 1),
    )
    expected = {name: value for (name, _, value) in fields}
    record_dtype = np.dtype([(name, kind) for (name, kind, _) in fields])
    hdr_data = np.array([tuple(value for (_, _, value) in fields)], dtype=record_dtype)
    with mock.patch('satpy.readers.seviri_l1b_native.np.fromfile') as fromfile:
        fromfile.return_value = hdr_data
        actual = recarray2dict(hdr_data)
        assert (actual == expected)
class FilterValidationTests(AuthenticatedAPITestCase):
    """API validation tests for filters and filter lists: infraction
    settings must be internally consistent, channel/category allow and
    deny lists must not overlap, and duplicate filters are rejected.
    """
    def test_filter_validation(self) -> None:
        """PATCHing a filter enforces infraction/channel consistency,
        considering settings inherited from its filter list."""
        test_sequences = get_test_sequences()
        base_filter = test_sequences['filter']
        base_filter_list = test_sequences['filter_list1']
        # Each case: (filter settings, filter-list settings, expected HTTP status).
        cases = (({'infraction_reason': 'hi'}, {}, 400), ({'infraction_duration': timedelta(seconds=10)}, {}, 400), ({'infraction_reason': 'hi'}, {'infraction_type': 'NOTE'}, 200), ({'infraction_type': 'TIMEOUT', 'infraction_duration': timedelta(days=30)}, {}, 400), ({'infraction_duration': timedelta(seconds=10)}, {'infraction_type': 'TIMEOUT'}, 200), ({'enabled_channels': ['admins']}, {}, 200), ({'disabled_channels': ['123']}, {}, 200), ({'enabled_categories': ['CODE JAM']}, {}, 200), ({'disabled_categories': ['CODE JAM']}, {}, 200), ({'enabled_channels': ['admins'], 'disabled_channels': ['123', 'admins']}, {}, 400), ({'enabled_categories': ['admins'], 'disabled_categories': ['123', 'admins']}, {}, 400), ({'enabled_channels': ['admins']}, {'disabled_channels': ['123', 'admins']}, 400), ({'enabled_categories': ['admins']}, {'disabled_categories': ['123', 'admins']}, 400))
        for (filter_settings, filter_list_settings, response_code) in cases:
            with self.subTest(f_settings=filter_settings, fl_settings=filter_list_settings, response=response_code):
                # Start each case from a clean database.
                base_filter.model.objects.all().delete()
                base_filter_list.model.objects.all().delete()
                case_filter_dict = base_filter.object.copy()
                case_fl_dict = base_filter_list.object.copy()
                case_fl_dict.update(filter_list_settings)
                case_fl = base_filter_list.model(**case_fl_dict)
                case_filter_dict['filter_list'] = case_fl
                case_filter = base_filter.model(**case_filter_dict)
                save_nested_objects(case_filter)
                filter_settings['filter_list'] = case_fl
                response = self.client.patch(f'{base_filter.url()}/{case_filter.id}', data=clean_test_json(filter_settings))
                self.assertEqual(response.status_code, response_code)
    def test_filter_list_validation(self) -> None:
        """PATCHing a filter list enforces the same consistency rules
        directly on the list's own settings."""
        test_sequences = get_test_sequences()
        base_filter_list = test_sequences['filter_list1']
        # Each case: (filter-list settings, expected HTTP status).
        cases = (({'infraction_reason': 'hi'}, 400), ({'infraction_duration': timedelta(seconds=10)}, 400), ({'infraction_type': 'TIMEOUT', 'infraction_duration': timedelta(days=30)}, 400), ({'infraction_reason': 'hi', 'infraction_type': 'NOTE'}, 200), ({'infraction_duration': timedelta(seconds=10), 'infraction_type': 'TIMEOUT'}, 200), ({'enabled_channels': ['admins']}, 200), ({'disabled_channels': ['123']}, 200), ({'enabled_categories': ['CODE JAM']}, 200), ({'disabled_categories': ['CODE JAM']}, 200), ({'enabled_channels': ['admins'], 'disabled_channels': ['123', 'admins']}, 400), ({'enabled_categories': ['admins'], 'disabled_categories': ['123', 'admins']}, 400))
        for (filter_list_settings, response_code) in cases:
            with self.subTest(fl_settings=filter_list_settings, response=response_code):
                base_filter_list.model.objects.all().delete()
                case_fl_dict = base_filter_list.object.copy()
                case_fl = base_filter_list.model(**case_fl_dict)
                save_nested_objects(case_fl)
                response = self.client.patch(f'{base_filter_list.url()}/{case_fl.id}', data=clean_test_json(filter_list_settings))
                self.assertEqual(response.status_code, response_code)
    def test_filter_unique_constraint(self) -> None:
        """Posting the same filter twice succeeds once, then fails with 400."""
        test_filter = get_test_sequences()['filter']
        test_filter.model.objects.all().delete()
        test_filter_object = test_filter.model(**test_filter.object)
        save_nested_objects(test_filter_object, False)
        response = self.client.post(test_filter.url(), data=clean_test_json(test_filter.object))
        self.assertEqual(response.status_code, 201)
        response = self.client.post(test_filter.url(), data=clean_test_json(test_filter.object))
        self.assertEqual(response.status_code, 400)
def override_eval_lm_args(args: Namespace) -> Tuple[(List[str], List[str])]:
    """Translate legacy argparse args into config-override strings for
    eval-lm, returning (overrides, deletions) for the module configs."""
    param_groups = (
        ('params.common', CommonParams),
        ('params.dataset', DatasetParams),
        ('params.distributed_training', DistributedTrainingParams),
        ('params.common_eval', CommonEvalParams),
        ('params.eval_lm', EvalLMParams),
        ('params.bmuf', FairseqBMUFConfig),
    )
    overrides = []
    for prefix, params_cls in param_groups:
        overrides.extend(_override_attr(prefix, params_cls, args))
    module_overrides, module_deletes = override_module_args(args)
    overrides.extend(module_overrides)
    return (overrides, module_deletes)
def detect_project_name():
    """Derive the project name from the LP logs directory layout and set
    OPS_PROJECTNAME; disable listen/connect commands on a bad layout.

    Returns True on success, False when the logging directory is not of
    the expected ``<drive>:\\Logs\\<PROJECTNAME>`` form.
    """
    logdir = dsz.lp.GetLogsDirectory()
    # The logs dir is <...>\Logs\<project>\<session>; strip the last component.
    projectdir = os.path.split(logdir)[0]
    [logroot, project] = os.path.split(projectdir)
    # Assumes a 3-character drive prefix (e.g. 'D:\\') before 'Logs'.
    if ((logroot.lower()[3:] != 'logs') or (not project)):
        print()
        dsz.ui.Echo('', dsz.ERROR)
        dsz.ui.Echo('ERROR: You did not correctly configure the LP logging directory.', dsz.ERROR)
        dsz.ui.Echo('', dsz.ERROR)
        print()
        # Block commands that would write data under the bad layout.
        dsz.lp.alias.DisableCommand('pc_listen')
        dsz.lp.alias.DisableCommand('pc_connect')
        print()
        print('Correct format: D:\\Logs\\<PROJECTNAME>')
        print(('You provided : %s' % projectdir))
        print()
        print('YOU MUST RECONFIGURE YOUR SESSION TO CONTINUE.')
        print('CLEAN ANY EXTRANEOUS DATA CREATED BY THIS CONFIGURATION FROM THE LOGS DIRECTORY.')
        return False
    dsz.env.Set('OPS_PROJECTNAME', project, addr='')
    dsz.ui.Echo(("Setting environment variable OPS_PROJECTNAME to '%s'" % project.lower()), dsz.GOOD)
    return True
class MDTextField(models.TextField):
    """TextField whose form field renders with the Markdown editor.

    Accepts an extra ``config_name`` keyword argument (default ``'default'``)
    selecting which editor configuration to use.
    """

    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before delegating to the stock TextField.
        self.config_name = kwargs.pop('config_name', 'default')
        super(MDTextField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # Caller-supplied kwargs take precedence over our defaults.
        options = {'form_class': MDTextFormField, 'config_name': self.config_name}
        options.update(kwargs)
        return super(MDTextField, self).formfield(**options)
def configure_converter(converter: BaseConverter):
    """Register (un)structure hooks for BSON/Mongo-friendly serialization.

    Installs: base85 text encoding for bytes used as mapping keys, ObjectId
    round-tripping, datetime/date handling, and passthrough for unions of
    primitive types.
    """
    def gen_unstructure_mapping(cl: Any, unstructure_to=None):
        # Choose how mapping keys are serialized: str keys pass through
        # unchanged (key_handler=None), bytes keys are base85-encoded to
        # text, and anything else falls back to str().
        key_handler = str
        args = getattr(cl, '__args__', None)
        if args:
            if issubclass(args[0], str):
                key_handler = None
            elif issubclass(args[0], bytes):
                def key_handler(k):
                    return b85encode(k).decode('utf8')
        return converter.gen_unstructure_mapping(cl, unstructure_to=unstructure_to, key_handler=key_handler)
    def gen_structure_mapping(cl: Any) -> StructureHook:
        # Mirror of the unstructure hook: bytes keys come back through the
        # Base85Bytes structure hook registered below.
        args = getattr(cl, '__args__', None)
        if (args and issubclass(args[0], bytes)):
            h = make_mapping_structure_fn(cl, converter, key_type=Base85Bytes)
        else:
            h = make_mapping_structure_fn(cl, converter)
        return h
    converter.register_structure_hook(Base85Bytes, (lambda v, _: b85decode(v)))
    converter.register_unstructure_hook_factory(is_mapping, gen_unstructure_mapping)
    converter.register_structure_hook_factory(is_mapping, gen_structure_mapping)
    converter.register_structure_hook(ObjectId, (lambda v, _: ObjectId(v)))
    configure_union_passthrough(Union[(str, bool, int, float, None, bytes, datetime, ObjectId, Int64)], converter)
    # datetime objects are stored natively (identity unstructure); dates are
    # serialized as ISO-8601 strings.
    converter.register_unstructure_hook(datetime, (lambda v: v))
    converter.register_structure_hook(datetime, validate_datetime)
    converter.register_unstructure_hook(date, (lambda v: v.isoformat()))
    converter.register_structure_hook(date, (lambda v, _: date.fromisoformat(v)))
def get_git_version() -> Optional[str]:
    """Return `git describe --always --dirty` for this file's repo, or None on failure."""
    here = os.path.dirname(os.path.realpath(__file__))
    try:
        raw = subprocess.check_output(['git', 'describe', '--always', '--dirty'], cwd=here)
        return str(raw, 'utf8').strip()
    except Exception:
        # Not a git checkout, or git is unavailable.
        return None
def convert_ndarray_to_list_in_data(data: np.ndarray):
    """Recursively convert the elements of *data* into plain containers.

    ndarray elements are converted recursively, dict elements get every
    value replaced by ``value.tolist()``, and any other element is kept
    unchanged. A new list is returned; *data* is not modified.
    """
    converted = []
    for element in data:
        if isinstance(element, np.ndarray):
            converted.append(convert_ndarray_to_list_in_data(element))
        elif isinstance(element, dict):
            converted.append({key: value.tolist() for (key, value) in element.items()})
        else:
            converted.append(element)
    return converted
def cl_parse(command, args, setup=None, details=None):
    """Parse *args* for a subcommand with an OptionParser.

    The usage text comes from ``subcommand_usages`` (a string or a list of
    alternative forms) and the description from ``subcommand_descriptions``,
    optionally extended with *details*. The *setup* callback, when given,
    may register command-specific options before the common options are
    added.

    Returns a ``(parser, options, args)`` tuple.
    """
    usage = subcommand_usages[command]
    if isinstance(usage, str):
        usage = [usage]
    descr = subcommand_descriptions[command]
    # First usage form, then one indented line per alternative form.
    usage_text = ('%s %s' % (program_name, usage[0]))
    for alternative in usage[1:]:
        usage_text += ('\n%s%s %s' % ((' ' * 7), program_name, alternative))
    description = ((descr[0].upper() + descr[1:]) + '.')
    if details:
        description += ('\n\n%s' % details)
    parser = OptionParser(usage=usage_text, description=description)
    if setup:
        setup(parser)
    add_common_options(parser)
    (options, args) = parser.parse_args(args)
    process_common_options(command, parser, options)
    return (parser, options, args)
class Effect6054(BaseEffect):
    """Passive effect: boosts heavy-drone hp using the ship's shipBonusGC2
    attribute, scaled by the Gallente Cruiser skill."""
    type = 'passive'
    # NOTE(review): `handler` takes `fit` first with no `self` — it appears
    # to be invoked as an unbound hook by the effect framework.
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Heavy Drone Operation')), 'hp', ship.getModifiedItemAttr('shipBonusGC2'), skill='Gallente Cruiser', **kwargs)
def test_conftest_found_with_double_dash(pytester: Pytester) -> None:
    """Options added by a sibling conftest show up in ``-h`` output even when
    the test is addressed with the ``file::test`` node-id syntax."""
    subdir = pytester.mkdir('sub')
    subdir.joinpath('conftest.py').write_text(textwrap.dedent(' def pytest_addoption(parser):\n parser.addoption("--hello-world", action="store_true")\n '), encoding='utf-8')
    test_file = subdir.joinpath('test_hello.py')
    test_file.write_text('def test_hello(): pass', encoding='utf-8')
    result = pytester.runpytest(f'{test_file}::test_hello', '-h')
    result.stdout.fnmatch_lines('\n *--hello-world*\n ')
def test_do_cleanups_on_setup_failure(pytester: Pytester) -> None:
    """Cleanups registered in setUp still run when setUp itself fails."""
    source = pytester.makepyfile('\n import unittest\n class MyTestCase(unittest.TestCase):\n values = []\n def setUp(self):\n def cleanup():\n self.values.append(1)\n self.addCleanup(cleanup)\n assert False\n def test_one(self):\n pass\n def test_two(self):\n pass\n def test_cleanup_called_the_right_number_of_times():\n assert MyTestCase.values == [1, 1]\n ')
    record = pytester.inline_run(source)
    passed, skipped, failed = record.countoutcomes()
    # Both unittest cases fail in setUp; the module-level check passes.
    assert failed == 2
    assert passed == 1
def test_slowfast():
    """Smoke-test 3D CAM extraction on a SlowFast recognizer with tiny inputs."""
    cfg = get_recognizer_cfg('slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
    model = build_recognizer(cfg.model)
    model.cfg = cfg
    shape = (1, 1, 3, 32, 32, 32)
    layer = 'backbone/slow_path/layer4/1/relu'
    _do_test_3D_models(model, layer, shape)
class Depth():
    """A column of gates, optionally mirroring another depth.

    When ``depth_to_copy`` is set, ``copy_gates`` replays the gates of that
    entry of the module-global ``master_depth_list`` at a new start position,
    walking the source either forward (``direction == 1``) or backward.
    """

    def __init__(self, depth_to_copy=None, direction=1):
        self.depth_to_copy = depth_to_copy
        self.direction = direction
        self.gate_list = []

    def add_gate(self, gate):
        """Append a single gate."""
        self.gate_list.append(gate)

    def add_gate_list(self, gate_list):
        """Append every gate from *gate_list* in order."""
        self.gate_list.extend(gate_list)

    def get_type(self):
        """Match type of the first gate, or None when empty."""
        return self.gate_list[0].match_type if self.gate_list else None

    def set_boundaries(self, start, end):
        """Record this depth's start/end positions."""
        self.start_position = start
        self.end_position = end

    def copy_gates(self, start_pos):
        """Re-position the copied depth's gates starting at *start_pos*.

        Each gate keeps its offset from the source's start (or end, when
        walking backward). Returns the end position of this depth.
        """
        global master_depth_list
        source = master_depth_list[self.depth_to_copy]
        for gate in source.gate_list:
            (old_pos, _old_dir) = gate.position_list[0]
            if self.direction == 1:
                offset = old_pos - source.start_position
            else:
                offset = source.end_position - old_pos
            gate.set_position(start_pos + offset, self.direction)
        end_pos = start_pos + (source.end_position - source.start_position)
        self.set_boundaries(start_pos, end_pos)
        return end_pos
def test_get_decimal_symbol():
    """Decimal-symbol lookup across locales and numbering systems."""
    assert (numbers.get_decimal_symbol('en_US') == '.')
    assert (numbers.get_decimal_symbol('en_US', numbering_system='default') == '.')
    assert (numbers.get_decimal_symbol('en_US', numbering_system='latn') == '.')
    assert (numbers.get_decimal_symbol('sv_SE') == ',')
    assert (numbers.get_decimal_symbol('ar_EG') == '.')
    # NOTE(review): the expected '' values below look like a non-ASCII
    # (Arabic) decimal separator lost in an encoding pass — confirm against
    # the upstream test before trusting these assertions.
    assert (numbers.get_decimal_symbol('ar_EG', numbering_system='default') == '')
    assert (numbers.get_decimal_symbol('ar_EG', numbering_system='latn') == '.')
    assert (numbers.get_decimal_symbol('ar_EG', numbering_system='arab') == '')
def adjust_learning_rate(p, optimizer, epoch):
    """Set the learning rate on *optimizer* for *epoch* and return it.

    Supported ``p['scheduler']`` values are ``'step'``, ``'poly'``,
    ``'cosine'`` and ``'constant'``; anything else raises ValueError. The
    base rate is read from ``p['optimizer_kwargs']['lr']``.
    """
    base_lr = p['optimizer_kwargs']['lr']
    schedule = p['scheduler']
    if schedule == 'step':
        # Decay once for every milestone epoch already passed.
        decays = np.sum(epoch > np.array(p['scheduler_kwargs']['lr_decay_epochs']))
        lr = base_lr * (p['scheduler_kwargs']['lr_decay_rate'] ** decays) if decays > 0 else base_lr
    elif schedule == 'poly':
        lr = base_lr * pow(1 - (epoch / p['epochs']), 0.9)
    elif schedule == 'cosine':
        # Anneal from base_lr down toward eta_min over the full run.
        eta_min = base_lr * (p['scheduler_kwargs']['lr_decay_rate'] ** 3)
        lr = eta_min + ((base_lr - eta_min) * (1 + math.cos((math.pi * epoch) / p['epochs']))) / 2
    elif schedule == 'constant':
        lr = base_lr
    else:
        raise ValueError('Invalid learning rate schedule {}'.format(schedule))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def test_sort_by_keywords():
    """sort_by_keywords splits args into flat positionals plus keyword buckets.

    The keyword map gives each keyword its argument count: 0 behaves like a
    boolean flag (value True), N > 0 collects up to N following args, and a
    repeated keyword accumulates values across all its occurrences.
    """
    keywords = {'KEY1': 2, 'KEY2': 0, 'KEY3': 1}
    args = 'aaaa bbbb KEY2 KEY1 kkk10 kkk11 ccc ddd KEY3 kkk3 eee'.split()
    (flat, spec) = sort_by_keywords(keywords, args)
    assert (flat == ['aaaa', 'bbbb', 'ccc', 'ddd', 'eee'])
    assert (spec == {'KEY1': ['kkk10', 'kkk11'], 'KEY2': True, 'KEY3': ['kkk3']})
    # A keyword may get fewer args than its count when the list runs out.
    keywords = {'KEY1': 0, 'KEY2': 4}
    args = 'aaaa KEY2 eee'.split()
    (flat, spec) = sort_by_keywords(keywords, args)
    assert (flat == ['aaaa'])
    assert (spec == {'KEY2': ['eee']})
    # Repeated keywords accumulate their collected values in order.
    keywords = {'KEY1': 2, 'KEY2': 2}
    args = 'KEY1 k10 KEY2 k20 KEY1 k11 KEY2 k21 KEY1 k13'.split()
    (flat, spec) = sort_by_keywords(keywords, args)
    assert (flat == [])
    assert (spec == {'KEY1': ['k10', 'k11', 'k13'], 'KEY2': ['k20', 'k21']})
class Loss(object):
    """Abstract base class for losses; subclasses implement _compute_loss."""
    __metaclass__ = ABCMeta
    def __call__(self, prediction_tensor, target_tensor, ignore_nan_targets=True, scope=None, **params):
        """Compute the loss inside a 'Loss' name scope.

        When *ignore_nan_targets* is True, non-finite target entries are
        replaced by the corresponding predictions so they contribute zero
        to the loss.
        """
        with tf.name_scope(scope, 'Loss', [prediction_tensor, target_tensor, params]) as scope:
            if ignore_nan_targets:
                target_tensor = tf.where(tf.is_finite(target_tensor), target_tensor, prediction_tensor)
            return self._compute_loss(prediction_tensor, target_tensor, **params)
    def _compute_loss(self, prediction_tensor, target_tensor, **params):
        """Subclass hook that computes the actual loss value."""
        pass
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Dropout for a sparse tensor *x* with *num_nonzero_elems* nonzeros.

    Each nonzero element is kept with probability *keep_prob* and the
    survivors are rescaled by ``1 / keep_prob`` so the expected magnitude
    is unchanged.
    """
    # floor(keep_prob + U[0, 1)) equals 1 with probability keep_prob.
    noise = keep_prob + tf.random_uniform([num_nonzero_elems])
    keep_mask = tf.cast(tf.floor(noise), dtype=tf.bool)
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1.0 / keep_prob)
# NOTE(review): the line below looks like the argument list of a stripped
# @unittest.skipIf decorator — restore the decorator before running.
((not torch.cuda.is_available()), 'test requires a GPU')
class TestQuantization(unittest.TestCase):
    """End-to-end smoke test: preprocess dummy LM data, then quantize a transformer LM."""
    def setUp(self):
        # Silence all logging for the duration of the test.
        logging.disable(logging.CRITICAL)
    def tearDown(self):
        logging.disable(logging.NOTSET)
    def test_quantization(self):
        # Swallow stdout from the pipeline; work in a throwaway directory.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_quantization') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                _quantize_language_model(data_dir, 'transformer_lm')
def test_background_plotting_add_callback(qtbot, monkeypatch, plotting):
    """Exercise BackgroundPlotter app-icon updates and timed callbacks.

    Checks that update_app_icon is invoked (counted via monkeypatching),
    that set_icon validates its input, and that callbacks added with and
    without a count fire and stop as expected.
    """
    class CallBack(object):
        def __init__(self, sphere):
            # Hold only a weak reference so the callback does not keep the
            # mesh alive.
            self.sphere = weakref.ref(sphere)
        def __call__(self):
            self.sphere().points[:] = (self.sphere().points * 0.5)
    update_count = [0]
    orig_update_app_icon = BackgroundPlotter.update_app_icon
    def update_app_icon(slf):
        # Count invocations, then delegate to the real implementation.
        update_count[0] = (update_count[0] + 1)
        return orig_update_app_icon(slf)
    monkeypatch.setattr(BackgroundPlotter, 'update_app_icon', update_app_icon)
    plotter = BackgroundPlotter(show=False, off_screen=False, title='Testing Window', update_app_icon=True)
    assert_hasattr(plotter, 'app_window', MainWindow)
    assert_hasattr(plotter, '_callback_timer', QTimer)
    assert_hasattr(plotter, 'counters', list)
    assert (plotter._last_update_time == (- np.inf))
    sphere = pyvista.Sphere()
    plotter.add_mesh(sphere)
    mycallback = CallBack(sphere)
    window = plotter.app_window
    callback_timer = plotter._callback_timer
    assert callback_timer.isActive()
    assert (not window.isVisible())
    with qtbot.wait_exposed(window):
        window.show()
    assert window.isVisible()
    # Icon updates may or may not have happened yet depending on timing,
    # hence the accepted ranges below.
    assert (update_count[0] in [0, 1])
    plotter.update_app_icon()
    assert (update_count[0] in [1, 2])
    plotter.update_app_icon()
    assert (update_count[0] in [2, 3])
    with pytest.raises(ValueError, match='ndarray with shape'):
        plotter.set_icon(0.0)
    plotter.set_icon(os.path.join(os.path.dirname(pyvistaqt.__file__), 'data', 'pyvista_logo_square.png'))
    # Counted callback: fires 3 times at a 200 ms interval, then the timer
    # stops itself.
    callback_timer.stop()
    assert (not callback_timer.isActive())
    plotter.add_callback(mycallback, interval=200, count=3)
    callback_timer = plotter._callback_timer
    assert callback_timer.isActive()
    counter = plotter.counters[(- 1)]
    callback_blocker = qtbot.wait_signals([callback_timer.timeout], timeout=2000)
    callback_blocker.wait()
    counter_blocker = qtbot.wait_signals([counter.signal_finished], timeout=2000)
    counter_blocker.wait()
    assert (not callback_timer.isActive())
    # Uncounted callback: its timer keeps running until the plotter closes.
    plotter.add_callback(mycallback, interval=200)
    callback_timer = plotter._callback_timer
    assert callback_timer.isActive()
    callback_blocker = qtbot.wait_signals([callback_timer.timeout], timeout=5000)
    callback_blocker.wait()
    plotter.close()
    assert (not callback_timer.isActive())
def optimizer_kwargs_gf(parsed_args):
    """Collect optimizer keyword args from parsed CLI args, using the
    gf-specific learning rate (``lr_gf``) for the ``lr`` entry."""
    shared_names = (
        'optim', 'weight_decay', 'momentum', 'sgd_dampening', 'sgd_nesterov',
        'rmsprop_alpha', 'adam_beta1', 'adam_beta2',
    )
    kwargs = {name: getattr(parsed_args, name) for name in shared_names}
    kwargs['lr'] = parsed_args.lr_gf
    return kwargs
class ResBlock(nn.Module):
    """Residual block: two convolutions (activation after the first,
    optional BatchNorm after each) whose scaled output is added to the input."""

    def __init__(self, num_channels, kernel_size=3, bias=True, bn=False, act=nn.ReLU(True), res_scale=1, **kwargs):
        super(ResBlock, self).__init__()
        layers = []
        for idx in range(2):
            layers.append(nn.Conv2d(num_channels, num_channels, kernel_size, stride=1, padding=1, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(num_channels))
            # Activation only between the two convolutions.
            if idx == 0:
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale
        initialize_weights([self.body], 0.1)

    def forward(self, x):
        # Scaled residual branch plus identity shortcut.
        out = self.body(x).mul(self.res_scale)
        return out + x
class VOCDataset(BaseDataSet):
    """Pascal VOC 2012 semantic-segmentation dataset (21 classes incl. background)."""

    def __init__(self, **kwargs):
        self.num_classes = 21
        self.palette = palette.get_voc_palette(self.num_classes)
        super(VOCDataset, self).__init__(**kwargs)

    def _set_files(self):
        """Resolve the VOC directory layout and read the split's image-id list."""
        self.root = os.path.join(self.root, 'VOCdevkit/VOC2012')
        self.image_dir = os.path.join(self.root, 'JPEGImages')
        self.label_dir = os.path.join(self.root, 'SegmentationClass')
        file_list = os.path.join(self.root, 'ImageSets/Segmentation', (self.split + '.txt'))
        # FIX: use a context manager — the original `tuple(open(...))` leaked
        # the open file handle.
        with open(file_list, 'r') as f:
            self.files = [line.rstrip() for line in f]

    def _load_data(self, index):
        """Return (image float32 array, label int32 array, bare image id) for *index*."""
        image_id = self.files[index]
        image_path = os.path.join(self.image_dir, (image_id + '.jpg'))
        label_path = os.path.join(self.label_dir, (image_id + '.png'))
        image = np.asarray(Image.open(image_path), dtype=np.float32)
        label = np.asarray(Image.open(label_path), dtype=np.int32)
        # Strip any directory prefix and extension so the returned id is bare.
        image_id = self.files[index].split('/')[(- 1)].split('.')[0]
        return (image, label, image_id)
def get_norm(norm, out_channels, **kwargs):
    """Instantiate a normalization layer for *out_channels* channels.

    *norm* may be a callable (used directly) or one of the string keys
    'BN', 'syncBN', 'GhostBN', 'FrozenBN', 'GN'. An empty string yields None.
    """
    if isinstance(norm, str):
        if not norm:
            return None
        factories = {
            'BN': BatchNorm,
            'syncBN': SyncBatchNorm,
            'GhostBN': GhostBatchNorm,
            'FrozenBN': FrozenBatchNorm,
            # GroupNorm with a fixed 32 groups; extra kwargs are ignored.
            'GN': (lambda channels, **args: nn.GroupNorm(32, channels)),
        }
        norm = factories[norm]
    return norm(out_channels, **kwargs)
def state_dict() -> Dict[(str, Any)]:
    """Build the frontend state: the base state plus a 'musiq' section.

    The musiq section carries playback flags, the current song (or an
    alarm / backup-stream placeholder), playback progress as a percentage,
    and the song queue with human-readable durations.
    """
    state = base.state_dict()
    musiq_state: Dict[(str, Any)] = {}
    musiq_state['paused'] = storage.get('paused')
    musiq_state['shuffle'] = storage.get('shuffle')
    musiq_state['repeat'] = storage.get('repeat')
    musiq_state['autoplay'] = storage.get('autoplay')
    musiq_state['volume'] = storage.get('volume')
    try:
        current_song = CurrentSong.objects.get()
        current_song_dict = model_to_dict(current_song)
        current_song_dict = util.camelize(current_song_dict)
        current_song_dict['durationFormatted'] = song_utils.format_seconds(current_song_dict['duration'])
        musiq_state['currentSong'] = current_song_dict
        paused = storage.get('paused')
        # Progress is frozen at the pause point while paused; otherwise it
        # advances with wall-clock time. Normalized to percent below.
        if paused:
            progress = (current_song.last_paused - current_song.created).total_seconds()
        else:
            progress = (timezone.now() - current_song.created).total_seconds()
        progress /= current_song.duration
        musiq_state['progress'] = (progress * 100)
    except CurrentSong.DoesNotExist:
        # Nothing is playing right now.
        musiq_state['currentSong'] = None
        musiq_state['paused'] = True
        musiq_state['progress'] = 0
    song_queue = []
    total_time = 0
    all_songs = queue.all()
    if (storage.get('interactivity') in [storage.Interactivity.upvotes_only, storage.Interactivity.full_voting]):
        # In voting modes the queue is ordered by vote count first.
        all_songs = all_songs.order_by('-votes', 'index')
    for song in all_songs:
        song_dict = model_to_dict(song)
        song_dict = util.camelize(song_dict)
        song_dict['durationFormatted'] = song_utils.format_seconds(song_dict['duration'])
        song_queue.append(song_dict)
        # Negative durations mark songs of unknown length; exclude them from
        # the total.
        if (song_dict['duration'] < 0):
            continue
        total_time += song_dict['duration']
    musiq_state['totalTimeFormatted'] = song_utils.format_seconds(total_time)
    musiq_state['songQueue'] = song_queue
    if state['alarm']:
        # The alarm overrides whatever is currently playing.
        musiq_state['currentSong'] = {'queueKey': (- 1), 'manuallyRequested': False, 'votes': 0, 'created': '', **util.camelize(cast(Dict[(Any, Any)], get_alarm_metadata()))}
        musiq_state['progress'] = 0
        musiq_state['paused'] = False
    elif redis.get('backup_playing'):
        # Backup-stream placeholder: a pseudo-song with a 24 h duration.
        musiq_state['currentSong'] = {'queueKey': (- 1), 'manuallyRequested': False, 'votes': 0, 'internalUrl': 'backup_stream', 'externalUrl': storage.get('backup_stream'), 'artist': '', 'title': 'Backup Stream', 'duration': ((60 * 60) * 24), 'created': ''}
        musiq_state['paused'] = False
    state['musiq'] = musiq_state
    return state
def convert_to_one_fraction_group(dicom_dataset, fraction_group_number):
    """Return a deep copy of *dicom_dataset* reduced to one fraction group.

    The copy keeps only the beams and the FractionGroupSequence entry that
    belong to *fraction_group_number*; the input dataset is not modified.
    """
    reduced = deepcopy(dicom_dataset)
    (beam_sequence, _) = get_fraction_group_beam_sequence_and_meterset(dicom_dataset, fraction_group_number)
    reduced.BeamSequence = beam_sequence
    group_index = get_fraction_group_index(dicom_dataset, fraction_group_number)
    reduced.FractionGroupSequence = [reduced.FractionGroupSequence[group_index]]
    return reduced
def generate_stub_for_py_module(mod: StubSource, target: str, *, parse_only: bool=False, inspect: bool=False, include_private: bool=False, export_less: bool=False, include_docstrings: bool=False, doc_dir: str='', all_modules: list[str]) -> None:
    """Generate a stub for *mod* and write it to *target*.

    With ``inspect=True`` the module is introspected at runtime via the
    inspection-based generator; otherwise the (already analyzed) AST is
    walked. Parent directories of *target* are created as needed.
    """
    if inspect:
        runtime_gen = InspectionStubGenerator(module_name=mod.module, known_modules=all_modules, _all_=mod.runtime_all, doc_dir=doc_dir, include_private=include_private, export_less=export_less, include_docstrings=include_docstrings)
        runtime_gen.generate_module()
        output = runtime_gen.output()
    else:
        ast_gen = ASTStubGenerator(mod.runtime_all, include_private=include_private, analyzed=(not parse_only), export_less=export_less, include_docstrings=include_docstrings)
        assert (mod.ast is not None), 'This function must be used only with analyzed modules'
        mod.ast.accept(ast_gen)
        output = ast_gen.output()
    parent = os.path.dirname(target)
    if parent and (not os.path.isdir(parent)):
        os.makedirs(parent)
    with open(target, 'w', encoding='utf-8') as stub_file:
        stub_file.write(output)
class Effect4490(BaseEffect):
    """Active effect: stacking-penalized max-velocity boost plus an increase
    to the ship's warp-scramble status (siegeModeWarpStatus attribute)."""
    dealsDamage = True
    type = 'active'
    # NOTE(review): `handler` takes `fit` first with no `self` — it appears
    # to be invoked as an unbound hook by the effect framework.
    def handler(fit, mod, context, projectionRange, **kwargs):
        fit.ship.boostItemAttr('maxVelocity', mod.getModifiedItemAttr('speedFactor'), stackingPenalties=True, **kwargs)
        fit.ship.increaseItemAttr('warpScrambleStatus', mod.getModifiedItemAttr('siegeModeWarpStatus'), **kwargs)
class AddressType():
    """Finds, corrects and classifies addresses, with a preloaded "hot" map.

    The hot map (name -> corrected address) is loaded either from the file
    named by the PYUNIT_ADDRESS_HOT_FILE environment variable or from the
    bundled ``hot.zip`` archive next to this module.
    """

    def __init__(self):
        self.hot = {}
        hot_file = os.environ.get('PYUNIT_ADDRESS_HOT_FILE', None)
        if hot_file:
            # Whitespace-separated "name addr" lines.
            with open(hot_file, encoding='utf-8') as fp:
                for line in fp.readlines():
                    (name, addr) = line.strip().split()
                    self.hot[name] = addr
        else:
            # Fallback: tab-separated hot.txt inside the bundled zip.
            zip_file = os.path.join(os.path.dirname(__file__), 'hot.zip')
            zips = zipfile.ZipFile(zip_file, 'r')
            data = zips.read('hot.txt').decode('utf-8')
            for line in data.split('\r\n'):
                if line:
                    (name, addr) = line.replace(' ', '').split('\t')
                    self.hot[name] = addr
        self.address = Address()

    # BUG FIX: this method is called as self.get_address_type(ca) below but
    # was declared without `self`, so every call raised TypeError.
    def get_address_type(self, address):
        """Classify *address* by the marker substrings it contains.

        NOTE(review): the marker literals below are all empty strings — they
        appear to have been lost in an encoding pass (likely non-ASCII
        keywords). Restore them before relying on this classification; as
        written, the first branch always matches.
        """
        if (('' in address) or ('' in address)):
            return ''
        elif (('' in address) or ('' in address)):
            return ''
        elif ('' in address):
            return ''
        elif ('' in address):
            return ''
        return None

    def address_message(self, word):
        """Return hot-map hits plus extracted/corrected addresses for *word*.

        Each result entry carries the raw address, supplement suggestions,
        the corrected address and its classified type.
        """
        start = time.time()
        result = []
        # Hot-map entries are simple substring matches against *word*.
        for key in self.hot:
            if (word and (key in word)):
                types = ''
                ca = self.hot[key]
                result.append({'address': key, 'supplement_address': [], 'correct_address': ca, 'type': types})
        finds = find_address(self.address, word)
        for find in finds:
            sa = supplement_address(self.address, find)
            ca = correct_address(self.address, find)
            s = [{'key': i} for i in sa]
            types = self.get_address_type(ca)
            result.append({'address': find, 'supplement_address': s, 'correct_address': ca, 'type': types})
        print((time.time() - start))
        return result

    def add_vague_text(self, words, separators):
        """Register fuzzy text/separators with the underlying Address model."""
        self.address.add_vague_text(words, separators)

    def delete_vague_text(self, words):
        """Remove previously registered fuzzy text from the Address model."""
        self.address.delete_vague_text(words)
class TestIndex():
    """Tests for the typed-list Index op (position of an element in a list)."""
    def test_sanity_check(self):
        """Index of a matrix that is the second list element is 1."""
        mySymbolicMatricesList = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None)))()
        myMatrix = matrix()
        z = Index()(mySymbolicMatricesList, myMatrix)
        f = pytensor.function([mySymbolicMatricesList, myMatrix], z)
        x = rand_ranged_matrix((- 1000), 1000, [100, 101])
        y = rand_ranged_matrix((- 1000), 1000, [100, 101])
        assert (f([x, y], y) == 1)
    def test_interface(self):
        """Same lookup through the list variable's .ind() convenience method."""
        mySymbolicMatricesList = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None)))()
        myMatrix = matrix()
        z = mySymbolicMatricesList.ind(myMatrix)
        f = pytensor.function([mySymbolicMatricesList, myMatrix], z)
        x = rand_ranged_matrix((- 1000), 1000, [100, 101])
        y = rand_ranged_matrix((- 1000), 1000, [100, 101])
        assert (f([x, y], y) == 1)
    def test_non_tensor_type(self):
        """Index works when the searched element is itself a typed list."""
        mySymbolicNestedMatricesList = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None)), 1)()
        mySymbolicMatricesList = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None)))()
        z = Index()(mySymbolicNestedMatricesList, mySymbolicMatricesList)
        f = pytensor.function([mySymbolicNestedMatricesList, mySymbolicMatricesList], z)
        x = rand_ranged_matrix((- 1000), 1000, [100, 101])
        y = rand_ranged_matrix((- 1000), 1000, [100, 101])
        assert (f([[x, y], [x, y, y]], [x, y]) == 0)
    def test_sparse(self):
        """Index works for lists of scipy CSR sparse matrices."""
        sp = pytest.importorskip('scipy')
        mySymbolicSparseList = TypedListType(sparse.SparseTensorType('csr', pytensor.config.floatX))()
        mySymbolicSparse = sparse.csr_matrix()
        z = Index()(mySymbolicSparseList, mySymbolicSparse)
        f = pytensor.function([mySymbolicSparseList, mySymbolicSparse], z)
        x = sp.sparse.csr_matrix(random_lil((10, 40), pytensor.config.floatX, 3))
        y = sp.sparse.csr_matrix(random_lil((10, 40), pytensor.config.floatX, 3))
        assert (f([x, y], y) == 1)
class TestTreeItem(unittest.TestCase):
    """Smoke test for gui.TreeItem construction and its HTML rendering."""

    def test_init(self):
        root = gui.TreeItem('test tree item')
        child = gui.TreeItem('2nd tree item')
        root.append(child)
        # Both labels must appear in the rendered HTML, and the HTML itself
        # must be well-formed.
        self.assertIn('test tree item', root.repr())
        self.assertIn('2nd tree item', root.repr())
        assertValidHTML(root.repr())
def smart_contract_filters_from_node_state(chain_state: ChainState, secret_registry_address: SecretRegistryAddress, service_registry: Optional[ServiceRegistry]) -> RaidenContractFilter:
    """Build a RaidenContractFilter covering every registry, token network
    and channel known to *chain_state*."""
    registries = list(chain_state.identifiers_to_tokennetworkregistries.values())
    networks = [network for registry in registries for network in registry.token_network_list]
    channel_ids_by_network = {}
    for network in networks:
        if network.channelidentifiers_to_channels:
            channel_ids_by_network[network.address] = set(network.channelidentifiers_to_channels.keys())
    return RaidenContractFilter(
        secret_registry_address=secret_registry_address,
        token_network_registry_addresses={registry.address for registry in registries},
        token_network_addresses={network.address for network in networks},
        channels_of_token_network=channel_ids_by_network,
        # With no channels yet, keep watching the secret registry until one
        # is found.
        ignore_secret_registry_until_channel_found=(not channel_ids_by_network),
        service_registry=service_registry,
    )
class BirthdayParty(QObject):
    """QML-exposed party object with a host, a guest list and announcements.

    NOTE(review): several decorators (pyqtProperty(...) / setters /
    pyqtSlot(...)) appear to have been stripped — the bare parenthesized
    lines below such as ``(Person, notify=hostChanged)`` are their leftover
    argument lists, and the duplicated method names (host, announcement)
    were getter/setter pairs. Restore the decorators before use; as written
    this class body is not valid.
    """
    Q_CLASSINFO('DefaultProperty', 'guests')
    partyStarted = pyqtSignal(QTime, arguments=['time'])
    def __init__(self, parent=None):
        super(BirthdayParty, self).__init__(parent)
        self._host = None
        self._guests = []
    hostChanged = pyqtSignal()
    (Person, notify=hostChanged)
    def host(self):
        return self._host
    def host(self, host):
        # Setter half of the host property: emits hostChanged on change.
        if (self._host != host):
            self._host = host
            self.hostChanged.emit()
    (QQmlListProperty)
    def guests(self):
        return QQmlListProperty(Person, self, self._guests)
    (str)
    def announcement(self):
        return ''
    def announcement(self, announcement):
        print(announcement)
    def startParty(self):
        self.partyStarted.emit(QTime.currentTime())
def write_file(path: str, contents: str) -> None:
    """Write *contents* (UTF-8) to *path*, but only if the bytes would change.

    When the new bytes equal the file's current contents the file is left
    completely untouched. When a write does happen, missing parent
    directories are created and the mtime is bumped one second past the
    write time so timestamp-based consumers reliably observe a change.
    """
    encoded_contents = contents.encode('utf-8')
    try:
        with open(path, 'rb') as f:
            old_contents: (bytes | None) = f.read()
    except OSError:
        old_contents = None
    if (old_contents != encoded_contents):
        parent = os.path.dirname(path)
        # FIX: os.makedirs('') raises FileNotFoundError for a bare filename
        # (no directory component); only create directories when one exists.
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(path, 'wb') as g:
            g.write(encoded_contents)
        new_mtime = (os.stat(path).st_mtime + 1)
        os.utime(path, times=(new_mtime, new_mtime))
def augment_and_mix_transform(config_str, hparams):
    """Create an AugMix transform from a dash-separated config string.

    The string must start with 'augmix'; subsequent sections set magnitude
    (m), width (w), depth (d), alpha (a), blended (b) and magnitude std
    (mstd, stored into *hparams*). Sections without a numeric part are
    skipped; unknown keys are an error.
    """
    magnitude = 3
    width = 3
    depth = -1
    alpha = 1.0
    blended = False
    sections = config_str.split('-')
    assert (sections[0] == 'augmix')
    for section in sections[1:]:
        parts = re.split('(\\d.*)', section)
        if len(parts) < 2:
            continue
        key, val = parts[0], parts[1]
        if key == 'mstd':
            hparams.setdefault('magnitude_std', float(val))
        elif key == 'm':
            magnitude = int(val)
        elif key == 'w':
            width = int(val)
        elif key == 'd':
            depth = int(val)
        elif key == 'a':
            alpha = float(val)
        elif key == 'b':
            # NOTE: bool(val) is True for any non-empty digit string,
            # including '0'.
            blended = bool(val)
        else:
            assert False, 'Unknown AugMix config section'
    ops = augmix_ops(magnitude=magnitude, hparams=hparams)
    return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended)
def begin_level(options):
    """Push a new drawing level, inheriting color/style/fill defaults.

    Defaults come from the parent level when one exists; the outermost level
    resets the module-wide ``level_list`` and falls back to the background
    color for its fill.
    """
    global level_stack, level_list, bgcolor
    if level_stack:
        parent = level_stack[(- 1)]
        inherited_color = parent.get('color', None)
        inherited_style = parent.get('style', None)
        inherited_fill = parent.get('fill', None)
    else:
        # First level: start a fresh list of levels.
        level_list = []
        inherited_color = None
        inherited_style = None
        inherited_fill = bgcolor
    level_stack.append({
        'color': options.get('color', inherited_color),
        'style': options.get('style', inherited_style),
        'fill': options.get('fill', inherited_fill),
    })
# NOTE(review): the line below looks like the argument list of a stripped
# task-registration decorator (e.g. @register_task) — restore it before use.
_task('fasthubert_pretraining', dataclass=FastHubertPretrainingConfig)
class HubertFbankPretrainingTask(HubertPretrainingTask):
    """Hubert pretraining task variant that loads FastHubert fbank datasets."""
    cfg: FastHubertPretrainingConfig
    def __init__(self, cfg: FastHubertPretrainingConfig) -> None:
        super().__init__(cfg)
    # NOTE(review): takes `cls` — this appears to be a stripped @classmethod.
    def setup_task(cls, cfg: FastHubertPretrainingConfig, **kwargs) -> 'HubertFbankPretrainingTask':
        return cls(cfg)
    def load_dataset(self, split: str, **kwargs) -> None:
        """Build a FastHubertDataset for *split* from the tsv manifest and label files."""
        manifest = f'{self.cfg.data}/{split}.tsv'
        # In fine-tuning mode only the target dictionary applies; otherwise
        # one dictionary (and label file) per configured label type.
        dicts = ([self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries)
        pad_list = [dict.pad() for dict in dicts]
        eos_list = [dict.eos() for dict in dicts]
        procs = [LabelEncoder(dict) for dict in dicts]
        paths = [f'{self.get_label_dir()}/{split}.{l}' for l in self.cfg.labels]
        self.datasets[split] = FastHubertDataset(manifest, sample_rate=self.cfg.sample_rate, label_paths=paths, label_rates=self.cfg.label_rate, pad_list=pad_list, eos_list=eos_list, stats_npz_path=self.cfg.stats_npz_path, label_processors=procs, max_keep_sample_size=self.cfg.max_keep_size, min_keep_sample_size=self.cfg.min_sample_size, max_sample_size=self.cfg.max_sample_size, pad_audio=self.cfg.pad_audio, normalize=self.cfg.normalize, store_labels=False, random_crop=self.cfg.random_crop, single_target=self.cfg.single_target)
class WithFutureMinuteBarData(WithAssetFinder, WithTradingCalendars):
    """Test fixture mixin supplying futures minute-bar data on the
    us_futures calendar between the configured start/end dates."""
    FUTURE_MINUTE_BAR_LOOKBACK_DAYS = 0
    FUTURE_MINUTE_BAR_START_DATE = alias('START_DATE')
    FUTURE_MINUTE_BAR_END_DATE = alias('END_DATE')
    # NOTE(review): both methods below take `cls` — they appear to be
    # stripped @classmethods.
    def make_future_minute_bar_data(cls):
        trading_calendar = get_calendar('us_futures')
        return create_minute_bar_data(trading_calendar.minutes_for_sessions_in_range(cls.future_minute_bar_days[0], cls.future_minute_bar_days[(- 1)]), cls.asset_finder.futures_sids)
    def init_class_fixtures(cls):
        super(WithFutureMinuteBarData, cls).init_class_fixtures()
        trading_calendar = get_calendar('us_futures')
        # Resolve the session range (including lookback) once per class.
        cls.future_minute_bar_days = _trading_days_for_minute_bars(trading_calendar, pd.Timestamp(cls.FUTURE_MINUTE_BAR_START_DATE), pd.Timestamp(cls.FUTURE_MINUTE_BAR_END_DATE), cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS)
def clip_grad_norm_dp(named_parameters, target_params, max_norm, norm_type=2):
    """Clip the gradients of parameters that differ from *target_params*.

    *named_parameters* is an iterable of ``(name, parameter)`` pairs and
    *target_params* maps names to reference parameters; a parameter is
    selected when ``param - target_params[name]`` is truthy. Selected
    gradients are scaled in place so their combined *norm_type*-norm does
    not exceed *max_norm*. Returns the (pre-clip) total norm.
    """
    # BUG FIX: the original kept the (name, param) tuples in `parameters`
    # and then accessed `p.grad` on the tuples, raising AttributeError;
    # unpack the parameter tensors here.
    # NOTE(review): tensor truthiness is only defined for single-element
    # tensors — confirm the intended selection rule for multi-element params.
    parameters = [param for (name, param) in named_parameters if (param - target_params[name])]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if (norm_type == float('inf')):
        total_norm = max((p.grad.data.abs().max() for p in parameters))
    else:
        total_norm = 0
        for p in parameters:
            param_norm = p.grad.data.norm(norm_type)
            total_norm += (param_norm ** norm_type)
        total_norm = (total_norm ** (1.0 / norm_type))
    clip_coef = (max_norm / (total_norm + 1e-06))
    if (clip_coef < 1):
        for p in parameters:
            p.grad.data.mul_(clip_coef)
    return total_norm
class TestAdamWOptimizer(TestOptimizer, unittest.TestCase):
    """Harness parameterization running the shared optimizer tests for AdamW."""
    def _check_momentum_buffer(self):
        # Returning False skips the momentum-buffer check for this optimizer.
        return False
    def _get_config(self):
        """Optimizer config used by the shared test harness."""
        return {'name': 'adamw', 'num_epochs': 90, 'lr': 0.1, 'betas': (0.9, 0.99), 'eps': 1e-08, 'weight_decay': 0.0001, 'amsgrad': False}
    def _instance_to_test(self):
        """The optimizer class under test."""
        return AdamW
class ComplexWebQuestions(datasets.GeneratorBasedBuilder):
    """HF datasets builder for the ComplexWebQuestions dataset (questions,
    answers and KG tuples, split into train/dev/test jsonl files)."""
    VERSION = datasets.Version('1.0.0')
    BUILDER_CONFIGS = [datasets.BuilderConfig(name='compwebq', version=VERSION, description='ComplexWebQuestions Dataset')]
    def __init__(self, *args, writer_batch_size=None, **kwargs):
        super().__init__(*args, writer_batch_size=writer_batch_size, **kwargs)
        # NOTE(review): not used within this class as shown — presumably
        # consumed elsewhere; confirm before removing.
        self.schema_cache = dict()
    def _info(self):
        """Dataset metadata: feature schema plus description/homepage/license."""
        features = datasets.Features({'id': datasets.Value('string'), 'question': datasets.Value('string'), 'answers': datasets.features.Sequence(datasets.Value('string')), 'kg_tuples': datasets.features.Sequence(datasets.features.Sequence(datasets.Value('string')))})
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features, supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION)
    def _split_generators(self, dl_manager):
        """Download/extract the archive and map each split to its jsonl file."""
        downloaded_filepath = dl_manager.download_and_extract(_URL)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'data_filepath': (downloaded_filepath + '/compwebq/train.jsonl')}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'data_filepath': (downloaded_filepath + '/compwebq/dev.jsonl')}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'data_filepath': (downloaded_filepath + '/compwebq/test.jsonl')})]
    def _generate_examples(self, data_filepath):
        """Yield (index, example) pairs from one jsonl split file."""
        logger.info('generating examples from = %s', data_filepath)
        with open(data_filepath, encoding='utf-8') as f:
            for (idx, line) in enumerate(f):
                ex = json.loads(line)
                (yield (idx, {'id': ex['ID'], 'question': ex['question'], 'answers': ex['answers'], 'kg_tuples': ex['KG_tuples']}))
class WindowCreateFullScreenEventSequenceTest(EventSequenceTest, unittest.TestCase):
    """Check that a fullscreen window fires on_resize, on_show and on_expose
    in that order when created."""
    # Sequence number of the final expected event.
    last_sequence = 3
    def on_resize(self, width, height):
        self.check_sequence(1, 'on_resize')
    def on_show(self):
        self.check_sequence(2, 'on_show')
    def on_expose(self):
        self.check_sequence(3, 'on_expose')
    def test_method(self):
        window.Window._enable_event_queue = True
        win = window.Window(fullscreen=True)
        try:
            win.push_handlers(self)
            self.check_sequence(0, 'begin')
            # Pump events until the full sequence has been observed (or the
            # window is closed).
            while ((not win.has_exit) and (not self.finished)):
                win.dispatch_events()
                self.check()
        finally:
            win.close()
def main():
    """Train/evaluate/predict a sequence-classification model on GLUE-style data.

    Pipeline: parse arguments -> configure logging -> detect a resumable
    checkpoint -> load dataset (named task, hub dataset, or local csv/json
    files) -> build config/tokenizer/model -> tokenize -> optionally subselect
    training examples for annotation -> run the HuggingFace ``Trainer``.

    Fixes vs. previous revision:
    * the process-info warning concatenated two f-strings without a separator;
    * the label-mismatch ``logger.warning`` passed its detail message as a
      positional %-argument with no ``%s`` in the format string, so the
      details were silently dropped by the logging module.
    """
    # ---- Arguments: either a single .json config file or normal CLI flags ----
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()

    # ---- Logging setup (shared across this process and the HF libraries) ----
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # FIX: joined with ", " — previously rendered "...n_gpu: 1distributed training...".
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # ---- Detect a previous checkpoint in the output directory ----
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)

    # ---- Dataset loading: named task, hub dataset, or local csv/json files ----
    if data_args.task_name is not None:
        if data_args.task_name in ['dbpedia_14']:
            raw_datasets = load_dataset('dbpedia_14', cache_dir=model_args.cache_dir)
        elif data_args.task_name in ['sst5']:
            raw_datasets = load_dataset('SetFit/sst5')
        else:
            raw_datasets = load_dataset('glue', data_args.task_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    elif data_args.dataset_name is not None:
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
        if training_args.do_predict:
            if data_args.test_file is not None:
                # Train and test files must share a format (csv vs json).
                train_extension = data_args.train_file.split('.')[-1]
                test_extension = data_args.test_file.split('.')[-1]
                assert test_extension == train_extension, '`test_file` should have the same extension (csv or json) as `train_file`.'
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.')
        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}')
        if data_args.train_file.endswith('.csv'):
            raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)

    # ---- Label space: regression vs classification, and the label list ----
    if data_args.task_name is not None:
        is_regression = (data_args.task_name == 'stsb')
        if not is_regression:
            if data_args.task_name in ['sst5']:
                # sst5 ships integer labels; use human-readable names instead.
                label_list = ['very negative', 'negative', 'neutral', 'positive', 'very positive']
            else:
                label_list = raw_datasets['train'].features['label'].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # For custom datasets, infer regression from the label dtype.
        is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
        if is_regression:
            num_labels = 1
        else:
            label_list = raw_datasets['train'].unique('label')
            label_list.sort()  # deterministic label ids
            num_labels = len(label_list)

    # ---- Model / tokenizer / config ----
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))

    # ---- Which columns hold the input sentence(s)? ----
    if data_args.task_name is not None:
        (sentence1_key, sentence2_key) = task_to_keys[data_args.task_name]
    else:
        non_label_column_names = [name for name in raw_datasets['train'].column_names if name != 'label']
        if ('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names):
            (sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
        elif len(non_label_column_names) >= 2:
            (sentence1_key, sentence2_key) = non_label_column_names[:2]
        else:
            (sentence1_key, sentence2_key) = (non_label_column_names[0], None)
    # Pad eagerly only when requested; otherwise pad dynamically per batch.
    padding = 'max_length' if data_args.pad_to_max_length else False

    # ---- Reconcile model label ids with the dataset label list ----
    label_to_id = None
    if (model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (data_args.task_name is not None) and (not is_regression):
        label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
        if sorted(label_name_to_id.keys()) == sorted(label_list):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            # FIX: previously the detail f-string was passed as a second
            # positional argument and dropped by logging's %-formatting.
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f'model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}.'
                '\nIgnoring the model labels as a result.'
            )
    elif (data_args.task_name is None) and (not is_regression):
        label_to_id = {v: i for (i, v) in enumerate(label_list)}
    if label_to_id is not None:
        model.config.label2id = label_to_id
        model.config.id2label = {id: label for (label, id) in config.label2id.items()}
    elif (data_args.task_name is not None) and (not is_regression):
        model.config.label2id = {l: i for (i, l) in enumerate(label_list)}
        model.config.id2label = {id: label for (label, id) in config.label2id.items()}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        """Tokenize one batch and remap string labels to ids if needed."""
        args = ((examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]))
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
        if (label_to_id is not None) and ('label' in examples):
            # -1 is the conventional "no label" marker and is left untouched.
            result['label'] = [(label_to_id[l] if l != -1 else -1) for l in examples['label']]
        return result

    with training_args.main_process_first(desc='dataset map pre-processing'):
        raw_datasets = raw_datasets.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')

    # ---- Training set, with optional annotation-budget example selection ----
    if training_args.do_train:
        if 'train' not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.selection_method is not None:
            if data_args.selection_method in ['random']:
                selected_indices = random.sample(range(len(train_dataset)), data_args.annotation_size)
            elif (data_args.selection_method in ['self_dissimilar']) or (data_args.selection_method.startswith('vote') and data_args.selection_method.endswith('select')):
                # Embedding-based selection: load cached sentence embeddings or
                # compute and cache them.
                if os.path.isfile(os.path.join(data_args.train_emb_dir, f'{data_args.task_name}_train_embs.json')):
                    with open(os.path.join(data_args.train_emb_dir, f'{data_args.task_name}_train_embs.json')) as f:
                        embs = json.load(f)
                    embs = torch.tensor(embs)
                else:
                    if not os.path.isdir(data_args.train_emb_dir):
                        os.makedirs(data_args.train_emb_dir, exist_ok=True)
                    embs = calculate_sentence_transformer_embedding(train_dataset, embedding_model=data_args.sentence_transformer_model, task_name=data_args.task_name, mean_normal=True)
                    with open(os.path.join(data_args.train_emb_dir, f'{data_args.task_name}_train_embs.json'), 'w') as f:
                        json.dump(embs.tolist(), f)
                if data_args.selection_method in ['self_dissimilar']:
                    selected_indices = find_indices_from_embeddings(embs, data_args.annotation_size)
                elif data_args.selection_method.startswith('vote') and data_args.selection_method.endswith('select'):
                    # Method name encodes k, e.g. "vote_10_select".
                    knn = int(data_args.selection_method.split('_')[1])
                    if not os.path.isdir('outputs'):
                        os.makedirs('outputs', exist_ok=True)
                    selected_indices = vote_k_select(embeddings=embs, select_num=data_args.annotation_size, k=knn, overlap_threshold=0.5, vote_file=f'outputs/{data_args.selection_method}.json')
            train_dataset = train_dataset.select(selected_indices)
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    # ---- Evaluation set (subsampled to 256 examples for speed) ----
    if training_args.do_eval:
        if data_args.task_name in ['dbpedia_14']:
            # dbpedia_14 has no validation split; evaluate on a test subsample.
            eval_dataset = raw_datasets['test']
            eval_selected_indices = random.sample(range(len(eval_dataset)), 256)
            eval_dataset = eval_dataset.select(eval_selected_indices)
        else:
            if ('validation' not in raw_datasets) and ('validation_matched' not in raw_datasets):
                raise ValueError('--do_eval requires a validation dataset')
            eval_dataset = raw_datasets[('validation_matched' if data_args.task_name == 'mnli' else 'validation')]
            eval_selected_indices = random.sample(range(len(eval_dataset)), 256)
            eval_dataset = eval_dataset.select(eval_selected_indices)
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    # ---- Prediction set (also subsampled to 256 examples) ----
    if training_args.do_predict or (data_args.task_name is not None) or (data_args.test_file is not None):
        if ('test' not in raw_datasets) and ('test_matched' not in raw_datasets):
            raise ValueError('--do_predict requires a test dataset')
        predict_dataset = raw_datasets[('test_matched' if data_args.task_name == 'mnli' else 'test')]
        predict_selected_indices = random.sample(range(len(predict_dataset)), 256)
        predict_dataset = predict_dataset.select(predict_selected_indices)
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # ---- Metric (dbpedia_14 uses a custom acc+F1 helper in compute_metrics) ----
    if data_args.task_name in ['sst5']:
        metric = load_metric('glue', 'sst2')
    elif not (data_args.task_name in ['dbpedia_14']):
        if data_args.task_name is not None:
            metric = load_metric('glue', data_args.task_name)
        else:
            metric = load_metric('accuracy')

    def compute_metrics(p: EvalPrediction):
        """Convert raw predictions to labels/values and score them."""
        preds = (p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions)
        preds = (np.squeeze(preds) if is_regression else np.argmax(preds, axis=1))
        if data_args.task_name in ['dbpedia_14']:
            return acc_and_f1_dbpedia(preds=preds, labels=p.label_ids)
        elif data_args.task_name is not None:
            result = metric.compute(predictions=preds, references=p.label_ids)
            if len(result) > 1:
                result['combined_score'] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {'mse': ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {'accuracy': (preds == p.label_ids).astype(np.float32).mean().item()}

    # ---- Collation: static padding, fp16-friendly dynamic padding, or default ----
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)

    # ---- Training ----
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # also saves the tokenizer for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # ---- Evaluation (MNLI also evaluates the mismatched split) ----
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        tasks = [data_args.task_name]
        eval_datasets = [eval_dataset]
        if data_args.task_name == 'mnli':
            tasks.append('mnli-mm')
            eval_datasets.append(raw_datasets['validation_mismatched'])
        for (eval_dataset, task) in zip(eval_datasets, tasks):
            metrics = trainer.evaluate(eval_dataset=eval_dataset)
            max_eval_samples = (data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset))
            metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
            trainer.log_metrics('eval', metrics)
            trainer.save_metrics('eval', metrics)

    # ---- Prediction: write one tsv of predictions per task ----
    if training_args.do_predict:
        logger.info('*** Predict ***')
        tasks = [data_args.task_name]
        predict_datasets = [predict_dataset]
        if data_args.task_name == 'mnli':
            tasks.append('mnli-mm')
            predict_datasets.append(raw_datasets['test_mismatched'])
        for (predict_dataset, task) in zip(predict_datasets, tasks):
            # Test labels may be placeholders; drop them before predicting.
            predict_dataset = predict_dataset.remove_columns('label')
            predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
            print(predictions[0].shape, predictions[1].shape)
            predictions = (np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1))
            output_predict_file = os.path.join(training_args.output_dir, f'predict_results_{task}.txt')
            if trainer.is_world_process_zero():
                with open(output_predict_file, 'w') as writer:
                    logger.info(f'***** Predict results {task} *****')
                    writer.write('index\tprediction\n')
                    for (index, item) in enumerate(predictions):
                        if is_regression:
                            writer.write(f'{index} {item:3.3f}\n')
                        else:
                            item = label_list[item]
                            writer.write(f'{index} {item}\n')

    # ---- Model card / hub upload ----
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if data_args.task_name is not None:
        kwargs['language'] = 'en'
        kwargs['dataset_tags'] = 'glue'
        kwargs['dataset_args'] = data_args.task_name
        kwargs['dataset'] = f'GLUE {data_args.task_name.upper()}'
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
    """Return a `model_fn` closure for TPUEstimator.

    The closure builds the BERT classification graph, optionally warm-starts
    from `init_checkpoint`, and returns a TPUEstimatorSpec for TRAIN or EVAL.
    PREDICT mode is not supported and raises ValueError.
    """
    def model_fn(features, labels, mode, params):
        """The `model_fn` for TPUEstimator. `labels`/`params` are unused here;
        labels ride along inside `features['label_ids']`."""
        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
        input_ids = features['input_ids']
        input_mask = features['input_mask']
        segment_ids = features['segment_ids']
        label_ids = features['label_ids']
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        (total_loss, per_example_loss, logits) = create_model(bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings)
        tvars = tf.trainable_variables()
        scaffold_fn = None
        # FIX: previously only assigned inside `if init_checkpoint:`, causing a
        # NameError in the variable-logging loop below when no checkpoint is given.
        initialized_variable_names = {}
        if init_checkpoint:
            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                # On TPU, checkpoint restoration must happen inside the scaffold.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if var.name in initialized_variable_names:
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            def metric_fn(per_example_loss, label_ids, logits):
                """Compute eval accuracy and mean loss from per-example values."""
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                accuracy = tf.metrics.accuracy(label_ids, predictions)
                loss = tf.metrics.mean(per_example_loss)
                return {'eval_accuracy': accuracy, 'eval_loss': loss}
            eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
        else:
            raise ValueError('Only TRAIN and EVAL modes are supported: %s' % mode)
        return output_spec
    return model_fn
def gable_process_box(bm, roof_faces, prop):
    """Give the upward-facing roof faces box-style thickness.

    Extrudes the top faces by ``prop.thickness`` along +Z, deletes the old
    top faces, and tags the extrusion's side faces (plus the surviving
    original roof faces) as roof-hang material.
    """
    # Faces with a non-zero vertical normal component count as the roof top.
    top_faces = [f for f in roof_faces if f.normal.z]
    result = bmesh.ops.extrude_face_region(bm, geom=top_faces).get('geom')
    # Raise the extruded copy to create the thickness.
    bmesh.ops.translate(bm, verts=filter_geom(result, BMVert), vec=(0, 0, prop.thickness))
    # The originals are now interior; remove them before collecting side faces.
    bmesh.ops.delete(bm, geom=top_faces, context='FACES')
    # Side walls of the extrusion: faces linked to the new top faces' edges
    # whose normals have no vertical component.
    link_faces = {f for fc in filter_geom(result, BMFace) for e in fc.edges for f in e.link_faces if (not f.normal.z)}
    # validate() presumably filters out faces invalidated by the delete above
    # — confirm against the helper's definition.
    link_faces.update(set(validate(roof_faces)))
    add_faces_to_group(bm, list(link_faces), MaterialGroup.ROOF_HANGS)
def _getsafeword(agearg):
    """Return (after, before) GMT timestamp strings bounding an age window.

    ``after`` is now minus the seconds represented by ``agearg``; ``before``
    is the current GMT time. Both are formatted as "YYYY-MM-DD HH:MM:SS".
    """
    now_gmt = ops.system.clocks.gmtime()
    age_seconds = ops.timehelper.get_seconds_from_age(agearg)
    stamp_format = '%Y-%m-%d %H:%M:%S'
    after = (now_gmt - timedelta(seconds=age_seconds)).strftime(stamp_format)
    before = (now_gmt + timedelta(seconds=0)).strftime(stamp_format)
    return (after, before)
class Parser(object):
    """Base class for syntax parsers.

    Subclasses override the class-level metadata lists and ``parseLine``.
    NOTE(review): several methods take ``cls`` without an ``@classmethod``
    decorator — presumably decorators were stripped or are applied elsewhere;
    confirm before calling them on instances.
    """
    _extensions = []
    _shebangKeywords = []
    _keywords = []

    def getParserName(cls):
        """Derive a lowercase parser name from the class name, dropping a
        trailing 'Parser' suffix."""
        raw = cls.__name__
        if raw.endswith('Parser') and (len(raw) >= 6):
            return raw[:-6].lower()
        return raw

    def disambiguate(cls, text):
        """Default disambiguation: ignore the text, return this parser's name."""
        return cls.getParserName()

    def parseLine(self, line, previousState=0):
        """Tokenize one line; the base implementation yields the whole line
        as a single plain-text token."""
        yield tokens.TextToken(line, 0, len(line))

    def name(self):
        """Lowercase class name with a trailing 'parser' suffix removed."""
        label = self.__class__.__name__.lower()
        return label[:-6] if label.endswith('parser') else label

    def __repr__(self):
        return '<Parser for "%s">' % self.name()

    def keywords(self):
        """Shallow copy of this parser's keyword list."""
        return [kw for kw in self._keywords]

    def filenameExtensions(self):
        """Normalized extensions: lowercase, exactly one leading dot."""
        normalized = []
        for ext in self._extensions:
            normalized.append('.' + ext.lstrip('.').lower())
        return normalized

    def shebangKeywords(self):
        """Shallow copy of the shebang keyword list."""
        return self._shebangKeywords.copy()

    def getStyleElementDescriptions(cls):
        """Unique style descriptions for every token type this parser uses,
        deduplicated by description key (later tokens win)."""
        by_key = {tok.description.key: tok.description for tok in cls.getUsedTokens(cls)}
        return list(by_key.values())

    def getUsedTokens(self):
        """Instantiate every Token subclass defined in this parser's module."""
        try:
            mod = sys.modules[self.__module__]
        except KeyError:
            return []
        instances = []
        for attr in mod.__dict__:
            candidate = mod.__dict__[attr]
            if isinstance(candidate, type) and issubclass(candidate, tokens.Token):
                if candidate is not tokens.Token:
                    instances.append(candidate())
        return instances

    def _isTodoItem(self, text):
        """True if the first word (sans trailing colon) is a TODO marker."""
        first_word = text.lstrip().split(' ', 1)[0].rstrip(':')
        return first_word.lower() in ['todo', '2do', 'fixme']
class StaffAdvertiserReportView(BaseReportView):
    """Staff report aggregating per-advertiser performance over a date range.

    Advertisers whose report shows zero views are omitted from the output.
    """
    impression_model = AdvertiserImpression
    report = OptimizedAdvertiserReport
    template_name = 'adserver/reports/staff-advertisers.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        start = context['start_date']
        end = context['end_date']
        impressions = self.get_queryset(start_date=start, end_date=end)
        advertisers = Advertiser.objects.filter(id__in=impressions.values('advertiser_id'))
        advertisers_and_reports = []
        for advertiser in advertisers:
            # One report per advertiser over the same window.
            advertiser_qs = self.get_queryset(start_date=start, end_date=end).filter(advertiser=advertiser)
            advertiser_report = self.report(advertiser_qs)
            advertiser_report.generate()
            # Drop advertisers with no recorded views.
            if advertiser_report.total['views'] > 0:
                advertisers_and_reports.append((advertiser, advertiser_report))
        total_clicks = sum(rep.total['clicks'] for (_, rep) in advertisers_and_reports)
        total_views = sum(rep.total['views'] for (_, rep) in advertisers_and_reports)
        total_cost = sum(rep.total['cost'] for (_, rep) in advertisers_and_reports)
        context.update({'advertisers': [a for (a, _) in advertisers_and_reports], 'advertisers_and_reports': advertisers_and_reports, 'total_clicks': total_clicks, 'total_cost': total_cost, 'total_views': total_views, 'total_ctr': calculate_ctr(total_clicks, total_views), 'total_ecpm': calculate_ecpm(total_cost, total_views), 'metabase_advertisers_breakdown': settings.METABASE_QUESTIONS.get('ALL_ADVERTISERS_BREAKDOWN')})
        return context
def test_one_accumulator_while_loop() -> None:
    """Check that AccumulationTable records per-iteration values of the two
    named variables across a while loop.

    NOTE: the variable names 'number' and 'sum_so_far' are load-bearing —
    AccumulationTable tracks variables by name, so renaming them would
    change the recorded dict.
    """
    number = 10
    test_list = [10, 20, 30]
    sum_so_far = 0
    with AccumulationTable(['number', 'sum_so_far']) as table:
        # Runs for number = 10, 20, 30 and stops once number reaches 40.
        while (number in test_list):
            sum_so_far = (sum_so_far + number)
            number += 10
    # Recorded values include the initial state plus one entry per iteration.
    assert (table.loop_accumulators == {'number': [10, 20, 30, 40], 'sum_so_far': [0, 10, 30, 60]})
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """M-CTC-T feature extractor: converts raw audio into log-mel (MFSC)
    features via framing, pre-emphasis, windowing, DFT and a mel filterbank,
    with optional per-utterance mean/variance normalization.

    NOTE(review): the framing/DFT helpers below take no ``self`` yet are
    called as ``self._helper(...)`` — presumably ``@staticmethod`` decorators
    were stripped; confirm before running this class as-is.
    """
    model_input_names = ['input_features', 'attention_mask']
    def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function='hamming_window', frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        """Store extraction hyperparameters and derive frame/FFT sizes.

        ``hop_length``/``win_length`` are in milliseconds; ``sample_stride``/
        ``sample_size`` are the corresponding sample counts.
        """
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Milliseconds -> samples.
        self.sample_size = ((win_length * sampling_rate) // 1000)
        self.sample_stride = ((hop_length * sampling_rate) // 1000)
        # FFT length: next power of two >= window size; keep positive freqs only.
        self.n_fft = (2 ** int(np.ceil(np.log2(self.sample_size))))
        self.n_freqs = ((self.n_fft // 2) + 1)
    def _num_frames_calc(in_size, frame_size, frame_stride):
        """Number of full frames that fit in a signal of ``in_size`` samples."""
        return int((1 + np.floor((((in_size - frame_size) * 1) / frame_stride))))
    def _frame_signal(one_waveform, n_frames, frame_signal_scale, window_length, sample_stride):
        """Slice the waveform into overlapping frames, scaled, concatenated flat."""
        scale = frame_signal_scale
        frames = np.zeros((n_frames * window_length))
        for frame_idx in range(n_frames):
            start = (frame_idx * window_length)
            end = ((frame_idx + 1) * window_length)
            wave_start = (frame_idx * sample_stride)
            wave_end = ((frame_idx * sample_stride) + window_length)
            frames[start:end] = (scale * one_waveform[wave_start:wave_end])
        return frames
    def _apply_preemphasis_inplace(frames, window_length, preemphasis_coeff):
        """Apply per-frame pre-emphasis in place: x[t] -= coeff * x[t-1].

        Iterates frames back-to-front so earlier values are still unmodified
        when subtracted; the first sample of each frame is scaled instead.
        """
        if ((frames.size % window_length) != 0):
            raise ValueError(f'`frames` is supposed to have length divisble by `window_length`, but is {frames.size} with window_length={window_length}.')
        n_frames = (frames.size // window_length)
        for frame_idx in range(n_frames, 0, (- 1)):
            start = ((frame_idx - 1) * window_length)
            end = ((frame_idx * window_length) - 1)
            frames[(start + 1):(end + 1)] -= (preemphasis_coeff * frames[start:end])
            frames[start] *= (1 - preemphasis_coeff)
    def _windowing(frames, window_length, window):
        """Reshape flat frames to (n_frames, window_length) and apply the window."""
        if ((frames.size % window_length) != 0):
            raise ValueError(f'`frames` is supposed to have length divisble by `window_length`, but is {frames.size} with window_length={window_length}.')
        shaped = frames.reshape((- 1), window_length)
        shaped = (window * shaped)
        return shaped
    def _dft(frames, K, n_frames, n_samples, n_fft):
        """Magnitude spectrum per frame: zero-pad to n_fft, rfft, keep K bins."""
        dft = np.zeros([n_frames, K])
        for frame in range(n_frames):
            begin = (frame * n_samples)
            inwards_buffer = frames[begin:(begin + n_samples)]
            inwards_buffer = np.pad(inwards_buffer, (0, (n_fft - n_samples)), 'constant')
            out = np.fft.rfft(inwards_buffer)
            dft[frame] = np.abs(out[:K])
        return dft
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Full MFSC pipeline for one waveform -> (n_frames, feature_size) log-mels."""
        if (self.win_function == 'hamming_window'):
            # Periodic=False with the classic 0.54/0.46 Hamming coefficients.
            window = torch.hamming_window(window_length=self.sample_size, periodic=False, alpha=0.54, beta=0.46)
        else:
            # Any other torch window factory, called with its defaults —
            # note it is called with no size argument; verify intended.
            window = getattr(torch, self.win_function)()
        window = window.numpy()
        fbanks = torchaudio.functional.melscale_fbanks(n_freqs=self.n_freqs, f_min=0.0, f_max=(self.sampling_rate / 2.0), n_mels=self.feature_size, sample_rate=self.sampling_rate)
        fbanks = fbanks.numpy()
        n_frames = self._num_frames_calc(one_waveform.size, self.sample_size, self.sample_stride)
        frames = self._frame_signal(one_waveform, n_frames, self.frame_signal_scale, self.sample_size, self.sample_stride)
        self._apply_preemphasis_inplace(frames, self.sample_size, self.preemphasis_coeff)
        frames = self._windowing(frames, self.sample_size, window)
        dft_out = self._dft(frames.flatten(), self.n_freqs, n_frames, self.sample_size, self.n_fft)
        # Project magnitude spectra onto the mel filterbank, floor, then log.
        msfc_features = np.einsum('...tf,fm->...tm', dft_out, fbanks)
        msfc_features = np.maximum(msfc_features, self.mel_floor)
        msfc_features = np.log(msfc_features)
        return msfc_features
    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize the non-padded prefix of one feature matrix."""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if (input_length < x.shape[0]):
            # Re-stamp padded positions, which normalization just disturbed.
            x[input_length:] = padding_value
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray]=None) -> List[np.ndarray]:
        """Normalize each feature matrix, using the mask (if any) for true lengths."""
        lengths = (attention_mask.sum((- 1)) if (attention_mask is not None) else [x.shape[0] for x in input_features])
        return [self._normalize_one(x, n, self.padding_value) for (x, n) in zip(input_features, lengths)]
    def __call__(self, raw_speech: Union[(np.ndarray, List[float], List[np.ndarray], List[List[float]])], padding: Union[(bool, str, PaddingStrategy)]=False, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, sampling_rate: Optional[int]=None, **kwargs) -> BatchFeature:
        """Featurize raw audio into a padded, optionally normalized BatchFeature.

        Raises ValueError if ``sampling_rate`` is given and differs from the
        extractor's configured rate; warns when it is omitted.
        """
        if (sampling_rate is not None):
            if (sampling_rate != self.sampling_rate):
                raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning('It is strongly recommended to pass the ``sampling_rate`` argument to this function. Failing to do so can result in silent errors that might be hard to debug.')
        is_batched = bool((isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list)))))
        # Coerce everything to a list of float32 numpy arrays.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif ((not is_batched) and (not isinstance(raw_speech, np.ndarray))):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif (isinstance(raw_speech, np.ndarray) and (raw_speech.dtype is np.dtype(np.float64))):
            raw_speech = raw_speech.astype(np.float32)
        if (not is_batched):
            raw_speech = [raw_speech]
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        encoded_inputs = BatchFeature({'input_features': features})
        # Always request the mask here: normalization below needs true lengths.
        padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs)
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if (attention_mask is not None):
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if (self.normalize_means or self.normalize_vars):
            # Only trust the mask when padding was actually applied.
            attention_mask = (np.array(attention_mask, dtype=np.int32) if ((self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD) and padding) else None)
            padded_inputs['input_features'] = self.normalize(padded_inputs['input_features'], attention_mask=attention_mask)
        if (return_tensors is not None):
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
class AsmCmdBase(with_metaclass(AsmCmdManager, object)):
    """Base class for Assembly3 workbench commands.

    Subclasses customize the class-level attributes; the AsmCmdManager
    metaclass presumably registers each subclass as a FreeCAD command —
    confirm against the metaclass definition.
    """
    _id = -1
    _active = None
    _toolbarName = 'Assembly3'
    _menuGroupName = ''
    _contextMenuName = 'Assembly'
    _accel = None
    _cmdType = None
    _iconName = None

    def checkActive(cls):
        """Default activity check: commands are active unless overridden."""
        cls._active = True

    def getIconName(cls):
        """Resolve the class icon to a FreeCAD icon path, if one is set."""
        if cls._iconName:
            return addIconToFCAD(cls._iconName)

    def GetResources(cls):
        """Assemble the FreeCAD command resource dict, adding only the
        optional entries that are configured on the class."""
        resources = {'MenuText': cls.getMenuText(), 'ToolTip': cls.getToolTip()}
        icon = cls.getIconName()
        if icon:
            resources['Pixmap'] = icon
        if cls._accel:
            resources['Accel'] = cls._accel
        if cls._cmdType is not None:
            resources['CmdType'] = cls._cmdType
        return resources
def get_possible_variants(typ: Type) -> list[Type]:
    """Expand a type into the list of concrete alternatives it can stand for.

    TypeVars yield their value restrictions (or upper bound); ParamSpec and
    TypeVarTuple yield their upper bound; unions and overloads yield their
    items; anything else is returned as-is in a singleton list.
    """
    proper = get_proper_type(typ)
    if isinstance(proper, TypeVarType):
        # A constrained TypeVar enumerates its values; otherwise fall back
        # to the upper bound.
        return proper.values if len(proper.values) > 0 else [proper.upper_bound]
    if isinstance(proper, (ParamSpecType, TypeVarTupleType)):
        return [proper.upper_bound]
    if isinstance(proper, (UnionType, Overloaded)):
        return list(proper.items)
    return [proper]
def test_export_compound_crs():
    """Round-trip a compound (projected + vertical) CRS through the CF dict
    representation and check the per-axis coordinate metadata."""
    # KKJ / Finland Uniform Coordinate System + N60 height (EPSG:2393 + 5717).
    crs = CRS('urn:ogc:def:crs,crs:EPSG::2393,crs:EPSG::5717')
    expected_cf = {'semi_major_axis': 6378388.0, 'semi_minor_axis': crs.ellipsoid.semi_minor_metre, 'inverse_flattening': 297.0, 'reference_ellipsoid_name': 'International 1924', 'longitude_of_prime_meridian': 0.0, 'prime_meridian_name': 'Greenwich', 'geographic_crs_name': 'KKJ', 'horizontal_datum_name': 'Kartastokoordinaattijarjestelma (1966)', 'projected_crs_name': 'KKJ / Finland Uniform Coordinate System', 'grid_mapping_name': 'transverse_mercator', 'latitude_of_projection_origin': 0.0, 'longitude_of_central_meridian': 27.0, 'false_easting': 3500000.0, 'false_northing': 0.0, 'scale_factor_at_central_meridian': 1.0, 'geopotential_datum_name': 'Helsinki 1960'}
    cf_dict = crs.to_cf()
    # The WKT entry varies in detail; check only its compound prefix, then
    # compare the remaining CF attributes exactly.
    assert cf_dict.pop('crs_wkt').startswith('COMPOUNDCRS[')
    assert (cf_dict == expected_cf)
    _test_roundtrip(expected_cf, 'COMPOUNDCRS[')
    # Axis order: northing, easting, then the vertical axis pointing up.
    assert (crs.cs_to_cf() == [{'axis': 'Y', 'long_name': 'Northing', 'standard_name': 'projection_y_coordinate', 'units': 'metre'}, {'axis': 'X', 'long_name': 'Easting', 'standard_name': 'projection_x_coordinate', 'units': 'metre'}, {'standard_name': 'height_above_reference_ellipsoid', 'long_name': 'Gravity-related height', 'units': 'metre', 'positive': 'up', 'axis': 'Z'}])
class Baseline(object):
    """No-op baseline interface for REINFORCE-style training.

    The base implementation passes data through untouched and learns
    nothing; subclasses override ``eval`` (and usually the hooks) to supply
    an actual baseline value.
    """
    def wrap_dataset(self, dataset):
        """Return the dataset unchanged; subclasses may attach baseline data."""
        return dataset

    def unwrap_batch(self, batch):
        """Split a batch into (data, baseline-extras); the base has no extras."""
        return batch, None

    def eval(self, x, c):
        """Evaluate the baseline for input ``x`` with cost ``c``; must be overridden."""
        raise NotImplementedError('Override this method')

    def get_learnable_parameters(self):
        """The base baseline has nothing to optimize."""
        return []

    def epoch_callback(self, model, epoch):
        """Hook called after each epoch; no-op by default."""
        pass

    def state_dict(self):
        """Nothing to serialize for the stateless base baseline."""
        return dict()

    def load_state_dict(self, state_dict):
        """Nothing to restore for the stateless base baseline."""
        pass
class RestoreFormerModel(pl.LightningModule):
def __init__(self, ddconfig, lossconfig, ckpt_path=None, ignore_keys=[], image_key='lq', colorize_nlabels=None, monitor=None, special_params_lr_scale=1.0, comp_params_lr_scale=1.0, schedule_step=[80000, 200000]):
    """Build the VQVAE and its loss from configs, optionally restoring weights.

    ``colorize_nlabels`` and ``monitor`` are accepted for config compatibility
    but unused here. The loss config is mutated to receive the VQVAE params
    as ``distill_param`` before instantiation.
    """
    super().__init__()
    self.image_key = image_key
    self.vqvae = instantiate_from_config(ddconfig)
    # The loss needs the architecture params for its distillation branch;
    # this must happen before the loss is instantiated.
    lossconfig['params']['distill_param'] = ddconfig['params']
    self.loss = instantiate_from_config(lossconfig)
    if ckpt_path is not None:
        self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
    # A facial-component discriminator is used whenever either component
    # loss weight is present and truthy.
    loss_params = lossconfig['params']
    self.use_facial_disc = bool(loss_params.get('comp_weight') or loss_params.get('comp_style_weight'))
    self.fix_decoder = ddconfig['params']['fix_decoder']
    self.disc_start = loss_params['disc_start']
    self.special_params_lr_scale = special_params_lr_scale
    self.comp_params_lr_scale = comp_params_lr_scale
    self.schedule_step = schedule_step
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location='cpu')['state_dict']
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print('Deleting key {} from state_dict.'.format(k))
del sd[k]
state_dict = self.state_dict()
require_keys = state_dict.keys()
keys = sd.keys()
un_pretrained_keys = []
for k in require_keys:
if (k not in keys):
if (k[6:] in keys):
state_dict[k] = sd[k[6:]]
else:
un_pretrained_keys.append(k)
else:
state_dict[k] = sd[k]
self.load_state_dict(state_dict, strict=True)
print(f'Restored from {path}')
def forward(self, input):
(dec, diff, info, hs) = self.vqvae(input)
return (dec, diff, info, hs)
def training_step(self, batch, batch_idx, optimizer_idx):
x = batch[self.image_key]
(xrec, qloss, info, hs) = self(x)
if (self.image_key != 'gt'):
x = batch['gt']
if self.use_facial_disc:
loc_left_eyes = batch['loc_left_eye']
loc_right_eyes = batch['loc_right_eye']
loc_mouths = batch['loc_mouth']
face_ratio = (xrec.shape[(- 1)] / 512)
components = get_roi_regions(x, xrec, loc_left_eyes, loc_right_eyes, loc_mouths, face_ratio)
else:
components = None
if (optimizer_idx == 0):
(aeloss, log_dict_ae) = self.loss(qloss, x, xrec, components, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
self.log('train/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return aeloss
if (optimizer_idx == 1):
(discloss, log_dict_disc) = self.loss(qloss, x, xrec, components, optimizer_idx, self.global_step, last_layer=None, split='train')
self.log('train/discloss', discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return discloss
if (self.disc_start <= self.global_step):
if (optimizer_idx == 2):
(disc_left_loss, log_dict_disc) = self.loss(qloss, x, xrec, components, optimizer_idx, self.global_step, last_layer=None, split='train')
self.log('train/disc_left_loss', disc_left_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return disc_left_loss
if (optimizer_idx == 3):
(disc_right_loss, log_dict_disc) = self.loss(qloss, x, xrec, components, optimizer_idx, self.global_step, last_layer=None, split='train')
self.log('train/disc_right_loss', disc_right_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return disc_right_loss
if (optimizer_idx == 4):
(disc_mouth_loss, log_dict_disc) = self.loss(qloss, x, xrec, components, optimizer_idx, self.global_step, last_layer=None, split='train')
self.log('train/disc_mouth_loss', disc_mouth_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return disc_mouth_loss
def validation_step(self, batch, batch_idx):
x = batch[self.image_key]
(xrec, qloss, info, hs) = self(x)
if (self.image_key != 'gt'):
x = batch['gt']
(aeloss, log_dict_ae) = self.loss(qloss, x, xrec, None, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
(discloss, log_dict_disc) = self.loss(qloss, x, xrec, None, 1, self.global_step, last_layer=None, split='val')
rec_loss = log_dict_ae['val/rec_loss']
self.log('val/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
self.log('val/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr = self.learning_rate
normal_params = []
special_params = []
for (name, param) in self.vqvae.named_parameters():
if (not param.requires_grad):
continue
if (('decoder' in name) and ('attn' in name)):
special_params.append(param)
else:
normal_params.append(param)
opt_ae_params = [{'params': normal_params, 'lr': lr}, {'params': special_params, 'lr': (lr * self.special_params_lr_scale)}]
opt_ae = torch.optim.Adam(opt_ae_params, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))
optimizations = [opt_ae, opt_disc]
s0 = torch.optim.lr_scheduler.MultiStepLR(opt_ae, milestones=self.schedule_step, gamma=0.1, verbose=True)
s1 = torch.optim.lr_scheduler.MultiStepLR(opt_disc, milestones=self.schedule_step, gamma=0.1, verbose=True)
schedules = [s0, s1]
if self.use_facial_disc:
opt_l = torch.optim.Adam(self.loss.net_d_left_eye.parameters(), lr=(lr * self.comp_params_lr_scale), betas=(0.9, 0.99))
opt_r = torch.optim.Adam(self.loss.net_d_right_eye.parameters(), lr=(lr * self.comp_params_lr_scale), betas=(0.9, 0.99))
opt_m = torch.optim.Adam(self.loss.net_d_mouth.parameters(), lr=(lr * self.comp_params_lr_scale), betas=(0.9, 0.99))
optimizations += [opt_l, opt_r, opt_m]
s2 = torch.optim.lr_scheduler.MultiStepLR(opt_l, milestones=self.schedule_step, gamma=0.1, verbose=True)
s3 = torch.optim.lr_scheduler.MultiStepLR(opt_r, milestones=self.schedule_step, gamma=0.1, verbose=True)
s4 = torch.optim.lr_scheduler.MultiStepLR(opt_m, milestones=self.schedule_step, gamma=0.1, verbose=True)
schedules += [s2, s3, s4]
return (optimizations, schedules)
def get_last_layer(self):
if self.fix_decoder:
return self.vqvae.quant_conv.weight
return self.vqvae.decoder.conv_out.weight
def log_images(self, batch, **kwargs):
log = dict()
x = batch[self.image_key]
x = x.to(self.device)
(xrec, _, _, _) = self(x)
log['inputs'] = x
log['reconstructions'] = xrec
if (self.image_key != 'gt'):
x = batch['gt']
log['gt'] = x
return log |
def test_color_yes_collection_on_non_atty(pytester, request) -> None:
    """Run xdist with --color=yes while stdout is not a tty.

    The ANSI-colored output must be preserved, the worker summary lines must
    appear, and the transient 'collecting:' status line must not leak into
    the captured (non-tty) output.
    """
    reporter = request.config.pluginmanager.getplugin('terminalreporter')
    if not hasattr(reporter, 'isatty'):
        pytest.skip('only valid for newer pytest versions')
    pytester.makepyfile("\n import pytest\n .parametrize('i', range(10))\n def test_this(i):\n assert 1\n ")
    result = pytester.runpytest('--color=yes', '-n2')
    out = result.stdout.str()
    for expected in ('test session starts', '\x1b[1m', 'created: 2/2 workers', '2 workers [10 items]'):
        assert expected in out
    assert 'collecting:' not in out
def create_cancel_build_in_queue(build_phase, build_queue_id, build_queue):
    """Build a zero-argument canceller for a queued build.

    The returned callable removes the queue item (when an id is known) and
    reports success only while the build is still in the WAITING phase.
    """
    def cancel_build():
        was_cancelled = build_queue.cancel(build_queue_id) if build_queue_id is not None else False
        # Once the build has left the WAITING phase it can no longer be
        # cancelled via the queue, regardless of the cancel() outcome.
        if build_phase != BUILD_PHASE.WAITING:
            return False
        return was_cancelled
    return cancel_build
class Solution():
    """LeetCode 1399: count how many digit-sum groups share the largest size."""

    def dig_sum(self, n):
        """Return the sum of the decimal digits of *n* (0 for n <= 0)."""
        digit_total = 0
        while n > 0:
            digit_total += n % 10
            n //= 10
        return digit_total

    def countLargestGroup(self, n: int) -> int:
        from collections import Counter
        # Group the numbers 1..n by digit sum and record each group's size.
        group_sizes = Counter(self.dig_sum(value) for value in range(1, n + 1))
        # Tally how many groups exist for each size, then report how many
        # groups attain the maximal size.
        size_frequency = Counter(group_sizes.values())
        return size_frequency[max(size_frequency)]
class ScopeTimer():
    """Context manager that measures and prints the wall-clock duration of a
    ``with`` block, labelled by *name*."""

    def __init__(self, name):
        # Label printed alongside the measured duration.
        self.name = name

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.interval = self.end - self.start
        # Scientific notation keeps tiny and huge durations equally readable.
        print('{} {:.3E}'.format(self.name, self.interval))
# NOTE(review): the bare ".script" below looks like a stripped
# "@torch.jit.script" decorator -- confirm against upstream torchmetrics.
.script
def _multilabel_recall_at_fixed_precision_compute(input: torch.Tensor, target: torch.Tensor, num_labels: int, min_precision: float) -> Tuple[(List[torch.Tensor], List[torch.Tensor])]:
    """For each label, find the maximum recall (and the threshold achieving
    it) such that precision stays at or above ``min_precision``.

    Returns per-label lists ``(max_recall, best_threshold)``.
    """
    # Per-label precision/recall curves plus the thresholds they were
    # evaluated at, from the shared curve helper.
    (precision, recall, thresholds) = _multilabel_precision_recall_curve_compute(input, target, num_labels)
    (max_recall, best_threshold) = ([], [])
    for (p, r, t) in zip(precision, recall, thresholds):
        # Delegate the single-curve search to the shared helper.
        (max_r, best_t) = _recall_at_precision(p, r, t, min_precision)
        max_recall.append(max_r)
        best_threshold.append(best_t)
    return (max_recall, best_threshold)
class WebRTCManager(Runnable):
    """Manages WebRTC data channels to partner nodes.

    Signaling (offer / answer / candidates / hangup) is exchanged through
    ``signaling_send``; incoming channel payloads are parsed and forwarded
    via ``process_messages``.  A per-partner Semaphore serializes signaling
    against connection teardown.
    """

    def __init__(self, node_address: Address, process_messages: Callable[([List[ReceivedRaidenMessage]], None)], signaling_send: Callable[([Address, str], None)], stop_event: GEvent) -> None:
        super().__init__()
        self.node_address = node_address
        # Callback delivering fully parsed Raiden messages to the upper layer.
        self._process_messages = process_messages
        # Callback shipping a signaling payload to a partner (out of band).
        self._signaling_send = signaling_send
        self._stop_event = stop_event
        self._address_to_connection: Dict[(Address, _RTCConnection)] = {}
        self._address_to_lock: Dict[(Address, Semaphore)] = {}
        self.log = log.bind(node=to_checksum_address(node_address))

    def get_lock(self, address: Address) -> Semaphore:
        """Return the per-partner lock, creating it lazily."""
        if (address not in self._address_to_lock):
            self._address_to_lock[address] = Semaphore()
        return self._address_to_lock[address]

    def is_locked(self, address: Address) -> bool:
        """True when signaling/teardown for *address* is currently in flight."""
        return self.get_lock(address).locked()

    def _handle_message(self, message_data: str, partner_address: Address) -> None:
        # Parse a raw channel payload and forward all messages in one batch.
        messages: List[ReceivedRaidenMessage] = []
        for msg in validate_and_parse_message(message_data, partner_address):
            messages.append(ReceivedRaidenMessage(message=msg, sender=partner_address))
        self._process_messages(messages)

    def _handle_ice_connection_closed(self, conn: _RTCConnection) -> None:
        # Drop the dead connection; only the original initiator re-establishes,
        # so both sides do not race to create new connections.
        self._address_to_connection.pop(conn.partner_address, None)
        if (conn.initiator_address == self.node_address):
            self.health_check(conn.partner_address)

    def _wrapped_initialize_web_rtc(self, address: Address) -> None:
        # Retry channel establishment up to three times or until ready.
        attempt = 0
        while ((attempt < 3) and (not self.has_ready_channel(address))):
            self._initialize_web_rtc(address)
            attempt += 1

    def _initialize_web_rtc(self, partner_address: Address) -> None:
        """Create a connection as initiator and wait for it to become ready,
        hanging up and tearing it down on timeout."""
        if (partner_address in self._address_to_connection):
            return
        if self._stop_event.is_set():
            return
        self.log.debug('Establishing WebRTC channel', partner_address=to_checksum_address(partner_address))
        # A held lock means signaling/teardown is in flight; do not interfere.
        if self.is_locked(partner_address):
            return
        conn = _RTCConnection(partner_address, self.node_address, self._signaling_send, self._handle_ice_connection_closed, self._handle_message)
        self._add_connection(partner_address, conn)
        conn.initialize_signaling()
        # Wait out the init window; a set stop event aborts immediately.
        if self._stop_event.wait(timeout=self.get_channel_init_timeout()):
            return
        # The connection may have been replaced while waiting; if so, bail out.
        if (conn is not self._address_to_connection.get(partner_address, None)):
            return
        if (not self.has_ready_channel(partner_address)):
            self.log.debug('Could not establish channel', partner_address=to_checksum_address(partner_address))
            conn.send_hangup_message()
            with self.get_lock(partner_address):
                self.close_connection(partner_address)

    def get_channel_init_timeout(self) -> float:
        """Seconds to wait for a channel to become ready."""
        return 30.0

    def _add_connection(self, partner_address: Address, conn: _RTCConnection) -> None:
        assert (partner_address not in self._address_to_connection), 'must not be there already'
        self._address_to_connection[partner_address] = conn

    def has_ready_channel(self, partner_address: Address) -> bool:
        """True when a connection exists and its data channel is open."""
        conn = self._address_to_connection.get(partner_address)
        return ((conn is not None) and conn.channel_open())

    def _reset_state(self) -> None:
        self._address_to_connection = {}

    def _set_candidates_for_address(self, partner_address: Address, content: Dict[(str, Any)]) -> None:
        # Ignore candidates for unknown connections (e.g. already closed).
        conn = self._address_to_connection.get(partner_address)
        if (conn is not None):
            conn.set_candidates(content)

    def _process_signaling_for_address(self, partner_address: Address, rtc_message_type: str, description: Dict[(str, str)]) -> None:
        """Handle an offer/answer for *partner_address* (caller holds the lock)."""
        conn = self._address_to_connection.get(partner_address)
        if (rtc_message_type == _RTCMessageType.OFFER.value):
            if (conn is not None):
                # Call-id ordering breaks ties between competing offers: a
                # newer incoming call replaces ours, an older one is ignored.
                if (conn.call_id < description['call_id']):
                    self.close_connection(partner_address)
                else:
                    return
            if self._stop_event.is_set():
                return
            conn = _RTCConnection.from_offer(partner_address, self.node_address, self._signaling_send, self._handle_ice_connection_closed, self._handle_message, description)
            self._add_connection(partner_address, conn)
        elif (conn is None):
            # An answer without a matching connection cannot be processed.
            return
        conn.process_signaling(description)

    def send_message(self, partner_address: Address, message: str) -> None:
        # Raises KeyError when no connection exists for the partner.
        conn = self._address_to_connection[partner_address]
        conn.send_message(message)

    def health_check(self, partner_address: Address) -> None:
        """Ensure a channel exists (or is being established) for the partner."""
        if (partner_address in self._address_to_connection):
            return
        self._schedule_new_greenlet(self._wrapped_initialize_web_rtc, partner_address)

    def close_connection(self, partner_address: Address) -> None:
        conn = self._address_to_connection.get(partner_address)
        if (conn is not None):
            # conn.close() returns a future; block until teardown completes.
            yield_future(conn.close())

    def _process_signaling_message(self, partner_address: Address, rtc_message_type: str, content: Dict[(str, str)]) -> None:
        # Dispatch by signaling message type; offers/answers are serialized
        # via the per-partner lock.
        if ((rtc_message_type in [_RTCMessageType.OFFER.value, _RTCMessageType.ANSWER.value]) and ('sdp' in content)):
            with self.get_lock(partner_address):
                self._process_signaling_for_address(partner_address, rtc_message_type, content)
        elif (rtc_message_type == _RTCMessageType.HANGUP.value):
            self.close_connection(partner_address)
        elif (rtc_message_type == _RTCMessageType.CANDIDATES.value):
            self._set_candidates_for_address(partner_address, content)
        else:
            self.log.error('Unknown WebRTC message type', partner_address=to_checksum_address(partner_address), type=rtc_message_type)

    def process_signaling_message(self, partner_address: Address, rtc_message_type: str, content: Dict[(str, str)]) -> None:
        """Async entry point: handle a signaling message on a fresh greenlet."""
        self._schedule_new_greenlet(self._process_signaling_message, partner_address, rtc_message_type, content)

    def stop(self) -> None:
        """Hang up and close every connection, kill workers, reset state."""
        self.log.debug('Closing WebRTC connections')
        for conn in tuple(self._address_to_connection.values()):
            conn.send_hangup_message()
        # Iterate over a copy: close_connection mutates the mapping.
        for partner_address in self._address_to_connection.copy().keys():
            self.close_connection(partner_address)
        gevent.killall(self.greenlets)
        self._reset_state()
class Solution(object):
    """LeetCode 844: compare two typed strings where '#' means backspace."""

    def backspaceCompare(self, S, T):
        """Return True when S and T render to the same final text."""
        # Identical raw inputs trivially render identically.
        if S == T:
            return True

        def render(text):
            # Simulate typing: '#' deletes the last kept character, if any.
            kept = []
            for ch in text:
                if ch == '#':
                    if kept:
                        kept.pop()
                else:
                    kept.append(ch)
            return ''.join(kept)

        return render(S) == render(T)
def print_timer(rb_node, idx):
    """Format one hrtimer for display (gdb helper).

    Walks from a timerqueue rb-tree *rb_node* to the enclosing hrtimer and
    renders its callback, state and (soft) expiry times relative to now.
    """
    # rb_node -> timerqueue_node -> hrtimer, via container_of on each embedding.
    timerqueue = utils.container_of(rb_node, timerqueue_node_type.pointer(), 'node')
    timer = utils.container_of(timerqueue, hrtimer_type.pointer(), 'node')
    # gdb prints function pointers as "<addr> <symbol+off>"; extract the bare
    # symbol name from the second token.
    function = str(timer['function']).split(' ')[1].strip('<>')
    softexpires = timer['_softexpires']
    expires = timer['node']['expires']
    now = ktime_get()
    text = ' #{}: <{}>, {}, '.format(idx, timer, function)
    text += 'S:{:02x}\n'.format(int(timer['state']))
    # Absolute expiry window plus time remaining relative to the current clock.
    text += ' # expires at {}-{} nsecs [in {} to {} nsecs]\n'.format(softexpires, expires, (softexpires - now), (expires - now))
    return text
class _BlockInfo(object):
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
pass |
class TestMongoDBCollectorWithReplica(CollectorTestCase):
    """Tests for MongoDBCollector with replica-set statistics enabled."""

    def setUp(self):
        # Collector configured against a local mongod with replica=True so
        # that replSetGetStatus is queried during collect().
        config = get_collector_config('MongoDBCollector', {'host': 'localhost:27017', 'databases': '^db', 'replica': True})
        self.collector = MongoDBCollector(config, None)
        self.connection = MagicMock()

    def test_import(self):
        self.assertTrue(MongoDBCollector)

    # NOTE(review): the three bare expressions below look like stripped
    # decorators (skip-unless-pymongo, @patch('pymongo.MongoClient'),
    # @patch.object(Collector, 'publish')) -- confirm against upstream.
    _only_if_pymongo_is_available
    ('pymongo.MongoClient')
    (Collector, 'publish')
    def test_should_publish_replset_status_if_enabled(self, publish_mock, connector_mock):
        # NOTE(review): long() is Python 2 only -- this file presumably
        # targets Python 2; confirm before porting.
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        # With replica=True the collector must query the replica set status.
        self.connection.admin.command.assert_called_once_with('replSetGetStatus')

    def _annotate_connection(self, connector_mock, data):
        # Wire the mocked MongoClient to return canned command output and
        # a fixed database listing.
        connector_mock.return_value = self.connection
        self.connection.db.command.return_value = data
        self.connection.database_names.return_value = ['db1', 'baddb']
def _run(main_wrapper: Callable[([TextIO, TextIO], None)]) -> tuple[(str, str, int)]:
stdout = StringIO()
stderr = StringIO()
try:
main_wrapper(stdout, stderr)
exit_status = 0
except SystemExit as system_exit:
assert isinstance(system_exit.code, int)
exit_status = system_exit.code
return (stdout.getvalue(), stderr.getvalue(), exit_status) |
def get_app_from_path(request_path, base, index, reload=False):
    """Resolve *request_path* under directory *base* to a response descriptor.

    Returns one of:
      ('error', status) -- 403 for paths escaping *base*, 404 for misses
      ('html', content) -- directory listing produced by the *index* callback
      ('app', apps)     -- applications built from the module's ``main``
    """
    path = valid_and_norm_path(base, request_path)
    if path is None:
        # Path escaped the base directory (or was otherwise invalid).
        return ('error', 403)
    if os.path.isdir(path):
        # Directory URLs must end with a trailing slash.
        if not request_path.endswith('/'):
            return ('error', 404)
        index_script = os.path.join(path, 'index.py')
        if os.path.isfile(index_script):
            path = index_script
        elif index:
            # No index.py: delegate to the directory-listing callback.
            return ('html', index(path))
        else:
            return ('error', 404)
    else:
        # Map /foo to foo.py on disk.
        path += '.py'
        if not os.path.isfile(path):
            return ('error', 404)
    module = _get_module(path, reload=reload)
    if hasattr(module, 'main'):
        return ('app', make_applications(module.main))
    return ('error', 404)
class Generator(nn.Module):
    """BigGAN-style generator with optional hierarchical latents, shared class
    embeddings and spectral normalization, extended to fuse an encoder feature
    pyramid (``E1_L_feat``) into intermediate resolutions using per-resolution
    weights (``E1_fea_w``).
    """

    def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128, G_kernel_size=3, G_attn='64', n_classes=1000, num_G_SVs=1, num_G_SV_itrs=1, G_shared=True, shared_dim=0, hier=False, cross_replica=False, mybn=False, G_activation=nn.ReLU(inplace=False), G_lr=5e-05, G_B1=0.0, G_B2=0.999, adam_eps=1e-08, BN_eps=1e-05, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False, G_init='ortho', skip_init=False, no_optim=False, G_param='SN', norm_style='bn', E1_fea_w=None, **kwargs):
        super(Generator, self).__init__()
        # Channel width multiplier.
        self.ch = G_ch
        # Latent dimensionality (may be rounded below in hierarchical mode).
        self.dim_z = dim_z
        # Spatial size of the initial (bottom) feature map.
        self.bottom_width = bottom_width
        self.resolution = resolution
        self.kernel_size = G_kernel_size
        # Resolution(s) at which self-attention is inserted.
        self.attention = G_attn
        self.n_classes = n_classes
        # Whether class conditioning goes through a shared embedding.
        self.G_shared = G_shared
        self.shared_dim = (shared_dim if (shared_dim > 0) else dim_z)
        # Hierarchical z: one latent chunk per generator stage.
        self.hier = hier
        self.cross_replica = cross_replica
        self.mybn = mybn
        self.activation = G_activation
        self.init = G_init
        # Parameterization: 'SN' selects spectrally normalized layers.
        self.G_param = G_param
        self.norm_style = norm_style
        self.BN_eps = BN_eps
        self.SN_eps = SN_eps
        self.fp16 = G_fp16
        # Architecture dict (channels/upsample/attention per stage).
        self.arch = G_arch(self.ch, self.attention)[resolution]
        # Per-resolution weights for encoder feature injection.
        self.E1_fea_w = E1_fea_w
        if self.hier:
            # One z chunk per block plus one for the first linear layer;
            # dim_z is rounded down to a multiple of num_slots.
            self.num_slots = (len(self.arch['in_channels']) + 1)
            self.z_chunk_size = (self.dim_z // self.num_slots)
            self.dim_z = (self.z_chunk_size * self.num_slots)
        else:
            self.num_slots = 1
            self.z_chunk_size = 0
        # Layer constructors: spectral-norm variants or plain torch layers.
        if (self.G_param == 'SN'):
            self.which_conv = functools.partial(layers.SNConv2d, kernel_size=3, padding=1, num_svs=num_G_SVs, num_itrs=num_G_SV_itrs, eps=self.SN_eps)
            self.which_linear = functools.partial(layers.SNLinear, num_svs=num_G_SVs, num_itrs=num_G_SV_itrs, eps=self.SN_eps)
        else:
            self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
            self.which_linear = nn.Linear
        self.which_embedding = nn.Embedding
        # Class-conditional BN gets its gains/biases from either the shared
        # embedding (linear) or a per-class embedding table.
        bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared else self.which_embedding)
        self.which_bn = functools.partial(layers.ccbn, which_linear=bn_linear, cross_replica=self.cross_replica, mybn=self.mybn, input_size=((self.shared_dim + self.z_chunk_size) if self.G_shared else self.n_classes), norm_style=self.norm_style, eps=self.BN_eps)
        self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared else layers.identity())
        # First linear layer maps a z chunk to the bottom feature map.
        self.linear = self.which_linear((self.dim_z // self.num_slots), (self.arch['in_channels'][0] * (self.bottom_width ** 2)))
        self.blocks = []
        for index in range(len(self.arch['out_channels'])):
            self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index], out_channels=self.arch['out_channels'][index], which_conv=self.which_conv, which_bn=self.which_bn, activation=self.activation, upsample=(functools.partial(F.interpolate, scale_factor=2) if self.arch['upsample'][index] else None))]]
            # Optionally append a self-attention layer at this resolution.
            if self.arch['attention'][self.arch['resolution'][index]]:
                print(('Adding attention layer in G at resolution %d' % self.arch['resolution'][index]))
                self.blocks[(- 1)] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
        # Final BN -> activation -> conv to RGB.
        self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][(- 1)], cross_replica=self.cross_replica, mybn=self.mybn), self.activation, self.which_conv(self.arch['out_channels'][(- 1)], 3))
        self.init_weights()
        if no_optim:
            return
        (self.lr, self.B1, self.B2, self.adam_eps) = (G_lr, G_B1, G_B2, adam_eps)
        if G_mixed_precision:
            print('Using fp16 adam in G...')
            import utils
            self.optim = utils.Adam16(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
        else:
            self.optim = optim.Adam(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)

    def init_weights(self):
        """Initialize conv/linear/embedding weights per ``self.init`` and
        report the total parameter count."""
        self.param_count = 0
        for module in self.modules():
            if (isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Embedding)):
                if (self.init == 'ortho'):
                    init.orthogonal_(module.weight)
                elif (self.init == 'N02'):
                    init.normal_(module.weight, 0, 0.02)
                elif (self.init in ['glorot', 'xavier']):
                    init.xavier_uniform_(module.weight)
                else:
                    print('Init style not recognized...')
                self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print(('Param count for Gs initialized parameters: %d' % self.param_count))

    def forward(self, z, y, E1_L_feat):
        """Generate images from latent *z*, class embedding *y* and the
        encoder feature pyramid *E1_L_feat* (indexed by spatial size)."""
        if self.hier:
            # Split z into per-stage chunks; each stage conditions on [y, z_i].
            zs = torch.split(z, self.z_chunk_size, 1)
            z = zs[0]
            ys = [torch.cat([y, item], 1) for item in zs[1:]]
        else:
            ys = ([y] * len(self.blocks))
        h = self.linear(z)
        h = h.view(h.size(0), (- 1), self.bottom_width, self.bottom_width)
        # Inject weighted encoder features at the bottom resolution.
        h += (self.E1_fea_w[self.bottom_width] * E1_L_feat[self.bottom_width])
        for (index, blocklist) in enumerate(self.blocks):
            for block in blocklist:
                h = block(h, ys[index])
            # Fuse encoder features at intermediate resolutions, between
            # resolution/32 and resolution/4 inclusive.
            if ((h.shape[(- 1)] >= (self.resolution / 32)) and ((self.resolution / 4) >= h.shape[(- 1)])):
                h += (self.E1_fea_w[h.shape[(- 1)]] * E1_L_feat[h.shape[(- 1)]])
        return torch.tanh(self.output_layer(h))
class PooledEmbeddingsAllToAllTest(MultiProcessTestBase):
    """Multi-process tests for PooledEmbeddingsAllToAll, including quantized
    communication (QCommsConfig) and variable per-rank batch sizes."""

    # NOTE(review): first parameter is `cls` with no decorator visible --
    # presumably a stripped @classmethod; confirm against upstream torchrec.
    def _run_test_dist(cls, rank: int, world_size: int, _input: torch.Tensor, output: torch.Tensor, backend: str, dim_sum_per_rank: List[int], batch_size_per_rank: List[int], qcomms_config: Optional[QCommsConfig]=None) -> None:
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
        pg = dist.group.WORLD
        # gloo runs on CPU; other backends (nccl) get a per-rank GPU.
        if (backend == 'gloo'):
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{rank}')
        _input = _input.to(device=device)
        output = output.to(device=device)
        codecs = get_qcomm_codecs(qcomms_config)
        a2a = PooledEmbeddingsAllToAll(pg=pg, dim_sum_per_rank=dim_sum_per_rank, device=device, codecs=codecs)
        _input.requires_grad = True
        # Variable batch sizes must be passed explicitly to the all-to-all.
        if (len(set(batch_size_per_rank)) > 1):
            res = a2a(_input, batch_size_per_rank).wait()
        else:
            res = a2a(_input).wait()
        res.backward(res)
        # Quantized comms lose precision; loosen tolerances accordingly.
        (atol, rtol) = (None, None)
        if (qcomms_config is not None):
            (atol, rtol) = (0.01, 0.01)
            if ((qcomms_config.forward_precision == CommType.FP8) or (qcomms_config.backward_precision == CommType.FP8)):
                (atol, rtol) = (0.05, 0.05)
        torch.testing.assert_close(res, output, rtol=rtol, atol=atol)
        # Backward of all-to-all averages gradients over the world size.
        torch.testing.assert_close(_input.cpu().detach().div_(world_size), _input.grad.cpu().detach(), atol=atol, rtol=rtol)

    # NOTE(review): the two bare expressions below look like stripped
    # decorators (@unittest.skipIf(...) and hypothesis @given(...)/@settings)
    # -- confirm against upstream torchrec.
    ((torch.cuda.device_count() <= 1), 'Not enough GPUs, this test requires at least two GPUs')
    (backend=st.sampled_from(['nccl']), B=st.integers(min_value=2, max_value=3), features=st.integers(min_value=3, max_value=4), is_reversed=st.booleans(), variable_batch_size=st.booleans(), qcomms_config=st.sampled_from([None, QCommsConfig(forward_precision=CommType.FP16, backward_precision=CommType.FP16), QCommsConfig(forward_precision=CommType.FP16, backward_precision=CommType.BF16), QCommsConfig(forward_precision=CommType.FP16, backward_precision=CommType.FP16, backward_loss_scale=128.0), QCommsConfig(forward_precision=CommType.FP32, backward_precision=CommType.BF16), QCommsConfig(forward_precision=CommType.FP8, backward_precision=CommType.FP8), QCommsConfig(forward_precision=CommType.FP8, backward_precision=CommType.BF16)]))
    (max_examples=4, deadline=None)
    def test_pooled_embeddings(self, backend: str, B: int, features: int, is_reversed: bool, variable_batch_size: bool, qcomms_config: Optional[QCommsConfig]) -> None:
        """Generate a random pooled-embedding batch split over two ranks and
        run _run_test_dist on each rank."""
        world_size = 2
        keys = [f'F{feature}' for feature in range(features)]
        dims = random.sample(([8, 16, 32] * features), features)
        # Randomly split the features between the two ranks.
        rank0_split = random.randint(1, (features - 1))
        splits = [rank0_split, (features - rank0_split)]
        if is_reversed:
            splits.reverse()
        dim_sum_per_rank = [sum(dims[:splits[0]]), sum(dims[splits[0]:])]
        if variable_batch_size:
            batch_size_per_rank = [random.randint(B, (B + 4)), random.randint(B, (B + 4))]
        else:
            batch_size_per_rank = [B, B]
        (_input, output) = _generate_pooled_embedding_batch(keys=keys, dims=dims, splits=splits, batch_size_per_rank=batch_size_per_rank)
        kwargs_per_rank = []
        for rank in range(world_size):
            kwargs_per_rank.append({'_input': _input[rank], 'output': output[rank], 'backend': backend, 'dim_sum_per_rank': dim_sum_per_rank, 'batch_size_per_rank': batch_size_per_rank, 'qcomms_config': qcomms_config})
        self._run_multi_process_test_per_rank(callable=self._run_test_dist, world_size=world_size, kwargs_per_rank=kwargs_per_rank)
def group_norm(out_channels, affine=True, divisor=1):
    """Build a GroupNorm layer from the global GROUP_NORM configuration.

    *divisor* proportionally scales down the channel count, the
    dims-per-group and the group count (e.g. for reduced-width heads).
    """
    scaled_channels = out_channels // divisor
    dim_per_gp = config.MODEL.GROUP_NORM.DIM_PER_GP // divisor
    num_groups = config.MODEL.GROUP_NORM.NUM_GROUPS // divisor
    return torch.nn.GroupNorm(
        get_group_gn(scaled_channels, dim_per_gp, num_groups),
        scaled_channels,
        config.MODEL.GROUP_NORM.EPSILON,
        affine,
    )
.parametrize('recap_type, expected_proto_type', [(NullType(name='some_field'), 'google.protobuf.NullValue'), (BoolType(name='some_field'), 'bool'), (IntType(signed=True, bits=32, name='some_field'), 'int32'), (IntType(signed=True, bits=64, name='some_field'), 'int64'), (IntType(signed=False, bits=32, name='some_field'), 'uint32'), (IntType(signed=False, bits=64, name='some_field'), 'uint64'), (FloatType(bits=32, name='some_field'), 'float'), (FloatType(bits=64, name='some_field'), 'double'), (StringType(bytes_=100, name='some_field'), 'string'), (BytesType(bytes_=100, name='some_field'), 'bytes')])
def test_from_recap(recap_type, expected_proto_type):
converter = ProtobufConverter()
struct_type = StructType(fields=[recap_type], alias='build.recap.MyStruct')
result = converter.from_recap(struct_type)
assert isinstance(result, ast.File)
assert (len(result.file_elements) == 2)
package = result.file_elements[0]
assert isinstance(package, ast.Package)
assert (package.name == 'build.recap')
message = result.file_elements[1]
assert isinstance(message, ast.Message)
assert (message.name == 'MyStruct')
assert (len(message.elements) == 1)
field = message.elements[0]
assert isinstance(field, ast.Field)
assert (field.name == 'some_field')
assert (field.type == expected_proto_type)
assert (field.number == 1) |
def _msg_sendv(ql: Qiling, coid, smsg, sparts, rmsg, rparts, *args, **kw):
    """QNX MsgSendv syscall hook.

    Messages sent to the system manager channel are dispatched to a
    per-message-type hook resolved by name; messages to any other
    destination are logged and rejected.

    Returns the hook's return value, or -1 when unhandled.
    """
    assert (coid in ql.os.connections), 'Connection Id must exist in connections mapping'
    conn = ql.os.connections[coid]
    # Only messages addressed to the system manager channel are emulated.
    if ((conn.pid == SYSMGR_PID) and (conn.chid == SYSMGR_CHID)):
        sbody = get_message_body(ql, smsg, sparts)
        # The first 16-bit word of the message body encodes the message type.
        type_ = ql.unpack16(sbody[:2])
        msg_name = map_msgtype(ql, type_)
        _msg_handler = ql_get_module_function(f'.os.qnx', 'message')
        if (msg_name in dir(_msg_handler)):
            # NOTE(review): eval() resolves the hook by name in *this* scope,
            # not on _msg_handler -- it relies on the message module's names
            # being importable here; getattr(_msg_handler, msg_name) would be
            # safer and avoids eval on a computed string. Confirm upstream
            # intent before changing.
            msg_hook = eval(msg_name)
            msg_name = msg_hook.__name__
        else:
            msg_hook = None
            msg_name = None
        if msg_hook:
            ret = msg_hook(ql, coid, smsg, sparts, rmsg, rparts, *args, **kw)
        else:
            ql.log.warning(f'_msg_sendv: no hook for message type {type_:#04x}')
            ret = (- 1)
    else:
        # Unknown destination channel: nothing to emulate.
        ql.log.warn(f'syscall_msg_sendv(coid = {coid}): unhandled message for pid = {conn.pid}, chid = {conn.chid}')
        ret = (- 1)
    return ret
def test_mult_multiplication() -> None:
    """Applying an outer multiplier to a multiplied pattern composes the
    repetition bounds; {1} and {1,1} act as identity."""
    assert (parse('(a{2,3}){1,1}').reduce() == parse('a{2,3}').reduce())
    assert (parse('(a{2,3}){1}').reduce() == parse('a{2,3}').reduce())
    assert (parse('(a{2,3})').reduce() == parse('a{2,3}').reduce())
    # Bounds multiply pairwise: {2,3} x {4,5} -> {8,15}.
    assert (parse('(a{2,3}){4,5}').reduce() == parse('a{8,15}').reduce())
    # Unbounded repetition stays unbounded: {2,} x {2,} -> {4,}.
    assert (parse('(a{2,}){2,}').reduce() == parse('a{4,}').reduce())
def xdg_get_system_data_dirs():
    """Return the system data directories per the XDG Base Directory spec.

    On Windows the directories reported by GLib are used.  Elsewhere the
    colon-separated ``$XDG_DATA_DIRS`` environment variable is honored
    (entries normalized with ``os.path.abspath``), falling back to the
    spec's default locations when it is unset or empty.

    Note: the original docstring was lost (a bare ``'`` statement remained);
    this restores it and replaces a manual loop-and-append with ``list()``.
    """
    if os.name == 'nt':
        from gi.repository import GLib
        # GLib already resolves the platform-specific data directories.
        return list(GLib.get_system_data_dirs())
    data_dirs = os.getenv('XDG_DATA_DIRS')
    if data_dirs:
        return [os.path.abspath(d) for d in data_dirs.split(':')]
    # Defaults mandated by the XDG Base Directory specification.
    return ('/usr/local/share/', '/usr/share/')
class LossValley(SWADBase):
def __init__(self, n_converge, n_tolerance, tolerance_ratio):
self.n_converge = n_converge
self.n_tolerance = n_tolerance
self.tolerance_ratio = tolerance_ratio
self.converge_Q = deque(maxlen=n_converge)
self.smooth_Q = deque(maxlen=n_tolerance)
self.final_model = None
self.converge_step = None
self.dead_valley = False
self.threshold = None
def get_smooth_loss(self, idx):
smooth_loss = min([model.end_loss for model in list(self.smooth_Q)[idx:]])
return smooth_loss
def is_converged(self):
return (self.converge_step is not None)
def update_and_evaluate(self, segment_swa, val_acc, val_loss):
if self.dead_valley:
return
frozen = copy.deepcopy(segment_swa.cpu())
frozen.end_loss = val_loss
self.converge_Q.append(frozen)
self.smooth_Q.append(frozen)
if (not self.is_converged):
if (len(self.converge_Q) < self.n_converge):
return
min_idx = np.argmin([model.end_loss for model in self.converge_Q])
untilmin_segment_swa = self.converge_Q[min_idx]
if (min_idx == 0):
self.converge_step = self.converge_Q[0].end_step
self.final_model = swa_utils.AveragedModel(untilmin_segment_swa)
th_base = np.mean([model.end_loss for model in self.converge_Q])
self.threshold = (th_base * (1.0 + self.tolerance_ratio))
if (self.n_tolerance < self.n_converge):
for i in range((self.n_converge - self.n_tolerance)):
model = self.converge_Q[(1 + i)]
self.final_model.update_parameters(model, start_step=model.start_step, end_step=model.end_step)
elif (self.n_tolerance > self.n_converge):
converge_idx = (self.n_tolerance - self.n_converge)
Q = list(self.smooth_Q)[:(converge_idx + 1)]
start_idx = 0
for i in reversed(range(len(Q))):
model = Q[i]
if (model.end_loss > self.threshold):
start_idx = (i + 1)
break
for model in Q[(start_idx + 1):]:
self.final_model.update_parameters(model, start_step=model.start_step, end_step=model.end_step)
print(f'Model converged at step {self.converge_step}, Start step = {self.final_model.start_step}; Threshold = {self.threshold:.6f}, ')
return
if (self.smooth_Q[0].end_step < self.converge_step):
return
min_vloss = self.get_smooth_loss(0)
if (min_vloss > self.threshold):
self.dead_valley = True
print(f'Valley is dead at step {self.final_model.end_step}')
return
model = self.smooth_Q[0]
self.final_model.update_parameters(model, start_step=model.start_step, end_step=model.end_step)
def get_final_model(self):
if (not self.is_converged):
print('Requested final model, but model is not yet converged; return last model instead')
return self.converge_Q[(- 1)].cuda()
if (not self.dead_valley):
self.smooth_Q.popleft()
while self.smooth_Q:
smooth_loss = self.get_smooth_loss(0)
if (smooth_loss > self.threshold):
break
segment_swa = self.smooth_Q.popleft()
self.final_model.update_parameters(segment_swa, step=segment_swa.end_step)
return self.final_model.cuda() |
class Effect6076(BaseEffect):
    """Passive fitting effect: scales missile maximum velocity by the
    module's mode max-range post-divider."""

    type = 'passive'

    # NOTE(review): handler takes `fit` first with no self/@staticmethod --
    # presumably a stripped @staticmethod decorator; confirm upstream.
    def handler(fit, module, context, projectionRange, **kwargs):
        # Multiply maxVelocity of charges requiring 'Missile Launcher
        # Operation' by 1/modeMaxRangePostDiv, with stacking penalties
        # applied in the 'postDiv' penalty group.
        fit.modules.filteredChargeMultiply((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'maxVelocity', (1 / module.getModifiedItemAttr('modeMaxRangePostDiv')), stackingPenalties=True, penaltyGroup='postDiv', **kwargs)
def print_array_diagnostics(array, indices, comparator=(lambda a, b: (a < b))):
    """Display a diagnostics table for an array ordering.

    Shows the original and reordered arrays plus sortedness and
    index-uniqueness checks and scores, rendered as a pandas DataFrame.
    """
    ordered_array = reindex_array(array, indices)
    rows = [
        {'Description': 'Original array', 'Value': array},
        {'Description': 'Ordered array', 'Value': ordered_array},
        {'Description': 'Is sorted?', 'Value': is_sorted(array, indices, comparator)},
        {'Description': 'Sort score', 'Value': sort_score(array, indices, comparator)},
        {'Description': 'Has unique indices?', 'Value': has_unique_indices(indices)},
        {'Description': 'Unique index score', 'Value': unique_index_score(indices)},
    ]
    display(pd.DataFrame(rows))
def get_test_dependencies(test_fname):
    """Return the existing test files that *test_fname* imports relatively.

    Scans ``from .x import y`` / ``from ..x import y`` statements (skipping
    lines annotated with ``# tests_ignore``) and maps each module to a file
    path: parent-level imports resolve under ``tests/``, same-level imports
    resolve next to *test_fname*.
    """
    with open(os.path.join(PATH_TO_TRANFORMERS, test_fname), 'r', encoding='utf-8') as f:
        content = f.read()
    # (module, imported names) pairs for every relative-import line.
    matches = re.findall('from\\s+(\\.\\S+)\\s+import\\s+([^\\n]+)\\n', content)
    modules = [module for (module, imported) in matches if ('# tests_ignore' not in imported)]
    # '..module' -> tests/module.py (dots map to path separators).
    parent_files = [
        os.path.join('tests', f"{mod.strip('.').replace('.', os.path.sep)}.py")
        for mod in modules
        if ('..' in mod)
    ]
    # '.module' -> <dir of test_fname>/module.py
    test_dir = os.path.sep.join(test_fname.split(os.path.sep)[:(- 1)])
    sibling_files = [
        os.path.join(test_dir, f"{mod.strip('.').replace('.', os.path.sep)}.py")
        for mod in modules
        if ('..' not in mod)
    ]
    # Keep only dependencies that actually exist on disk.
    return [f for f in (parent_files + sibling_files) if os.path.isfile(f)]
def shape_text_hb(text, font_filename, direction=None):
    """Shape *text* with HarfBuzz using the font at *font_filename*.

    Returns ``(glyph_indices, positions, meta)``: glyph ids (uint32 array),
    per-glyph origins in em units (float32 Nx2, normalized by
    REF_GLYPH_SIZE), and a dict with the run extent, font ascender /
    descender, direction and script.
    """
    em = REF_GLYPH_SIZE
    buf = uharfbuzz.Buffer()
    buf.add_str(text)
    buf.guess_segment_properties()
    horizontal = True
    if direction is not None:
        buf.direction = direction
        horizontal = direction in ('ltr', 'rtl')
    # Reuse the (blob, face, font) triple for this font file when cached,
    # otherwise build it once and store it for subsequent calls.
    entry = CACHE_HB.get(font_filename)
    if entry:
        (blob, face, font) = entry
    else:
        blob = uharfbuzz.Blob.from_file_path(font_filename)
        face = uharfbuzz.Face(blob)
        font = uharfbuzz.Font(face)
        font.scale = (em, em)
        CACHE_HB.set(font_filename, (blob, face, font))
    uharfbuzz.shape(font, buf)
    infos = buf.glyph_infos
    placements = buf.glyph_positions
    count = len(infos)
    glyph_indices = np.zeros((count,), np.uint32)
    positions = np.zeros((count, 2), np.float32)
    pen_x = pen_y = 0
    # Walk the shaped run, accumulating pen advances and recording each
    # glyph origin (pen position + glyph offset) in em units.
    for (i, (info, placement)) in enumerate(zip(infos, placements)):
        glyph_indices[i] = info.codepoint
        positions[i] = ((pen_x + placement.x_offset) / em,
                        (pen_y + placement.y_offset) / em)
        pen_x += placement.x_advance
        pen_y += placement.y_advance
    extents = font.get_font_extents(buf.direction)
    meta = {
        'extent': (pen_x if horizontal else pen_y) / em,
        'ascender': extents.ascender / em,
        'descender': extents.descender / em,
        'direction': buf.direction,
        'script': buf.script,
    }
    return (glyph_indices, positions, meta)
class BILSTMONLY(object):
    """BiLSTM-only sequence labeler over summed char + word embeddings.

    Builds a TF1-style graph: embedding lookup, dropout, a multi-layer
    bidirectional LSTM, a dense projection to label logits, and a
    dice/DSC loss over the masked sequence.
    """

    def __init__(self, params: dict):
        # Pretrained embedding matrices loaded from .npy files.
        self.char_embedding = tf.Variable(np.load(params['embedding_path']), dtype=tf.float32, name='input_char_embedding')
        self.word_embedding = tf.Variable(np.load(params['word_embedding_path']), dtype=tf.float32, name='input_word_embedding')
        self.dropout_rate = params['dropout_prob']
        self.num_labels = params['num_labels']
        self.rnn_size = params['rnn_size']
        self.num_layers = params['num_layers']
        self.hidden_units = params['hidden_units']

    def __call__(self, input_ids=None, input_word_ids=None, labels=None, text_length_list=None, is_training=True, is_testing=False):
        """Build the forward graph.

        Returns ``(loss, pred_ids, mask)`` in train/eval mode, or just
        ``pred_ids`` when *is_testing* is True.
        """
        char_emb = tf.nn.embedding_lookup(self.char_embedding, input_ids)
        word_emb = tf.nn.embedding_lookup(self.word_embedding, input_word_ids)
        # Fuse the two embedding spaces by element-wise sum.
        fused = char_emb + word_emb
        fused = tf.layers.dropout(fused, rate=self.dropout_rate, training=is_training)
        bilstm = BLSTM(fused, self.rnn_size, self.num_layers, (1.0 - self.dropout_rate), lengths=text_length_list, is_training=is_training)
        encoded = bilstm.blstm_layer(fused)
        logits = tf.layers.dense(encoded, self.num_labels)
        mask = tf.sequence_mask(text_length_list, dtype=tf.float32, name='mask')
        probs = tf.nn.softmax(logits, axis=-1, name='pred_probs')
        pred_ids = tf.argmax(probs, axis=-1, name='pred_ids')
        if is_testing:
            return pred_ids
        loss = dice_dsc_loss(logits, labels, text_length_list, mask, self.num_labels)
        return (loss, pred_ids, mask)
class LazyString(object):
    """A string whose value is computed on demand by calling *func*.

    Every operation materializes the value with ``str(self)`` (i.e.
    ``func(*args, **kwargs)``) and delegates to built-in ``str`` behavior,
    so a ``LazyString`` can be used nearly anywhere a plain string is
    expected while deferring evaluation until the value is actually needed.
    Note the callable is re-invoked on every access; it should be cheap
    and deterministic.
    """

    def __init__(self, func, *args, **kwargs):
        # Deferred producer of the string value.
        self._func = func
        self._args = args
        self._kwargs = kwargs

    def __getattr__(self, attr):
        # Unpickling probes for __setstate__ before __init__ has run;
        # answering it here would recurse through str(self) forever.
        if attr == '__setstate__':
            raise AttributeError(attr)
        string = str(self)
        if hasattr(string, attr):
            return getattr(string, attr)
        raise AttributeError(attr)

    def __repr__(self):
        return "l'{0}'".format(str(self))

    def __str__(self):
        return str(self._func(*self._args, **self._kwargs))

    def __len__(self):
        return len(str(self))

    def __getitem__(self, key):
        return str(self)[key]

    def __iter__(self):
        return iter(str(self))

    def __contains__(self, item):
        return (item in str(self))

    def __add__(self, other):
        return (str(self) + other)

    def __radd__(self, other):
        return (other + str(self))

    def __mul__(self, other):
        return (str(self) * other)

    def __rmul__(self, other):
        return (other * str(self))

    def __lt__(self, other):
        return (str(self) < other)

    def __le__(self, other):
        return (str(self) <= other)

    def __eq__(self, other):
        return (str(self) == other)

    def __ne__(self, other):
        return (str(self) != other)

    def __gt__(self, other):
        return (str(self) > other)

    def __ge__(self, other):
        return (str(self) >= other)

    def __html__(self):
        return str(self)

    def __hash__(self):
        return hash(str(self))

    def __mod__(self, other):
        return (str(self) % other)

    def __rmod__(self, other):
        # BUG FIX: previously returned `other + str(self)` (concatenation,
        # copy-pasted from __radd__). Reflected % must format instead.
        return (other % str(self))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.