def apply_overrides(env_name, source, condition, condition_value, options, new_config, option_types=None):
if (option_types is None):
option_types = RESERVED_OPTIONS
for (raw_option, data) in options.items():
(_, separator, option) = raw_option.rpartition('set-')
overwrite = bool(separator)
if ((option_types is not RESERVED_OPTIONS) and (option in RESERVED_OPTIONS)):
continue
override_type = option_types.get(option)
if (override_type in TYPE_OVERRIDES):
TYPE_OVERRIDES[override_type](env_name, option, data, source, condition, condition_value, new_config, overwrite)
elif (isinstance(data, dict) and ('value' in data)):
if _resolve_condition(env_name, option, source, condition, condition_value, data):
new_config[option] = data['value']
elif (option_types is not RESERVED_OPTIONS):
message = f'Untyped option `tool.hatch.envs.{env_name}.overrides.{source}.{condition}.{option}` must be defined as a table with a `value` key'
raise ValueError(message) |
def reflected_binary_operator(op):
assert (not is_comparison(op))
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
(self_expr, other_expr, new_inputs) = self.build_binary_op(op, other)
return NumExprFactor('({left}) {op} ({right})'.format(left=other_expr, right=self_expr, op=op), new_inputs, dtype=binop_return_dtype(op, other.dtype, self.dtype))
elif isinstance(other, Number):
return NumExprFactor('{constant} {op} x_0'.format(op=op, constant=other), binds=(self,), dtype=binop_return_dtype(op, other.dtype, self.dtype))
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator |
def test_drrgloss():
drrgloss = losses.DRRGLoss()
assert np.allclose(drrgloss.ohem_ratio, 3.0)
pred = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=torch.float)
target = torch.tensor([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=torch.long)
mask = torch.tensor([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=torch.long)
bce_loss = drrgloss.balance_bce_loss(pred, target, mask).item()
assert np.allclose(bce_loss, 0)
pred = torch.ones((16, 16), dtype=torch.float)
target = torch.ones((16, 16), dtype=torch.long)
mask = torch.zeros((16, 16), dtype=torch.long)
bce_loss = drrgloss.balance_bce_loss(pred, target, mask).item()
assert np.allclose(bce_loss, 0)
gcn_preds = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
labels = torch.tensor([1, 0], dtype=torch.long)
gcn_loss = drrgloss.gcn_loss((gcn_preds, labels))
assert gcn_loss.item()
mask = [[1, 0, 1], [1, 1, 1], [0, 0, 1]]
target = [[1, 0, 1, 0, 0], [1, 1, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
masks = [np.array(mask)]
bitmasks = BitmapMasks(masks, 3, 3)
target_sz = (6, 5)
results = drrgloss.bitmasks2tensor([bitmasks], target_sz)
assert (len(results) == 1)
assert (torch.sum(torch.abs((results[0].float() - torch.Tensor(target)))).item() == 0)
target_maps = [BitmapMasks([np.random.randn(20, 20)], 20, 20)]
target_masks = [BitmapMasks([np.ones((20, 20))], 20, 20)]
gt_masks = [BitmapMasks([np.ones((20, 20))], 20, 20)]
preds = (torch.randn((1, 6, 20, 20)), (gcn_preds, labels))
loss_dict = drrgloss(preds, 1.0, target_masks, target_masks, gt_masks, target_maps, target_maps, target_maps, target_maps)
assert isinstance(loss_dict, dict)
assert ('loss_text' in loss_dict.keys())
assert ('loss_center' in loss_dict.keys())
assert ('loss_height' in loss_dict.keys())
assert ('loss_sin' in loss_dict.keys())
assert ('loss_cos' in loss_dict.keys())
assert ('loss_gcn' in loss_dict.keys())
target_maps = [BitmapMasks([np.random.randn(40, 40)], 40, 40)]
target_masks = [BitmapMasks([np.ones((40, 40))], 40, 40)]
gt_masks = [BitmapMasks([np.ones((40, 40))], 40, 40)]
preds = (torch.randn((1, 6, 20, 20)), (gcn_preds, labels))
loss_dict = drrgloss(preds, 0.5, target_masks, target_masks, gt_masks, target_maps, target_maps, target_maps, target_maps)
assert isinstance(loss_dict, dict)
target_maps = [BitmapMasks([np.random.randn(20, 20)], 20, 20)]
target_masks = [BitmapMasks([np.ones((20, 20))], 20, 20)]
gt_masks = [BitmapMasks([np.zeros((20, 20))], 20, 20)]
preds = (torch.randn((1, 6, 20, 20)), (gcn_preds, labels))
loss_dict = drrgloss(preds, 1.0, target_masks, target_masks, gt_masks, target_maps, target_maps, target_maps, target_maps)
assert isinstance(loss_dict, dict) |
def _get_channel_state_by_partner_address(chain_state: ChainState, token_network_registry_address: TokenNetworkRegistryAddress, token_address: TokenAddress, partner_address: Address) -> Optional[NettingChannelState]:
token_network = views.get_token_network_by_token_address(chain_state=chain_state, token_network_registry_address=token_network_registry_address, token_address=token_address)
if (token_network is None):
raise ValueError(f'The token {to_checksum_address(token_address)} is not registered on the network {to_checksum_address(token_network_registry_address)}.')
return views.get_channelstate_by_token_network_and_partner(chain_state, token_network.address, partner_address) |
class ResidualConv(nn.Module):
def __init__(self, input_dim, output_dim, stride, padding):
super(ResidualConv, self).__init__()
self.conv_block = nn.Sequential(nn.BatchNorm2d(input_dim), nn.ReLU(), nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=padding), nn.BatchNorm2d(output_dim), nn.ReLU(), nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1))
self.conv_skip = nn.Sequential(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=1), nn.BatchNorm2d(output_dim))
def forward(self, x):
return (self.conv_block(x) + self.conv_skip(x)) |
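# Usage sketch for ResidualConv above (a minimal check, assuming torch is available;
# the sizes here are illustrative, not from the original code).
import torch
block = ResidualConv(input_dim=64, output_dim=128, stride=2, padding=1)
y = block(torch.randn(1, 64, 32, 32))  # stride=2 halves H and W -> (1, 128, 16, 16)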
class Random_NAS():
def __init__(self, B, model, seed, save_dir):
self.save_dir = save_dir
self.B = B
self.model = model
self.seed = seed
self.iters = 0
self.arms = {}
self.node_id = 0
def print_summary(self):
logging.info(self.parents)
objective_vals = [(n, self.arms[n].objective_val) for n in self.arms if hasattr(self.arms[n], 'objective_val')]
objective_vals = sorted(objective_vals, key=(lambda x: x[1]))
best_arm = self.arms[objective_vals[0][0]]
val_ppl = self.model.evaluate(best_arm.arch, split='valid')
logging.info(objective_vals)
logging.info(('best valid ppl: %.2f' % val_ppl))
def get_arch(self):
arch = self.model.sample_arch()
self.arms[self.node_id] = Node(self.node_id, arch, self.node_id, 0)
self.node_id += 1
return arch
def save(self):
to_save = {a: self.arms[a].to_dict() for a in self.arms}
with open(os.path.join(self.save_dir, 'results_tmp.pkl'), 'wb') as f:
pickle.dump(to_save, f)
shutil.copyfile(os.path.join(self.save_dir, 'results_tmp.pkl'), os.path.join(self.save_dir, 'results.pkl'))
self.model.save(epoch=self.model.epochs)
def run(self):
epochs = 0
while (self.iters < self.B):
arch = self.get_arch()
self.model.train_batch(arch)
self.iters += 1
if (epochs < self.model.epochs):
epochs = self.model.epochs
self.get_eval_arch(1)
def get_eval_arch(self, epoch, rounds=None):
if (rounds is None):
n_rounds = max(1, int((self.B / 10000)))
else:
n_rounds = rounds
best_rounds = []
for r in range(n_rounds):
sample_vals = []
for _ in range(1000):
arch = self.model.sample_arch()
try:
ppl = self.model.evaluate(arch)
except Exception as e:
ppl = 1000000
sample_vals.append((arch, ppl))
with open(os.path.join(self.save_dir, 'sample_val_architecture_epoch_{}.obj'.format(self.model.epochs)), 'wb') as f:
pickle.dump(sample_vals, f)
sample_vals = sorted(sample_vals, key=(lambda x: x[1]))
full_vals = []
if ('split' in inspect.getfullargspec(self.model.evaluate).args):
for i in range(5):
arch = sample_vals[i][0]
try:
ppl = self.model.evaluate(arch, split='valid')
except Exception as e:
ppl = 1000000
full_vals.append((arch, ppl))
full_vals = sorted(full_vals, key=(lambda x: x[1]))
logging.info(('best arch: %s, best arch valid performance: %.3f' % (' '.join([str(i) for i in full_vals[0][0]]), full_vals[0][1])))
best_rounds.append(full_vals[0])
logging.info('STARTING EVALUATION')
(test, valid, runtime, params) = naseval.eval_model(config=args.__dict__, model=full_vals[0][0])
index = np.random.choice(list(range(3)))
(test, valid, runtime, params) = (np.mean(test), np.mean(valid), np.mean(runtime), np.mean(params))
logging.info(('TEST ERROR: %.3f | VALID ERROR: %.3f | RUNTIME: %f | PARAMS: %d' % (test, valid, runtime, params)))
else:
best_rounds.append(sample_vals[0])
with open(os.path.join(self.save_dir, 'full_val_architecture_epoch_{}.obj'.format(self.model.epochs)), 'wb') as f:
pickle.dump(full_vals, f)
return best_rounds |
def load_model(model, model_path, location=None):
state_dict = torch.load(model_path, map_location=location)
if ('state_dict' in state_dict.keys()):
state_dict = state_dict['state_dict']
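# checkpoints saved from nn.DataParallel / DistributedDataParallel prefix every key
# with 'module.'; strip that prefix so the weights load into an unwrapped model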
state_dict = {(k[7:] if k.startswith('module.') else k): v for (k, v) in state_dict.items()}
model.load_state_dict(state_dict)
return model |
class SpatialAdaptiveSynBatchNorm2d(nn.Module):
def __init__(self, num_features, num_w=512, batchnorm_func=SynchronizedBatchNorm2d, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True):
super(SpatialAdaptiveSynBatchNorm2d, self).__init__()
self.num_features = num_features
self.weight_proj = nn.utils.spectral_norm(nn.Linear(num_w, num_features))
self.bias_proj = nn.utils.spectral_norm(nn.Linear(num_w, num_features))
self.batch_norm2d = batchnorm_func(num_features, eps=eps, momentum=momentum, affine=affine)
def forward(self, x, vector, bbox):
output = self.batch_norm2d(x)
(b, o, bh, bw) = bbox.size()
(_, _, h, w) = x.size()
if ((bh != h) or (bw != w)):
bbox = F.interpolate(bbox, size=(h, w), mode='bilinear')
(weight, bias) = (self.weight_proj(vector), self.bias_proj(vector))
(weight, bias) = (weight.view(b, o, (- 1)), bias.view(b, o, (- 1)))
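# bbox acts as a soft per-object spatial mask: the per-object (weight, bias) vectors are
# broadcast over H x W and averaged where objects overlap (the 1e-06 guards against
# division by zero where no object covers a pixel); the +1 on the weight keeps the
# default scale at 1 in uncovered regions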
weight = ((torch.sum((bbox.unsqueeze(2) * weight.unsqueeze((- 1)).unsqueeze((- 1))), dim=1, keepdim=False) / (torch.sum(bbox.unsqueeze(2), dim=1, keepdim=False) + 1e-06)) + 1)
bias = (torch.sum((bbox.unsqueeze(2) * bias.unsqueeze((- 1)).unsqueeze((- 1))), dim=1, keepdim=False) / (torch.sum(bbox.unsqueeze(2), dim=1, keepdim=False) + 1e-06))
return ((weight * output) + bias)
def __repr__(self):
return (((self.__class__.__name__ + '(') + str(self.num_features)) + ')') |
def plot_erie(y_true, mean, lb, ub, trainlen, n, r):
plt.plot(range(len(y_true)), y_true, 'b', label='Actual')
plt.plot(range(len(y_true)), mean, 'r', label='ESN Prediction')
plt.fill_between(range(len(y_true)), lb, ub, facecolor='grey', alpha=0.3)
(lo, hi) = plt.ylim()
plt.plot([trainlen, trainlen], [(lo + np.spacing(1)), (hi - np.spacing(1))], 'k:')
plt.xlabel('Months since Aug 1922')
plt.ylabel('Water Level')
plt.legend(loc=2)
plt.show() |
def return_somethingv2(modality):
filename_categories = 'something/v2/category.txt'
if (modality == 'RGB'):
root_data = '/mnt/localssd2/aandonia/something/v2/20bn-something-something-v2-frames'
filename_imglist_train = 'something/v2/train_videofolder.txt'
filename_imglist_val = 'something/v2/val_videofolder.txt'
prefix = '{:06d}.jpg'
elif (modality == 'Flow'):
root_data = '/mnt/localssd2/aandonia/something/v2/flow'
filename_imglist_train = 'something/v2/train_videofolder.txt'
filename_imglist_val = 'something/v2/val_videofolder.txt'
prefix = '{:06d}.jpg'
else:
print(('no such modality:' + modality))
return (filename_categories, filename_imglist_train, filename_imglist_val, root_data, prefix) |
class PortfolioLayer(nn.Module):
def __init__(self, latent_size, stock_size, hidden_size=32):
super(PortfolioLayer, self).__init__()
self.net = MLP(input_size=latent_size, output_size=1, hidden_size=hidden_size)
def forward(self, latent_features):
out = self.net(latent_features)
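# assuming latent_features is (batch, num_stocks, latent_size), self.net scores each
# stock and the softmax over dim=1 below turns those scores into portfolio weights
# that sum to 1 per sample; squeeze(-1) drops the trailing singleton dimension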
out = torch.softmax(out, dim=1).squeeze((- 1))
return out |
def _create_keras_model(args: SharedArgs, input_shape: InputShape, predictor_heads: List[PredictorHeadInterface]) -> Model:
main_input = create_main_input(input_shape)
if (args.input_weight_decay is None):
input_weight_decay = args.layer_weight_decay
else:
input_weight_decay = args.input_weight_decay
num_reduction_out_channels = (args.reduction_width_factor * args.width)
num_chunk_frames = input_shape[0]
num_features = input_shape[1]
input_reduction_out = input_reduction(main_input, num_features, num_reduction_out_channels, input_weight_decay, args.batch_norm, args.dropout)
nodes = _create_backbone(args, num_chunk_frames, num_reduction_out_channels, input_reduction_out)
output_tensors = _create_output_tensors(predictor_heads, nodes)
model = Model(main_input, output_tensors)
maybe_convert_model_to_sam(model, args.sam_rho, SAM_EPSILON)
return model |
class _KeySerializationEncryption(KeySerializationEncryption):
def __init__(self, format: PrivateFormat, password: bytes, *, kdf_rounds: (int | None), hmac_hash: (HashAlgorithm | None), key_cert_algorithm: (PBES | None)):
self._format = format
self.password = password
self._kdf_rounds = kdf_rounds
self._hmac_hash = hmac_hash
self._key_cert_algorithm = key_cert_algorithm |
def corpus_align(src_data, tgt_data, ali_data):
(align_dict, align_dict_rev) = (dict(), dict())
for idx in range(len(src_data)):
src = src_data[idx].strip('\n').split()
tgt = tgt_data[idx].strip('\n').split()
ali = ali_data[idx].strip('\n').split()
(align_dict, align_dict_rev) = count_align(src, tgt, ali, align_dict, align_dict_rev)
return (align_dict, align_dict_rev) |
class ComponentMetadata():
@classmethod
def pyproject_file(cls):
return pathlib.Path('pyproject.toml').absolute()
@classmethod
def from_pyproject(cls):
data = {}
if cls.pyproject_file().exists():
try:
data = toml.load(cls.pyproject_file()).get('tool', {}).get('reahl-component', {})
except Exception as ex:
raise DistutilsSetupError(('Exception when trying to load %s: %s' % (cls.pyproject_file(), ex))) from ex
return cls(data)
def __init__(self, data):
self.data = data
self.data['metadata_version'] = '1.1.0'
def exists(self):
return bool(self.data)
def as_toml_string(self):
return toml.dumps(self.data)
def validate(self):
allowed_top_level_keys = set(['metadata_version', 'configuration', 'persisted', 'schedule', 'versions'])
unsupported_keys = (set(self.data.keys()) - allowed_top_level_keys)
if unsupported_keys:
raise DistutilsSetupError(('[%s] Unsupported keys for [tool.reahl-component]: %s' % (self.pyproject_file(), ', '.join(unsupported_keys))))
if ('configuration' in self.data):
if (not isinstance(self.data['configuration'], str)):
raise DistutilsSetupError(('[%s] "configuration" should be a str' % self.pyproject_file()))
validate_list_of_str('persisted', self.data)
validate_list_of_str('schedule', self.data)
if ('versions' in self.data):
if (not isinstance(self.data['versions'], dict)):
raise DistutilsSetupError('"versions" should be a dict')
for (version_number, version) in self.data['versions'].items():
validate_list_of_str('migrations', version)
validate_list_of_str('dependencies', version)
if ('install_requires' in version):
raise DistutilsSetupError(('[%s] "install_requires" not allowed in [tool.reahl-component.versions."%s"]. Did you mean "dependencies"?' % (self.pyproject_file(), version_number)))
unsupported_version_keys = (set(version.keys()) - {'migrations', 'dependencies'})
if unsupported_version_keys:
raise DistutilsSetupError(('[%s] Unsupported keys for [tool.reahl-component.versions."%s"]: %s' % (self.pyproject_file(), version_number, ', '.join(unsupported_version_keys)))) |
class EasybytezComFolder(XFSDecrypter):
__name__ = 'EasybytezComFolder'
__type__ = 'decrypter'
__version__ = '0.19'
__status__ = 'testing'
__pattern__ = '
__config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('folder_per_package', 'Default;Yes;No', 'Create folder for each package', 'Default'), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
__description__ = 'Easybytez.com folder decrypter plugin'
__license__ = 'GPLv3'
__authors__ = [('stickell', 'l.')]
PLUGIN_DOMAIN = 'easybytez.com'
LOGIN_ACCOUNT = True |
@with_fixtures(WebFixture)
def test_event_names_are_canonicalised(web_fixture):
fixture = web_fixture
class ModelObject():
def handle_event(self, some_argument):
self.received_argument = some_argument
events = ExposedNames()
events.an_event = (lambda i: Event(label='click me', action=Action(i.handle_event, ['some_argument']), some_argument=Field(default='default value')))
model_object = ModelObject()
class MyForm(Form):
def __init__(self, view, name):
super().__init__(view, name)
self.define_event_handler(model_object.events.an_event)
self.add_child(ButtonInput(self, model_object.events.an_event.with_arguments(some_argument='f~nnystuff')))
class MainUI(UserInterface):
def assemble(self):
self.define_page(HTML5Page).use_layout(BasicPageLayout())
home = self.define_view('/', title='Home page')
home.set_slot('main', MyForm.factory('myform'))
wsgi_app = fixture.new_wsgi_app(site_root=MainUI)
browser = Browser(wsgi_app)
browser.open('/')
csrf_token = browser.get_value('//input[@name="myform-_reahl_csrf_token"]')
browser.post('/__myform_method', {'event.myform-an_event?some_argument=f~nnystuff': '', 'myform-_reahl_database_concurrency_digest': '', 'myform-_reahl_csrf_token': csrf_token})
assert (model_object.received_argument == 'f~nnystuff') |
@pytest.mark.xfail(reason='merge_frame is deprecated.')
def test_return_dataframe_merge_is_None(returns_frame_1):
expected_output = returns_frame_1['ticker'].str.split(' ', expand=True)
result = returns_frame_1.process_text(column_name='ticker', string_function='split', expand=True, pat=' ')
assert_frame_equal(result, expected_output) |
class MaxActivationFusion(nn.Module):
def __init__(self, features=64, feature_extractor=Features4Layer, activation=relu):
super(MaxActivationFusion, self).__init__()
self.features = feature_extractor(features, activation=activation)
def forward(self, frame_1, frame_2, frame_3, frame_4, frame_5):
frame_1_feature = self.features(frame_1)
frame_2_feature = self.features(frame_2)
frame_3_feature = self.features(frame_3)
frame_4_feature = self.features(frame_4)
frame_5_feature = self.features(frame_5)
frame_1_feature = frame_1_feature.view(((1,) + frame_1_feature.size()))
frame_2_feature = frame_2_feature.view(((1,) + frame_2_feature.size()))
frame_3_feature = frame_3_feature.view(((1,) + frame_3_feature.size()))
frame_4_feature = frame_4_feature.view(((1,) + frame_4_feature.size()))
frame_5_feature = frame_5_feature.view(((1,) + frame_5_feature.size()))
cat = torch.cat((frame_1_feature, frame_2_feature, frame_3_feature, frame_4_feature, frame_5_feature), dim=0)
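# element-wise max over the stacked frame dimension: for every spatial position the
# strongest activation among the five frames is kept, which is the "max activation fusion"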
return torch.max(cat, 0)[0] |
class ToPandasMixin():
def to_pandas(self):
pandas_type = pd.Series
if hasattr(self, 'to_json'):
data = self.to_json()
if isinstance(data, Sequence):
data = [try_to_dict(d) for d in data]
pandas_type = pd.DataFrame
elif hasattr(self, 'to_dict'):
data = self.to_dict()
df = pd.json_normalize(data)
if (pandas_type is pd.Series):
if (len(df) == 1):
series = df.iloc[0]
series.name = self.__class__.__name__
return series
else:
raise ValueError(f'expected single row, got {len(df)}')
return df |
class Recognizer(object):
def __init__(self, decoder, symbols=None, allow_partial=True, acoustic_scale=0.1):
self.decoder = decoder
self.symbols = symbols
self.allow_partial = allow_partial
self.acoustic_scale = acoustic_scale
def _make_decodable(self, loglikes):
if (loglikes.num_rows == 0):
raise ValueError('Empty loglikes matrix.')
return _dec.DecodableMatrixScaled(loglikes, self.acoustic_scale)
def _determinize_lattice(self, lattice):
opts = self.decoder.get_options()
if opts.determinize_lattice:
det_opts = _lat_funcs.DeterminizeLatticePrunedOptions()
det_opts.max_mem = opts.det_opts.max_mem
return _lat_funcs.determinize_lattice_pruned(lattice, opts.lattice_beam, det_opts, True)
else:
return lattice
def decode(self, input):
self.decoder.decode(self._make_decodable(input))
if (not (self.allow_partial or self.decoder.reached_final())):
raise RuntimeError('No final state was active on the last frame.')
try:
best_path = self.decoder.get_best_path()
except RuntimeError:
raise RuntimeError('Empty decoding output.')
(ali, words, weight) = _fst_utils.get_linear_symbol_sequence(best_path)
if self.symbols:
text = ' '.join(_fst.indices_to_symbols(self.symbols, words))
else:
text = ' '.join(map(str, words))
likelihood = (- (weight.value1 + weight.value2))
if (self.acoustic_scale != 0.0):
scale = _fst_utils.acoustic_lattice_scale((1.0 / self.acoustic_scale))
_fst_utils.scale_lattice(scale, best_path)
best_path = _fst_utils.convert_lattice_to_compact_lattice(best_path)
try:
lat = self.decoder.get_raw_lattice()
except AttributeError:
return {'alignment': ali, 'best_path': best_path, 'likelihood': likelihood, 'text': text, 'weight': weight, 'words': words}
if (lat.num_states() == 0):
raise RuntimeError('Empty output lattice.')
lat.connect()
lat = self._determinize_lattice(lat)
if (self.acoustic_scale != 0.0):
if isinstance(lat, _fst.CompactLatticeVectorFst):
_fst_utils.scale_compact_lattice(scale, lat)
else:
_fst_utils.scale_lattice(scale, lat)
return {'alignment': ali, 'best_path': best_path, 'lattice': lat, 'likelihood': likelihood, 'text': text, 'weight': weight, 'words': words} |
def summary_detail_baseline(memo):
DETAIL_ARTERIAL = True
total_summary = []
records_dir = os.path.join('records', memo)
for traffic_file in os.listdir(records_dir):
ANON_ENV = False
if (('.xml' not in traffic_file) and ('anon' not in traffic_file)):
continue
if ('anon' in traffic_file):
ANON_ENV = True
exp_conf = open(os.path.join(records_dir, traffic_file, 'exp.conf'), 'r')
dic_exp_conf = json.load(exp_conf)
run_counts = dic_exp_conf['RUN_COUNTS']
avg_pressure = 0
print(traffic_file)
train_dir = os.path.join(records_dir, traffic_file)
if (os.path.getsize(os.path.join(train_dir, 'inter_0.pkl')) > 0):
with open(os.path.join(records_dir, traffic_file, 'agent.conf'), 'r') as agent_conf:
dic_agent_conf = json.load(agent_conf)
df_vehicle = []
NUM_OF_INTERSECTIONS = (int(traffic_file.split('_')[1]) * int(traffic_file.split('_')[2]))
list_f = [('inter_%d.pkl' % i) for i in range(int(NUM_OF_INTERSECTIONS))]
for f in list_f:
pressure_each_inter = 0
node_index = f.split('inter_')[1].split('.pkl')[0]
print('node', node_index)
f = open(os.path.join(train_dir, f), 'rb')
samples = pkl.load(f)
for sample in samples:
pressure_each_inter += sum(sample['state']['lane_num_vehicle_been_stopped_thres1'])
f.close()
pressure_each_inter = (pressure_each_inter / len(samples))
avg_pressure += pressure_each_inter
vehicle_csv = 'vehicle_inter_{0}.csv'.format(node_index)
df_vehicle_inter_0 = pd.read_csv(os.path.join(train_dir, vehicle_csv), sep=',', header=0, dtype={0: str, 1: float, 2: float}, names=['vehicle_id', 'enter_time', 'leave_time'])
if ANON_ENV:
flow_car = pd.DataFrame(df_vehicle_inter_0['vehicle_id'].str.split('_', (- 1)).tolist(), columns=['flow', 'flow_id', 'car_id'])
else:
flow_car = pd.DataFrame(df_vehicle_inter_0['vehicle_id'].str.split('.', 1).tolist(), columns=['flow_id', 'car_id'])
df_vehicle_inter_0 = pd.concat([flow_car, df_vehicle_inter_0], axis=1)
df_vehicle_inter_0.fillna(run_counts, inplace=True)
df_vehicle_inter_0['duration'] = (df_vehicle_inter_0['leave_time'] - df_vehicle_inter_0['enter_time'])
df_vehicle.append(df_vehicle_inter_0)
print(df_vehicle_inter_0.groupby(['flow_id'])['duration'].mean())
df_vehicle = pd.concat(df_vehicle, axis=0)
flow_df = df_vehicle.groupby(['flow_id', 'car_id']).sum()
arterial_duration = 0
side_street_duration = 0
if DETAIL_ARTERIAL:
detail_arterial = flow_df.groupby('flow_id').mean()
save_path = os.path.join('records', memo, traffic_file).replace('records', 'summary')
if (not os.path.exists(save_path)):
os.makedirs(save_path)
detail_arterial.to_csv(os.path.join(save_path, 'flow.csv'))
arterial_duration = np.average(detail_arterial[:2])
side_street_duration = np.average(detail_arterial[3:])
avg_pressure = (avg_pressure / NUM_OF_INTERSECTIONS)
car_num_out_df = df_vehicle.groupby(by=['flow_id', 'car_id'])['leave_time'].apply((lambda x: (x.shape[0] != x.count())))
car_num_out = car_num_out_df[car_num_out_df].count()
ave_duration_all = flow_df['duration'].mean()
total_summary.append([traffic_file, ave_duration_all, avg_pressure, flow_df.shape[0], car_num_out, dic_agent_conf['FIXED_TIME'], arterial_duration, side_street_duration])
else:
shutil.rmtree(train_dir)
total_summary = pd.DataFrame(total_summary)
total_summary.sort_values([0], ascending=[True], inplace=True)
total_summary.columns = ['TRAFFIC', 'DURATION', 'PRESSURE', 'CAR_NUMBER_IN', 'CAR_NUMBER_OUT', 'CONFIG', 'ARTERIAL', 'SIDE_STREET']
total_summary.to_csv(os.path.join('records', memo, 'total_baseline_results.txt').replace('records', 'summary'), sep='\t', index=False) |
class InfiniteSampler(torch.utils.data.Sampler):
def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
assert (len(dataset) > 0)
assert (num_replicas > 0)
assert (0 <= rank < num_replicas)
assert (0 <= window_size <= 1)
super().__init__(dataset)
self.dataset = dataset
self.rank = rank
self.num_replicas = num_replicas
self.shuffle = shuffle
self.seed = seed
self.window_size = window_size
def __iter__(self):
order = np.arange(len(self.dataset))
rnd = None
window = 0
if self.shuffle:
rnd = np.random.RandomState(self.seed)
rnd.shuffle(order)
window = int(np.rint((order.size * self.window_size)))
idx = 0
while True:
i = (idx % order.size)
if ((idx % self.num_replicas) == self.rank):
(yield order[i])
if (window >= 2):
j = ((i - rnd.randint(window)) % order.size)
(order[i], order[j]) = (order[j], order[i])
idx += 1 |
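# Usage sketch for InfiniteSampler above (a minimal illustration; the toy list dataset
# is not part of the original code). The sampler yields indices forever, so iteration
# has to be stopped explicitly.
toy_dataset = list(range(10))
loader = torch.utils.data.DataLoader(toy_dataset, sampler=InfiniteSampler(toy_dataset), batch_size=4)
first_batches = [batch for (_, batch) in zip(range(3), loader)]  # take three batches, then stop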
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description='torchrec dlrm example trainer')
parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=32, help='batch size to use for training')
parser.add_argument('--num_batches', type=int, default=10, help='number of batches to use for training')
parser.add_argument('--test_batch_size', type=int, default=10, help='batch size to use for validation and testing')
parser.add_argument('--num_embeddings', type=int, default=100000, help='max_ind_size. The number of embeddings in each embedding table. Defaults to 100_000 if num_embeddings_per_feature is not supplied.')
parser.add_argument('--num_embeddings_per_feature', type=str, default=None, help='Comma separated max_ind_size per sparse feature. The number of embeddings in each embedding table. 26 values are expected for the Criteo dataset.')
parser.add_argument('--dense_arch_layer_sizes', type=str, default='512,256,64', help='Comma separated layer sizes for dense arch.')
parser.add_argument('--over_arch_layer_sizes', type=str, default='512,512,256,1', help='Comma separated layer sizes for over arch.')
parser.add_argument('--embedding_dim', type=int, default=64, help='Size of each embedding.')
parser.add_argument('--pin_memory', dest='pin_memory', action='store_true', help='Use pinned memory when loading data.')
parser.add_argument('--seed', type=int, default=None, help='seed')
parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate.')
parser.add_argument('--log_every_n_steps', type=int, default=10, help='log every n steps')
parser.add_argument('--lr_warmup_steps', type=int, default=0)
parser.add_argument('--lr_decay_start', type=int, default=0)
parser.add_argument('--lr_decay_steps', type=int, default=0)
parser.set_defaults(pin_memory=None)
return parser.parse_args(argv) |
class ATSBase(Instrument):
remote_mode = Instrument.setting('%s', '``True`` disables TS GUI but displays a "Return to local" switch.', validator=strict_discrete_set, values={True: '%RM', False: '%GL'}, map_values=True)
maximum_test_time = Instrument.control('TTIM?', 'TTIM %g', 'Control maximum allowed test time (s).\n\n :type: float\n\n This prevents TS from staying at a single temperature forever.\n Valid range: 0 to 9999\n ', validator=truncated_range, values=[0, 9999])
dut_mode = Instrument.control('DUTM?', 'DUTM %g', ' ``On`` enables DUT mode, ``OFF`` enables air mode\n\n :type: string\n\n ', validator=strict_discrete_set, values={'ON': 1, 'OFF': 0}, map_values=True)
dut_type = Instrument.control('DSNS?', 'DSNS %g', "Control DUT sensor type.\n\n :type: string\n\n Possible values are:\n\n ====== ======\n String Meaning\n ====== ======\n '' no DUT\n 'T' T-DUT\n 'K' K-DUT\n ====== ======\n\n Warning: If in DUT mode without DUT being connected, TS flags DUT error\n\n ", validator=strict_discrete_set, values={None: 0, 'T': 1, 'K': 2}, map_values=True)
dut_constant = Instrument.control('DUTC?', 'DUTC %g', 'Control thermal constant (default 100) of DUT.\n\n :type: float\n\n Lower values indicate lower thermal mass, higher values indicate higher\n thermal mass respectively.\n ', validator=truncated_range, values=[20, 500])
head = Instrument.control('HEAD?', 'HEAD %s', 'Control TS head position.\n\n :type: string\n\n ``down``: transfer head to lower position\n ``up``: transfer head to elevated position\n ', validator=strict_discrete_set, values={'up': 0, 'down': 1}, map_values=True)
enable_air_flow = Instrument.setting('FLOW %g', 'Set TS air flow.\n\n ``True`` enables air flow, ``False`` disables it\n\n :type: bool\n\n ', validator=strict_discrete_set, map_values=True, values={True: 1, False: 0})
temperature_limit_air_low = Instrument.control('LLIM?', 'LLIM %g', 'Control lower air temperature limit.\n\n :type: float\n\n Valid range between -99 to 25 (C). Setpoints below current value cause\n out of range error in TS.\n ', validator=truncated_range, values=[(- 99), 25], dynamic=True)
temperature_limit_air_high = Instrument.control('ULIM?', 'ULIM %g', 'Control upper air temperature limit.\n\n :type: float\n\n Valid range between 25 to 225 (C). Setpoints above current value cause\n out of range error in TS.\n ', validator=truncated_range, values=[25, 225])
temperature_limit_air_dut = Instrument.control('ADMD?', 'ADMD %g', 'Air to DUT temperature limit.\n\n :type: float\n\n Allowed difference between nozzle air and DUT temperature during\n settling. Valid range between 10 to 300 C in 1 degree increments.\n ', validator=truncated_range, values=[10, 300])
temperature_setpoint = Instrument.control('SETP?', 'SETP %g', "Set or get selected setpoint's temperature.\n\n :type: float\n\n Valid range is -99.9 to 225.0 (C) or as indicated by\n :attr:`~.temperature_limit_air_high`\n and :attr:`~.temperature_limit_air_low`.\n Use convenience function :meth:`~ATSBase.set_temperature`\n to prevent unexpected behavior.\n ", validator=truncated_range, values=[(- 99.9), 225])
temperature_setpoint_window = Instrument.control('WNDW?', 'WNDW %g', "Setpoint's temperature window.\n\n :type: float\n\n Valid range is between 0.1 to 9.9 (C). Temperature status register\n flags ``at temperature`` in case soak time elapsed while temperature\n stays in between bounds given by this value around the current setpoint.\n ", validator=truncated_range, values=[0.1, 9.9])
temperature_soak_time = Instrument.control('SOAK?', 'SOAK %g', '\n Set the soak time for the currently selected setpoint.\n\n :type: float\n\n Valid range is between 0 to 9999 (s). Lower values shorten cycle times.\n Higher values increase cycle times, but may reduce settling errors.\n See :attr:`~.temperature_setpoint_window` for further information.\n ', validator=truncated_range, values=[0.0, 9999])
temperature = Instrument.measurement('TEMP?', 'Read current temperature with 0.1 C resolution.\n\n :type: float\n\n Temperature readings origin depends on :attr:`dut_mode` setting.\n Reading higher than 400 (C) indicates invalidity.\n ')
temperature_condition_status_code = Instrument.measurement('TECR?', 'Temperature condition status register.\n\n :type: :class:`.TemperatureStatusCode`\n ', values=[0, 255], get_process=(lambda v: TemperatureStatusCode(int(v))))
set_point_number = Instrument.control('SETN?', 'SETN %g', 'Select a setpoint to be the current setpoint.\n\n :type: int\n\n Valid range is 0 to 17 when on the Cycle screen or\n or 0 to 2 in case of operator screen (0=hot, 1=ambient, 2=cold).\n ', validator=truncated_range, values=[0, 17])
local_lockout = Instrument.setting('%s', '``True`` disables TS GUI, ``False`` enables it.\n ', validator=strict_discrete_set, values={True: '%LL', False: '%GL'}, map_values=True)
auxiliary_condition_code = Instrument.measurement('AUXC?', 'Read out auxiliary condition status register.\n\n :type: int\n\n Relevant flags are:\n\n ====== ======\n Bit Meaning\n ====== ======\n 10 None\n 9 Ramp mode\n 8 Mode: 0 programming, 1 manual\n 7 None\n 6 TS status: 0 start-up, 1 ready\n 5 Flow: 0 off, 1 on\n 4 Sense mode: 0 air, 1 DUT\n 3 Compressor: 0 on, 1 off (heating possible)\n 2 Head: 0 lower, 1 upper\n 1 None\n 0 None\n ====== ======\n\n Refer to chapter 4 in the manual\n\n ')
copy_active_setup_file = Instrument.setting('CFIL %g', 'Copy active setup file (0) to setup n (1 - 12).\n\n :type: int\n ', validator=strict_range, values=[1, 12])
compressor_enable = Instrument.setting('COOL %g', ' ``True`` enables compressors, ``False`` disables it.\n\n :type: Boolean\n\n ', validator=strict_discrete_set, map_values=True, values={True: 1, False: 0})
total_cycle_count = Instrument.control('CYCC?', 'CYCC %g', 'Set or read current cycle count (1 - 9999).\n\n :type: int\n\n Sending 0 will stop cycling\n\n ', validator=truncated_range, values=[0, 9999])
cycling_enable = Instrument.setting('CYCL %g', 'CYCL Start/stop cycling.\n\n :type: bool\n\n cycling_enable = True (start cycling)\n cycling_enable = False (stop cycling)\n ', validator=strict_discrete_set, map_values=True, values={True: 1, False: 0})
current_cycle_count = Instrument.measurement('CYCL?', 'Read the number of cycles to do\n\n :type: int\n\n ')
error_code = Instrument.measurement('EROR?', 'Read the device-specific error register (16 bits).\n\n :type: :class:`ErrorCode`\n ', get_process=(lambda v: ErrorCode(int(v))))
nozzle_air_flow_rate = Instrument.measurement('FLWR?', 'Read main nozzle air flow rate in scfm.\n ')
main_air_flow_rate = Instrument.measurement('FLRL?', 'Read main nozzle air flow rate in liters/sec.\n ')
learn_mode = Instrument.control('LRNM?', 'LRNM %g', 'Control DUT automatic tuning (learning).\n\n :type: bool\n ``False``: off\n ``True``: automatic tuning on\n\n ', validator=strict_discrete_set, map_values=True, values={True: 1, False: 0})
ramp_rate = Instrument.control('RAMP?', 'RAMP %g', 'Control ramp rate (K / min).\n\n :type: float\n\n allowed values:\n nn.n: 0 to 99.9 in 0.1 K per minute steps.\n nnnn: 100 to 9999 in 1 K per minute steps.\n ', validator=strict_discrete_set, values=({(i / 10) for i in range(1000)} | {i for i in range(100, 10000)}))
dynamic_temperature_setpoint = Instrument.measurement('SETD?', 'Read the dynamic temperature setpoint.\n\n :type: float\n ')
load_setup_file = Instrument.setting('SFIL %g', 'loads setup file SFIL.\n\n Valid range is between 1 to 12.\n\n :type: int\n ', validator=strict_range, values=[1, 12])
temperature_event_status = Instrument.measurement('TESR?', ' temperature event status register.\n\n :type: :class:`.TemperatureStatusCode`\n\n Hint: Reading will clear register content.\n\n ')
air_temperature = Instrument.measurement('TMPA?', 'Read air temperature in 0.1 C increments.\n\n :type: float\n ')
dut_temperature = Instrument.measurement('TMPD?', 'Read DUT temperature, in 0.1 C increments.\n\n :type: float\n\n ')
mode = Instrument.measurement('WHAT?', 'Returns a string indicating what the system is doing at the time the query is processed.\n\n :type: string\n\n ', values={'manual': 5, 'program': 6}, map_values=True, dynamic=True)
def __init__(self, adapter, name='ATSBase', **kwargs):
super().__init__(adapter, name=name, query_delay=0.05, **kwargs)
def reset(self):
self.write('RSTO')
return self
def enter_cycle(self):
self.write('RMPC 1')
return self
def enter_ramp(self):
self.write('RMPS 0')
return self
def clear(self):
self.write('CLER')
return self
def next_setpoint(self):
self.write('NEXT')
def configure(self, temp_window=1, dut_type='T', soak_time=30, dut_constant=100, temp_limit_air_low=(- 60), temp_limit_air_high=220, temp_limit_air_dut=50, maximum_test_time=1000):
self.temperature_setpoint_window = temp_window
self.temperature_limit_air_low = temp_limit_air_low
self.temperature_limit_air_high = temp_limit_air_high
self.dut_type = dut_type
self.maximum_test_time = maximum_test_time
if (dut_type is None):
self.dut_mode = 'OFF'
else:
self.dut_constant = dut_constant
self.dut_mode = 'ON'
self.temperature_limit_air_dut = temp_limit_air_dut
self.temperature_soak_time = soak_time
wd = self.temperature_setpoint_window
airflwlimlow = self.temperature_limit_air_low
airflwlimhigh = self.temperature_limit_air_high
dut = self.dut_type
tst_time = self.maximum_test_time
airdutlim = self.temperature_limit_air_dut
sktime = self.temperature_soak_time
message = f'''Configuring TS finished, reading back:
DUT type: {dut}
Temperature Window: {wd} K
Maximum test time: {tst_time} s
Air flow temperature limit low: {airflwlimlow:.1f} K
Air flow temperature limit high: {airflwlimhigh:.1f} K
Air to DUT temperature limit: {airdutlim} degC
Soak time {sktime} s
'''
log.info(message)
return self
def set_temperature(self, set_temp):
if (self.mode == 'manual'):
message = f'new set point temperature: {set_temp:.1f} Deg'
log.info(message)
if (set_temp <= 20):
self.set_point_number = 2
elif (set_temp < 30):
self.set_point_number = 1
elif (set_temp >= 30):
self.set_point_number = 0
else:
raise ValueError(f'Temperature {set_temp} is impossible to set!')
self.temperature_setpoint = set_temp
return self
def wait_for_settling(self, time_limit=300):
time.sleep(1)
t = 0
t_start = time.time()
while (not self.at_temperature()):
time.sleep(1)
t = (time.time() - t_start)
tstatus = self.temperature_condition_status_code
message = 'temp_set= %4.1f deg, temp= %4.1f deg, time= %.2f s, status= %s'
log.info(message, self.temperature_setpoint, self.temperature, t, tstatus)
if (t > time_limit):
log.info('no settling achieved')
break
log.info('finished this temperature point')
return self
def shutdown(self, head=False):
self.enable_air_flow = 0
self.remote_mode = False
if head:
self.head = 'up'
super().shutdown()
return self
def start(self, enable_air_flow=True):
self.remote_mode = 1
self.enable_air_flow = enable_air_flow
return self
def error_status(self):
code = self.error_code
if (not (code == 0)):
log.warning('%s', code)
return code
def cycling_stopped(self):
return (TemperatureStatusCode.CYCLING_STOPPED in self.temperature_condition_status_code)
def end_of_all_cycles(self):
return (TemperatureStatusCode.END_OF_ALL_CYCLES in self.temperature_condition_status_code)
def end_of_one_cycle(self):
return (TemperatureStatusCode.END_OF_ONE_CYCLE in self.temperature_condition_status_code)
def end_of_test(self):
return (TemperatureStatusCode.END_OF_TEST in self.temperature_condition_status_code)
def not_at_temperature(self):
return (TemperatureStatusCode.NOT_AT_TEMPERATURE in self.temperature_condition_status_code)
def at_temperature(self):
return (TemperatureStatusCode.AT_TEMPERATURE in self.temperature_condition_status_code) |
def test_validate_problem_qubit_nodes():
def random_sk_model_with_qubit_nodes(n: int):
graph = nx.complete_graph(n)
graph = nx.relabel_nodes(graph, mapping={i: cirq.LineQubit(i) for i in range(n)})
return random_plus_minus_1_weights(graph)
problem_graph = random_sk_model_with_qubit_nodes(n=3)
with pytest.raises(ValueError) as e:
_validate_problem_graph(problem_graph)
assert e.match('Problem graph must have contiguous, 0-indexed integer nodes.*') |
class Interpolate(nn.Module):
def __init__(self, scale_factor, mode):
super(Interpolate, self).__init__()
self.interp = nn.functional.interpolate
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=True)
return x |
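# Usage sketch for Interpolate above (assumes torch is imported, as in the surrounding
# code). Wrapping F.interpolate in a module lets it sit inside nn.Sequential;
# align_corners=True is only valid for the linear/bilinear/bicubic/trilinear modes.
up = Interpolate(scale_factor=2, mode='bilinear')
out = up(torch.randn(1, 3, 16, 16))  # -> (1, 3, 32, 32)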
def test_discovery_fallback_ok(session_app_data, caplog):
caplog.set_level(logging.DEBUG)
builtin = Builtin(Namespace(app_data=session_app_data, try_first_with=[], python=['magic-one', sys.executable], env=os.environ))
result = builtin.run()
assert (result is not None), caplog.text
assert (result.executable == sys.executable), caplog.text
assert ('accepted' in caplog.text) |
class Migration(migrations.Migration):
dependencies = [('views', '0027_view_editors')]
operations = [migrations.AlterModelOptions(name='view', options={'ordering': ('uri',), 'verbose_name': 'View', 'verbose_name_plural': 'Views'}), migrations.RenameField(model_name='view', old_name='key', new_name='uri_path'), migrations.AlterField(model_name='view', name='uri', field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this view (auto-generated).', max_length=800, verbose_name='URI')), migrations.AlterField(model_name='view', name='uri_path', field=models.SlugField(blank=True, help_text='The path for the URI of this view.', max_length=512, verbose_name='URI Path'))] |
def test_bad_optional_dumping(retort, debug_trail):
raises_exc(with_cause(NoSuitableProvider(f'Cannot produce dumper for type {Union[(int, Callable[([int], str)])]}'), with_notes(CannotProvide(message=f'All cases of union must be class, but found {[Callable[([int], str)]]}', is_demonstrative=True, is_terminal=True), f'Location: type={Union[(int, Callable[([int], str)])]}')), func=(lambda : retort.replace(debug_trail=debug_trail).get_dumper(Union[(int, Callable[([int], str)])]))) |
class MaskingFilter(logging.Filter):
REPLACE_STR = ('*' * 4)
_UNWANTED = frozenset([s for obj in ('', None) for s in (repr(obj), str(obj))])
def __init__(self, _use_named_masks: bool=False, **patterns: Iterable[(str | re.Pattern[str])]) -> None:
super().__init__()
self._redact_patterns = defaultdict(set)
for (k, vs) in patterns.items():
self._redact_patterns[k] = {v for v in vs if (v and (v not in self._UNWANTED))}
self._use_named_masks = _use_named_masks
def add_mask_for(self, data: str, name: str='redacted') -> MaskingFilter:
if (data and (data not in self._UNWANTED)):
log.debug('Adding redact pattern %r to _redact_patterns', name)
self._redact_patterns[name].add(data)
return self
def filter(self, record: logging.LogRecord) -> bool:
record.msg = self.mask(record.msg)
if (record.args is None):
pass
elif isinstance(record.args, dict):
record.args = {k: (v if (type(v) in (bool, int, float)) else self.mask(str(v))) for (k, v) in record.args.items()}
else:
record.args = tuple(((arg if (type(arg) in (bool, int, float)) else self.mask(str(arg))) for arg in record.args))
return True
def mask(self, msg: str) -> str:
if (not isinstance(msg, str)):
log.debug('cannot mask object of type %s', type(msg))
return msg
for (mask, values) in self._redact_patterns.items():
repl_string = (self.REPLACE_STR if (not self._use_named_masks) else f'<{mask!r} (value removed)>')
for data in values:
if isinstance(data, str):
msg = msg.replace(data, repl_string)
elif isinstance(data, re.Pattern):
msg = data.sub(repl_string, msg)
return msg |
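# Usage sketch for MaskingFilter above (the logger name and secret value are
# illustrative only). Attached to a logger, the filter replaces registered secrets
# in both the message and its arguments before any handler formats the record.
mask_filter = MaskingFilter(api_key=['s3cr3t-token'])
logging.getLogger('masked').addFilter(mask_filter)
logging.getLogger('masked').warning('calling API with key %s', 's3cr3t-token')  # logged as ****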
def test_bare_parameters():
proj = CRS.from_string('+proj=lcc +lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0')
with pytest.warns(UserWarning):
assert ('+no_defs' in proj.to_proj4(4))
proj = CRS.from_string('+lon_0=-95 +ellps=GRS80 +proj=lcc +y_0=0 +no_defs=False +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0') |
class MultiViewPCDataset(torch.utils.data.Dataset):
def __init__(self, root_path, data_list_path, labels_path):
self.root_path = root_path
self.data_list_path = data_list_path
self.labels_path = labels_path
self.labels = self.load_labels(labels_path)
self.data_list = self.load_data_list(data_list_path)
self.transform = transforms.Compose([transforms.Resize((224, 224)), (lambda image: image.convert('RGB')), transforms.ToTensor()])
def __len__(self):
return len(self.data_list)
def load_labels(self, labels_path):
label_list = []
with open(labels_path, 'r') as f:
for line in f:
label_list.append(line.strip())
return label_list
def load_data_list(self, data_list_path):
data_list = []
with open(data_list_path, 'r') as f:
for line in f:
items = line.strip().split(':')
scene_id = items[1].split(' ')[0]
instance_id = items[2].split(' ')[0]
label_name = ' '.join(items[3].split(' ')[0:(- 1)])
label_id = self.labels.index(label_name)
all_path = []
for angle in [0, 30, (- 30), 60, (- 60)]:
path = f'{instance_id}_{label_name}_{angle}.jpg'
path = osp.join(self.root_path, scene_id, 'multi_view_pc', path)
all_path.append(path)
data_list.append((all_path, label_id))
return data_list
def __getitem__(self, idx):
(paths, label) = self.data_list[idx]
image_features = []
for path in paths:
image = Image.open(path)
image_features.append(self.transform(image))
image_features = torch.stack(image_features, dim=0)
return (image_features, int(label)) |
def init_default_config(pelican):
from pelican.settings import DEFAULT_CONFIG
bootstrapify_default = {'table': ['table', 'table-striped', 'table-hover'], 'img': ['img-responsive']}
set_default_config(DEFAULT_CONFIG, bootstrapify_default)
if pelican:
set_default_config(pelican.settings, bootstrapify_default) |
def compute_on_dataset(model, data_loader, device, predict_folder, timer=None, vis=False, eval_score_iou=False, eval_depth=False, eval_trunc_recall=False):
model.eval()
cpu_device = torch.device('cpu')
dis_ious = defaultdict(list)
depth_errors = defaultdict(list)
differ_ious = []
with torch.no_grad():
for (idx, batch) in enumerate(tqdm(data_loader)):
(images, targets, image_ids) = (batch['images'], batch['targets'], batch['img_ids'])
images = images.to(device)
vis_target = targets[0]
targets = [target.to(device) for target in targets]
if timer:
timer.tic()
(output, eval_utils, visualize_preds) = model(images, targets)
output = output.to(cpu_device)
if timer:
torch.cuda.synchronize()
timer.toc()
dis_iou = eval_utils['dis_ious']
if (dis_iou is not None):
for key in dis_iou:
dis_ious[key] += dis_iou[key].tolist()
if vis:
show_image_with_boxes(vis_target.get_field('ori_img'), output, vis_target, visualize_preds, vis_scores=eval_utils['vis_scores'])
predict_txt = (image_ids[0] + '.txt')
predict_txt = os.path.join(predict_folder, predict_txt)
generate_kitti_3d_detection(output, predict_txt)
for (key, value) in dis_ious.items():
mean_iou = (sum(value) / len(value))
dis_ious[key] = mean_iou
return dis_ious |
class GVector(object):
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def Mag(self):
return math.sqrt((((self.x ** 2) + (self.y ** 2)) + (self.z ** 2)))
def dist(self, other):
return math.sqrt(((((self.x - other.x) ** 2) + ((self.y - other.y) ** 2)) + ((self.z - other.z) ** 2)))
def __add__(self, other):
if (not isinstance(other, GVector)):
raise ValueError(("Can't add GVector to " + str(type(other))))
v = GVector((self.x + other.x), (self.y + other.y), (self.z + other.z))
return v
def __sub__(self, other):
return (self + (other * (- 1)))
def __mul__(self, other):
v = GVector((self.x * other), (self.y * other), (self.z * other))
return v
__rmul__ = __mul__
def linear_combination(self, other, l1, l2=None):
if (l2 is None):
l2 = (1 - l1)
v = GVector(((self.x * l1) + (other.x * l2)), ((self.y * l1) + (other.y * l2)), ((self.z * l1) + (other.z * l2)))
return v
def __str__(self):
return ('<%f, %f, %f>' % (self.x, self.y, self.z))
def __repr__(self):
return ('GVector(%f, %f, %f)' % (self.x, self.y, self.z)) |
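# Usage sketch for GVector above (standard library only; values chosen so the results
# are easy to verify by hand).
v1 = GVector(1.0, 2.0, 2.0)
v2 = GVector(4.0, 0.0, 0.0)
assert abs(v1.Mag() - 3.0) < 1e-12          # sqrt(1 + 4 + 4) == 3
midpoint = v1.linear_combination(v2, 0.5)   # l2 defaults to 1 - l1
print(midpoint)                             # <2.500000, 1.000000, 1.000000>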
def main(_):
(model_config, train_config, input_config) = get_configs_from_pipeline_file()
model_fn = functools.partial(build_man_model, model_config=model_config, is_training=True)
create_input_dict_fn = functools.partial(input_reader.read, input_config)
trainer.train(model_fn, create_input_dict_fn, train_config, FLAGS.train_dir, FLAGS.image_root) |
def get_scanengine(job, timeout=None):
job_type = job[0]
for import_job in scan_job_description.keys():
if (re.search(import_job, job_type) is not None):
name = scan_job_description[import_job]._whats_your_name()
if (timeout is None):
return scan_job_description[import_job].__dict__[name](job)
else:
return scan_job_description[import_job].__dict__[name](job, timeout)
return False |
class DistanceAdj(nn.Module):
def __init__(self, sigma, bias):
super(DistanceAdj, self).__init__()
self.w = nn.Parameter(torch.FloatTensor(1))
self.b = nn.Parameter(torch.FloatTensor(1))
self.w.data.fill_(sigma)
self.b.data.fill_(bias)
def forward(self, batch_size, seq_len):
arith = np.arange(seq_len).reshape((- 1), 1)
dist = pdist(arith, metric='cityblock').astype(np.float32)
dist = torch.from_numpy(squareform(dist)).cuda()
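# squareform(pdist(...)) expands the condensed pairwise |i - j| distances into the
# full (seq_len, seq_len) matrix; the learnable w and b below map it through
# exp(-|w * d**2 - b|) to form a temporal-distance-aware adjacency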
dist = torch.exp((- torch.abs(((self.w * (dist ** 2)) - self.b))))
dist = torch.unsqueeze(dist, 0).repeat(batch_size, 1, 1)
return dist |
class DemtHead(BaseHead):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.head_endpoints = ['final']
out_channels = (self.in_channels // 8)
dim_ = 256
self.bottleneck = nn.ModuleDict({t: utils_heads.ConvBNReLU(dim_, out_channels, kernel_size=3, norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU) for t in self.tasks})
self.final_logits = nn.ModuleDict({t: nn.Conv2d(out_channels, self.task_channel_mapping[t]['final'], kernel_size=1, bias=True) for t in self.tasks})
self.init_weights()
self.defor_mixers = nn.ModuleList([DefMixer(dim_in=dim_, dim=dim_, depth=1) for t in range(len(self.tasks))])
self.linear1 = nn.Sequential(nn.Linear(self.in_channels, dim_), nn.LayerNorm(dim_))
self.task_fusion = nn.MultiheadAttention(embed_dim=dim_, num_heads=4, dropout=0.0)
self.smlp = nn.Sequential(nn.Linear(dim_, dim_), nn.LayerNorm(dim_))
self.smlp2 = nn.ModuleList([nn.Sequential(nn.Linear(dim_, dim_), nn.LayerNorm(dim_)) for t in range(len(self.tasks))])
self.task_querys = nn.ModuleList([nn.MultiheadAttention(embed_dim=dim_, num_heads=4, dropout=0.0) for t in range(len(self.tasks))])
def forward(self, inp, inp_shape, **kwargs):
inp = self._transform_inputs(inp)
(b, c, h, w) = inp.shape
inp = self.linear1(inp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
outs = []
for (ind, defor_mixer) in enumerate(self.defor_mixers):
out = defor_mixer(inp)
out = rearrange(out, 'b c h w -> b (h w) c').contiguous()
outs.append(out)
task_cat = torch.cat(outs, dim=1)
task_cat = self.task_fusion(task_cat, task_cat, task_cat)[0]
task_cat = self.smlp(task_cat)
outs_ls = []
for (ind, task_query) in enumerate(self.task_querys):
inp = (outs[ind] + self.smlp2[ind](task_query(outs[ind], task_cat, task_cat)[0]))
outs_ls.append(rearrange(inp, 'b (h w) c -> b c h w', h=h, w=w).contiguous())
inp_dict = {t: outs_ls[idx] for (idx, t) in enumerate(self.tasks)}
task_specific_feats = {t: self.bottleneck[t](inp_dict[t]) for t in self.tasks}
final_pred = {t: self.final_logits[t](task_specific_feats[t]) for t in self.tasks}
final_pred = {t: nn.functional.interpolate(final_pred[t], size=inp_shape, mode='bilinear', align_corners=False) for t in self.tasks}
return {'final': final_pred} |
class vgg16(_fasterRCNN):
def __init__(self, classes, pretrained=False, class_agnostic=False):
self.model_path = 'data/pretrained_model/vgg16_caffe.pth'
self.dout_base_model = 512
self.pretrained = pretrained
self.class_agnostic = class_agnostic
_fasterRCNN.__init__(self, classes, class_agnostic)
def _init_modules(self):
vgg = models.vgg16()
if self.pretrained:
print(('Loading pretrained weights from %s' % self.model_path))
state_dict = torch.load(self.model_path)
vgg.load_state_dict({k: v for (k, v) in state_dict.items() if (k in vgg.state_dict())})
vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:(- 1)])
self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:(- 1)])
for layer in range(10):
for p in self.RCNN_base[layer].parameters():
p.requires_grad = False
self.RCNN_top = vgg.classifier
self.RCNN_cls_score = nn.Linear(4096, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(4096, 4)
else:
self.RCNN_bbox_pred = nn.Linear(4096, (4 * self.n_classes))
def _head_to_tail(self, pool5):
pool5_flat = pool5.view(pool5.size(0), (- 1))
fc7 = self.RCNN_top(pool5_flat)
return fc7 |
def get_monitorengine(job):
job_type = job[0]
for import_job in monitor_description.keys():
if (re.search(import_job, job_type) is not None):
name = monitor_description[import_job]._whats_your_name()
return monitor_description[import_job].__dict__[name](job)
return False |
def report_notprint(counts, out=None):
if (out is None):
out = sys.stdout
(overall, by_type) = metrics(counts)
c = counts
final_report = []
line = []
line.append(('processed %d tokens with %d phrases; ' % (c.token_counter, c.found_correct)))
line.append(('found: %d phrases; correct: %d.\n' % (c.found_guessed, c.correct_chunk)))
final_report.append(''.join(line))
if (c.token_counter > 0):
line = []
line.append(('accuracy: %6.2f%%; ' % ((100.0 * c.correct_tags) / c.token_counter)))
line.append(('precision: %6.2f%%; ' % (100.0 * overall.prec)))
line.append(('recall: %6.2f%%; ' % (100.0 * overall.rec)))
line.append(('FB1: %6.2f\n' % (100.0 * overall.fscore)))
final_report.append(''.join(line))
for (i, m) in sorted(by_type.items()):
line = []
line.append(('%17s: ' % i))
line.append(('precision: %6.2f%%; ' % (100.0 * m.prec)))
line.append(('recall: %6.2f%%; ' % (100.0 * m.rec)))
line.append(('FB1: %6.2f %d\n' % ((100.0 * m.fscore), c.t_found_guessed[i])))
final_report.append(''.join(line))
return final_report |
class TrainDataset(Dataset):
def __init__(self, args, raw_datasets, cache_root):
self.raw_datasets = raw_datasets
self.tab_processor = get_default_processor(max_cell_length=100, tokenizer=AutoTokenizer.from_pretrained(args.bert.location, use_fast=False), max_input_length=args.seq2seq.table_truncation_max_length)
cache_path = os.path.join(cache_root, 'kvret_glmp_train.cache')
if (os.path.exists(cache_path) and args.dataset.use_cache):
self.extended_data = torch.load(cache_path)
else:
self.extended_data = []
for raw_data in tqdm(self.raw_datasets):
extend_data = copy.deepcopy(raw_data)
history = kvret_get_constructed_history(history=extend_data['history'])
table_context = {'header': extend_data['kb']['header'], 'rows': extend_data['kb']['rows']}
for truncate_func in self.tab_processor.table_truncate_funcs:
truncate_func.truncate_table(table_context, history, [])
linear_table = self.tab_processor.table_linearize_func.process_table(table_context)
extend_data.update({'struct_in': linear_table.lower(), 'text_in': history.lower(), 'seq_out': extend_data['response'].lower()})
self.extended_data.append(extend_data)
if args.dataset.use_cache:
torch.save(self.extended_data, cache_path)
def __getitem__(self, index) -> T_co:
return self.extended_data[index]
def __len__(self):
return len(self.extended_data) |
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
model = _make_detr('resnet50', dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url=' map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model |
class ImageOverlay(Layer):
_template = Template('\n {% macro script(this, kwargs) %}\n var {{ this.get_name() }} = L.imageOverlay(\n {{ this.url|tojson }},\n {{ this.bounds|tojson }},\n {{ this.options|tojson }}\n );\n {% endmacro %}\n ')
def __init__(self, image: Any, bounds: TypeBounds, origin: str='upper', colormap: Optional[Callable]=None, mercator_project: bool=False, pixelated: bool=True, name: Optional[str]=None, overlay: bool=True, control: bool=True, show: bool=True, **kwargs):
super().__init__(name=name, overlay=overlay, control=control, show=show)
self._name = 'ImageOverlay'
self.bounds = bounds
self.options = parse_options(**kwargs)
self.pixelated = pixelated
if mercator_project:
image = mercator_transform(image, (bounds[0][0], bounds[1][0]), origin=origin)
self.url = image_to_url(image, origin=origin, colormap=colormap)
def render(self, **kwargs) -> None:
super().render()
figure = self.get_root()
assert isinstance(figure, Figure), 'You cannot render this Element if it is not in a Figure.'
if self.pixelated:
pixelated = '\n <style>\n .leaflet-image-layer {\n /* old android/safari*/\n image-rendering: -webkit-optimize-contrast;\n image-rendering: crisp-edges; /* safari */\n image-rendering: pixelated; /* chrome */\n image-rendering: -moz-crisp-edges; /* firefox */\n image-rendering: -o-crisp-edges; /* opera */\n -ms-interpolation-mode: nearest-neighbor; /* ie */\n }\n </style>\n '
figure.header.add_child(Element(pixelated), name='leaflet-image-layer')
def _get_self_bounds(self) -> TypeBounds:
return self.bounds |
@register_model_architecture('lightconv_lm', 'lightconv_lm')
def base_lm_architecture(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.character_embeddings = getattr(args, 'character_embeddings', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.decoder_normalize_before = True
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False)
args.decoder_kernel_size_list = getattr(args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31])
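# If a single kernel size is given, broadcast it to every decoder layer.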
if (len(args.decoder_kernel_size_list) == 1):
args.decoder_kernel_size_list = (args.decoder_kernel_size_list * args.decoder_layers) |
class ConvNextFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
model_input_names = ['pixel_values']
def __init__(self, do_resize=True, size=224, resample=Image.BICUBIC, crop_pct=None, do_normalize=True, image_mean=None, image_std=None, **kwargs):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.crop_pct = crop_pct
self.do_normalize = do_normalize
self.image_mean = (image_mean if (image_mean is not None) else IMAGENET_DEFAULT_MEAN)
self.image_std = (image_std if (image_std is not None) else IMAGENET_DEFAULT_STD)
def __call__(self, images: ImageInput, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchFeature:
valid_images = False
if (isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images)):
valid_images = True
elif isinstance(images, (list, tuple)):
if ((len(images) == 0) or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])):
valid_images = True
if (not valid_images):
raise ValueError('Images must be of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), `List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples).')
is_batched = bool((isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))))
if (not is_batched):
images = [images]
if (self.do_resize and (self.size is not None)):
if (self.size >= 384):
images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images]
else:
if (self.crop_pct is None):
self.crop_pct = (224 / 256)
size = int((self.size / self.crop_pct))
images = [self.resize(image=image, size=size, default_to_square=False, resample=self.resample) for image in images]
images = [self.center_crop(image=image, size=self.size) for image in images]
if self.do_normalize:
images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images]
data = {'pixel_values': images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs |
@pytest.mark.chrome
def test_launch(testdir):
file_test = testdir.makepyfile("\n import pytest\n @pytest.mark.nondestructive\n def test_pass(webtext):\n assert webtext == u'Success!'\n ")
testdir.quick_qa('--driver', 'Remote', '--capability', 'browserName', 'chrome', file_test, passed=1) |
def run_program(args):
if (len(args) < 1):
sys.exit(usage)
command = args.pop(0)
if (command in ('--help', '-h', 'help')):
sys.exit(usage)
if (command in ('--multiple', '-m')):
glbs = globals()
cmds = []
while (args and (args[0] in subcmds_desc)):
cmds.append(args.pop(0))
for command in cmds:
glbs[('command_' + command)](args)
sys.exit()
if (command not in subcmds_desc):
sys.exit('{0}: error: no such subcommand: {1}'.format(program_name, command))
if ((len(args) == 0) or ((len(args) == 1) and (args[0] in ('-d', '--show_defaults')))):
args.append('--help')
try:
lst = globals()[('command_' + command)](command, args)
if (lst is not None):
gfts = lst[0]
with open(lst[(- 1)], 'w') as f:
if isinstance(gfts, gftest):
f.write(gfts.dump())
else:
for i in lst[0]:
f.write(i.dump())
except FomostoReportError as e:
sys.exit(str(e)) |
def decode_data_with_region_reader(data: dict) -> tuple[(RegionReader, GameDescription)]:
data = game_migration.migrate_to_current(data)
game = RandovaniaGame(data['game'])
resource_database = read_resource_database(game, data['resource_database'])
dock_weakness_database = read_dock_weakness_database(data['dock_weakness_database'], resource_database)
layers = frozen_lib.wrap(data['layers'])
region_reader = RegionReader(resource_database, dock_weakness_database)
region_list = region_reader.read_region_list(data['regions'])
victory_condition = read_requirement(data['victory_condition'], resource_database)
starting_location = NodeIdentifier.from_json(data['starting_location'])
initial_states = read_initial_states(data['initial_states'], resource_database)
minimal_logic = read_minimal_logic_db(data['minimal_logic'])
used_trick_levels = read_used_trick_levels(data['used_trick_levels'], resource_database)
return (region_reader, GameDescription(game=game, resource_database=resource_database, layers=layers, dock_weakness_database=dock_weakness_database, region_list=region_list, victory_condition=victory_condition, starting_location=starting_location, initial_states=initial_states, minimal_logic=minimal_logic, used_trick_levels=used_trick_levels)) |
@pytest.mark.usefixtures('temp_app_data')
def test_create_parallel(tmp_path):
def create(count):
subprocess.check_call([sys.executable, '-m', 'virtualenv', '-vvv', str((tmp_path / f'venv{count}')), '--without-pip'])
threads = [Thread(target=create, args=(i,)) for i in range(1, 4)]
for thread in threads:
thread.start()
for thread in threads:
thread.join() |
class Marker():
def __init__(self, marker: str) -> None:
try:
self._markers = _normalize_extra_values(_parse_marker(marker))
except ParserSyntaxError as e:
raise InvalidMarker(str(e)) from e
def __str__(self) -> str:
return _format_marker(self._markers)
def __repr__(self) -> str:
return f"<Marker('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if (not isinstance(other, Marker)):
return NotImplemented
return (str(self) == str(other))
def evaluate(self, environment: Optional[Dict[(str, str)]]=None) -> bool:
current_environment = default_environment()
current_environment['extra'] = ''
if (environment is not None):
current_environment.update(environment)
if (current_environment['extra'] is None):
current_environment['extra'] = ''
return _evaluate_markers(self._markers, current_environment) |
@pytest.mark.parametrize('data', [[10, 100, (- 1), 1, 3], [10, 50, (- 1), 1, 3], [10, 100, (- 1), 1, 3], [10, 100, (- 1), 2, 3], [10, 100, (- 1), 10, 3], [10, 100, (- 1), 10, 5]])
def test_create_straight_road(data):
road = xodr.create_road([xodr.Line(data[1])], data[0], data[3], data[3], data[2], lane_width=data[4])
odr = xodr.OpenDrive('myroad')
odr.add_road(road)
odr.adjust_roads_and_lanes()
redict = road.get_attributes()
assert (int(redict['id']) == data[0])
assert (int(redict['length']) == data[1])
assert (int(redict['junction']) == data[2])
assert (len(road.lanes.lanesections[0].leftlanes) == data[3])
assert (len(road.lanes.lanesections[0].rightlanes) == data[3])
assert (road.lanes.lanesections[0].leftlanes[0].widths[0].a == data[4])
assert (road.lanes.lanesections[0].leftlanes[0].widths[0].b == 0)
assert (road.lanes.lanesections[0].leftlanes[0].widths[0].c == 0)
assert (road.lanes.lanesections[0].leftlanes[0].widths[0].d == 0)
assert (version_validation(None, odr, wanted_schema='xodr') == ValidationResponse.OK) |
class Sentinel(type):
def __new__(cls: Type[_T_Sentinel], name: str, bases: Tuple[(type, ...)], namespace: Dict[(str, Any)], **kwds: Any) -> _T_Sentinel:
assert (bases == (Sentinel,))
v = super().__new__(cls, name, bases, namespace, **kwds)
v.__class__ = v
return v
def __repr__(self) -> str:
return self.__name__ |
def main(birdsongrec_root, data_root):
birdsongrec_root = Path(birdsongrec_root).expanduser().resolve()
if (not birdsongrec_root.exists()):
raise NotADirectoryError(f'birdsongrec_root not recognized as a directory: {birdsongrec_root}')
data_root = Path(data_root).expanduser().resolve()
if (not data_root.exists()):
raise NotADirectoryError(f'data_root not recognized as a directory: {data_root}')
copyto_dir = (data_root / 'annotation_converted')
copyto_dir.mkdir(exist_ok=True)
scribe = crowsetta.Transcriber(format='koumura')
pbar = tqdm(BIRDS)
for bird in pbar:
pbar.set_description(f'bird={bird}')
bird_dir = (birdsongrec_root / bird)
annot_path = (bird_dir / 'Annotation.xml')
annots = scribe.from_file(annot_path)
unique_labels = set([lbl for annot in annots for lbl in annot.seq.labels.tolist()])
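# Map each unique label to a float index (sorted for a deterministic mapping); segment labels are stored numerically in the .mat files written below.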
labelmap = {lbl: float(lbl_ind) for (lbl, lbl_ind) in zip(sorted(unique_labels), range(len(unique_labels)))}
(keys, elements) = ([], [])
wave_dir = (bird_dir / 'Wave')
for (filenum, annot) in enumerate(annots):
key = annot.audio_path.name
keys.append(key)
labels = np.array([labelmap[lbl] for lbl in annot.seq.labels.tolist()]).reshape((- 1), 1)
(fs, _) = wavfile.read(str((wave_dir / key)))
element = dict(filenum=str(filenum), segFileStartTimes=annot.seq.onsets_s.tolist(), segFileEndTimes=annot.seq.offsets_s.tolist(), segType=labels, fs=fs)
elements.append(element)
for key_ind in range(len(keys)):
src = (wave_dir / keys[key_ind])
dst_name = f"{bird}_{key_ind:04}_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}.wav"
dst = (wave_dir / dst_name)
shutil.move(src, dst)
keys[key_ind] = dst_name
annot_dict = {'keys': np.array(keys, dtype=object), 'elements': elements}
annot_fname = f'{bird}_annotation.mat'
for dst in (bird_dir, copyto_dir, wave_dir):
if (dst == wave_dir):
annot_fname = f'tweetynet-{annot_fname}'
savemat((dst / annot_fname), annot_dict)
WAVS_STRUCT_FIELDS = ['filename', 'startTime', 'endTime', 'fs', 'wav', 'segType']
WAVS_STRUCT_TUPLE = namedtuple('wav', field_names=WAVS_STRUCT_FIELDS)
FAKE_WAV_CLIP_SIZE = (100,)
wavs_structs = []
for (key, element) in zip(keys, elements):
(labels, onsets, offsets) = (element['segType'], element['segFileStartTimes'], element['segFileEndTimes'])
labels = np.squeeze(labels).tolist()
for (label, onset, offset) in zip(labels, onsets, offsets):
if (label not in [wav.segType for wav in wavs_structs]):
(fs, _) = wavfile.read(str((wave_dir / key)))
fake_wav_clip = np.zeros(FAKE_WAV_CLIP_SIZE)
new_wav_struct = WAVS_STRUCT_TUPLE(filename=key, startTime=onset, endTime=offset, fs=fs, wav=fake_wav_clip, segType=label)
wavs_structs.append(new_wav_struct)
if (set([wav.segType for wav in wavs_structs]) == unique_labels):
break
if (set([wav.segType for wav in wavs_structs]) == unique_labels):
break
FILENAME_SIZE = len(dst_name)
FIELD_DTYPES = ((np.str_, FILENAME_SIZE), np.double, np.double, np.double, (np.float64, FAKE_WAV_CLIP_SIZE), np.float64)
WAVS_STRUCT_DTYPE = np.dtype([field_dtype for field_dtype in zip(WAVS_STRUCT_FIELDS, FIELD_DTYPES)])
wavs_struct_array = np.array(wavs_structs, dtype=WAVS_STRUCT_DTYPE)
template_dict = {'templates': {'wavs': wavs_struct_array}}
template_fname = f'{bird}_templates.mat'
for dst in (bird_dir, copyto_dir, wave_dir):
if (dst == wave_dir):
template_fname = f'tweetynet-{template_fname}'
savemat((dst / template_fname), template_dict) |
class Solution():
def findPairs(self, nums: List[int], k: int) -> int:
if (k < 0):
return 0
count = Counter(nums)
pairs = set([])
for num in count.keys():
if (k == 0):
if (count[num] > 1):
pairs.add((num, num))
else:
otherNum = (num + k)
if (otherNum in count):
if (num <= otherNum):
pairs.add((num, otherNum))
else:
pairs.add((otherNum, num))
return len(pairs) |
def test_format_check_passing(run_line_simple, tmp_path):
schemafile = (tmp_path / 'schema.json')
schemafile.write_text(json.dumps(FORMAT_SCHEMA))
doc1 = (tmp_path / 'doc1.json')
doc1.write_text(json.dumps(PASSING_DOCUMENT))
run_line_simple(['--schemafile', str(schemafile), str(doc1)]) |
def make_custom_sort(orders):
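# Rank keys by position in each order list: earlier keys get more negative ranks and sort first; keys absent from the order default to 0 and sort last.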
orders = [{k: (- i) for (i, k) in enumerate(reversed(order), 1)} for order in orders]
def process(stuff):
if isinstance(stuff, dict):
l = [(k, process(v)) for (k, v) in stuff.items()]
keys = set(stuff)
for order in orders:
if (keys.issubset(order) or keys.issuperset(order)):
return OrderedDict(sorted(l, key=(lambda x: order.get(x[0], 0))))
return OrderedDict(sorted(l))
if isinstance(stuff, list):
return [process(x) for x in stuff]
return stuff
return process |
class MultiEpochSampler(torch.utils.data.Sampler):
def __init__(self, data_source, num_epochs, start_itr=0, batch_size=128):
self.data_source = data_source
self.num_samples = len(self.data_source)
self.num_epochs = num_epochs
self.start_itr = start_itr
self.batch_size = batch_size
if ((not isinstance(self.num_samples, int)) or (self.num_samples <= 0)):
raise ValueError('num_samples should be a positive integer value, but got num_samples={}'.format(self.num_samples))
def __iter__(self):
n = len(self.data_source)
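# Number of epochs still needed once the samples already consumed (start_itr * batch_size) are discounted.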
num_epochs = int(np.ceil((((n * self.num_epochs) - (self.start_itr * self.batch_size)) / float(n))))
out = [torch.randperm(n) for epoch in range(self.num_epochs)][(- num_epochs):]
out[0] = out[0][((self.start_itr * self.batch_size) % n):]
output = torch.cat(out).tolist()
print(('Length dataset output is %d' % len(output)))
return iter(output)
def __len__(self):
return ((len(self.data_source) * self.num_epochs) - (self.start_itr * self.batch_size)) |
def test_default_caps_in_W3C(monkeypatch, testdir):
capabilities = {'browserName': 'chrome', 'bstack:options': {}}
monkeypatch.setenv('BROWSERSTACK_USERNAME', 'foo')
monkeypatch.setenv('BROWSERSTACK_ACCESS_KEY', 'bar')
variables = testdir.makefile('.json', '{{"capabilities": {}}}'.format(json.dumps(capabilities)))
file_test = testdir.makepyfile("\n import pytest\n @pytest.mark.nondestructive\n def test_bstack_capabilities(driver_kwargs):\n assert driver_kwargs['options'].capabilities['bstack:options'] == {\n 'userName': 'foo',\n 'accessKey': 'bar',\n 'sessionName': 'test_default_caps_in_W3C.test_bstack_capabilities'\n }\n ")
testdir.quick_qa('--driver', 'BrowserStack', '--variables', variables, file_test, passed=1) |
def test_as_composite_bloq():
tb = TestAtom()
assert (not tb.supports_decompose_bloq())
cb = tb.as_composite_bloq()
assert isinstance(cb, CompositeBloq)
bloqs = list(cb.bloq_instances)
assert (len(bloqs) == 1)
assert (bloqs[0].bloq == tb)
cb2 = cb.as_composite_bloq()
assert (cb is cb2) |
def _get_config(config_name, subfolder):
if (config_name is not None):
with open(os.path.join(os.path.dirname(__file__), 'config', subfolder, '{}.yaml'.format(config_name)), 'r') as f:
try:
config_dict = yaml.safe_load(f)
except yaml.YAMLError as exc:
assert False, '{}.yaml error: {}'.format(config_name, exc)
return config_dict |
def reg_event(bot):
gif_media_id = functools.partial(_gif_media_id, bot=bot)
def media_id_by(keyword):
img = meme.image_url(keyword)
if img:
media_id = gif_media_id(*img)
logger.info('image: "%s", media_id: %s', img, media_id)
return media_id
@bot.register(msg_types=TEXT, except_self=False)
def reply(msg: Message):
(keyword, times) = (None, 0)
if bot.setting.suffix_reply:
(keyword, times) = keyword_by_suffix(msg.text)
if ((not keyword) and bot.setting.at_reply and msg.is_at and isinstance(msg.sender, Group)):
(keyword, times) = keyword_by_at(msg.text, msg.sender.self.name)
if keyword:
logger.info('%s searched keyword "%s" x %d', bot.self.name, keyword, times)
for media_id in pool.map(media_id_by, ([keyword] * times), chunksize=times):
msg.reply_image('.gif', media_id=media_id) |
def _vgg_loader(arch: str) -> Callable[(..., torchvision.models.VGG)]:
loader = cast(Callable[(..., torchvision.models.VGG)], getattr(torchvision.models, arch))
def vgg(pretrained: bool=False, framework: str='torch', progress: bool=True, num_classes: int=1000) -> torchvision.models.VGG:
if (pretrained and (num_classes != 1000)):
raise RuntimeError
model = loader(pretrained=False, num_classes=num_classes)
if (not pretrained):
return model
state_dict = hub.load_state_dict_from_url(select_url(arch, framework), progress=progress, check_hash=True)
model.load_state_dict(state_dict)
return model
vgg.__doc__ = _make_vgg_docstring(arch)
return vgg |
def load_resume_state(opt):
resume_state_path = None
if opt['auto_resume']:
state_path = osp.join('experiments', opt['name'], 'training_states')
if osp.isdir(state_path):
states = list(scandir(state_path, suffix='state', recursive=False, full_path=False))
if (len(states) != 0):
states = [float(v.split('.state')[0]) for v in states]
resume_state_path = osp.join(state_path, f'{max(states):.0f}.state')
opt['path']['resume_state'] = resume_state_path
elif opt['path'].get('resume_state'):
resume_state_path = opt['path']['resume_state']
if (resume_state_path is None):
resume_state = None
else:
device_id = torch.cuda.current_device()
resume_state = torch.load(resume_state_path, map_location=(lambda storage, loc: storage.cuda(device_id)))
check_resume(opt, resume_state['iter'])
return resume_state |
class AcceptRejectTests(unittest.TestCase):
def test_receive_accept(self):
with unittest.mock.patch('websockets.client.generate_key', return_value=KEY):
client = ClientProtocol(parse_uri('ws://example.com/test'))
client.connect()
client.receive_data((f'HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: {ACCEPT}\r\nDate: {DATE}\r\n\r\n').encode())
[response] = client.events_received()
self.assertIsInstance(response, Response)
self.assertEqual(client.data_to_send(), [])
self.assertFalse(client.close_expected())
self.assertEqual(client.state, OPEN)
def test_receive_reject(self):
with unittest.mock.patch('websockets.client.generate_key', return_value=KEY):
client = ClientProtocol(parse_uri('ws://example.com/test'))
client.connect()
client.receive_data((f'HTTP/1.1 404 Not Found\r\nDate: {DATE}\r\nContent-Length: 13\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\nSorry folks.\n').encode())
[response] = client.events_received()
self.assertIsInstance(response, Response)
self.assertEqual(client.data_to_send(), [])
self.assertTrue(client.close_expected())
self.assertEqual(client.state, CONNECTING)
def test_accept_response(self):
with unittest.mock.patch('websockets.client.generate_key', return_value=KEY):
client = ClientProtocol(parse_uri('ws://example.com/test'))
client.connect()
client.receive_data((f'HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: {ACCEPT}\r\nDate: {DATE}\r\n\r\n').encode())
[response] = client.events_received()
self.assertEqual(response.status_code, 101)
self.assertEqual(response.reason_phrase, 'Switching Protocols')
self.assertEqual(response.headers, Headers({'Upgrade': 'websocket', 'Connection': 'Upgrade', 'Sec-WebSocket-Accept': ACCEPT, 'Date': DATE}))
self.assertIsNone(response.body)
def test_reject_response(self):
with unittest.mock.patch('websockets.client.generate_key', return_value=KEY):
client = ClientProtocol(parse_uri('ws://example.com/test'))
client.connect()
client.receive_data((f'HTTP/1.1 404 Not Found\r\nDate: {DATE}\r\nContent-Length: 13\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\nSorry folks.\n').encode())
[response] = client.events_received()
self.assertEqual(response.status_code, 404)
self.assertEqual(response.reason_phrase, 'Not Found')
self.assertEqual(response.headers, Headers({'Date': DATE, 'Content-Length': '13', 'Content-Type': 'text/plain; charset=utf-8', 'Connection': 'close'}))
self.assertEqual(response.body, b'Sorry folks.\n')
def test_no_response(self):
with unittest.mock.patch('websockets.client.generate_key', return_value=KEY):
client = ClientProtocol(parse_uri('ws://example.com/test'))
client.connect()
client.receive_eof()
self.assertEqual(client.events_received(), [])
def test_partial_response(self):
with unittest.mock.patch('websockets.client.generate_key', return_value=KEY):
client = ClientProtocol(parse_uri('ws://example.com/test'))
client.connect()
client.receive_data(b'HTTP/1.1 101 Switching Protocols\r\n')
client.receive_eof()
self.assertEqual(client.events_received(), [])
def test_random_response(self):
with unittest.mock.patch('websockets.client.generate_key', return_value=KEY):
client = ClientProtocol(parse_uri('ws://example.com/test'))
client.connect()
client.receive_data(b'220 smtp.invalid\r\n')
client.receive_data(b'250 Hello relay.invalid\r\n')
client.receive_data(b'250 Ok\r\n')
client.receive_data(b'250 Ok\r\n')
client.receive_eof()
self.assertEqual(client.events_received(), [])
def make_accept_response(self, client):
request = client.connect()
return Response(status_code=101, reason_phrase='Switching Protocols', headers=Headers({'Upgrade': 'websocket', 'Connection': 'Upgrade', 'Sec-WebSocket-Accept': accept_key(request.headers['Sec-WebSocket-Key'])}))
def test_basic(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
def test_missing_connection(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
del response.headers['Connection']
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHeader) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'missing Connection header')
def test_invalid_connection(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
del response.headers['Connection']
response.headers['Connection'] = 'close'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHeader) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'invalid Connection header: close')
def test_missing_upgrade(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
del response.headers['Upgrade']
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHeader) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'missing Upgrade header')
def test_invalid_upgrade(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
del response.headers['Upgrade']
response.headers['Upgrade'] = 'h2c'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHeader) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'invalid Upgrade header: h2c')
def test_missing_accept(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
del response.headers['Sec-WebSocket-Accept']
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHeader) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'missing Sec-WebSocket-Accept header')
def test_multiple_accept(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Accept'] = ACCEPT
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHeader) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'invalid Sec-WebSocket-Accept header: more than one Sec-WebSocket-Accept header found')
def test_invalid_accept(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
del response.headers['Sec-WebSocket-Accept']
response.headers['Sec-WebSocket-Accept'] = ACCEPT
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHeader) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), f'invalid Sec-WebSocket-Accept header: {ACCEPT}')
def test_no_extensions(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.extensions, [])
def test_no_extension(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientOpExtensionFactory()])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.extensions, [OpExtension()])
def test_extension(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientRsv2ExtensionFactory()])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-rsv2'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.extensions, [Rsv2Extension()])
def test_unexpected_extension(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHandshake) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'no extensions supported')
def test_unsupported_extension(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientRsv2ExtensionFactory()])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHandshake) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), "Unsupported extension: name = x-op, params = [('op', None)]")
def test_supported_extension_parameters(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientOpExtensionFactory('this')])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op=this'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.extensions, [OpExtension('this')])
def test_unsupported_extension_parameters(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientOpExtensionFactory('this')])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op=that'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHandshake) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), "Unsupported extension: name = x-op, params = [('op', 'that')]")
def test_multiple_supported_extension_parameters(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientOpExtensionFactory('this'), ClientOpExtensionFactory('that')])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op=that'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.extensions, [OpExtension('that')])
def test_multiple_extensions(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientOpExtensionFactory(), ClientRsv2ExtensionFactory()])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op'
response.headers['Sec-WebSocket-Extensions'] = 'x-rsv2'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.extensions, [OpExtension(), Rsv2Extension()])
def test_multiple_extensions_order(self):
client = ClientProtocol(parse_uri('wss://example.com/'), extensions=[ClientOpExtensionFactory(), ClientRsv2ExtensionFactory()])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Extensions'] = 'x-rsv2'
response.headers['Sec-WebSocket-Extensions'] = 'x-op; op'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.extensions, [Rsv2Extension(), OpExtension()])
def test_no_subprotocols(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertIsNone(client.subprotocol)
def test_no_subprotocol(self):
client = ClientProtocol(parse_uri('wss://example.com/'), subprotocols=['chat'])
response = self.make_accept_response(client)
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertIsNone(client.subprotocol)
def test_subprotocol(self):
client = ClientProtocol(parse_uri('wss://example.com/'), subprotocols=['chat'])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Protocol'] = 'chat'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.subprotocol, 'chat')
def test_unexpected_subprotocol(self):
client = ClientProtocol(parse_uri('wss://example.com/'))
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Protocol'] = 'chat'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHandshake) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'no subprotocols supported')
def test_multiple_subprotocols(self):
client = ClientProtocol(parse_uri('wss://example.com/'), subprotocols=['superchat', 'chat'])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Protocol'] = 'superchat'
response.headers['Sec-WebSocket-Protocol'] = 'chat'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHandshake) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'multiple subprotocols: superchat, chat')
def test_supported_subprotocol(self):
client = ClientProtocol(parse_uri('wss://example.com/'), subprotocols=['superchat', 'chat'])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Protocol'] = 'chat'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, OPEN)
self.assertEqual(client.subprotocol, 'chat')
def test_unsupported_subprotocol(self):
client = ClientProtocol(parse_uri('wss://example.com/'), subprotocols=['superchat', 'chat'])
response = self.make_accept_response(client)
response.headers['Sec-WebSocket-Protocol'] = 'otherchat'
client.receive_data(response.serialize())
[response] = client.events_received()
self.assertEqual(client.state, CONNECTING)
with self.assertRaises(InvalidHandshake) as raised:
raise client.handshake_exc
self.assertEqual(str(raised.exception), 'unsupported subprotocol: otherchat') |
def update(i: int, j: int, order, score):
edge_bump = 0
old_score = 0
new_score = 0
for k in range(j, (i + 1)):
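# Re-fit the parent set of every node between positions j and i: drop parents that no longer precede the node, then grow/shrink against the remaining candidates and record the new local score.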
z = order.get(k)
z_parents = order.get_parents(z)
edge_bump -= len(z_parents)
old_score += order.get_local_score(z)
candidates = [order.get(l) for l in range(0, k)]
for w in [w for w in z_parents if (w not in candidates)]:
z_parents.remove(w)
shrink(z, z_parents, score)
for w in z_parents:
candidates.remove(w)
grow(z, z_parents, candidates, score)
local_score = shrink(z, z_parents, score)
order.set_local_score(z, local_score)
edge_bump += len(z_parents)
new_score += local_score
return (edge_bump, (new_score - old_score)) |
def crf_inference_inf(img, probs, t=10, scale_factor=1, labels=21):
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
(h, w) = img.shape[:2]
n_labels = labels
d = dcrf.DenseCRF2D(w, h, n_labels)
unary = unary_from_softmax(probs)
unary = np.ascontiguousarray(unary)
img_c = np.ascontiguousarray(img)
d.setUnaryEnergy(unary)
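# Pairwise terms: a position-only smoothness kernel plus an appearance kernel over position and colour, both scaled by scale_factor.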
d.addPairwiseGaussian(sxy=(4 / scale_factor), compat=3)
d.addPairwiseBilateral(sxy=(83 / scale_factor), srgb=5, rgbim=np.copy(img_c), compat=3)
Q = d.inference(t)
return np.array(Q).reshape((n_labels, h, w)) |
def test_remove_all_and_version(tester: CommandTester, venvs_in_cache_dirs: list[str], venv_name: str, venv_cache: Path) -> None:
expected = {''}
tester.execute(f'--all {venvs_in_cache_dirs[0]}')
for name in venvs_in_cache_dirs:
assert (not (venv_cache / name).exists())
expected.add(f'Deleted virtualenv: {(venv_cache / name)}')
assert (set(tester.io.fetch_output().split('\n')) == expected) |
def _format_envvar(param):
(yield '.. envvar:: {}'.format(param.envvar))
(yield ' :noindex:')
(yield '')
if isinstance(param, click.Argument):
param_ref = param.human_readable_name
else:
param_ref = param.opts[0]
(yield _indent('Provide a default for :option:`{}`'.format(param_ref))) |
def test_call_inexisting_address(deploy_client: JSONRPCClient) -> None:
inexisting_address = (b'\x01\x02\x03\x04\x05' * 4)
assert (len(deploy_client.web3.eth.get_code(inexisting_address)) == 0)
transaction = {'from': deploy_client.address, 'to': inexisting_address, 'data': b'', 'value': 0}
assert (deploy_client.web3.eth.call(transaction) == b'') |
def test_init_sanity():
parent = QtWidgets.QMainWindow()
figsize = (1.0, 4.0)
dpi = 256
assert (figsize != default_figsize)
assert (dpi != default_dpi)
mplw = MatplotlibWidget(parent, figsize=figsize)
assert_widget_fields(mplw, parent, figsize, default_dpi)
mplw = MatplotlibWidget(parent, dpi=dpi)
assert_widget_fields(mplw, parent, default_figsize, dpi)
mplw = MatplotlibWidget(parent, figsize, dpi)
assert_widget_fields(mplw, parent, figsize, dpi)
mplw = MatplotlibWidget(figsize, dpi)
assert_widget_fields(mplw, default_parent, figsize, dpi)
mplw = MatplotlibWidget(figsize, dpi, parent)
assert_widget_fields(mplw, parent, figsize, dpi)
mplw = MatplotlibWidget(dpi=dpi, parent=parent)
assert_widget_fields(mplw, parent, default_figsize, dpi) |
def main(argv=sys.argv[1:]):
global _old_hook
dist = pkg_resources.get_distribution('pyladies')
parser = argparse.ArgumentParser(description='Everything you need to start your own PyLadies location')
parser.add_argument('handbook', help='read the handbook')
parser.add_argument('--version', action='version', default=argparse.SUPPRESS, version=dist.version, help="show program's version number and exit")
parsed_args = parser.parse_args(argv)
if parsed_args.handbook:
_handbook()
return _exit_code |
def loader(snr):
path = 'dataset/'
dataset = np.load(os.path.join(path, (('dataset_snr' + str(snr)) + '.npz')))
data = dataset['data']
label = dataset['label']
data = np.expand_dims(data, axis=4)
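# Flatten each sample's label map and one-hot encode the binary labels; the first 90% of samples become the training split.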
label1 = np.reshape(label, (label.shape[0], np.prod(label.shape[1:])))
label = keras.utils.to_categorical(label1, num_classes=2)
num = data.shape[0]
tmp = int((0.9 * num))
trainX = data[0:tmp]
trainY = label1[0:tmp]
trainY = keras.utils.to_categorical(trainY, num_classes=2)
testX = data[tmp:]
testY = label1[tmp:]
testY = keras.utils.to_categorical(testY, num_classes=2)
return (trainX, trainY, testX, testY) |
def test_importorskip(monkeypatch) -> None:
importorskip = pytest.importorskip
def f():
importorskip('asdlkj')
try:
sysmod = importorskip('sys')
assert (sysmod is sys)
excinfo = pytest.raises(pytest.skip.Exception, f)
assert (excinfo is not None)
excrepr = excinfo.getrepr()
assert (excrepr is not None)
assert (excrepr.reprcrash is not None)
path = Path(excrepr.reprcrash.path)
assert (path.stem == 'test_runner')
pytest.raises(SyntaxError, pytest.importorskip, 'x y z')
pytest.raises(SyntaxError, pytest.importorskip, 'x=y')
mod = types.ModuleType('hello123')
mod.__version__ = '1.3'
monkeypatch.setitem(sys.modules, 'hello123', mod)
with pytest.raises(pytest.skip.Exception):
pytest.importorskip('hello123', minversion='1.3.1')
mod2 = pytest.importorskip('hello123', minversion='1.3')
assert (mod2 == mod)
except pytest.skip.Exception:
assert False, f'spurious skip: {ExceptionInfo.from_current()}' |
def validate(model, dataloader, criterion):
model.eval()
device = model.device
epoch_start = time.time()
running_loss = 0.0
preds = []
golds = []
with torch.no_grad():
for batch in dataloader:
premises = batch['premise'].to(device)
premises_lengths = batch['premise_length'].to(device)
hypotheses = batch['hypothesis'].to(device)
hypotheses_lengths = batch['hypothesis_length'].to(device)
labels = batch['label'].to(device)
logits = model(premises, premises_lengths, hypotheses, hypotheses_lengths)
loss = criterion(logits.squeeze(1), labels)
running_loss += loss.item()
preds.extend(logits.squeeze(1).data.cpu().numpy())
golds.extend(labels.data.cpu().numpy())
p = pearsonr(preds, golds)
s = spearmanr(preds, golds)
epoch_time = (time.time() - epoch_start)
epoch_loss = (running_loss / len(dataloader))
epoch_accuracy = p[0]
return (epoch_time, epoch_loss, p[0], s[0]) |
class SwapNetworkProblemUnitary(ProblemUnitary):
def _decompose_(self, qubits) -> 'cirq.OP_TREE':
(yield from super()._decompose_(qubits))
(yield cirq.QubitPermutationGate(list(range(len(qubits)))[::(- 1)]).on(*qubits))
def _circuit_diagram_info_(self, args: 'cirq.CircuitDiagramInfoArgs') -> 'cirq.CircuitDiagramInfo':
excess_q = (self.num_qubits() - 2)
return cirq.CircuitDiagramInfo(wire_symbols=(('swap-network', f'g={self.gamma:.3f}') + tuple((f'#{((i + 2) + 1)}' for i in range(excess_q))))) |
def sample_sdf(num_sample, bandwidth, iso_val, sdf_dict, sdf_res, reduce):
start = time.time()
params = sdf_dict['param']
sdf_values = sdf_dict['value'].flatten()
n_sample = (sdf_res // reduce)
x = np.linspace(params[0], params[3], num=n_sample).astype(np.float32)
y = np.linspace(params[1], params[4], num=n_sample).astype(np.float32)
z = np.linspace(params[2], params[5], num=n_sample).astype(np.float32)
(z_vals, y_vals, x_vals) = np.meshgrid(z, y, x, indexing='ij')
print('x_vals', x_vals[(0, 0, ((sdf_res // reduce) - 1))])
x_original = np.linspace(params[0], params[3], num=(sdf_res + 1)).astype(np.float32)
y_original = np.linspace(params[1], params[4], num=(sdf_res + 1)).astype(np.float32)
z_original = np.linspace(params[2], params[5], num=(sdf_res + 1)).astype(np.float32)
x_ind = np.arange(n_sample).astype(np.int32)
y_ind = np.arange(n_sample).astype(np.int32)
z_ind = np.arange(n_sample).astype(np.int32)
(zv, yv, xv) = np.meshgrid(z_ind, y_ind, x_ind, indexing='ij')
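# Convert the subsampled (x, y, z) grid indices back into flat indices over the original (sdf_res + 1)^3 value array.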
choosen_ind = (((xv * reduce) + ((yv * (sdf_res + 1)) * reduce)) + ((zv * ((sdf_res + 1) ** 2)) * reduce))
choosen_ind = np.asarray(choosen_ind, dtype=np.int32).reshape((- 1))
vals = sdf_values[choosen_ind]
x_vals = x[xv.reshape((- 1))]
y_vals = y[yv.reshape((- 1))]
z_vals = z[zv.reshape((- 1))]
sdf_pt_val = np.expand_dims(vals, axis=(- 1))
print('sdf_pt_val.shape', sdf_pt_val.shape)
print('sample_sdf: {} s'.format((time.time() - start)))
return (sdf_pt_val, check_insideout(sdf_values, sdf_res, x_original, y_original, z_original)) |
@pytest.mark.end_to_end()
def test_raise_error_if_parametrization_produces_non_unique_tasks(tmp_path):
source = '\n from pytask import task\n\n for i in [0, 0]:\n @task(id=str(i))\n def task_func(i=i):\n pass\n '
tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
session = build(paths=tmp_path)
assert (session.exit_code == ExitCode.COLLECTION_FAILED)
assert isinstance(session.collection_reports[0].exc_info[1], ValueError) |
@BACKBONES.register_module()
class RSN(BaseBackbone):
def __init__(self, unit_channels=256, num_stages=4, num_units=4, num_blocks=[2, 2, 2, 2], num_steps=4, norm_cfg=dict(type='BN'), res_top_channels=64, expand_times=26):
norm_cfg = cp.deepcopy(norm_cfg)
num_blocks = cp.deepcopy(num_blocks)
super().__init__()
self.unit_channels = unit_channels
self.num_stages = num_stages
self.num_units = num_units
self.num_blocks = num_blocks
self.num_steps = num_steps
self.norm_cfg = norm_cfg
assert (self.num_stages > 0)
assert (self.num_steps > 1)
assert (self.num_units > 1)
assert (self.num_units == len(self.num_blocks))
self.top = ResNet_top(norm_cfg=norm_cfg)
self.multi_stage_rsn = nn.ModuleList([])
for i in range(self.num_stages):
if (i == 0):
has_skip = False
else:
has_skip = True
if (i != (self.num_stages - 1)):
gen_skip = True
gen_cross_conv = True
else:
gen_skip = False
gen_cross_conv = False
self.multi_stage_rsn.append(Single_stage_RSN(has_skip, gen_skip, gen_cross_conv, unit_channels, num_units, num_steps, num_blocks, norm_cfg, res_top_channels, expand_times))
def forward(self, x):
out_feats = []
skip1 = None
skip2 = None
x = self.top(x)
for i in range(self.num_stages):
(out, skip1, skip2, x) = self.multi_stage_rsn[i](x, skip1, skip2)
out_feats.append(out)
return out_feats
def init_weights(self, pretrained=None):
for m in self.multi_stage_rsn.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=0.01)
for m in self.top.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m) |
def setsub(a, b):
junks_a = []
useless_constraint = ['temperature', 'week', 'est ', 'quick', 'reminder', 'near']
for i in a:
flg = False
for j in b:
if similar(i, j):
flg = True
if (not flg):
junks_a.append(i)
for junk in junks_a:
flg = False
for item in useless_constraint:
if (item in junk):
flg = True
if (not flg):
return False
return True |
class MixedCfcCell(tf.keras.layers.Layer):
def __init__(self, units, hparams, **kwargs):
self.units = units
self.state_size = (units, units)
self.initializer = 'glorot_uniform'
self.recurrent_initializer = 'orthogonal'
self.forget_gate_bias = 1
if ('forget_bias' in hparams.keys()):
self.forget_gate_bias = hparams['forget_bias']
self.cfc = CfcCell(self.units, hparams)
super(MixedCfcCell, self).__init__(**kwargs)
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return (tf.zeros([batch_size, self.units], dtype=tf.float32), tf.zeros([batch_size, self.units], dtype=tf.float32))
def build(self, input_shape):
input_dim = input_shape[(- 1)]
if isinstance(input_shape[0], tuple):
input_dim = input_shape[0][(- 1)]
self.cfc.build(input_shape)
self.input_kernel = self.add_weight(shape=(input_dim, (4 * self.units)), initializer=self.initializer, name='input_kernel')
self.recurrent_kernel = self.add_weight(shape=(self.units, (4 * self.units)), initializer=self.recurrent_initializer, name='recurrent_kernel')
self.bias = self.add_weight(shape=(4 * self.units), initializer=tf.keras.initializers.Zeros(), name='bias')
self.built = True
def call(self, inputs, states, **kwargs):
(cell_state, ode_state) = states
elapsed = tf.zeros((1,), dtype=tf.float32)
if ((isinstance(inputs, tuple) or isinstance(inputs, list)) and (len(inputs) > 1)):
elapsed = inputs[1]
inputs = inputs[0]
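# LSTM-style gating below; the gated cell output is then integrated by the CfC (ODE) cell together with the elapsed time.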
z = ((tf.matmul(inputs, self.input_kernel) + tf.matmul(ode_state, self.recurrent_kernel)) + self.bias)
(i, ig, fg, og) = tf.split(z, 4, axis=(- 1))
input_activation = tf.nn.tanh(i)
input_gate = tf.nn.sigmoid(ig)
forget_gate = tf.nn.sigmoid((fg + self.forget_gate_bias))
output_gate = tf.nn.sigmoid(og)
new_cell = ((cell_state * forget_gate) + (input_activation * input_gate))
ode_input = (tf.nn.tanh(new_cell) * output_gate)
(ode_output, new_ode_state) = self.cfc([ode_input, elapsed], [ode_state])
return (ode_output, [new_cell, new_ode_state[0]]) |
class KeyChecker():
def __init__(self, keys, warn_empty=True, important=default_important, essential=None):
self.keys = keys
self.warn_empty = warn_empty
self.important = important
self.essential = essential
if (self.essential is None):
self.essential = [] |
@pytest.mark.parametrize('archive_file', ['.git_archival.txt', '.hg_archival.txt'])
def test_archive(wd: WorkDir, monkeypatch: pytest.MonkeyPatch, archive_file: str) -> None:
monkeypatch.chdir(wd.cwd)
sha = 'a1bda3d984d1a40d7b00ae1d0869354d6d503001'
(wd.cwd / archive_file).write_text(f'node: {sha}', encoding='utf-8')
(wd.cwd / 'data').mkdir()
((wd.cwd / 'data') / 'datafile').touch()
datalink = ((wd.cwd / 'data') / 'datalink')
if (sys.platform != 'win32'):
datalink.symlink_to('data/datafile')
else:
os.link('data/datafile', datalink)
assert (set(find_files()) == _sep({archive_file, 'data/datafile', 'data/datalink'})) |
def main(options):
if (options['model']['name'] == 'GaLR'):
from layers import GaLR as models
else:
raise NotImplementedError
vocab = deserialize_vocab(options['dataset']['vocab_path'])
vocab_word = sorted(vocab.word2idx.items(), key=(lambda x: x[1]), reverse=False)
vocab_word = [tup[0] for tup in vocab_word]
test_loader = data.get_test_loader(vocab, options)
model = models.factory(options['model'], vocab_word, cuda=True, data_parallel=False)
print('Model has {} parameters'.format(utils.params_count(model)))
if os.path.isfile(options['optim']['resume']):
print("=> loading checkpoint '{}'".format(options['optim']['resume']))
checkpoint = torch.load(options['optim']['resume'])
start_epoch = checkpoint['epoch']
best_rsum = checkpoint['best_rsum']
model.load_state_dict(checkpoint['model'])
else:
print("=> no checkpoint found at '{}'".format(options['optim']['resume']))
sims = engine.validate_test(test_loader, model)
((r1i, r5i, r10i, medri, meanri), _) = utils.acc_i2t2(sims)
logging.info(('Image to text: %.1f, %.1f, %.1f, %.1f, %.1f' % (r1i, r5i, r10i, medri, meanri)))
((r1t, r5t, r10t, medrt, meanrt), _) = utils.acc_t2i2(sims)
logging.info(('Text to image: %.1f, %.1f, %.1f, %.1f, %.1f' % (r1t, r5t, r10t, medrt, meanrt)))
currscore = ((((((r1t + r5t) + r10t) + r1i) + r5i) + r10i) / 6.0)
all_score = 'r1i:{} r5i:{} r10i:{} medri:{} meanri:{}\n r1t:{} r5t:{} r10t:{} medrt:{} meanrt:{}\n sum:{}\n ------\n'.format(r1i, r5i, r10i, medri, meanri, r1t, r5t, r10t, medrt, meanrt, currscore)
print(all_score)
return [r1i, r5i, r10i, r1t, r5t, r10t, currscore] |
def test_do_posterior_predictive():
with pm.Model() as m:
x = pm.Normal('x', 0, 1)
y = pm.Normal('y', x, 1)
z = pm.Normal('z', (y + x), 0.001)
idata_m = az.from_dict({'x': np.full((2, 500), 25), 'y': np.full((2, 500), np.nan), 'z': np.full((2, 500), np.nan)})
m_do = do(m, {y: 100.0})
with m_do:
idata_do = pm.sample_posterior_predictive(idata_m, var_names='z')
assert (120 < idata_do.posterior_predictive['z'].mean() < 130) |
def add_args(parser, cfg, prefix=''):
for (k, v) in cfg.items():
if isinstance(v, str):
parser.add_argument((('--' + prefix) + k))
elif isinstance(v, int):
parser.add_argument((('--' + prefix) + k), type=int)
elif isinstance(v, float):
parser.add_argument((('--' + prefix) + k), type=float)
elif isinstance(v, bool):
parser.add_argument((('--' + prefix) + k), action='store_true')
elif isinstance(v, dict):
add_args(parser, v, (k + '.'))
elif isinstance(v, Iterable):
parser.add_argument((('--' + prefix) + k), type=type(v[0]), nargs='+')
else:
print('cannot parse key {} of type {}'.format((prefix + k), type(v)))
return parser |
def read_dataset(fid, key):
dsid = DSET_NAMES[key['name']]
dset = fid[('/PWLR/' + dsid)]
if (dset.ndim == 3):
dims = ['y', 'x', 'level']
else:
dims = ['y', 'x']
data = xr.DataArray(da.from_array(dset[()], chunks=CHUNK_SIZE), name=key['name'], dims=dims).astype(np.float32)
data = xr.where((data > 1e+30), np.nan, data)
dset_attrs = dict(dset.attrs)
data.attrs.update(dset_attrs)
return data |
@then('section.{propname}.is_linked_to_previous is True')
def then_section_hdrftr_prop_is_linked_to_previous_is_True(context: Context, propname: str):
actual = getattr(context.section, propname).is_linked_to_previous
expected = True
assert (actual == expected), ('section.%s.is_linked_to_previous is %s' % (propname, actual)) |
def warn_population_size(*, step: Union[(BlockedStep, CompoundStep)], initial_points: Sequence[PointType], model: Model, chains: int):
has_demcmc = np.any([isinstance(m, DEMetropolis) for m in (step.methods if isinstance(step, CompoundStep) else [step])])
initial_point_model_size = sum((initial_points[0][n.name].size for n in model.value_vars))
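# DEMetropolis builds proposals from differences between chains, so the population should exceed the number of model dimensions.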
if (has_demcmc and (chains < 3)):
raise ValueError('DEMetropolis requires at least 3 chains. For this {}-dimensional model you should use {} chains'.format(initial_point_model_size, (initial_point_model_size + 1)))
if (has_demcmc and (chains <= initial_point_model_size)):
warnings.warn('DEMetropolis should be used with more chains than dimensions! (The model has {} dimensions.)'.format(initial_point_model_size), UserWarning, stacklevel=2)
return |
@pytest.fixture()
def cs_panties_pickup(default_generator_params) -> PickupEntry:
cs_pickup_database = default_database.pickup_database_for_game(RandovaniaGame.CAVE_STORY)
return PickupEntry(name="Curly's Panties", model=PickupModel(game=RandovaniaGame.CAVE_STORY, name=''), pickup_category=cs_pickup_database.pickup_categories['useless'], broad_category=cs_pickup_database.pickup_categories['useless'], progression=((ItemResourceInfo(resource_index=22, long_name="Curly's Panties", short_name='panties', max_capacity=1, extra=frozendict({'it+': 35, 'flag': 7133, 'trade': 'lewd', 'text': "Got =Curly's Underwear=!"})), 1),), generator_params=default_generator_params, resource_lock=None, unlocks_resource=False) |
class Effect6021(BaseEffect):
type = 'passive'
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Energy Nosferatu')), 'maxRange', src.getModifiedItemAttr('eliteBonusReconShip3'), skill='Recon Ships', **kwargs) |
@pytest.fixture(scope='session')
def swath_def_1d_xarray_dask():
chunks = 5
tlons_1d = xr.DataArray(da.from_array(np.array([11.280789, 12.649354, 12.080402]), chunks=chunks), dims=('my_dim1',))
tlats_1d = xr.DataArray(da.from_array(np.array([56.011037, 55.629675, 55.641535]), chunks=chunks), dims=('my_dim1',))
return SwathDefinition(lons=tlons_1d, lats=tlats_1d) |
def deepspeed_init(trainer, num_training_steps, resume_from_checkpoint=None, inference=False):
import deepspeed
from deepspeed.utils import logger as ds_logger
model = trainer.model
args = trainer.args
if hasattr(trainer, 'hf_deepspeed_config_orig'):
hf_deepspeed_config = deepcopy(trainer.hf_deepspeed_config_orig)
else:
hf_deepspeed_config = args.hf_deepspeed_config
trainer.hf_deepspeed_config_orig = deepcopy(args.hf_deepspeed_config)
hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)
config = hf_deepspeed_config.config
ds_logger.setLevel(args.get_process_log_level())
if inference:
if (not hf_deepspeed_config.is_zero3()):
raise ValueError('ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config')
hf_deepspeed_config.del_config_sub_tree('optimizer')
hf_deepspeed_config.del_config_sub_tree('lr_scheduler')
(optimizer, lr_scheduler) = (None, None)
model_parameters = None
else:
trainer.optimizer = None
(optimizer, lr_scheduler) = deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps)
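# Only parameters that require gradients are handed to deepspeed.initialize; in inference mode no optimizer or scheduler is created.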
model_parameters = list(filter((lambda p: p.requires_grad), model.parameters()))
kwargs = {'model': model, 'model_parameters': model_parameters, 'config_params': config, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
(deepspeed_engine, optimizer, _, lr_scheduler) = deepspeed.initialize(**kwargs)
if (resume_from_checkpoint is not None):
import glob
deepspeed_checkpoint_dirs = sorted(glob.glob(f'{resume_from_checkpoint}/global_step*'))
if (len(deepspeed_checkpoint_dirs) > 0):
logger.info(f'Attempting to resume from {resume_from_checkpoint}')
(load_path, _) = deepspeed_engine.load_checkpoint(resume_from_checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
if (load_path is None):
raise ValueError(f'[deepspeed] failed to resume from checkpoint {resume_from_checkpoint}')
else:
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
return (deepspeed_engine, optimizer, lr_scheduler) |
def main():
learner_ip = get_environ()
args = argparser()
param_queue = Queue(maxsize=3)
procs = [Process(target=exploration, args=(args, (- 1), param_queue)), Process(target=recv_param, args=(learner_ip, (- 1), param_queue))]
for p in procs:
p.start()
for p in procs:
p.join()
return True |
def mypycify(paths: list[str], *, only_compile_paths: (Iterable[str] | None)=None, verbose: bool=False, opt_level: str='3', debug_level: str='1', strip_asserts: bool=False, multi_file: bool=False, separate: (bool | list[tuple[(list[str], (str | None))]])=False, skip_cgen_input: (Any | None)=None, target_dir: (str | None)=None, include_runtime_files: (bool | None)=None) -> list[Extension]:
compiler_options = CompilerOptions(strip_asserts=strip_asserts, multi_file=multi_file, verbose=verbose, separate=(separate is not False), target_dir=target_dir, include_runtime_files=include_runtime_files)
(groups, group_cfilenames) = mypyc_build(paths, only_compile_paths=only_compile_paths, compiler_options=compiler_options, separate=separate, skip_cgen_input=skip_cgen_input)
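# mypyc_build yields the module groups and generated C files; the rest of this function wires them into setuptools Extension objects with per-compiler flags.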
setup_mypycify_vars()
compiler: Any = ccompiler.new_compiler()
sysconfig.customize_compiler(compiler)
build_dir = compiler_options.target_dir
cflags: list[str] = []
if (compiler.compiler_type == 'unix'):
cflags += [f'-O{opt_level}', f'-g{debug_level}', '-Werror', '-Wno-unused-function', '-Wno-unused-label', '-Wno-unreachable-code', '-Wno-unused-variable', '-Wno-unused-command-line-argument', '-Wno-unknown-warning-option', '-Wno-unused-but-set-variable', '-Wno-ignored-optimization-argument', '-Wno-cpp']
elif (compiler.compiler_type == 'msvc'):
if (opt_level == '0'):
opt_level = 'd'
elif (opt_level in ('1', '2', '3')):
opt_level = '2'
if (debug_level == '0'):
debug_level = 'NONE'
elif (debug_level == '1'):
debug_level = 'FASTLINK'
elif (debug_level in ('2', '3')):
debug_level = 'FULL'
cflags += [f'/O{opt_level}', f'/DEBUG:{debug_level}', '/wd4102', '/wd4101', '/wd4146']
if multi_file:
cflags += ['/GL-', '/wd9025']
shared_cfilenames = []
if (not compiler_options.include_runtime_files):
for name in RUNTIME_C_FILES:
rt_file = os.path.join(build_dir, name)
with open(os.path.join(include_dir(), name), encoding='utf-8') as f:
write_file(rt_file, f.read())
shared_cfilenames.append(rt_file)
extensions = []
for ((group_sources, lib_name), (cfilenames, deps)) in zip(groups, group_cfilenames):
if lib_name:
extensions.extend(build_using_shared_lib(group_sources, lib_name, (cfilenames + shared_cfilenames), deps, build_dir, cflags))
else:
extensions.extend(build_single_module(group_sources, (cfilenames + shared_cfilenames), cflags))
return extensions |
def test_colored_ansi_esc_caplogtext(pytester: Pytester) -> None:
pytester.makepyfile("\n import logging\n\n logger = logging.getLogger(__name__)\n\n def test_foo(caplog):\n logger.info('text going to logger from call')\n assert '\x1b' not in caplog.text\n ")
result = pytester.runpytest('--log-level=INFO', '--color=yes')
assert (result.ret == 0) |
@admin.register(Role)
class RoleAdmin(admin.ModelAdmin):
search_fields = ('user__username', 'user__email')
list_filter = ('member', 'manager', 'editor', 'reviewer')
list_display = ('user', 'email', 'members', 'managers', 'editors', 'reviewers')
def get_queryset(self, request):
return Role.objects.prefetch_related('member', 'manager', 'editor', 'reviewer').annotate(Count('member'), Count('manager'), Count('editor'), Count('reviewer'), sites_count=Value(Site.objects.count()))
def render_all_sites_or_join(self, obj, field_name: str) -> str:
if (getattr(obj, f'{field_name}__count', 0) == obj.sites_count):
return 'all Sites'
return ', '.join([site.domain for site in getattr(obj, field_name).all()])
def email(self, obj):
return obj.user.email
def members(self, obj):
return self.render_all_sites_or_join(obj, 'member')
def managers(self, obj):
return self.render_all_sites_or_join(obj, 'manager')
def editors(self, obj):
return self.render_all_sites_or_join(obj, 'editor')
def reviewers(self, obj):
return self.render_all_sites_or_join(obj, 'reviewer') |
class GeneratorReach():
@classmethod
def reach_from_state(cls, game: GameDescription, initial_state: State) -> GeneratorReach:
raise NotImplementedError
@property
def game(self) -> GameDescription:
raise NotImplementedError
def victory_condition_satisfied(self) -> bool:
return self.game.victory_condition.satisfied(self.state.resources, self.state.energy, self.state.resource_database)
def iterate_nodes(self) -> Iterator[Node]:
(yield from self.game.region_list.iterate_nodes())
@property
def state(self) -> State:
raise NotImplementedError
def advance_to(self, new_state: State, is_safe: bool=False) -> None:
raise NotImplementedError
def act_on(self, node: ResourceNode) -> None:
raise NotImplementedError
def node_context(self) -> NodeContext:
return self.state.node_context()
def is_reachable_node(self, node: Node) -> bool:
raise NotImplementedError
def connected_nodes(self) -> Iterator[Node]:
raise NotImplementedError
def nodes(self) -> Iterator[Node]:
raise NotImplementedError
def safe_nodes(self) -> Iterator[Node]:
raise NotImplementedError
def is_safe_node(self, node: Node) -> bool:
raise NotImplementedError
def unreachable_nodes_with_requirements(self) -> dict[(Node, RequirementSet)]:
raise NotImplementedError |