code stringlengths 281 23.7M |
|---|
def parse_args_and_config():
    """Parse CLI arguments, load the YAML config, prepare logging and the
    output image folder, and seed all RNGs.

    Returns:
        (args, new_config): the argparse namespace and the config namespace
        (with the selected torch device attached as ``new_config.device``).
    """
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--config', type=str, required=True, help='Path to the config file')
    parser.add_argument('--seed', type=int, default=1234, help='Set different seeds for diverse results')
    parser.add_argument('--exp', type=str, default='exp', help='Path for saving running related data.')
    parser.add_argument('--deg', type=str, required=True, help='Degradation')
    parser.add_argument('--path_y', type=str, required=True, help='Path of the test dataset.')
    parser.add_argument('--sigma_y', type=float, default=0.0, help='sigma_y')
    parser.add_argument('--eta', type=float, default=0.85, help='Eta')
    parser.add_argument('--simplified', action='store_true', help='Use simplified DDNM, without SVD')
    parser.add_argument('-i', '--image_folder', type=str, default='images', help='The folder name of samples')
    parser.add_argument('--deg_scale', type=float, default=0.0, help='deg_scale')
    parser.add_argument('--verbose', type=str, default='info', help='Verbose level: info | debug | warning | critical')
    parser.add_argument('--ni', action='store_true', help='No interaction. Suitable for Slurm Job launcher')
    parser.add_argument('--subset_start', type=int, default=(- 1))
    parser.add_argument('--subset_end', type=int, default=(- 1))
    parser.add_argument('-n', '--noise_type', type=str, default='gaussian', help='gaussian | 3d_gaussian | poisson | speckle')
    parser.add_argument('--add_noise', action='store_true')
    args = parser.parse_args()
    # Config files are resolved relative to the local `configs/` directory.
    with open(os.path.join('configs', args.config), 'r') as f:
        config = yaml.safe_load(f)
    new_config = dict2namespace(config)
    # Map the verbosity string onto a logging level; getattr returns None
    # (not an int) for unknown names, which we reject explicitly.
    level = getattr(logging, args.verbose.upper(), None)
    if (not isinstance(level, int)):
        raise ValueError('level {} not supported'.format(args.verbose))
    handler1 = logging.StreamHandler()
    formatter = logging.Formatter('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')
    handler1.setFormatter(formatter)
    # Configure the root logger so all module loggers inherit the handler.
    logger = logging.getLogger()
    logger.addHandler(handler1)
    logger.setLevel(level)
    os.makedirs(os.path.join(args.exp, 'image_samples'), exist_ok=True)
    args.image_folder = os.path.join(args.exp, 'image_samples', args.image_folder)
    if (not os.path.exists(args.image_folder)):
        os.makedirs(args.image_folder)
    else:
        # Folder already exists: overwrite silently under --ni (non-interactive
        # jobs), otherwise ask the user before wiping it.
        overwrite = False
        if args.ni:
            overwrite = True
        else:
            response = input(f'Image folder {args.image_folder} already exists. Overwrite? (Y/N)')
            if (response.upper() == 'Y'):
                overwrite = True
        if overwrite:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        else:
            print('Output image folder exists. Program halted.')
            sys.exit(0)
    device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
    logging.info('Using device: {}'.format(device))
    new_config.device = device
    # Seed torch/numpy RNGs for reproducibility across runs with the same seed.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    # NOTE: cudnn.benchmark trades bitwise determinism for speed.
    torch.backends.cudnn.benchmark = True
    return (args, new_config)
_params(node='x')
def test_if_reassignment_in_body(condition: str, satisfy_val: (int | None), fail_val: (int | None)) -> None:
    """Inference of a name that is conditionally reassigned inside nested ifs.

    The returned `x` may either keep its (unknown) parameter value or have
    been rebound to ``fail_val``, so inference yields exactly two results:
    Uninferable followed by the constant.

    NOTE(review): ``satisfy_val`` is never interpolated into the snippet —
    presumably it is consumed by the (not visible) parametrize decorator;
    confirm against the original test module.
    """
    node = builder.extract_node(f'''
def f(x, y):
    if {condition}:
        if y:
            x = {fail_val}
    return (
        x #
    )
''')
    inferred = node.inferred()
    assert (len(inferred) == 2)
    # First result: the untouched parameter value, which cannot be inferred.
    assert (inferred[0] is Uninferable)
    # Second result: the constant assigned on the reassignment path.
    assert isinstance(inferred[1], nodes.Const)
    assert (inferred[1].value == fail_val)
_criterion('wsc')
class WSCCriterion(FairseqCriterion):
    """Criterion for the Winograd Schema Challenge: scores the query span
    against candidate spans under a masked-LM and trains with either a
    margin loss or cross-entropy over (query, candidates)."""

    def __init__(self, args, task):
        super().__init__(args, task)
        # Optionally stream per-example predictions to a file for analysis.
        if (self.args.save_predictions is not None):
            self.prediction_h = open(self.args.save_predictions, 'w')
        else:
            self.prediction_h = None
        self.bpe = encoders.build_bpe(args)
        self.tokenizer = encoders.build_tokenizer(args)

    def __del__(self):
        # Best-effort close of the predictions file handle.
        if (self.prediction_h is not None):
            self.prediction_h.close()

    def add_args(parser):
        # NOTE(review): takes only `parser` — presumably decorated as a
        # @staticmethod upstream (decorator lost in extraction); confirm.
        parser.add_argument('--wsc-margin-alpha', type=float, metavar='A', default=1.0)
        parser.add_argument('--wsc-margin-beta', type=float, metavar='B', default=0.0)
        parser.add_argument('--wsc-cross-entropy', action='store_true', help='use cross entropy formulation instead of margin loss')
        parser.add_argument('--save-predictions', metavar='FILE', help='file to save predictions to')

    def get_masked_input(self, tokens, mask):
        """Return a copy of `tokens` with masked positions replaced by the
        task's mask token id."""
        masked_tokens = tokens.clone()
        masked_tokens[mask] = self.task.mask
        return masked_tokens

    def get_lprobs(self, model, tokens, mask):
        """Average log-probability of the original tokens over the masked
        span, one score per sequence in the batch."""
        (logits, _) = model(src_tokens=self.get_masked_input(tokens, mask))
        lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float)
        # Gather the log-prob of each gold token at its position.
        scores = lprobs.gather(2, tokens.unsqueeze((- 1))).squeeze((- 1))
        mask = mask.type_as(scores)
        # Mean over masked positions only.
        scores = ((scores * mask).sum(dim=(- 1)) / mask.sum(dim=(- 1)))
        return scores

    def get_loss(self, query_lprobs, cand_lprobs):
        """Cross-entropy over (query, candidates) or a hinge/margin loss
        pushing the query score above every candidate by at least beta."""
        if self.args.wsc_cross_entropy:
            return F.cross_entropy(torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0), query_lprobs.new([0]).long())
        else:
            return ((- query_lprobs) + (self.args.wsc_margin_alpha * ((cand_lprobs - query_lprobs) + self.args.wsc_margin_beta).clamp(min=0))).sum()

    def forward(self, model, sample, reduce=True):
        """Score each example, accumulate loss on positive-labeled examples,
        and track accuracy over all labeled examples."""
        (loss, nloss) = (0.0, 0)
        (ncorrect, nqueries) = (0, 0)
        for (i, label) in enumerate(sample['labels']):
            query_lprobs = self.get_lprobs(model, sample['query_tokens'][i].unsqueeze(0), sample['query_masks'][i].unsqueeze(0))
            cand_lprobs = self.get_lprobs(model, sample['candidate_tokens'][i], sample['candidate_masks'][i])
            # Predict positive iff the query outscores every candidate.
            pred = (query_lprobs >= cand_lprobs).all().item()
            if (label is not None):
                label = (1 if label else 0)
                ncorrect += (1 if (pred == label) else 0)
                nqueries += 1
                if label:
                    # Loss is only defined for positive instances.
                    nloss += 1
                    loss += self.get_loss(query_lprobs, cand_lprobs)
            id = sample['id'][i].item()
            if (self.prediction_h is not None):
                print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h)
        if (nloss == 0):
            # No positive example in the batch: emit a zero loss that still
            # participates in autograd.
            loss = torch.tensor(0.0, requires_grad=True)
        sample_size = (nqueries if (nqueries > 0) else 1)
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['nsentences'], 'sample_size': sample_size, 'ncorrect': ncorrect, 'nqueries': nqueries}
        return (loss, sample_size, logging_output)

    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-worker logging outputs into epoch-level stats.

        NOTE(review): takes only `logging_outputs` — presumably a
        @staticmethod upstream (decorator lost in extraction); confirm.
        """
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Loss is reported in bits (divide by ln 2).
        agg_output = {'loss': ((loss_sum / sample_size) / math.log(2)), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        ncorrect = sum((log.get('ncorrect', 0) for log in logging_outputs))
        nqueries = sum((log.get('nqueries', 0) for log in logging_outputs))
        if (nqueries > 0):
            agg_output['accuracy'] = (ncorrect / float(nqueries))
        return agg_output
def dataset_id_generator(dataset_spec, split, pool, sampler):
    """Infinite generator of class ids for episodic sampling.

    For every sampled episode description, yields one id per requested
    example: first a "flush" chunk (examples discarded because a class
    cursor wrapped around), then one chunk per remaining chunk size. Each
    chunk is padded up to its fixed size with ``dummy_dataset_id``
    (== num_classes), which marks padding entries.
    """
    chunk_sizes = sampler.compute_chunk_sizes()
    (flush_chunk_size, other_chunk_sizes) = (chunk_sizes[0], chunk_sizes[1:])
    class_set = dataset_spec.get_classes(split)
    num_classes = len(class_set)
    # One id past the last class index marks padding.
    dummy_dataset_id = num_classes
    total_images_per_class = dict(((class_idx, dataset_spec.get_total_images_per_class(class_set[class_idx], pool)) for class_idx in range(num_classes)))
    # Per-class cursor into that class's pool of examples.
    cursors = ([0] * num_classes)
    while True:
        flushed_dataset_indices = []
        selected_dataset_indices = [[] for _ in other_chunk_sizes]
        episode_description = sampler.sample_episode_description()
        for element in episode_description:
            # element = (class index, per-chunk request counts).
            (class_idx, distribution) = (element[0], element[1:])
            total_requested = sum(distribution)
            if (total_requested > total_images_per_class[class_idx]):
                raise ValueError("Requesting more images than what's available for the whole class")
            remaining = (total_images_per_class[class_idx] - cursors[class_idx])
            if (total_requested > remaining):
                # Not enough unseen examples left for this class: flush the
                # remainder and restart the class from the beginning.
                flushed_dataset_indices.extend(([class_idx] * remaining))
                cursors[class_idx] = 0
            for (num_to_allocate, dataset_indices) in zip(distribution, selected_dataset_indices):
                dataset_indices.extend(([class_idx] * num_to_allocate))
            cursors[class_idx] += total_requested
        # Pad every chunk to its fixed size with the dummy id.
        _pad(flushed_dataset_indices, flush_chunk_size, dummy_dataset_id)
        for (dataset_indices, chunk_size) in zip(selected_dataset_indices, other_chunk_sizes):
            _pad(dataset_indices, chunk_size, dummy_dataset_id)
        dataset_indices = itertools.chain(flushed_dataset_indices, *selected_dataset_indices)
        for i in dataset_indices:
            (yield i)
def test_should_fail_no_providers(context, app):
    """MultiCDNStorage must reject a configuration with an empty provider list.

    Args:
        context, app: pytest fixtures supplying the application context.
    """
    test_config = json.loads(_TEST_CONFIG_JSON)
    test_config['providers'] = []
    with pytest.raises(InvalidStorageConfigurationException) as exc_info:
        # Constructing is expected to raise; the unused `engine =` binding
        # from the original was dropped.
        MultiCDNStorage(context, **test_config)
    assert ('providers should be a dict of storage providers with their configs' in str(exc_info.value))
class Dataset(torch.utils.data.Dataset):
    """Image/mask dataset rooted at ``data_folder``.

    Expects an ``images/`` subfolder (and a parallel ``masks/`` subfolder)
    whose file names are returned by ``check_data``. Every other entry of
    ``data_folder`` is collected into ``objects_path``.
    """

    def __init__(self, data_folder, image_size):
        self.data_folder = data_folder
        if (not os.path.exists(self.data_folder)):
            raise Exception(f'[!] {self.data_folder} not exists.')
        self.objects_path = []
        self.image_name = check_data(data_folder)
        if (len(self.image_name) == 0):
            # Bug fix: the message used to interpolate the (empty) file list
            # instead of the folder that was searched.
            raise Exception(f'No image found in {data_folder}')
        for p in os.listdir(data_folder):
            if (p == 'images'):
                continue
            self.objects_path.append(os.path.join(data_folder, p))
        self.image_size = image_size

    def __getitem__(self, index):
        """Load the index-th image/mask pair and apply the shared transform."""
        image = Image.open(os.path.join(self.data_folder, 'images', self.image_name[index])).convert('RGB')
        mask = Image.open(os.path.join(self.data_folder, 'masks', self.image_name[index]))
        (image, mask) = transform(image, mask)
        return (image, mask)

    def __len__(self):
        return len(self.image_name)
.usefixtures('toggle_batching')
def test_nn_linear(tmp_path: Path) -> None:
    """Round-trip an nn.Linear's state through Snapshot and verify restore."""
    source = torch.nn.Linear(128, 64)
    target = torch.nn.Linear(128, 64)
    # Independently initialized modules must start out with different weights.
    assert not check_state_dict_eq(source.state_dict(), target.state_dict())
    snapshot = Snapshot.take(str(tmp_path), {'foo': source})
    snapshot.restore({'foo': target})
    # After restoring, the target mirrors the snapshotted source exactly.
    assert check_state_dict_eq(source.state_dict(), target.state_dict())
class LinuxMips32Stat(ctypes.Structure):
    """``struct stat`` layout for 32-bit MIPS Linux (o32 ABI).

    Field names/widths follow the kernel's asm/stat.h; ``st_pad*`` members
    are reserved padding words.
    """

    # Bug fix: ctypes consumes _pack_ when _fields_ is assigned, so it must
    # be set BEFORE _fields_. Previously it was assigned afterwards, where
    # it had no effect (the layout happened to be unchanged only because
    # every member is already 4-byte aligned).
    _pack_ = 4
    _fields_ = [
        ('st_dev', ctypes.c_uint32),
        ('st_pad1', (ctypes.c_int32 * 3)),
        ('st_ino', ctypes.c_uint32),
        ('st_mode', ctypes.c_uint32),
        ('st_nlink', ctypes.c_uint32),
        ('st_uid', ctypes.c_uint32),
        ('st_gid', ctypes.c_uint32),
        ('st_rdev', ctypes.c_uint32),
        ('st_pad2', (ctypes.c_uint32 * 2)),
        ('st_size', ctypes.c_uint32),
        ('st_pad3', ctypes.c_uint32),
        ('st_atime', ctypes.c_uint32),
        ('st_atime_ns', ctypes.c_uint32),
        ('st_mtime', ctypes.c_uint32),
        ('st_mtime_ns', ctypes.c_uint32),
        ('st_ctime', ctypes.c_uint32),
        ('st_ctime_ns', ctypes.c_uint32),
        ('st_blksize', ctypes.c_uint32),
        ('st_blocks', ctypes.c_uint32),
        ('st_pad4', (ctypes.c_uint32 * 14)),
    ]
class BatchTransferParameter1(DataElementGroup):
    """FinTS parameter group for batch (collective) credit transfers, v1."""

    # Maximum number of CreditTransferTransactionInformation entries per batch.
    max_transfer_count = DataElementField(type='num', max_length=7, _d='Maximale Anzahl CreditTransferTransactionInformation')
    # Whether a control-sum field is required ("Summenfeld benötigt").
    sum_amount_required = DataElementField(type='jn', _d='Summenfeld benotigt')
    # Whether booking each transfer individually is allowed ("Einzelbuchung erlaubt").
    single_booking_allowed = DataElementField(type='jn', _d='Einzelbuchung erlaubt')
def voteaction(mutator):
    """Decorator for entry-vote GraphQL mutations.

    Resolves the target entry and the sender (falling back to anonymous
    storage when permitted), precomputes vote state and karma constants,
    and forwards everything to the wrapped mutator.
    """
    from functools import wraps

    # Bug fix: the original body contained the bare no-op expression
    # `(mutator)` — almost certainly a mangled `@wraps(mutator)`; restored
    # so the wrapper keeps the mutator's metadata.
    @wraps(mutator)
    def decorator(_root, info, pk):
        (entry, sender) = (Entry.objects_published.select_related('author').only('id', 'author_id', 'author__karma').get(pk=pk), info.context.user)
        # Authors may not vote on their own entries.
        if (entry.author == sender):
            raise PermissionDenied(_("we couldn't handle your request. try again later."))
        if (not sender.is_authenticated):
            if settings.DISABLE_ANONYMOUS_VOTING:
                return Mutation()
            # Anonymous votes are tracked in session-backed storage.
            sender = AnonymousUserStorage(info.context)
        (upvoted, downvoted) = (sender.upvoted_entries, sender.downvoted_entries)
        (in_upvoted, in_downvoted) = (upvoted.filter(pk=pk).exists(), downvoted.filter(pk=pk).exists())
        (exceeded, reason) = sender.has_exceeded_vote_limit(against=entry.author)
        constants = (F('karma'), settings.KARMA_RATES['cost'], settings.KARMA_RATES['downvote'], settings.KARMA_RATES['upvote'])
        return mutator(_root, entry, sender, upvoted, downvoted, in_upvoted, in_downvoted, constants, exceeded, reason)
    return decorator
class Pad(object):
    """Transform that pads an image on all sides.

    Args:
        padding: a number (uniform pad) or a 2- or 4-element tuple.
        fill: pad value (number, string, or per-channel tuple).
    """

    def __init__(self, padding, fill=0):
        assert isinstance(padding, (numbers.Number, tuple))
        assert isinstance(fill, (numbers.Number, str, tuple))
        # Bug fix: collections.Sequence was removed in Python 3.10; the ABC
        # lives in collections.abc.
        if (isinstance(padding, collections.abc.Sequence) and (len(padding) not in [2, 4])):
            raise ValueError(('Padding must be an int or a 2, or 4 element tuple, not a ' + '{} element tuple'.format(len(padding))))
        self.padding = padding
        self.fill = fill

    def __call__(self, img):
        # Delegates to the functional pad (F is the transforms functional module).
        return F.pad(img, self.padding, self.fill)
_dataframe_method
_alias(groupby_column_name='by', sort_column_name='column')
def groupby_topk(df: pd.DataFrame, by: Union[(list, Hashable)], column: Hashable, k: int, dropna: bool=True, ascending: bool=True, ignore_index: bool=True) -> pd.DataFrame:
    """Return the top ``k`` rows of each group, ranked by ``column``.

    Args:
        df: frame to slice.
        by: grouping column(s).
        column: column whose smallest/largest values select rows.
        k: rows to keep per group (must be >= 1).
        dropna: forwarded to ``DataFrame.groupby``.
        ascending: True keeps the k smallest values, False the k largest.
        ignore_index: reset the index of the returned frame.

    Raises:
        ValueError: if ``k`` < 1.
    """
    if isinstance(by, Hashable):
        by = [by]
    check('by', by, [Hashable, list])
    check_column(df, [column])
    check_column(df, by)
    if (k < 1):
        raise ValueError('Numbers of rows per group to be returned must be greater than 0.')
    indices = df.groupby(by=by, dropna=dropna, sort=False, observed=True)
    indices = indices[column]
    try:
        # Fast path: numeric columns support nsmallest/nlargest directly.
        if ascending:
            indices = indices.nsmallest(n=k)
        else:
            indices = indices.nlargest(n=k)
    except TypeError:
        # Non-numeric columns: fall back to per-group sort + head.
        indices = indices.apply((lambda d: d.sort_values(ascending=ascending).head(k)))
    # The last level of the (group keys + original index) MultiIndex holds
    # the original row labels of the selected rows.
    indices = indices.index.get_level_values((- 1))
    if ignore_index:
        return df.loc[indices].reset_index(drop=True)
    return df.loc[indices]
class JWSzRestrictStateTest(unittest.TestCase):
    """Tests restriction of a JW-encoded state to a fixed-Sz subspace."""

    def test_jw_sz_restrict_state(self):
        """A vector supported exactly on an Sz sector keeps its norm after
        restriction, and the restricted vector has the sector's dimension."""
        n_sites = numpy.random.randint(1, 10)
        n_qubits = (2 * n_sites)
        # Random Sz eigenvalue: random magnitude in [0, n_sites], random sign.
        sz_int = (((- 1) ** numpy.random.randint(2)) * numpy.random.randint((n_sites + 1)))
        sz_value = (sz_int / 2)
        sz_indices = jw_sz_indices(sz_value, n_qubits)
        subspace_dimension = len(sz_indices)
        # Indicator vector supported exactly on the Sz subspace basis states.
        vector = numpy.zeros((2 ** n_qubits), dtype=float)
        vector[sz_indices] = 1
        restricted_vector = jw_sz_restrict_state(vector, sz_value)
        self.assertEqual(restricted_vector.shape[0], subspace_dimension)
        # Restriction must not lose any amplitude (norm is preserved).
        self.assertAlmostEqual(inner_product(vector, vector), inner_product(restricted_vector, restricted_vector))
def test_token_network_proxy_update_transfer(token_network_proxy, private_keys, token_proxy, chain_id, web3, contract_manager):
    """End-to-end check of TokenNetwork.update_transfer error paths and the
    happy path: open + fund a channel, reject updates on an open channel
    and with bad signatures, then close, update, settle, and verify final
    token balances reflect the exchanged transferred amounts.
    """
    token_network_address = to_canonical_address(token_network_proxy.proxy.address)
    # Two independent clients/proxies for the two channel participants.
    c1_client = JSONRPCClient(web3, private_keys[1])
    c1_proxy_manager = ProxyManager(rpc_client=c1_client, contract_manager=contract_manager, metadata=ProxyManagerMetadata(token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER, filters_start_at=GENESIS_BLOCK_NUMBER))
    c1_signer = LocalSigner(private_keys[1])
    c2_client = JSONRPCClient(web3, private_keys[2])
    c2_proxy_manager = ProxyManager(rpc_client=c2_client, contract_manager=contract_manager, metadata=ProxyManagerMetadata(token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER, filters_start_at=GENESIS_BLOCK_NUMBER))
    c1_token_network_proxy = c1_proxy_manager.token_network(address=token_network_address, block_identifier=BLOCK_ID_LATEST)
    c2_token_network_proxy = c2_proxy_manager.token_network(address=token_network_address, block_identifier=BLOCK_ID_LATEST)
    # Open a channel with a short settle timeout so settlement is reachable.
    channel_details = c1_token_network_proxy.new_netting_channel(partner=c2_client.address, settle_timeout=10, given_block_identifier=BLOCK_ID_LATEST)
    channel_identifier = channel_details.channel_identifier
    # Fund both participants and deposit into the channel.
    initial_balance = 100
    token_proxy.transfer(c1_client.address, initial_balance)
    token_proxy.transfer(c2_client.address, initial_balance)
    initial_balance_c1 = token_proxy.balance_of(c1_client.address)
    assert (initial_balance_c1 == initial_balance)
    initial_balance_c2 = token_proxy.balance_of(c2_client.address)
    assert (initial_balance_c2 == initial_balance)
    c1_token_network_proxy.approve_and_set_total_deposit(given_block_identifier=BLOCK_ID_LATEST, channel_identifier=channel_identifier, total_deposit=10, partner=c2_client.address)
    c2_token_network_proxy.approve_and_set_total_deposit(given_block_identifier=BLOCK_ID_LATEST, channel_identifier=channel_identifier, total_deposit=10, partner=c1_client.address)
    # Balance proofs signed by each side.
    transferred_amount_c1 = 1
    transferred_amount_c2 = 3
    balance_proof_c1 = BalanceProof(channel_identifier=channel_identifier, token_network_address=token_network_address, nonce=1, chain_id=chain_id, transferred_amount=transferred_amount_c1)
    balance_proof_c1.signature = encode_hex(LocalSigner(private_keys[1]).sign(data=balance_proof_c1.serialize_bin()))
    balance_proof_c2 = BalanceProof(channel_identifier=channel_identifier, token_network_address=token_network_address, nonce=2, chain_id=chain_id, transferred_amount=transferred_amount_c2)
    balance_proof_c2.signature = encode_hex(LocalSigner(private_keys[2]).sign(data=balance_proof_c2.serialize_bin()))
    non_closing_data = (balance_proof_c1.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF_UPDATE) + decode_hex(balance_proof_c1.signature))
    non_closing_signature = LocalSigner(c2_client.privkey).sign(data=non_closing_data)
    # update_transfer on a still-open channel must fail.
    with pytest.raises(RaidenUnrecoverableError) as exc:
        c2_token_network_proxy.update_transfer(channel_identifier=channel_identifier, partner=c1_client.address, balance_hash=balance_proof_c1.balance_hash, nonce=balance_proof_c1.nonce, additional_hash=decode_hex(balance_proof_c1.additional_hash), closing_signature=decode_hex(balance_proof_c1.signature), non_closing_signature=non_closing_signature, given_block_identifier=BLOCK_ID_LATEST)
    assert ('not in a closed state' in str(exc))
    # Close the channel with c2's balance proof.
    closing_data = (balance_proof_c2.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF) + decode_hex(balance_proof_c2.signature))
    c1_token_network_proxy.close(channel_identifier=channel_identifier, partner=c2_client.address, balance_hash=balance_proof_c2.balance_hash, nonce=balance_proof_c2.nonce, additional_hash=decode_hex(balance_proof_c2.additional_hash), non_closing_signature=decode_hex(balance_proof_c2.signature), closing_signature=c1_signer.sign(data=closing_data), given_block_identifier=BLOCK_ID_LATEST)
    # Empty signatures must be rejected.
    with pytest.raises(RaidenUnrecoverableError) as excinfo:
        c2_token_network_proxy.update_transfer(channel_identifier=channel_identifier, partner=c1_client.address, balance_hash=balance_proof_c1.balance_hash, nonce=balance_proof_c1.nonce, additional_hash=decode_hex(balance_proof_c1.additional_hash), closing_signature=b'', non_closing_signature=b'', given_block_identifier=BLOCK_ID_LATEST)
    assert (str(excinfo.value) == "Couldn't verify the balance proof signature")
    # A non-closing signature over data missing the closing signature is invalid.
    non_closing_data = balance_proof_c1.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF_UPDATE)
    non_closing_signature = LocalSigner(c2_client.privkey).sign(data=non_closing_data)
    with pytest.raises(RaidenUnrecoverableError):
        c2_token_network_proxy.update_transfer(channel_identifier=channel_identifier, partner=c1_client.address, balance_hash=balance_proof_c1.balance_hash, nonce=balance_proof_c1.nonce, additional_hash=decode_hex(balance_proof_c1.additional_hash), closing_signature=decode_hex(balance_proof_c1.signature), non_closing_signature=non_closing_signature, given_block_identifier=BLOCK_ID_LATEST)
    # Correctly formed update succeeds.
    non_closing_data = (balance_proof_c1.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF_UPDATE) + decode_hex(balance_proof_c1.signature))
    non_closing_signature = LocalSigner(c2_client.privkey).sign(data=non_closing_data)
    transaction_hash = c2_token_network_proxy.update_transfer(channel_identifier=channel_identifier, partner=c1_client.address, balance_hash=balance_proof_c1.balance_hash, nonce=balance_proof_c1.nonce, additional_hash=decode_hex(balance_proof_c1.additional_hash), closing_signature=decode_hex(balance_proof_c1.signature), non_closing_signature=non_closing_signature, given_block_identifier=BLOCK_ID_LATEST)
    assert is_tx_hash_bytes(transaction_hash)
    # Settling before the settlement window elapses must fail.
    with pytest.raises(BrokenPreconditionError) as exc:
        c1_token_network_proxy.settle(channel_identifier=channel_identifier, transferred_amount=transferred_amount_c1, locked_amount=0, locksroot=LOCKSROOT_OF_NO_LOCKS, partner=c2_client.address, partner_transferred_amount=transferred_amount_c2, partner_locked_amount=0, partner_locksroot=LOCKSROOT_OF_NO_LOCKS, given_block_identifier=BLOCK_ID_LATEST)
    assert ('cannot be settled before settlement window is over' in str(exc))
    c1_client.wait_until_block(target_block_number=(c1_client.block_number() + 10))
    # Settling with amounts that do not match the recorded balance hashes fails.
    with pytest.raises(BrokenPreconditionError):
        c1_token_network_proxy.settle(channel_identifier=channel_identifier, transferred_amount=2, locked_amount=0, locksroot=LOCKSROOT_OF_NO_LOCKS, partner=c2_client.address, partner_transferred_amount=2, partner_locked_amount=0, partner_locksroot=LOCKSROOT_OF_NO_LOCKS, given_block_identifier=BLOCK_ID_LATEST)
    # Correct settle succeeds and final balances reflect the net transfer.
    transaction_hash = c1_token_network_proxy.settle(channel_identifier=channel_identifier, transferred_amount=transferred_amount_c1, locked_amount=0, locksroot=LOCKSROOT_OF_NO_LOCKS, partner=c2_client.address, partner_transferred_amount=transferred_amount_c2, partner_locked_amount=0, partner_locksroot=LOCKSROOT_OF_NO_LOCKS, given_block_identifier=BLOCK_ID_LATEST)
    assert is_tx_hash_bytes(transaction_hash)
    assert (token_proxy.balance_of(c2_client.address) == ((initial_balance_c2 + transferred_amount_c1) - transferred_amount_c2))
    assert (token_proxy.balance_of(c1_client.address) == ((initial_balance_c1 + transferred_amount_c2) - transferred_amount_c1))
    # The settled channel no longer exists, so further deposits must fail.
    with pytest.raises(BrokenPreconditionError) as exc:
        c2_token_network_proxy.approve_and_set_total_deposit(given_block_identifier=BLOCK_ID_LATEST, channel_identifier=channel_identifier, total_deposit=20, partner=c1_client.address)
    assert ('getChannelIdentifier returned 0' in str(exc))
class FcNet(nn.Module):
    """Plain fully-connected network: Linear layers with ReLU + dropout
    between them and a linear output layer.

    Args:
        input_dim: size of the flattened input.
        hidden_dims: list of hidden layer widths (may be empty).
        output_dim: size of the output layer.
        dropout_p: dropout probability applied after each hidden ReLU.
    """

    def __init__(self, input_dim, hidden_dims, output_dim, dropout_p=0.0):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dims = hidden_dims
        self.output_dim = output_dim
        self.dropout_p = dropout_p
        # Full chain of layer widths: input -> hidden... -> output.
        self.dims = [self.input_dim, *hidden_dims, self.output_dim]
        self.layers = nn.ModuleList(
            [nn.Linear(n_in, n_out, bias=True)
             for (n_in, n_out) in zip(self.dims, self.dims[1:])]
        )
        self.__init_net_weights__()

    def __init_net_weights__(self):
        """Gaussian-initialize weights (std 0.1) and set biases to 0.1."""
        for layer in self.layers:
            layer.weight.data.normal_(0.0, 0.1)
            layer.bias.data.fill_(0.1)

    def forward(self, x):
        # Flatten everything but the feature dimension.
        x = x.view((-1), self.input_dim)
        *hidden_layers, output_layer = self.layers
        for layer in hidden_layers:
            # ReLU + dropout after every layer except the last.
            x = F.dropout(F.relu(layer(x)), p=self.dropout_p, training=self.training)
        return output_layer(x)
_task_action
class GrabOrReleaseActionIdBased(SimulatorTaskAction):
    """Pick/place action addressed by a semantic instance id (``iid``).

    With an empty gripper the action attempts to pick the targeted object;
    with a held object it attempts to place onto the targeted receptacle.
    """

    def run_checks(self, task, kwargs, gripped_object_id, distance_threshold, episode):
        """Validate a grab/release request against distance, visibility, and
        object/receptacle type constraints.

        Returns:
            (distance, distance_threshold, fail_action, fail_reason,
            curr_observations)
        """
        curr_observations = self._sim.get_sensor_observations()
        # Instance ids currently visible in the semantic sensor frame.
        avail_iids = np.unique(curr_observations['semantic']).tolist()
        agent_position = self._sim.get_agent_state().position
        fail_reason = 'none'
        if (('iid' not in kwargs) or (kwargs['iid'] < 0)):
            # No valid target id supplied.
            distance = (- 1)
            distance_threshold = (- 1)
            fail_action = True
            fail_reason = 'No iid found!'
        elif (kwargs['iid'] != (- 1)):
            sim_obj_id = task.iid_to_sim_obj_id[kwargs['iid']]
            sim_obj_type = task.sim_obj_id_to_type[sim_obj_id]
            if (sim_obj_type == 'obj'):
                # Target is an object: also resolve its current receptacle so
                # either being near the object or its receptacle counts.
                obj_key = task.sim_obj_id_to_obj_key[sim_obj_id]
                rec_key = episode.get_rec(obj_key, episode.state_matrix)
                rec_id = task.obj_key_to_sim_obj_id[rec_key]
                rec_iid = task.sim_obj_id_to_iid[rec_id]
                obj_distance = self._sim.get_or_dist(sim_obj_id, 'l2')
            else:
                # Target is a receptacle; sentinel object distance keeps the
                # min() below driven by the receptacle distance.
                obj_distance = 1000
                rec_id = sim_obj_id
                rec_iid = kwargs['iid']
            rec_distance = self._sim.get_or_dist(rec_id, 'l2')
            distance = min(rec_distance, obj_distance)
            fail_action = (distance > distance_threshold)
            if fail_action:
                fail_reason = 'Far from object'
            obj_type = task.sim_obj_id_to_type[sim_obj_id]
            # Empty gripper targeting a receptacle: nothing to pick.
            if ((gripped_object_id == (- 1)) and (obj_type == 'rec') and (not fail_action)):
                fail_action = True
                fail_reason = 'pick on receptacle'
            # Held object targeting an object: cannot place onto an object.
            if ((gripped_object_id != (- 1)) and (obj_type == 'obj') and (not fail_action)):
                fail_action = True
                fail_reason = 'place on object'
            # Neither the target nor its receptacle is currently visible.
            if ((kwargs['iid'] not in avail_iids) and (rec_iid not in avail_iids)):
                fail_action = True
                fail_reason = f"iid not-visible/incorrect; chosen: {kwargs['iid']}, avail: {avail_iids}"
        else:
            # kwargs['iid'] == -1 is excluded by the first branch (< 0), so
            # this arm should be unreachable.
            raise AssertionError
        return (distance, distance_threshold, fail_action, fail_reason, curr_observations)

    def step(self, task: CosRearrangementTask, *args: Any, **kwargs: Any):
        """Execute one grab-or-release step and return updated observations
        (with a boolean ``fail_action`` entry)."""
        assert ('episode' in kwargs)
        episode: CosRearrangementEpisode = kwargs['episode']
        gripped_object_id = self._sim.gripped_object_id
        grab_type = task._config.ACTIONS.GRAB_RELEASE.GRAB_TYPE
        distance_threshold = task._config.ACTIONS.GRAB_RELEASE.GRAB_DISTANCE
        if (grab_type == 'crosshair'):
            # Crosshair mode: resolve the target by raycasting from a fixed
            # screen position instead of an explicit iid argument.
            crosshair_pos = task._config.ACTIONS.GRAB_RELEASE.CROSSHAIR_POS
            obj_id = raycast(self._sim, crosshair_pos=crosshair_pos, max_distance=distance_threshold)
            kwargs['iid'] = (task.sim_obj_id_to_iid[obj_id] if (obj_id >= 0) else obj_id)
        (distance, distance_threshold, fail_action, fail_reason, curr_observations) = self.run_checks(task, kwargs, gripped_object_id, distance_threshold, episode)
        if (not fail_action):
            if (gripped_object_id != (- 1)):
                # Place the held object onto the targeted receptacle.
                if (kwargs['iid'] == (- 1)):
                    raise AssertionError
                else:
                    rec_id = task.iid_to_sim_obj_id[kwargs['iid']]
                    success = add_object_on_receptacle(gripped_object_id, rec_id, self._sim, task.rec_packers)
                    if success:
                        episode.update_mapping(gripped_object_id, 'place', task, rec_id)
                        gripped_object_id = (- 1)
                    else:
                        fail_action = True
                        fail_reason = 'Not enough space on receptacle'
            else:
                # Pick up the targeted object and detach it from its receptacle.
                gripped_object_id = task.iid_to_sim_obj_id[kwargs['iid']]
                try:
                    if (gripped_object_id != (- 1)):
                        obj_key = task.sim_obj_id_to_obj_key[gripped_object_id]
                        rec_key = episode.get_rec(obj_key, episode.state_matrix)
                        rec_id = task.obj_key_to_sim_obj_id[rec_key]
                        remove_result = task.rec_packers[rec_id].remove(gripped_object_id)
                        assert remove_result
                        episode.update_mapping(gripped_object_id, 'pick', task)
                    else:
                        # NOTE(review): leftover interactive debugger breakpoint —
                        # hangs non-interactive runs; should be removed/raised.
                        import pdb
                        pdb.set_trace()
                except:
                    # NOTE(review): bare except + debugger breakpoint swallows
                    # all errors here — should be narrowed and removed.
                    import pdb
                    pdb.set_trace()
        if (not fail_action):
            # Commit the new gripper state to the simulator.
            self._sim._sync_agent()
            self._sim._sync_gripped_object(gripped_object_id, invisible=True)
        if fail_action:
            pass
            print(f'Pick/place action failed because {fail_reason} distance: {round(distance, 2)} and threshold: {round(distance_threshold, 2)}')
        else:
            print(f'Pick/place success')
        # Refresh cached simulator observations and expose the outcome.
        self._sim._prev_sim_obs.update(curr_observations)
        self._sim._prev_sim_obs['gripped_object_id'] = gripped_object_id
        observations = self._sim._sensor_suite.get_observations(self._sim._prev_sim_obs)
        observations['fail_action'] = fail_action
        return observations
class ShortCunk_CNN_AutoTagging_Classifier(nn.Module):
    """Short-chunk CNN auto-tagging classifier: stacked 2D residual blocks
    over a (mel-)spectrogram, global max-pool over time, then a 2-layer
    dense head with sigmoid multi-label output.

    NOTE(review): ``self.spec``/``self.to_db`` are constructed but never
    used in ``forward`` (input is presumably a precomputed spectrogram),
    and ``self.layer7`` is defined but never called — confirm upstream;
    removing them would change the state_dict layout.
    """

    def __init__(self, n_channels=128, sample_rate=16000, n_fft=512, f_min=0.0, f_max=8000.0, n_mels=64, n_class=50):
        super(ShortCunk_CNN_AutoTagging_Classifier, self).__init__()
        # Spectrogram front-end (unused in forward — see class note).
        self.spec = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate, n_fft=n_fft, f_min=f_min, f_max=f_max, n_mels=n_mels)
        self.to_db = torchaudio.transforms.AmplitudeToDB()
        self.spec_bn = nn.BatchNorm2d(1)
        # Residual 2D conv stack, each stage downsampling by stride 2.
        self.layer1 = Res_2d(1, n_channels, stride=2)
        self.layer2 = Res_2d(n_channels, n_channels, stride=2)
        self.layer3 = Res_2d(n_channels, (n_channels * 2), stride=2)
        self.layer4 = Res_2d((n_channels * 2), (n_channels * 2), stride=2)
        self.layer5 = Res_2d((n_channels * 2), (n_channels * 2), stride=2)
        self.layer6 = Res_2d((n_channels * 2), (n_channels * 2), stride=2)
        self.layer7 = Res_2d((n_channels * 2), (n_channels * 4), stride=2)
        # Dense head sized for the layer6 output (n_channels * 2).
        self.dense1 = nn.Linear((n_channels * 2), (n_channels * 2))
        self.bn = nn.BatchNorm1d((n_channels * 2))
        self.dense2 = nn.Linear((n_channels * 2), n_class)
        self.dropout = nn.Dropout(0.5)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Add a channel dimension: (B, F, T) -> (B, 1, F, T).
        x = x.unsqueeze(1)
        x = self.spec_bn(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        # Drop the (collapsed) frequency dimension.
        x = x.squeeze(2)
        # Global max-pool over the remaining time dimension.
        if (x.size((- 1)) != 1):
            x = nn.MaxPool1d(x.size((- 1)))(x)
        x = x.squeeze(2)
        x = self.dense1(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.dense2(x)
        # Independent per-tag probabilities (multi-label).
        x = nn.Sigmoid()(x)
        return x
def test_ordered():
    """Sanity checks for the `ordered` transform: round-trip on sorted
    vectors, deprecation warning, Jacobian determinant, and monotone output."""
    check_vector_transform(tr.ordered, SortedVector(6))
    # Instantiating the deprecated class form must emit a FutureWarning.
    with pytest.warns(FutureWarning, match='ndim_supp argument is deprecated'):
        tr.Ordered(1)
    check_jacobian_det(tr.ordered, Vector(R, 2), pt.vector, floatX(np.array([0, 0])), elemwise=False)
    vals = get_values(tr.ordered, Vector(R, 3), pt.vector, floatX(np.zeros(3)))
    # The forward transform must produce a non-decreasing sequence.
    assert_array_equal((np.diff(vals) >= 0), True)
def compare_original_date(a1, a2):
    """Three-way comparator ordering two items by their albums' original
    release date, falling back to album sort name, then album key.

    Items with no album, or whose album has an empty title, are pushed to
    a deterministic end of the ordering by the guard clauses below.
    """
    album_a, album_b = a1.album, a2.album
    # Guard clauses: handle missing/untitled albums before comparing dates.
    if album_a is None:
        return (- 1)
    if album_b is None:
        return 1
    if not album_a.title:
        return 1
    if not album_b.title:
        return (- 1)
    # Prefer the explicit original date tag; fall back to the album date.
    date_a = album_a.get('originaldate', album_a.date)
    date_b = album_b.get('originaldate', album_b.date)
    return (cmpa(date_a, date_b)
            or cmpa(album_a.sort, album_b.sort)
            or cmp(album_a.key, album_b.key))
def run(model_args, data_args, training_args, additional_training_args):
    """Fine-tune a token-classification (NER) model end to end: load data,
    tokenize + align labels, train with early stopping, then evaluate on
    the test split and log/save all metrics."""
    setup_logging(training_args)
    set_seed(training_args.seed)
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
    features = datasets['train'].features
    text_column_name = 'tokens'
    label_column_name = 'ner_tags'
    label_list = features[label_column_name].feature.names
    # Tags are already integer-encoded, so the mapping is the identity.
    label_to_id = {i: i for i in range(len(label_list))}
    num_labels = len(label_list)
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name)
    if (model_args.model_name_or_path == 'neuropark/sahajBERT'):
        # sahajBERT requires its dedicated Bengali tokenizer.
        tokenizer = AlbertBengaliTokenizerFast.from_pretrained(model_args.model_name_or_path)
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=True)
    model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, config=config)
    # Either pad everything to max_length here or let the collator pad per batch.
    padding = ('max_length' if data_args.pad_to_max_length else False)
    if (data_args.max_seq_length > tokenizer.model_max_length):
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def tokenize_and_align_labels(examples):
        # Tokenize pre-split words and align word-level tags to subword tokens.
        tokenized_inputs = tokenizer(examples[text_column_name], padding=padding, max_length=max_seq_length, truncation=True, is_split_into_words=True)
        labels = []
        for (i, label) in enumerate(examples[label_column_name]):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            previous_word_idx = None
            label_ids = []
            for word_idx in word_ids:
                if (word_idx is None):
                    # Special tokens: -100 so the loss ignores them.
                    label_ids.append((- 100))
                elif (word_idx != previous_word_idx):
                    # First subword of a word carries the word's label.
                    label_ids.append(label_to_id[label[word_idx]])
                else:
                    # Continuation subwords: label only if explicitly requested.
                    label_ids.append((label_to_id[label[word_idx]] if data_args.label_all_tokens else (- 100)))
                previous_word_idx = word_idx
            labels.append(label_ids)
        tokenized_inputs['labels'] = labels
        return tokenized_inputs

    train_dataset = datasets['train']
    train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True)
    valid_dataset = datasets['validation']
    valid_dataset = valid_dataset.map(tokenize_and_align_labels, batched=True)
    test_dataset = datasets['test']
    test_dataset = test_dataset.map(tokenize_and_align_labels, batched=True)
    # Pad to multiples of 8 under fp16 for tensor-core efficiency.
    data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None))
    metric = load_metric('seqeval')

    def compute_metrics(p):
        # Convert logits to tag sequences, dropping ignored (-100) positions.
        (predictions, labels) = p
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}

    early_stopping = EarlyStoppingCallback(early_stopping_patience=additional_training_args.early_stopping_patience, early_stopping_threshold=additional_training_args.early_stopping_threshold)
    callbacks = [early_stopping]
    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=valid_dataset, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, callbacks=callbacks)
    train_result = trainer.train()
    metrics = train_result.metrics
    trainer.save_model()
    metrics['train_samples'] = len(train_dataset)
    trainer.log_metrics('train', metrics)
    trainer.save_metrics('train', metrics)
    trainer.save_state()
    logger.info('*** Evaluate ***')
    # Final evaluation is run on the held-out test split.
    metrics = trainer.evaluate(eval_dataset=test_dataset)
    metrics['eval_samples'] = len(test_dataset)
    trainer.log_metrics('eval', metrics)
    trainer.save_metrics('eval', metrics)
def name2cp(k):
    """Map an HTML entity name to its Unicode code point."""
    if k == 'apos':
        # 'apos' is XML-only and absent from older HTML entity tables.
        return ord("'")
    if hasattr(htmlentitydefs, 'name2codepoint'):
        return htmlentitydefs.name2codepoint[k]
    # Legacy fallback: entitydefs values are either '&#NNN;' or a latin-1 char.
    entity = htmlentitydefs.entitydefs[k]
    if entity.startswith('&#') and entity.endswith(';'):
        return int(entity[2:-1])
    return ord(codecs.latin_1_decode(entity)[0])
class Hopper():
    """Black-box objective for policy search on gym's Hopper-v2.

    A candidate x (length 33) is reshaped into a 3x11 linear policy; calling
    the object averages the episode return over `num_rollouts` rollouts and
    returns the negated mean so minimizers can be used directly.
    """

    def __init__(self, dirname='./Hopper'):
        # Per-dimension observation normalization constants.
        # NOTE(review): the first six std entries are 0.0, so __call__ divides
        # by zero on those dimensions (inf/nan unless obs equals mean exactly
        # there) — confirm these constants are intended.
        self.mean = np.array([1., (- 0.), (- 0.), (- 0.), 0., 2., (- 0.0085352), 0.0068375, (- 0.), (- 0.), (- 0.)])
        self.std = np.array([0., 0., 0., 0., 0., 0., 1., 1., 1., 3., 5.7164752])
        self.dims = 33  # 3 actions x 11 observations, flattened
        self.lb = ((- 1) * np.ones(self.dims))  # search-space lower bound
        self.ub = (1 * np.ones(self.dims))  # search-space upper bound
        self.counter = 0  # number of objective evaluations so far
        self.env = gym.make('Hopper-v2')
        self.num_rollouts = 3  # episodes averaged per evaluation
        self.render = False
        self.policy_shape = (3, 11)
        # Hyper-parameters read by the surrounding search algorithm
        # (presumably a tree/SVM-based optimizer: Cp, leaf size, kernel) — TODO confirm.
        self.Cp = 10
        self.leaf_size = 100
        self.kernel_type = 'poly'
        self.gamma_type = 'auto'
        self.ninits = 150
        print('initialization')
        print('mean:', self.mean)
        print('std:', self.std)
        print('dims:', self.dims)
        print('policy:', self.policy_shape)
        self.tracker = tracker(dirname)

    def __call__(self, x):
        """Evaluate policy vector x; return -mean(episode returns)."""
        self.counter += 1
        assert (len(x) == self.dims)
        assert (x.ndim == 1)
        assert (np.all((x <= self.ub)) and np.all((x >= self.lb)))
        M = x.reshape(self.policy_shape)  # linear policy matrix
        returns = []
        observations = []
        actions = []
        for i in range(self.num_rollouts):
            obs = self.env.reset()
            done = False
            totalr = 0.0
            steps = 0
            while (not done):
                # Normalize the observation, then act linearly: a = M @ obs_norm.
                inputs = ((obs - self.mean) / self.std)
                action = np.dot(M, inputs)
                observations.append(obs)
                actions.append(action)
                (obs, r, done, _) = self.env.step(action)
                totalr += r
                steps += 1
                if self.render:
                    self.env.render()
            returns.append(totalr)
        # Negate so lower is better for the minimizer; log via the tracker.
        res = (np.mean(returns) * (- 1))
        self.tracker.track(res)
        return res
def train(training_data_loader, G_optimizer, model, criterion, epoch):
    """Run one training epoch with synthetic Poisson noise.

    Noise2Noise-style: both the network input and the target are independent
    Poisson-corrupted copies of the clean batch. Returns the mean RMSE loss
    over the epoch as a scalar tensor.

    NOTE(review): `criterion` is accepted but never used here — confirm whether
    it should replace the inline RMSE computation.
    """
    lr = adjust_learning_rate(G_optimizer, epoch - 1)
    for param_group in G_optimizer.param_groups:
        param_group['lr'] = lr
    print('Epoch={}, lr={}'.format(epoch, G_optimizer.param_groups[0]['lr']))
    epoch_losses = []
    for iteration, batch in enumerate(training_data_loader, 1):
        target = Variable(batch)
        # Independent Poisson corruptions of the same clean batch.
        noisy_input = np.random.poisson(opt.noise_lambda * target.numpy()) / opt.noise_lambda
        noisy_input = torch.from_numpy(noisy_input).float()
        target = np.random.poisson(opt.noise_lambda * target.numpy()) / opt.noise_lambda
        target = torch.from_numpy(target).float()
        if opt.cuda:
            target = target.cuda()
            noisy_input = noisy_input.cuda()
        model.zero_grad()
        G_result = model(noisy_input)
        # RMSE (sqrt of mean squared error), despite the 'mse' naming in logs.
        mse_loss = torch.mean((G_result - target) ** 2) ** 0.5
        epoch_losses.append(mse_loss.data)
        G_train_loss = mse_loss
        G_train_loss.backward()
        G_optimizer.step()
        if iteration % 10 == 0:
            print('===> Epoch[{}]({}/{}): Loss_mse: {:.5}'.format(epoch, iteration, len(training_data_loader), mse_loss.data))
            save_image(G_result.data, './checksample/output.png')
            save_image(noisy_input.data, './checksample/input.png')
            save_image(target.data, './checksample/gt.png')
    # Bug fix: the original had two identical `return` statements (the second
    # unreachable / possibly returning after the first iteration); a single
    # return of the epoch-average belongs after the loop.
    return torch.mean(torch.FloatTensor(epoch_losses))
# Bug fix: the decorator line had lost its '@pytest.mark' prefix, leaving a
# bare '.parametrize(...)' which is a syntax error.
@pytest.mark.parametrize('data, expdata', [([1, 0, 0, 0], [1, 0, 0]), ([1, 0, 0, (np.pi / 2)], [0, 1, (np.pi / 2)]), ([1, 1, 1, 0], [2, 1, 0]), ([1, 1, 1, 0], [2, 1, 0]), ([1, 1, 1, (np.pi / 4)], [1., 1., (np.pi / 4)])])
def test_line_calc(data, expdata):
    """End pose of a straight Line: data = (length, x, y, h), expdata = (x, y, h)."""
    line = pyodrx.Line(data[0])
    (x, y, h, l) = line.get_end_data(data[1], data[2], data[3])
    assert pytest.approx(x, 1e-06) == expdata[0]
    assert pytest.approx(y, 1e-06) == expdata[1]
    assert pytest.approx(h, 1e-06) == expdata[2]
    # The returned length must equal the construction length.
    assert pytest.approx(l, 1e-06) == data[0]
class XModel(nn.Module):
    """Self-attention encoder followed by a width-`t` Conv1d scorer.

    forward() returns per-step sigmoid scores plus the encoder's auxiliary
    output `x_v`.
    """

    def __init__(self, cfg):
        super(XModel, self).__init__()
        self.t = cfg.t_step
        self.self_attention = XEncoder(
            d_model=cfg.feat_dim,
            hid_dim=cfg.hid_dim,
            out_dim=cfg.out_dim,
            n_heads=cfg.head_num,
            win_size=cfg.win_size,
            dropout=cfg.dropout,
            gamma=cfg.gamma,
            bias=cfg.bias,
            norm=cfg.norm,
        )
        self.classifier = nn.Conv1d(cfg.out_dim, 1, self.t, padding=0)
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / cfg.temp))
        self.apply(weight_init)

    def forward(self, x, seq_len):
        x_e, x_v = self.self_attention(x, seq_len)
        # Pad t-1 positions on the left of the last dim so the width-t conv
        # produces one score per step (presumably the time axis — confirm).
        padded = F.pad(x_e, (self.t - 1, 0))
        scores = self.classifier(padded)
        scores = scores.permute(0, 2, 1)
        return torch.sigmoid(scores), x_v
class GPUTimer():
    """Time GPU work on a CUDA stream using a pair of CUDA events."""

    def __init__(self, stream):
        self._begin = torch.cuda.Event(enable_timing=True)
        self._finish = torch.cuda.Event(enable_timing=True)
        self._stream = stream

    def start(self):
        """Mark the start point on the stream."""
        self._stream.record_event(self._begin)

    def stop(self):
        """Mark the stop point on the stream."""
        self._stream.record_event(self._finish)

    def sync(self):
        """Block the host until all queued work on the stream completes."""
        self._stream.synchronize()

    def millis(self):
        """Elapsed milliseconds between the recorded start and stop events."""
        return self._begin.elapsed_time(self._finish)
def rnms_gpu(det_boxes, iou_threshold, device_id):
    """Rotated NMS on GPU; return kept row indices as a flat int64 array.

    `det_boxes` must have 6 columns (rotated box parameters + score).
    """
    if det_boxes.shape[0] == 0:
        # Nothing to suppress.
        return np.array([], np.int64)
    assert det_boxes.shape[1] == 6, 'shape of det_boxes is not 6, {}'.format(det_boxes)
    keep = rotate_gpu_nms(det_boxes, iou_threshold, device_id)
    keep = np.reshape(keep, [-1])
    return np.array(keep, np.int64)
class TestCompareBasicModels(TestCase):
    """BasicFull must reproduce the full lead-acid model's solution."""

    def test_compare_full(self):
        parameter_values = pybamm.ParameterValues('Sulzer2019')
        parameter_values['Current function [A]'] = 10
        t_eval = np.linspace(0, 400)

        # Solve the simplified model.
        basic_full = pybamm.lead_acid.BasicFull()
        basic_sim = pybamm.Simulation(basic_full, solver=pybamm.CasadiSolver(), parameter_values=parameter_values)
        basic_sim.solve(t_eval)
        basic_sol = basic_sim.solution

        # Solve the reference full model on the same grid.
        full = pybamm.lead_acid.Full()
        sim = pybamm.Simulation(full, solver=pybamm.CasadiSolver(), parameter_values=parameter_values)
        sim.solve(t_eval)
        sol = sim.solution

        # Time grids and every shared variable must agree to tight tolerance.
        np.testing.assert_allclose(basic_sol.t, sol.t, rtol=0.0001)
        for name in basic_full.variables:
            np.testing.assert_allclose(basic_sol[name].entries, sol[name].entries, rtol=0.0001, atol=1e-08)
def get_where_function(where_expr=None):
    """Compile a '--where' filter expression into a predicate over property rules.

    Returns None when no expression is given; otherwise returns a function that
    evaluates the expression against the rule's dict with builtins disabled.
    Raises ValueError if the expression does not parse.
    """
    if where_expr is None:
        return None
    where_expr = where_expr.strip()
    try:
        where_code = compile(where_expr, '--where', 'eval')
    except Exception as err:
        # Improvement: chain the original error so parse failures keep their
        # full traceback context.
        raise ValueError('Cannot parse: %s' % (err,)) from err
    check_eval_names(where_code, ['__builtins__', 'None', 'True', 'False'])

    def where_function(property_rule):
        d = property_rule.to_dict()
        # Disable builtins so the expression only sees the rule's own fields.
        d.update({'__builtins__': None, 'None': None, 'True': True, 'False': False})
        try:
            result = eval(where_code, d)
        except Exception as err:
            raise EvalError("Could not evaluate 'where' expression %r: %s" % (where_expr, err)) from err
        return result
    return where_function
def get_proj_libdirs(proj_dir: Path) -> list[str]:
    """Locate the PROJ library directories, honoring the PROJ_LIBDIR override.

    When PROJ_LIBDIR is set in the environment it wins outright; otherwise the
    lib/lib64 subdirectories of *proj_dir* that exist are used. Exits with an
    error if nothing is found.
    """
    proj_libdir = os.environ.get('PROJ_LIBDIR')
    if proj_libdir is not None:
        return [proj_libdir]
    candidates = (proj_dir / 'lib', proj_dir / 'lib64')
    libdirs = [str(candidate) for candidate in candidates if candidate.exists()]
    if not libdirs:
        raise SystemExit('ERROR: PROJ_LIBDIR dir not found. Please set PROJ_LIBDIR.')
    return libdirs
def test_discover_raw_target(local_client, grpc_client):
    """Discovery with a raw vector target must agree across client backends."""
    target_vector = random_vector(image_vector_size)

    def run_discover(client: QdrantBase, **kwargs: Dict[(str, Any)]) -> List[models.ScoredPoint]:
        return client.discover(
            collection_name=COLLECTION_NAME,
            target=target_vector,
            context=[models.ContextExamplePair(positive=10, negative=19)],
            limit=10,
            using='image',
        )

    compare_client_results(grpc_client, run_discover)
    compare_client_results(local_client, run_discover)
class AND(BinaryBitOp):
    """Elementwise bitwise AND op."""

    identity = -1  # -1 has all bits set, so x & -1 == x
    commutative = True
    associative = True
    nfunc_spec = ('bitwise_and', 2, 1)

    def impl(self, x, y):
        """Python reference implementation."""
        return x & y

    def c_code(self, node, name, inputs, outputs, sub):
        """Emit the C expression computing the AND of both inputs."""
        x, y = inputs
        (z,) = outputs
        return f'{z} = ({x} & {y});'

    def c_code_cache_version(self):
        # Bump the base version tuple so cached C code is invalidated.
        return super().c_code_cache_version() + (3,)
def make_dataset(dir, class_to_idx):
    """Collect (image_path, class_index) pairs from a class-per-subdirectory tree.

    Each immediate subdirectory of *dir* is a class; all image files under it
    (recursively) are paired with that class's index. Ordering is deterministic
    (sorted at every level).
    """
    dir = os.path.expanduser(dir)
    samples = []
    for target in tqdm(sorted(os.listdir(dir))):
        class_dir = os.path.join(dir, target)
        if not os.path.isdir(class_dir):
            continue  # skip stray files at the top level
        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if not is_image_file(fname):
                    continue
                samples.append((os.path.join(root, fname), class_to_idx[target]))
    return samples
def test_package_include_with_multiple_dirs() -> None:
    """PackageInclude must enumerate every file and directory under 'with_includes'."""
    pkg_include = PackageInclude(base=fixtures_dir, include='with_includes')
    expected = [
        with_includes / '__init__.py',
        with_includes / 'bar',
        with_includes / 'bar/baz.py',
        with_includes / 'extra_package',
        with_includes / 'extra_package/some_dir',
        with_includes / 'extra_package/some_dir/foo.py',
        with_includes / 'extra_package/some_dir/quux.py',
        with_includes / 'not_a_python_pkg',
        with_includes / 'not_a_python_pkg/baz.txt',
    ]
    assert pkg_include.elements == expected
def _is_ambiguous(tags, skip_space_ambiguity=True):
if (len(tags) < 2):
return False
if skip_space_ambiguity:
space_pos = [(tag.index(' ') if (' ' in tag) else None) for tag in map(str, tags)]
if (len(space_pos) == len(set(space_pos))):
return False
return True |
class TestGetHandlers(TestCase):
    """Exercise get_handlers() under the various handler-related settings."""

    DEFAULT_APP = 'rapidsms.contrib.default'
    ECHO_APP = 'rapidsms.contrib.echo'
    ECHO_HANDLER = 'rapidsms.contrib.echo.handlers.echo'
    PING_HANDLER = 'rapidsms.contrib.echo.handlers.ping'
    ECHO_HANDLER_CLASS = 'rapidsms.contrib.echo.handlers.echo.EchoHandler'
    PING_HANDLER_CLASS = 'rapidsms.contrib.echo.handlers.ping.PingHandler'

    def setUp(self):
        self.settings = {
            'INSTALLED_APPS': [],
            'INSTALLED_HANDLERS': None,
            'EXCLUDED_HANDLERS': [],
            'RAPIDSMS_HANDLERS_EXCLUDE_APPS': [],
        }

    def _check_get_handlers(self, *expected):
        """Assert get_handlers() yields exactly *expected* under self.settings."""
        with override_settings(**self.settings):
            with mock.patch('rapidsms.contrib.handlers.utils.warn') as warn:
                handlers = get_handlers()
                self.assertEqual(set(handlers), set(expected))
                # The deprecation warning fires iff RAPIDSMS_HANDLERS is absent.
                self.assertEqual(warn.called, 'RAPIDSMS_HANDLERS' not in self.settings)

    def test_no_installed_apps(self):
        self._check_get_handlers()

    def test_no_relevant_installed_apps(self):
        self.settings['INSTALLED_APPS'] = [self.DEFAULT_APP]
        self._check_get_handlers()

    def test_installed_apps(self):
        self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
        self._check_get_handlers(EchoHandler, PingHandler)

    def test_installed_handler__installed_apps(self):
        self.settings.update(INSTALLED_APPS=[self.ECHO_APP], INSTALLED_HANDLERS=[self.PING_HANDLER])
        self._check_get_handlers(PingHandler)

    def test_installed_handlers__installed_apps(self):
        self.settings.update(INSTALLED_APPS=[self.ECHO_APP], INSTALLED_HANDLERS=[self.PING_HANDLER, self.ECHO_HANDLER])
        self._check_get_handlers(PingHandler, EchoHandler)

    def test_installed_handlers__no_installed_apps(self):
        self.settings['INSTALLED_HANDLERS'] = [self.PING_HANDLER]
        self._check_get_handlers()

    def test_installed_app(self):
        self.settings.update(INSTALLED_APPS=[self.ECHO_APP], INSTALLED_HANDLERS=[self.ECHO_APP])
        self._check_get_handlers(EchoHandler, PingHandler)

    def test_exclude_handlers__installed_apps(self):
        self.settings.update(INSTALLED_APPS=[self.ECHO_APP], EXCLUDED_HANDLERS=[self.PING_HANDLER])
        self._check_get_handlers(EchoHandler)

    def test_exclude_handlers__no_installed_apps(self):
        self.settings['EXCLUDED_HANDLERS'] = [self.PING_HANDLER]
        self._check_get_handlers()

    def test_exclude_app(self):
        self.settings.update(INSTALLED_APPS=[self.ECHO_APP], EXCLUDED_HANDLERS=[self.ECHO_APP])
        self._check_get_handlers()

    def test_empty_rapidsms_handlers(self):
        self.settings.update(INSTALLED_APPS=[self.ECHO_APP], INSTALLED_HANDLERS=[self.ECHO_APP], RAPIDSMS_HANDLERS=[])
        self._check_get_handlers()

    def test_rapidsms_handlers(self):
        self.settings.update(
            INSTALLED_APPS=[self.DEFAULT_APP],
            INSTALLED_HANDLERS=[],
            EXCLUDED_HANDLERS=[self.PING_HANDLER],
            RAPIDSMS_HANDLERS=[self.ECHO_HANDLER_CLASS, self.PING_HANDLER_CLASS],
        )
        self._check_get_handlers(EchoHandler, PingHandler)
# NOTE(review): this line looks like a truncated decorator — upstream
# transformers uses `@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)` above the
# class; confirm against the original, as written it calls an undefined name.
_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
    """Base class for "fast" tokenizers backed by the Rust `tokenizers` library.

    Wraps a `tokenizers.Tokenizer` (`self._tokenizer`) and adapts it to the
    `PreTrainedTokenizerBase` API: encoding/decoding, vocab access, padding and
    truncation configuration, saving, and retraining on new corpora.

    NOTE(review): upstream defines `is_fast`, `vocab_size`, `vocab`,
    `backend_tokenizer` and `decoder` as @property — the decorators appear to
    have been lost in this copy; confirm before relying on call syntax.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    # Matching "slow" tokenizer class, when one exists (None otherwise).
    slow_tokenizer_class: PreTrainedTokenizer = None
    can_save_slow_tokenizer: bool = True

    def __init__(self, *args, **kwargs):
        """Build the backend tokenizer from, in priority order: a ready
        `tokenizer_object`, a serialized `tokenizer_file`, a provided slow
        tokenizer, or by instantiating and converting `slow_tokenizer_class`."""
        tokenizer_object = kwargs.pop('tokenizer_object', None)
        slow_tokenizer = kwargs.pop('__slow_tokenizer', None)
        fast_tokenizer_file = kwargs.pop('tokenizer_file', None)
        from_slow = kwargs.pop('from_slow', False)
        if (from_slow and (slow_tokenizer is None) and (self.slow_tokenizer_class is None)):
            raise ValueError("Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you have sentencepiece installed.")
        if (tokenizer_object is not None):
            fast_tokenizer = tokenizer_object
        elif ((fast_tokenizer_file is not None) and (not from_slow)):
            # Load directly from the serialized tokenizer.json file.
            fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file)
        elif (slow_tokenizer is not None):
            fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
        elif (self.slow_tokenizer_class is not None):
            # Build a slow tokenizer from the given args, then convert it.
            slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs)
            fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
        else:
            raise ValueError("Couldn't instantiate the backend tokenizer from one of: \n(1) a `tokenizers` library serialization file, \n(2) a slow tokenizer instance to convert or \n(3) an equivalent slow tokenizer class to instantiate and convert. \nYou need to have sentencepiece installed to convert a slow tokenizer to a fast one.")
        self._tokenizer = fast_tokenizer
        if (slow_tokenizer is not None):
            # Carry over the slow tokenizer's init kwargs (special tokens etc.).
            kwargs.update(slow_tokenizer.init_kwargs)
        self._decode_use_source_tokenizer = False
        super().__init__(**kwargs)

    def is_fast(self) -> bool:
        return True

    def vocab_size(self) -> int:
        """Size of the base vocabulary (excluding added tokens)."""
        return self._tokenizer.get_vocab_size(with_added_tokens=False)

    def get_vocab(self) -> Dict[(str, int)]:
        """Full token -> id mapping, including added tokens."""
        return self._tokenizer.get_vocab(with_added_tokens=True)

    def vocab(self) -> Dict[(str, int)]:
        return self.get_vocab()

    def get_added_vocab(self) -> Dict[(str, int)]:
        """Token -> id mapping for tokens added on top of the base vocab."""
        base_vocab = self._tokenizer.get_vocab(with_added_tokens=False)
        full_vocab = self._tokenizer.get_vocab(with_added_tokens=True)
        added_vocab = dict(((tok, index) for (tok, index) in full_vocab.items() if (tok not in base_vocab)))
        return added_vocab

    def __len__(self) -> int:
        # Full vocabulary size, including added tokens.
        return self._tokenizer.get_vocab_size(with_added_tokens=True)

    def backend_tokenizer(self) -> TokenizerFast:
        """The underlying `tokenizers.Tokenizer` instance."""
        return self._tokenizer

    def decoder(self) -> DecoderFast:
        """The backend tokenizer's decoder component."""
        return self._tokenizer.decoder

    def _convert_encoding(self, encoding: EncodingFast, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> Tuple[(Dict[(str, Any)], List[EncodingFast])]:
        """Convert one backend `Encoding` into a dict of python lists.

        When `return_overflowing_tokens` is set and the encoding overflowed,
        each overflow chunk becomes an extra row in every returned list.
        """
        if (return_token_type_ids is None):
            # Default the optional outputs from the model's declared inputs.
            return_token_type_ids = ('token_type_ids' in self.model_input_names)
        if (return_attention_mask is None):
            return_attention_mask = ('attention_mask' in self.model_input_names)
        if (return_overflowing_tokens and (encoding.overflowing is not None)):
            encodings = ([encoding] + encoding.overflowing)
        else:
            encodings = [encoding]
        encoding_dict = defaultdict(list)
        for e in encodings:
            encoding_dict['input_ids'].append(e.ids)
            if return_token_type_ids:
                encoding_dict['token_type_ids'].append(e.type_ids)
            if return_attention_mask:
                encoding_dict['attention_mask'].append(e.attention_mask)
            if return_special_tokens_mask:
                encoding_dict['special_tokens_mask'].append(e.special_tokens_mask)
            if return_offsets_mapping:
                encoding_dict['offset_mapping'].append(e.offsets)
            if return_length:
                encoding_dict['length'].append(len(e.ids))
        return (encoding_dict, encodings)

    def convert_tokens_to_ids(self, tokens: Union[(str, List[str])]) -> Union[(int, List[int])]:
        """Map a token or list of tokens to id(s), using the added vocab too."""
        if (tokens is None):
            return None
        if isinstance(tokens, str):
            return self._convert_token_to_id_with_added_voc(tokens)
        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))
        return ids

    def _convert_token_to_id_with_added_voc(self, token: str) -> int:
        """Look up a single token's id, falling back to the unk token id."""
        index = self._tokenizer.token_to_id(token)
        if (index is None):
            return self.unk_token_id
        return index

    def _convert_id_to_token(self, index: int) -> Optional[str]:
        return self._tokenizer.id_to_token(int(index))

    def _add_tokens(self, new_tokens: List[Union[(str, AddedToken)]], special_tokens=False) -> int:
        """Add tokens to the backend tokenizer; returns how many were added."""
        if special_tokens:
            return self._tokenizer.add_special_tokens(new_tokens)
        return self._tokenizer.add_tokens(new_tokens)

    def num_special_tokens_to_add(self, pair: bool=False) -> int:
        """How many special tokens the post-processor adds (single or pair)."""
        return self._tokenizer.num_special_tokens_to_add(pair)

    def convert_ids_to_tokens(self, ids: Union[(int, List[int])], skip_special_tokens: bool=False) -> Union[(str, List[str])]:
        """Map an id or list of ids back to token string(s)."""
        if isinstance(ids, int):
            return self._tokenizer.id_to_token(ids)
        tokens = []
        for index in ids:
            index = int(index)
            if (skip_special_tokens and (index in self.all_special_ids)):
                continue
            tokens.append(self._tokenizer.id_to_token(index))
        return tokens

    def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> List[str]:
        """Tokenize text (optionally with a pair) and return the token strings."""
        return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens()

    def set_truncation_and_padding(self, padding_strategy: PaddingStrategy, truncation_strategy: TruncationStrategy, max_length: int, stride: int, pad_to_multiple_of: Optional[int]):
        """Configure the backend tokenizer's truncation/padding in place.

        Only touches the backend state when the requested target differs from
        the current configuration, to avoid redundant reconfiguration.
        """
        _truncation = self._tokenizer.truncation
        _padding = self._tokenizer.padding
        if (truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE):
            if (_truncation is not None):
                self._tokenizer.no_truncation()
        else:
            target = {'max_length': max_length, 'stride': stride, 'strategy': truncation_strategy.value, 'direction': self.truncation_side}
            if (_truncation is None):
                current = None
            else:
                # Compare only the keys we control.
                current = {k: _truncation.get(k, None) for k in target}
            if (current != target):
                self._tokenizer.enable_truncation(**target)
        if (padding_strategy == PaddingStrategy.DO_NOT_PAD):
            if (_padding is not None):
                self._tokenizer.no_padding()
        else:
            length = (max_length if (padding_strategy == PaddingStrategy.MAX_LENGTH) else None)
            target = {'length': length, 'direction': self.padding_side, 'pad_id': self.pad_token_id, 'pad_token': self.pad_token, 'pad_type_id': self.pad_token_type_id, 'pad_to_multiple_of': pad_to_multiple_of}
            if (_padding != target):
                self._tokenizer.enable_padding(**target)

    def _batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair])], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
        """Encode a batch via the backend and assemble a `BatchEncoding`."""
        if (not isinstance(batch_text_or_text_pairs, list)):
            raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})')
        self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of)
        encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=is_split_into_words)
        tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings]
        # Flatten the per-sample lists (each sample may have overflow rows).
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0].keys():
            stack = [e for (item, _) in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for (_, item) in tokens_and_encodings for e in item]
        if return_overflowing_tokens:
            # Record which input sample each (possibly overflowed) row came from.
            overflow_to_sample_mapping = []
            for (i, (toks, _)) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += ([i] * len(toks['input_ids']))
            sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping
        for input_ids in sanitized_tokens['input_ids']:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)

    def _encode_plus(self, text: Union[(TextInput, PreTokenizedInput)], text_pair: Optional[Union[(TextInput, PreTokenizedInput)]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        """Encode a single text (or pair) by delegating to `_batch_encode_plus`."""
        batched_input = ([(text, text_pair)] if text_pair else [text])
        batched_output = self._batch_encode_plus(batched_input, is_split_into_words=is_split_into_words, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
        if ((return_tensors is None) and (not return_overflowing_tokens)):
            # Unwrap the singleton batch dimension when returning plain lists.
            batched_output = BatchEncoding({key: (value[0] if ((len(value) > 0) and isinstance(value[0], list)) else value) for (key, value) in batched_output.items()}, batched_output.encodings)
        self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose)
        return batched_output

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens back into a plain string via the backend decoder."""
        return self.backend_tokenizer.decoder.decode(tokens)

    def _decode(self, token_ids: Union[(int, List[int])], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True, **kwargs) -> str:
        """Decode ids to text, optionally cleaning tokenization artifacts."""
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def _save_pretrained(self, save_directory: Union[(str, os.PathLike)], file_names: Tuple[str], legacy_format: Optional[bool]=None, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save vocabulary/tokenizer files in slow (legacy) and/or fast format.

        `legacy_format=None` saves both when possible; True forces slow-only,
        False fast-only. Returns the tuple of written file names.
        """
        save_directory = str(save_directory)
        if ((self.slow_tokenizer_class is None) and (legacy_format is True)):
            raise ValueError('Your tokenizer does not have a legacy version defined and therefore cannot register this version. You might consider leaving the legacy_format at `None` or setting it to `False`.')
        save_slow = (((legacy_format is None) or (legacy_format is True)) and (self.slow_tokenizer_class is not None) and self.can_save_slow_tokenizer)
        save_fast = ((legacy_format is None) or (legacy_format is False))
        if save_slow:
            added_tokens_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + ADDED_TOKENS_FILE))
            added_vocab = self.get_added_vocab()
            if added_vocab:
                with open(added_tokens_file, 'w', encoding='utf-8') as f:
                    out_str = json.dumps(added_vocab, ensure_ascii=False)
                    f.write(out_str)
            vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
            file_names = ((file_names + vocab_files) + (added_tokens_file,))
        if save_fast:
            tokenizer_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + TOKENIZER_FILE))
            self.backend_tokenizer.save(tokenizer_file)
            file_names = (file_names + (tokenizer_file,))
        return file_names

    def train_new_from_iterator(self, text_iterator, vocab_size, new_special_tokens=None, special_tokens_map=None, **kwargs):
        """Train a fresh tokenizer of the same type/configuration on new text.

        Reuses this tokenizer's serialized config (model type, normalizer,
        post-processor, special tokens), empties the vocabulary, trains on
        `text_iterator`, then rebuilds an instance of this class.
        `special_tokens_map` renames special tokens in the new tokenizer.
        """
        tokenizer_json = json.loads(self._tokenizer.to_str())
        # Removed here and re-applied after training.
        added_tokens = tokenizer_json.pop('added_tokens')
        post_processor = tokenizer_json.pop('post_processor')
        unk_token = None
        # Empty the vocabulary for the supported model types.
        if (tokenizer_json['model']['type'] == 'BPE'):
            tokenizer_json['model']['vocab'] = {}
            tokenizer_json['model']['merges'] = []
        elif (tokenizer_json['model']['type'] == 'Unigram'):
            if (tokenizer_json['model']['unk_id'] is not None):
                # Keep (only) the unk token, remapped to id 0.
                unk_id = tokenizer_json['model']['unk_id']
                unk_token = tokenizer_json['model']['vocab'][unk_id][0]
                if ((special_tokens_map is not None) and (unk_token in special_tokens_map)):
                    unk_token = special_tokens_map[unk_token]
                tokenizer_json['model']['unk_id'] = 0
                tokenizer_json['model']['vocab'] = [[unk_token, 0.0]]
        elif (tokenizer_json['model']['type'] in ['WordLevel', 'WordPiece']):
            tokenizer_json['model']['vocab'] = {}
        else:
            raise ValueError(f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) only BPE, Unigram, WordLevel and WordPiece.")
        if ((special_tokens_map is not None) and ('unk_token' in tokenizer_json['model']) and (tokenizer_json['model']['unk_token'] in special_tokens_map)):
            tokenizer_json['model']['unk_token'] = special_tokens_map[tokenizer_json['model']['unk_token']]
        tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json))
        # Collect the special tokens the trainer must preserve.
        special_tokens = []
        for added_token in added_tokens:
            special = added_token.pop('special', None)
            _ = added_token.pop('id', None)
            if ((tokenizer_json['model']['type'] != 'Unigram') and (not special)):
                continue
            if ((special_tokens_map is not None) and (added_token['content'] in special_tokens_map)):
                added_token['content'] = special_tokens_map[added_token['content']]
            special_tokens.append(AddedToken(**added_token))
        if (new_special_tokens is not None):
            special_tokens.extend(new_special_tokens)
        # Forward model-specific trainer options unless the caller overrode them.
        if ((tokenizer_json['model']['type'] == 'BPE') and ('continuing_subword_prefix' not in kwargs) and (tokenizer_json['model']['continuing_subword_prefix'] is not None)):
            kwargs['continuing_subword_prefix'] = tokenizer_json['model']['continuing_subword_prefix']
        if ((tokenizer_json['model']['type'] == 'BPE') and ('end_of_word_suffix' not in kwargs) and (tokenizer_json['model']['end_of_word_suffix'] is not None)):
            kwargs['end_of_word_suffix'] = tokenizer_json['model']['end_of_word_suffix']
        if ((tokenizer_json['model']['type'] == 'Unigram') and (unk_token is not None)):
            kwargs['unk_token'] = unk_token
        trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json['model']['type']]
        trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs)
        tokenizer.train_from_iterator(text_iterator, trainer=trainer)
        if (post_processor is not None):
            # Re-attach the post-processor, remapping special-token ids into
            # the newly trained vocabulary.
            trained_tokenizer_json = json.loads(tokenizer.to_str())
            if ('special_tokens' in post_processor):
                for key in post_processor['special_tokens']:
                    tokens = post_processor['special_tokens'][key]['tokens']
                    if (special_tokens_map is not None):
                        tokens = [special_tokens_map.get(token, token) for token in tokens]
                    post_processor['special_tokens'][key]['tokens'] = tokens
                    post_processor['special_tokens'][key]['ids'] = [tokenizer.token_to_id(token) for token in tokens]
            for special_token in ['cls', 'sep']:
                if (special_token in post_processor):
                    (token, _) = post_processor[special_token]
                    if ((special_tokens_map is not None) and (token in special_tokens_map)):
                        token = special_tokens_map[token]
                    token_id = tokenizer.token_to_id(token)
                    post_processor[special_token] = [token, token_id]
            trained_tokenizer_json['post_processor'] = post_processor
            tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json))
        # Rebuild the python-side kwargs (special tokens etc.) for the new instance.
        kwargs = self.init_kwargs.copy()
        special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
        special_tokens_list.remove('additional_special_tokens')
        for token in special_tokens_list:
            if (getattr(self, f'_{token}') is not None):
                special_token = getattr(self, token)
                if ((special_tokens_map is not None) and (special_token in special_tokens_map)):
                    special_token = special_tokens_map[special_token]
                special_token_full = getattr(self, f'_{token}')
                if isinstance(special_token_full, AddedToken):
                    # Preserve the AddedToken flags of the original token.
                    kwargs[token] = AddedToken(special_token, single_word=special_token_full.single_word, lstrip=special_token_full.lstrip, rstrip=special_token_full.rstrip, normalized=special_token_full.normalized)
                else:
                    kwargs[token] = special_token
        additional_special_tokens = self.additional_special_tokens
        if (new_special_tokens is not None):
            additional_special_tokens.extend(new_special_tokens)
        if (len(additional_special_tokens) > 0):
            kwargs['additional_special_tokens'] = additional_special_tokens
        return self.__class__(tokenizer_object=tokenizer, **kwargs)
def parse_args():
    """Build and parse the CLI arguments for the dictionary word-lookup tool.

    Help strings are wrapped in gettext's `_()` for localization. Returns the
    parsed `argparse.Namespace`.
    """
    parser = argparse.ArgumentParser()
    # Positional argument: the word to look up.
    parser.add_argument('word')
    # Attribute-display flags: each prints one facet of the looked-up word.
    parser.add_argument('--title', action='store_true', help=_('display word and article'))
    parser.add_argument('--name', action='store_true', help=_('display the word itself'))
    parser.add_argument('--article', action='store_true', help=_('display article'))
    parser.add_argument('--part-of-speech', action='store_true', help=_('display part of speech'))
    parser.add_argument('--frequency', action='store_true', help=_('display commonness (1 to 5)'))
    parser.add_argument('--usage', action='store_true', help=_('display context of use'))
    parser.add_argument('--word-separation', action='store_true', help=_('display proper separation (line separated)'))
    parser.add_argument('--meaning-overview', action='store_true', help=_('display meaning overview'))
    parser.add_argument('--synonyms', action='store_true', help=_('list synonyms (line separated)'))
    parser.add_argument('--origin', action='store_true', help=_('display origin'))
    parser.add_argument('--grammar-overview', action='store_true', help=_('display short grammar overview'))
    # nargs='?' with const='ALL': bare flag lists everything, a value filters.
    parser.add_argument('--compounds', nargs='?', const='ALL', help=_('list common compounds'))
    parser.add_argument('-g', '--grammar', nargs='?', const='ALL', help=_('list grammar forms'))
    parser.add_argument('--export', action='store_true', help=_('export parsed word attributes in yaml format'))
    # Neighboring dictionary entries.
    parser.add_argument('--words-before', action='store_true', help=_('list 5 words before this one'))
    parser.add_argument('--words-after', action='store_true', help=_('list 5 words after this one'))
    # Behavior switches.
    parser.add_argument('-r', '--result', type=int, help=_('display n-th (starting from 1) result in case of multiple words matching the input'))
    parser.add_argument('--fuzzy', action='store_true', help=_('enable fuzzy word matching'))
    # store_false: passing --no-cache sets args.cache to False.
    parser.add_argument('--no-cache', action='store_false', dest='cache', help=_('do not cache retrieved words'))
    parser.add_argument('-V', '--version', action='store_true', help=_('print program version'))
    parser.add_argument('--phonetic', action='store_true', help=_('display pronunciation'))
    parser.add_argument('--alternative-spellings', action='store_true', help=_('display alternative spellings'))
    return parser.parse_args()
class CommandRefactorUseFunction(Command):
    """LSP command running rope's 'use function' refactoring at a position."""

    name = commands.COMMAND_REFACTOR_USE_FUNCTION
    kind: CodeActionKind = 'refactor'
    document_uri: DocumentUri
    position: typing.Range

    def validate(self, info):
        # Constructing the refactoring raises when the offset is not usable;
        # the instance itself is discarded.
        usefunction.UseFunction(project=self.project, resource=info.resource, offset=info.current_document.offset_at_position(info.position))

    def get_changes(self):
        current_document, resource = get_resource(self.workspace, self.document_uri)
        offset = current_document.offset_at_position(self.position)
        refactoring = usefunction.UseFunction(project=self.project, resource=resource, offset=offset)
        resources = get_resources(self.workspace, getattr(self, 'documents', None))
        return refactoring.get_changes(resources=resources)
class FakeDataABC(metaclass=ABCMeta):
    """Mixin describing a fake filesystem tree derived from `filelist`.

    Subclasses supply `filelist`; the helpers derive the fake file objects,
    their ancestor directories, and the names contained directly under `self`.
    NOTE(review): several of these look like @property methods upstream (they
    are accessed without parentheses, e.g. `self.filelist`, `self.fake_files`)
    — the decorators appear to have been lost in this copy; confirm.
    """

    def filelist(self):
        # Abstract in spirit: subclasses must provide the paths to mock.
        msg = 'Collection of (str) file paths to mock'
        raise NotImplementedError(msg)

    def fake_files(self):
        # Wrap each path string in this (Path-like) class.
        return map(type(self), self.filelist)

    def fake_dirs(self):
        # Every ancestor directory of every fake file.
        # `attr` is presumably operator.attrgetter — TODO confirm.
        return set(chain(*map(attr('parents'), self.fake_files)))

    def contained_fake_names(self):
        # Direct children of `self` that qualify as names per `is_name`.
        return filter(is_name, self.fake_content)

    def fake_content(self):
        # Fake files that live under `self` (non-descendants become None and
        # are filtered out).
        return filter(None, map(self.fake_child, self.fake_files))

    def fake_child(self, path):
        """Return `path` relative to self, or None when not a descendant."""
        try:
            return path.relative_to(self)
        except ValueError:
            return None
def test_concatenate_and_rechunk__tiny_file():
    """Concatenating zarr arrays whose sizes don't align with the chunk size
    still yields a contiguous 0..9 result with a trailing partial chunk."""
    sources = []
    for values in (np.arange(4), np.arange(4, 5), np.arange(5, 10)):
        z = zarr.zeros(len(values), chunks=3, dtype='i4')
        z[:] = values
        sources.append(z)
    out = concatenate_and_rechunk(sources)
    assert out.chunks == ((3, 3, 3, 1),)
    np.testing.assert_array_equal(out.compute(), np.arange(10))
class CosStepScheduler(LRScheduler):
    """Cosine-annealed learning-rate schedule from `start_lr` down to `end_lr`."""

    def __init__(self, optimizer, start_lr=0.01, end_lr=0.005, epochs=50, last_epoch=-1, **kwargs):
        self.start_lr = start_lr
        self.end_lr = end_lr
        # Precompute the per-epoch learning rates once.
        self.lr_spaces = self._build_lr(start_lr, end_lr, epochs)
        super(CosStepScheduler, self).__init__(optimizer, last_epoch)

    def _build_lr(self, start_lr, end_lr, epochs):
        """Return a float32 array of length `epochs` following a half cosine."""
        steps = np.arange(epochs).astype(np.float32)
        cosine = 1.0 + np.cos(steps * np.pi / epochs)
        schedule = end_lr + (start_lr - end_lr) * cosine * 0.5
        return schedule.astype(np.float32)
# NOTE(review): this bare call was likely a decorator (e.g.
# `@with_fixtures(FieldFixture)`) whose `@`-prefix was lost in extraction —
# confirm against the original test module.
_fixtures(FieldFixture)
def test_helpers_for_events_class_side(fixture):
    """Events declared via ExposedNames are reachable on the class itself,
    and unknown event names raise AttributeError."""
    class ModelObject():
        events = ExposedNames()
        events.event1 = (lambda i: Event())
        events.event2 = (lambda i: Event())
    # The descriptor exposes each event under its declared attribute name.
    assert (ModelObject.events.event1.name == 'event1')
    with expected(AttributeError):
        ModelObject.events.nonevent
# NOTE(review): this bare string was almost certainly
# `@mock.patch('pytube.request.get')` — the decorator call was truncated in
# extraction; restore it from the original test file.
('pytube.request.get')
def test_trimmed_pagination_not_found(request_get, playlist_html, playlist_long_html):
    """Playlist continuation should stop cleanly when a further page yields
    no parseable videos (final side_effect pages are empty)."""
    # NOTE(review): the URL literal below is truncated (unterminated string) —
    # this line is a syntax error as extracted and must be restored.
    url = '
    request_get.side_effect = [playlist_long_html, '{"content_html":"<a href=\\"/watch?v=BcWz41-4cDk&feature=plpp_video&ved=CCYQxjQYACITCO33n5-pn-cCFUG3xAodLogN2yj6LA\\">}", "load_more_widget_html":""}', '{}']
    playlist = Playlist(url)
    assert True
def add_data_args(parser):
    """Attach all dataset/preprocessing options to `parser` and return it.

    Options cover dataset selection, split sizing, length limits, paragraph
    splitting, and keyword augmentation for summarization experiments.
    """
    add = parser.add_argument
    add('--dataset', type=str, default='writing_prompts', choices=DATASET_CHOICES, help='dataset format')
    add('--data-dir', type=str, help='data directory')
    add('--split-sizes', type=float, nargs=3, default=[0.8, 0.1, 0.1], help='train/val/test proportions for datasets where not provided')
    add('--summarizer-prediction-split', type=str, default='valid', help='split to use for summarizer predictions')
    add('--limit', type=int, default=None, help='limit the number of examples')
    add('--length-limit', type=int, default=1000000, help='limit the number of words per example')
    add('--lower-length-limit', type=int, default=0, help='limit the number of words per example')
    add('--summary-length-limit', type=int, default=1000000, help='limit the number of words in the summary')
    add('--single-sentence-summary', action='store_true', help='use single sentence summary data only')
    add('--split-long-paragraph-mode', type=str, default='none', choices=SPLIT_PARAGRAPH_MODES, help='split long paragraph mode')
    add('--split-short-paragraph-mode', type=str, default='none', choices=SPLIT_PARAGRAPH_MODES, help='split short paragraph mode')
    add('--extra-keywords', type=int, default=0, help='max number of extra keywords from long content to add to short content')
    add('--hallucinate-keywords', action='store_true', default=False, help='hallucinate keywords from short content')
    add('--keyword-file', type=str, default='/home/yangk/data/glove/glove.840B.300d.vocab', help='file to load keywords from')
    add('--keyword-temperature', type=float, default=1.0, help='temperature for keyword sampling')
    add('--csv-column', type=str, help='column name to use as input for csv')
    add('--num-workers', type=int, default=20, help='number of workers for data loading')
    return parser
class LevitImageProcessor(BaseImageProcessor):
    """Image processor for LeViT models.

    Optionally resizes (shortest edge scaled by 256/224 before cropping),
    center-crops, rescales to [0, 1] and normalizes images, returning a
    ``BatchFeature`` with ``pixel_values`` (channels-first by default).
    """
    model_input_names = ['pixel_values']
    def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[(str, int)]=None, do_rescale: bool=True, rescale_factor: Union[(int, float)]=(1 / 255), do_normalize: bool=True, image_mean: Optional[Union[(float, Iterable[float])]]=IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[(float, Iterable[float])]]=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        """Store preprocessing defaults.

        `size` defaults to a 224-pixel shortest edge and `crop_size` to
        224x224; both are normalized through `get_size_dict`.
        """
        super().__init__(**kwargs)
        size = (size if (size is not None) else {'shortest_edge': 224})
        size = get_size_dict(size, default_to_square=False)
        crop_size = (crop_size if (crop_size is not None) else {'height': 224, 'width': 224})
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = (image_mean if (image_mean is not None) else IMAGENET_DEFAULT_MEAN)
        self.image_std = (image_std if (image_std is not None) else IMAGENET_DEFAULT_STD)
    def resize(self, image: np.ndarray, size: Dict[(str, int)], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Resize `image`.

        If `size` has 'shortest_edge', that edge is scaled by 256/224 (LeViT's
        resize-then-crop convention) preserving aspect ratio; otherwise `size`
        must provide explicit 'height' and 'width'.

        Raises:
            ValueError: if neither 'shortest_edge' nor both 'height'/'width'
                are present.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        if ('shortest_edge' in size):
            # LeViT resizes to (256/224) * target size before center-cropping.
            shortest_edge = int(((256 / 224) * size['shortest_edge']))
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if (('height' not in size_dict) or ('width' not in size_dict)):
            raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(image, size=(size_dict['height'], size_dict['width']), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[(str, int)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Center-crop `image` to exactly `size['height']` x `size['width']`."""
        size = get_size_dict(size)
        if (('height' not in size) or ('width' not in size)):
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[(int, float)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 for [0, 1] range)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[(float, List[float])], std: Union[(float, List[float])], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Normalize `image` channel-wise by `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[(str, int)]]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[Dict[(str, int)]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[(float, Iterable[float])]]=None, image_std: Optional[Union[(float, Iterable[float])]]=None, return_tensors: Optional[TensorType]=None, data_format: ChannelDimension=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Run the configured pipeline over one image or a batch.

        Any argument left as None falls back to the instance default set in
        `__init__`.  Steps (each optional): resize -> center-crop -> rescale
        -> normalize, then convert to `data_format` and wrap in a
        `BatchFeature` (optionally as tensors of type `return_tensors`).
        """
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        resample = (resample if (resample is not None) else self.resample)
        do_center_crop = (do_center_crop if (do_center_crop is not None) else self.do_center_crop)
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        size = (size if (size is not None) else self.size)
        size = get_size_dict(size, default_to_square=False)
        crop_size = (crop_size if (crop_size is not None) else self.crop_size)
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        # Validate each enabled step has the parameters it needs.
        if (do_resize and (size is None)):
            raise ValueError('Size must be specified if do_resize is True.')
        if (do_center_crop and (crop_size is None)):
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transforms operate on numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def _check_assertions(wrapped: Callable[(..., Any)], function_locals: dict, condition_type: str='precondition', function_return_val: Any=None) -> None:
    """Evaluate the registered pre- or postconditions of `wrapped`.

    Args:
        wrapped: the contract-decorated function (or bound method).
        function_locals: the call's argument/local bindings, used as the
            evaluation namespace for each assertion.
        condition_type: 'precondition' or 'postcondition'.
        function_return_val: return value, exposed to postconditions under the
            variable name recorded alongside each compiled assertion.

    Raises:
        PyTAContractError: if an assertion evaluates to a falsy value.
    """
    # Bound methods keep the assertion lists on the underlying function object.
    if hasattr(wrapped, '__self__'):
        target = wrapped.__func__
    else:
        target = wrapped
    assertions = []
    if (condition_type == 'precondition'):
        assertions = target.__preconditions__
    elif (condition_type == 'postcondition'):
        assertions = target.__postconditions__
    for (assertion_str, compiled, *return_val_var_name) in assertions:
        return_val_dict = {}
        if (condition_type == 'postcondition'):
            # Expose the return value under the contract's declared name.
            return_val_dict = {return_val_var_name[0]: function_return_val}
        try:
            _debug(f'Checking {condition_type} for {wrapped.__qualname__}: {assertion_str}')
            # SECURITY NOTE: `compiled` comes from user-written contract
            # strings; eval() here intentionally trusts the decorated code.
            check = eval(compiled, {**wrapped.__globals__, **function_locals, **return_val_dict})
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit raised while evaluating a contract.
            _debug(f'Warning: could not evaluate {condition_type}: {assertion_str}')
        else:
            if (not check):
                arg_string = ', '.join((f'{k}: {_display_value(v)}' for (k, v) in function_locals.items()))
                arg_string = (('{' + arg_string) + '}')
                return_val_string = ''
                if (condition_type == 'postcondition'):
                    return_val_string = f'and return value {function_return_val}'
                raise PyTAContractError(f'{wrapped.__name__} {condition_type} "{assertion_str}" was violated for arguments {arg_string} {return_val_string}')
def get_engine():
    """Create a SQLAlchemy engine for the application database.

    Only the peewee ``ssl`` connection argument is carried over to the
    SQLAlchemy engine; other peewee-specific options are dropped.
    """
    db_url = get_db_url()
    peewee_args = app.config.get('DB_CONNECTION_ARGS', {})
    connect_args = {}
    if 'ssl' in peewee_args:
        connect_args['ssl'] = peewee_args['ssl']
    return create_engine(db_url, connect_args=connect_args)
def test_marker_union_intersect_single_with_overlapping_constraints() -> None:
    """Intersecting a union marker with a single overlapping constraint keeps
    only the satisfiable parts (and collapses to the constraint when one
    disjunct is subsumed)."""
    union = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
    result = union.intersect(parse_marker('python_version <= "3.6"'))
    expected = 'sys_platform == "darwin" and python_version <= "3.6" or python_version < "3.4"'
    assert str(result) == expected

    union = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
    result = union.intersect(parse_marker('sys_platform == "darwin"'))
    assert str(result) == 'sys_platform == "darwin"'
def _load_dataparser(parser_file, data_value):
    """Load a user parser module and derive the compilation's string data.

    Returns a ``(compilation_data, string_data)`` tuple, where string_data
    comes (in priority order) from the --data option, then from
    ``Compilation.string_data``, then defaults to ''.

    NOTE(review): ``options`` and ``Compilation`` are module-level globals
    not visible in this chunk — confirm they are initialized before this
    is called.
    """
    try:
        compilation_data = parsing.register_fields(data_value)
        # Execute the parser file purely for its side effects (presumably it
        # registers itself against Compilation); the module object is discarded.
        specification = util.spec_from_file_location('', parser_file)
        specification.loader.exec_module(util.module_from_spec(specification))
        string_data = None
        if options.data:
            if (options.data[0] == '['):
                # Bracketed form "[a,b,c]" becomes "-<tok a>-<tok b>-<tok c>".
                assert (options.data[(- 1)] == ']')
                string_data = ('-' + '-'.join((_basic_token(tok) for tok in options.data[1:(- 1)].split(','))))
            else:
                string_data = (('-' + _basic_token(options.data)) if options.data else None)
        if (string_data is None):
            string_data = (Compilation.string_data if Compilation.string_data else '')
        return (compilation_data, string_data)
    except Exception:
        error(('It was not possible to correctly read the file: ' + parser_file))
        raise
class Head(nn.Module):
    """Two-layer MLP classification head with batch-norm on input and hidden
    activations.  forward() returns both logits and the hidden feature."""

    def __init__(self, input_dim, hidden_dim, n_class=8):
        super(Head, self).__init__()
        self._name = 'Head'
        # Module creation order preserved: bn0, fc_0, bn1, fc_1.
        self.bn0 = nn.BatchNorm1d(input_dim)
        self.fc_0 = nn.Linear(input_dim, hidden_dim)
        self.bn1 = nn.BatchNorm1d(hidden_dim)
        self.fc_1 = nn.Linear(hidden_dim, n_class)

    def forward(self, x):
        """Return {'output': class logits, 'feature': normalized hidden layer}."""
        normalized = self.bn0(x)
        hidden = self.bn1(F.relu(self.fc_0(normalized)))
        return {'output': self.fc_1(hidden), 'feature': hidden}
# NOTE(review): the leading `.parametrize(...)` is a syntax error — this was
# almost certainly `@pytest.mark.parametrize(...)` with the prefix lost in
# extraction; restore it from the original file.
.parametrize('url_str, source_url_str, resource_type', BUGGY_URLS)
def test_buggy_url_workaround_needed(ad_blocker, config_stub, easylist_easyprivacy, url_str, source_url_str, resource_type):
    """Each known-buggy URL must still be matched by the Brave adblock engine."""
    config_stub.val.content.blocking.adblock.lists = easylist_easyprivacy
    ad_blocker.adblock_update()
    resource_type_str = braveadblock._resource_type_to_string(resource_type)
    if (source_url_str is None):
        # The engine API wants a string; map "no source page" to empty string.
        source_url_str = ''
    result = ad_blocker._engine.check_network_urls(url=url_str, source_url=source_url_str, request_type=resource_type_str)
    assert result.matched
class ElmLexer(RegexLexer):
    """Pygments lexer for the Elm language (state-machine regex rules).

    NOTE(review): the ``url`` literal below is truncated (unterminated
    string) — this line is a syntax error as extracted and must be restored
    (presumably the Elm project homepage).
    """
    name = 'Elm'
    url = '
    aliases = ['elm']
    filenames = ['*.elm']
    mimetypes = ['text/x-elm']
    version_added = '2.1'
    # Lowercase/underscore-leading identifiers (may end in primes).
    validName = "[a-z_][a-zA-Z0-9_\\']*"
    specialName = '^main '
    builtinOps = ('~', '||', '|>', '|', '`', '^', '\\', "'", '>>', '>=', '>', '==', '=', '<~', '<|', '<=', '<<', '<-', '<', '::', ':', '/=', '//', '/', '..', '.', '->', '-', '++', '+', '*', '&&', '%')
    reservedWords = words(('alias', 'as', 'case', 'else', 'if', 'import', 'in', 'let', 'module', 'of', 'port', 'then', 'type', 'where'), suffix='\\b')
    # States: root, nested multi-line comments, strings, module imports,
    # numeric literals, and embedded [glsl| ... |] shader blocks.
    tokens = {'root': [('\\{-', Comment.Multiline, 'comment'), ('--.*', Comment.Single), ('\\s+', Whitespace), ('"', String, 'doublequote'), ('^(\\s*)(module)(\\s*)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'imports'), ('^(\\s*)(import)(\\s*)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'imports'), ('\\[glsl\\|.*', Name.Entity, 'shader'), (reservedWords, Keyword.Reserved), ('[A-Z][a-zA-Z0-9_]*', Keyword.Type), (specialName, Keyword.Reserved), (words(builtinOps, prefix='\\(', suffix='\\)'), Name.Function), (words(builtinOps), Name.Function), include('numbers'), (validName, Name.Variable), ('[,()\\[\\]{}]', Punctuation)], 'comment': [('-(?!\\})', Comment.Multiline), ('\\{-', Comment.Multiline, 'comment'), ('[^-}]', Comment.Multiline), ('-\\}', Comment.Multiline, '#pop')], 'doublequote': [('\\\\u[0-9a-fA-F]{4}', String.Escape), ('\\\\[nrfvb\\\\"]', String.Escape), ('[^"]', String), ('"', String, '#pop')], 'imports': [('\\w+(\\.\\w+)*', Name.Class, '#pop')], 'numbers': [('_?\\d+\\.(?=\\d+)', Number.Float), ('_?\\d+', Number.Integer)], 'shader': [('\\|(?!\\])', Name.Entity), ('\\|\\]', Name.Entity, '#pop'), ('(.*)(\\n)', bygroups(Name.Entity, Whitespace))]}
def mx_calculate_dist(anchor, positive):
    """Euclidean distances between `anchor` and `positive` rows (MXNet NDArray).

    Uses the expansion ||a - p||^2 = ||a||^2 + ||p||^2 - 2 a.p, with abs()
    and a small eps guarding against tiny negatives from floating-point error.

    NOTE(review): d1 is reshaped to (1, 1), so this only works when `anchor`
    holds a single row vector — confirm callers never pass a batch of anchors.
    """
    d1 = mx.ndarray.sum((anchor * anchor), axis=1).reshape(1, 1)
    d2 = mx.ndarray.sum((positive * positive), axis=1).reshape((- 1), 1)
    eps = 1e-12
    # Broadcast squared norms against every positive / the anchor row.
    a = d1.repeat(int(positive.shape[0]))
    b = mx.ndarray.transpose(d2.repeat(1))
    c = (2.0 * mx.ndarray.dot(anchor, mx.ndarray.transpose(positive)))
    return mx.ndarray.sqrt((mx.ndarray.abs(((a + b) - c)) + eps))
def get_args():
    """Parse the command-line options for TA signing (uuid, version, key, files)."""
    from argparse import ArgumentParser
    parser = ArgumentParser()
    add = parser.add_argument
    add('--uuid', required=True, type=uuid_parse, help='UUID of TA')
    add('--version', type=int_parse, default=0, help='Version')
    add('--key', required=True, help='Name of key file')
    # 'in' is a Python keyword, so the value is stored under dest='inf'.
    add('--in', required=True, dest='inf', help='Name of in file')
    add('--out', required=True, help='Name of out file')
    return parser.parse_args()
class ControlBuilder():
    """Builds one labeled tkinter control for a single configuration option.

    The widget type is chosen from the option's dtype and choices, a matching
    tk variable is created to back it, and label + widget are packed into a
    frame (with an optional tooltip).
    """

    def __init__(self, parent, title, dtype, default, selected_value=None, choices=None, is_radio=False, rounding=None, min_max=None, helptext=None, radio_columns=3, label_width=20, control_width=None):
        """Build frame, widget and tk variable for one option.

        Args:
            parent: container widget the control frame is packed into.
            title: option name; used for the label, tooltip and log messages.
            dtype: python type of the option (bool/int/float/str).
            default: value used when selected_value is None.
            selected_value: currently configured value, if any.
            choices: iterable of valid values (combobox or radio group).
            is_radio: render choices as radio buttons instead of a combobox.
            rounding / min_max: slider step and range (numeric dtypes only).
            helptext: tooltip text, or None for no tooltip.
            radio_columns: number of columns for radio layouts.
            label_width / control_width: fixed widths in characters.
        """
        logger.debug('Initializing %s: (parent: %s, title: %s, dtype: %s, default: %s, selected_value: %s, choices: %s, is_radio: %s, rounding: %s, min_max: %s, helptext: %s, radio_columns: %s, label_width: %s, control_width: %s)', self.__class__.__name__, parent, title, dtype, default, selected_value, choices, is_radio, rounding, min_max, helptext, radio_columns, label_width, control_width)
        self.title = title
        self.default = default
        self.frame = self.control_frame(parent, helptext)
        self.control = self.set_control(dtype, choices, is_radio)
        self.tk_var = self.set_tk_var(dtype, selected_value)
        self.build_control(choices, dtype, rounding, min_max, radio_columns, label_width, control_width)
        logger.debug('Initialized: %s', self.__class__.__name__)

    def control_frame(self, parent, helptext):
        """Create the container frame and attach a tooltip when helptext is given."""
        logger.debug('Build control frame')
        frame = ttk.Frame(parent)
        frame.pack(side=tk.TOP, fill=tk.X)
        if helptext is not None:
            helptext = self.format_helptext(helptext)
            Tooltip(frame, text=helptext, wraplength=720)
        logger.debug('Built control frame')
        return frame

    def format_helptext(self, helptext):
        """Prefix tooltip text with the option title and normalize markup."""
        logger.debug("Format control help: '%s'", self.title)
        helptext = helptext.replace('\n\t', '\n - ').replace('%%', '%')
        helptext = ((self.title + ' - ') + helptext)
        logger.debug("Formatted control help: (title: '%s', help: '%s'", self.title, helptext)
        return helptext

    def set_control(self, dtype, choices, is_radio):
        """Pick the ttk widget class matching this option's dtype/choices."""
        if (choices and is_radio):
            control = ttk.Radiobutton
        elif choices:
            control = ttk.Combobox
        elif (dtype == bool):
            control = ttk.Checkbutton
        elif (dtype in (int, float)):
            control = ttk.Scale
        else:
            control = ttk.Entry
        logger.debug("Setting control '%s' to %s", self.title, control)
        return control

    def set_tk_var(self, dtype, selected_value):
        """Create the tk variable for dtype and seed it with the current value."""
        logger.debug("Setting tk variable: (title: '%s', dtype: %s, selected_value: %s)", self.title, dtype, selected_value)
        if (dtype == bool):
            var = tk.BooleanVar
        elif (dtype == int):
            var = tk.IntVar
        elif (dtype == float):
            var = tk.DoubleVar
        else:
            var = tk.StringVar
        var = var(self.frame)
        val = (self.default if (selected_value is None) else selected_value)
        var.set(val)
        logger.debug("Set tk variable: (title: '%s', type: %s, value: '%s')", self.title, type(var), val)
        return var

    def build_control(self, choices, dtype, rounding, min_max, radio_columns, label_width, control_width):
        """Build the label and the widget for this option."""
        # Fix(review): corrected 'confog' typo in the debug message.
        logger.debug('Build config option control')
        self.build_control_label(label_width)
        self.build_one_control(choices, dtype, rounding, min_max, radio_columns, control_width)
        logger.debug('Built option control')

    def build_control_label(self, label_width):
        """Add the option's title label to the left of the control."""
        logger.debug("Build control label: (title: '%s', label_width: %s)", self.title, label_width)
        title = self.title.replace('_', ' ').title()
        lbl = ttk.Label(self.frame, text=title, width=label_width, anchor=tk.W)
        lbl.pack(padx=5, pady=5, side=tk.LEFT, anchor=tk.N)
        logger.debug("Built control label: '%s'", self.title)

    def build_one_control(self, choices, dtype, rounding, min_max, radio_columns, control_width):
        """Instantiate and pack the actual widget for the chosen control class."""
        logger.debug("Build control: (title: '%s', control: %s, choices: %s, dtype: %s, rounding: %s, min_max: %s: radio_columns: %s, control_width: %s)", self.title, self.control, choices, dtype, rounding, min_max, radio_columns, control_width)
        if (self.control == ttk.Scale):
            ctl = self.slider_control(dtype, rounding, min_max)
        elif (self.control == ttk.Radiobutton):
            ctl = self.radio_control(choices, radio_columns)
        else:
            ctl = self.control_to_optionsframe(choices)
        self.set_control_width(ctl, control_width)
        ctl.pack(padx=5, pady=5, fill=tk.X, expand=True)
        logger.debug("Built control: '%s'", self.title)

    @staticmethod
    def set_control_width(ctl, control_width):
        """Apply a fixed width to the control when one was requested.

        Fix(review): restored the ``@staticmethod`` decorator — the method
        had no ``self`` parameter yet was called as
        ``self.set_control_width(ctl, control_width)``, which raised
        ``TypeError`` (too many positional arguments).
        """
        if (control_width is not None):
            ctl.config(width=control_width)

    def radio_control(self, choices, columns):
        """Lay the choices out as radio buttons across `columns` columns."""
        logger.debug('Adding radio group: %s', self.title)
        ctl = ttk.Frame(self.frame)
        frames = list()
        for _ in range(columns):
            frame = ttk.Frame(ctl)
            frame.pack(padx=5, pady=5, fill=tk.X, expand=True, side=tk.LEFT, anchor=tk.N)
            frames.append(frame)
        for (idx, choice) in enumerate(choices):
            # Fill columns round-robin so the group stays balanced.
            frame_id = (idx % columns)
            radio = ttk.Radiobutton(frames[frame_id], text=choice.title(), value=choice, variable=self.tk_var)
            radio.pack(anchor=tk.W)
            logger.debug('Adding radio option %s to column %s', choice, frame_id)
        logger.debug("Added radio group: '%s'", self.title)
        return ctl

    def slider_control(self, dtype, rounding, min_max):
        """Create a slider plus a linked entry box for numeric options."""
        logger.debug("Add slider control to Options Frame: (title: '%s', dtype: %s, rounding: %s, min_max: %s)", self.title, dtype, rounding, min_max)
        tbox = ttk.Entry(self.frame, width=8, textvariable=self.tk_var, justify=tk.RIGHT)
        tbox.pack(padx=(0, 5), side=tk.RIGHT)
        # Bind defaults in the lambda so each slider keeps its own settings.
        ctl = self.control(self.frame, variable=self.tk_var, command=(lambda val, var=self.tk_var, dt=dtype, rn=rounding, mm=min_max: set_slider_rounding(val, var, dt, rn, mm)))
        rc_menu = ContextMenu(tbox)
        rc_menu.cm_bind()
        ctl['from_'] = min_max[0]
        ctl['to'] = min_max[1]
        logger.debug('Added slider control to Options Frame: %s', self.title)
        return ctl

    def control_to_optionsframe(self, choices):
        """Create an entry/checkbox/combobox widget with a right-click menu."""
        logger.debug("Add control to Options Frame: (title: '%s', control: %s, choices: %s)", self.title, self.control, choices)
        if (self.control == ttk.Checkbutton):
            ctl = self.control(self.frame, variable=self.tk_var, text=None)
        else:
            ctl = self.control(self.frame, textvariable=self.tk_var)
        rc_menu = ContextMenu(ctl)
        rc_menu.cm_bind()
        if choices:
            logger.debug('Adding combo choices: %s', choices)
            ctl['values'] = list(choices)
        logger.debug('Added control to Options Frame: %s', self.title)
        return ctl
def get_irradiance_poa(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, gcr, height, pitch, ghi, dhi, dni, albedo, model='isotropic', dni_extra=None, iam=1.0, npoints=100, vectorize=False):
    """Plane-of-array irradiance for an infinite-sheds row-to-row geometry.

    Combines sky diffuse, ground-reflected diffuse, and shading/IAM-adjusted
    beam irradiance.  Returns a dict of the poa_* components plus the shaded
    fraction (a DataFrame when the inputs are pandas Series).

    Raises:
        ValueError: when model='haydavies' and dni_extra is not supplied.
    """
    if (model == 'haydavies'):
        if (dni_extra is None):
            raise ValueError(f'must supply dni_extra for {model} model')
        # Hay-Davies: move the circumsolar part of the diffuse into the beam
        # term before running the isotropic machinery below.
        # Circumsolar on the horizontal plane (to subtract from DHI)...
        sky_diffuse_comps_horizontal = haydavies(0, 180, dhi, dni, dni_extra, solar_zenith, solar_azimuth, return_components=True)
        circumsolar_horizontal = sky_diffuse_comps_horizontal['circumsolar']
        # ...and normal to the sun (to add to DNI).
        sky_diffuse_comps_normal = haydavies(solar_zenith, solar_azimuth, dhi, dni, dni_extra, solar_zenith, solar_azimuth, return_components=True)
        circumsolar_normal = sky_diffuse_comps_normal['circumsolar']
        dhi = (dhi - circumsolar_horizontal)
        dni = (dni + circumsolar_normal)
    # Rows seen at less than ~5 degrees elevation are neglected in the
    # ground-to-sky view-factor integral.
    max_rows = np.ceil((height / (pitch * tand(5))))
    f_gnd_beam = utils._unshaded_ground_fraction(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, gcr)
    vf_gnd_sky = utils.vf_ground_sky_2d_integ(surface_tilt, gcr, height, pitch, max_rows, npoints, vectorize)
    f_x = _shaded_fraction(solar_zenith, solar_azimuth, surface_tilt, surface_azimuth, gcr)
    poa_sky_pv = _poa_sky_diffuse_pv(dhi, gcr, surface_tilt)
    ground_diffuse = (ghi * albedo)
    # NOTE(review): dhi/ghi still evaluates at ghi == 0 (possible runtime
    # warning); the np.where below only fixes the resulting value.
    diffuse_fraction = np.clip((dhi / ghi), 0.0, 1.0)
    diffuse_fraction = np.where((ghi < 0.0001), 0.0, diffuse_fraction)
    ground_diffuse = _poa_ground_shadows(ground_diffuse, f_gnd_beam, diffuse_fraction, vf_gnd_sky)
    poa_gnd_pv = _poa_ground_pv(ground_diffuse, gcr, surface_tilt)
    poa_diffuse = (poa_gnd_pv + poa_sky_pv)
    poa_beam = np.atleast_1d(beam_component(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni))
    # Direct component reduced by the shaded row fraction and the IAM loss.
    poa_direct = ((poa_beam * (1 - f_x)) * iam)
    poa_global = (poa_direct + poa_diffuse)
    output = {'poa_global': poa_global, 'poa_direct': poa_direct, 'poa_diffuse': poa_diffuse, 'poa_ground_diffuse': poa_gnd_pv, 'poa_sky_diffuse': poa_sky_pv, 'shaded_fraction': f_x}
    if isinstance(poa_global, pd.Series):
        output = pd.DataFrame(output)
    return output
class MockProcResult():
    """Fake process-search result for tests."""

    def get_details(self):
        """Return a dict of mock process fields sharing one random suffix."""
        suffix = random.randint(1, 1000)
        return {
            'device_name': f'workstation{suffix}',
            'process_username': [f'username{suffix}'],
            'process_name': f'proc{suffix}',
            'process_cmdline': [f'cmdline{suffix}'],
            'device_timestamp': f'ts{suffix}',
            'process_guid': f'guid{suffix}',
        }
def onconditional_peerdir(unit, *args):
    """Conditionally add a PEERDIR driven by USE_<NAME>/MAKE_<NAME>_FROM_SOURCE.

    args[0] is the dictionary name, args[1] the peer path root.  Unset flags
    are recorded on the unit with defaults 'yes' (use) and 'no' (build from
    source).  When use is enabled, peers either the 'source' or 'generated'
    subdirectory depending on the build flag.
    """
    flag_name = args[0].upper()
    use_flag = 'USE_' + flag_name
    build_flag = 'MAKE_' + flag_name + '_FROM_SOURCE'
    # Read both flags before writing any defaults back.
    use_value = unit.get(use_flag)
    build_value = unit.get(build_flag)
    if use_value is None:
        unit.set([use_flag, 'yes'])
        use_value = 'yes'
    if build_value is None:
        unit.set([build_flag, 'no'])
        build_value = 'no'
    if use_value == 'yes':
        subdir = 'source' if build_value == 'yes' else 'generated'
        unit.onpeerdir([join_intl_paths(args[1], subdir)])
def test__getting_started__custom_objectives():
    """Smoke-test that the custom-objectives example OCP builds without error."""
    from bioptim.examples.getting_started import custom_objectives as ocp_module
    example_dir = os.path.dirname(ocp_module.__file__)
    model_path = example_dir + '/models/cube.bioMod'
    ocp_module.prepare_ocp(
        biorbd_model_path=model_path,
        phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE,
        expand_dynamics=False,
    )
def ndarray_to_file(np_array: np.ndarray, path: str, file_system: AbstractFileSystem, block_path_provider: BlockWritePathProvider, content_type: str=ContentType.PARQUET.value, **kwargs) -> None:
    """Write a NumPy array to a file as a single-column Arrow table.

    The array is split along its first axis so each row becomes one entry in
    the table's 'data' column; writing is delegated to pa_utils.table_to_file.
    """
    # list(np_array) iterates the first axis — same result as the former
    # `[array for array in np_array]` comprehension, without the manual loop.
    pa_utils.table_to_file(pa.table({'data': list(np_array)}), path, file_system, block_path_provider, content_type, **kwargs)
class GIFEncoder(nn.Module):
    """Encodes a fixed-length stack of GIF frames into one feature vector.

    Each frame is embedded by an EfficientNet backbone; the per-frame
    features are concatenated and reduced back to `image_feature_size`
    through a linear layer.
    """

    def __init__(self, image_feature_size=512, n_frames=4):
        super().__init__()
        self._n_frames = n_frames
        self._image_feature_size = image_feature_size
        # Submodule creation order preserved: reduce layer, then backbone.
        self.image_seq_reduce_layer = nn.Linear(self._image_feature_size * self._n_frames, self._image_feature_size)
        self.model = EfficientNetModel(output_dim=image_feature_size)
        nn.init.xavier_normal_(self.image_seq_reduce_layer.weight)

    def forward(self, gif_inputs):
        """Map (batch, frames, C, H, W) inputs to (batch, feature_size) features."""
        batch, frames, channels, height, width = gif_inputs.shape
        # Fold frames into the batch so the backbone sees plain images.
        flat_frames = gif_inputs.view(batch * frames, channels, height, width)
        per_frame = self.model(flat_frames)
        # Unfold, concatenate per-frame features, and project back down.
        stacked = per_frame.view(batch, frames, self._image_feature_size).flatten(start_dim=1)
        fused = self.image_seq_reduce_layer(stacked)
        assert fused.size() == (batch, self._image_feature_size)
        return fused
def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
    """Return the gradient-clipping callable selected by cfg.CLIP_TYPE.

    The config is deep-copied so the returned closures capture a snapshot of
    CLIP_VALUE/NORM_TYPE, immune to later mutation of the caller's cfg.
    """
    cfg = copy.deepcopy(cfg)

    def _clip_by_value(p: _GradientClipperInput):
        torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)

    def _clip_by_norm(p: _GradientClipperInput):
        torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)

    clippers = {
        GradientClipType.VALUE: _clip_by_value,
        GradientClipType.NORM: _clip_by_norm,
    }
    return clippers[GradientClipType(cfg.CLIP_TYPE)]
class _LazyAutoMapping(OrderedDict):
def __init__(self, config_mapping, model_mapping):
self._config_mapping = config_mapping
self._reverse_config_mapping = {v: k for (k, v) in config_mapping.items()}
self._model_mapping = model_mapping
self._extra_content = {}
self._modules = {}
def __getitem__(self, key):
if (key in self._extra_content):
return self._extra_content[key]
model_type = self._reverse_config_mapping[key.__name__]
if (model_type not in self._model_mapping):
raise KeyError(key)
model_name = self._model_mapping[model_type]
return self._load_attr_from_module(model_type, model_name)
def _load_attr_from_module(self, model_type, attr):
module_name = model_type_to_module_name(model_type)
if (module_name not in self._modules):
self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models')
return getattribute_from_module(self._modules[module_name], attr)
def keys(self):
mapping_keys = [self._load_attr_from_module(key, name) for (key, name) in self._config_mapping.items() if (key in self._model_mapping.keys())]
return (mapping_keys + list(self._extra_content.keys()))
def get(self, key, default):
try:
return self.__getitem__(key)
except KeyError:
return default
def __bool__(self):
return bool(self.keys())
def values(self):
mapping_values = [self._load_attr_from_module(key, name) for (key, name) in self._model_mapping.items() if (key in self._config_mapping.keys())]
return (mapping_values + list(self._extra_content.values()))
def items(self):
mapping_items = [(self._load_attr_from_module(key, self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key])) for key in self._model_mapping.keys() if (key in self._config_mapping.keys())]
return (mapping_items + list(self._extra_content.items()))
def __iter__(self):
return iter(self.keys())
def __contains__(self, item):
if (item in self._extra_content):
return True
if ((not hasattr(item, '__name__')) or (item.__name__ not in self._reverse_config_mapping)):
return False
model_type = self._reverse_config_mapping[item.__name__]
return (model_type in self._model_mapping)
def register(self, key, value):
if (hasattr(key, '__name__') and (key.__name__ in self._reverse_config_mapping)):
model_type = self._reverse_config_mapping[key.__name__]
if (model_type in self._model_mapping.keys()):
raise ValueError(f"'{key}' is already used by a Transformers model.")
self._extra_content[key] = value |
# NOTE(review): this bare `_db` is likely the remnant of a decorator such as
# `@pytest.mark.django_db` (or a db-fixture alias) stripped in extraction —
# restore from the original test module.
_db
def test_slug_is_not_regenerated_when_changing_title(submission_factory):
    """The slug derives from the initial title and must stay stable on rename."""
    submission = submission_factory(title=LazyI18nString({'en': 'hello', 'it': 'hell'}))
    assert (submission.slug == 'hello')
    submission.title = LazyI18nString({'en': 'ciao', 'it': 'cia'})
    submission.save()
    submission.refresh_from_db()
    # Slug still reflects the original title after save + reload.
    assert (submission.slug == 'hello')
def validate_entangler_map(entangler_map, num_qubits, allow_double_entanglement=False):
    """Validate and normalize an entangler map.

    Args:
        entangler_map: list of ``[source, target]`` qubit index pairs.
        num_qubits: total qubit count; every index must lie in [0, num_qubits).
        allow_double_entanglement: when False, reject maps that contain both
            ``[src, targ]`` and ``[targ, src]`` (self-pairs ``[q, q]`` match
            their own reversal and are rejected too).

    Returns:
        list: the map with every index coerced to int.

    Raises:
        TypeError: if the map is not a list of lists (a dict signals use of
            the retired old-format API).
        ValueError: if an index is out of range or a pair is cross-entangled.
    """
    if isinstance(entangler_map, dict):
        # The old API accepted a dict; fail loudly to steer callers over.
        raise TypeError('The type of entangler map is changed to list of list.')
    if not isinstance(entangler_map, list):
        raise TypeError("Entangler map type 'list' expected")
    for src_to_targ in entangler_map:
        if not isinstance(src_to_targ, list):
            raise TypeError('Entangle index list expected but got {}'.format(type(src_to_targ)))
    # Fix: removed the dead `ret_map = []` assignment that was immediately
    # overwritten by this comprehension.
    ret_map = [[int(src), int(targ)] for src, targ in entangler_map]
    for src, targ in ret_map:
        if src < 0 or src >= num_qubits:
            raise ValueError('Qubit entangle source value {} invalid for {} qubits'.format(src, num_qubits))
        if targ < 0 or targ >= num_qubits:
            raise ValueError('Qubit entangle target value {} invalid for {} qubits'.format(targ, num_qubits))
        if not allow_double_entanglement and [targ, src] in ret_map:
            raise ValueError('Qubit {} and {} cross-entangled.'.format(src, targ))
    return ret_map
def _list_append_impl(ctx: CallContext) -> ImplReturn:
    """Type-checker impl for ``list.append``: refine the inferred type of self.

    For a concrete SequenceValue, narrows the variable (via a constraint) to
    a new sequence literal extended with the appended element; for a
    GenericValue it only checks that the element fits the list's type
    parameter.  The call itself always evaluates to ``None``, matching
    ``list.append``.
    """
    lst = replace_known_sequence_value(ctx.vars['self'])
    element = ctx.vars['object']
    if isinstance(lst, SequenceValue):
        varname = ctx.visitor.varname_for_self_constraint(ctx.node)
        if (varname is not None):
            # Unless the call raises, `self` is known to be the extended
            # literal; (False, element) marks a single (non-starred) member.
            no_return_unless = Constraint(varname, ConstraintType.is_value_object, True, SequenceValue.make_or_known(list, (*lst.members, (False, element))))
            return ImplReturn(KnownValue(None), no_return_unless=no_return_unless)
    if isinstance(lst, GenericValue):
        return _check_generic_container('list.append', 'object', ctx.vars['self'], lst, element, ctx, list)
    return ImplReturn(KnownValue(None))
class DataAugmentation(object):
    """Offline augmentation for paired image/label NIfTI volumes.

    Finds image files under ``data_search_path``, optionally builds a TF1
    graph for random affine augmentation, and writes augmented volumes back
    out as int16 NIfTI files (affine pairs plus a random-filtered copy).
    """

    def __init__(self, data_search_path, image_suffix, label_suffix, **kwargs):
        """Collect image file names; remaining kwargs configure augmentation."""
        self.image_suffix = image_suffix
        self.label_suffix = label_suffix
        self.image_names = strsort(self._find_image_names(data_search_path))
        self.kwargs = kwargs
        # Popped here so the remaining kwargs can be forwarded to the graph.
        self.affine_augment = kwargs.pop('affine_augment', False)

    def _init(self):
        """Build placeholders and the affine-augmentation graph (TF1 style)."""
        data_size = self.kwargs.pop('data_size', (112, 96, 112))
        self.data = {'image': tf.placeholder(tf.float32, [1, data_size[0], data_size[1], data_size[2], 1]), 'label': tf.placeholder(tf.float32, [1, data_size[0], data_size[1], data_size[2], 1])}
        self.augmented_data = self._get_augmented_data()

    def _find_image_names(self, data_search_path):
        """Glob ``data_search_path`` and keep only image files (by suffix)."""
        names = glob.glob(data_search_path)
        return [name for name in names if (self.image_suffix in name)]

    def _get_augmented_data(self):
        """Apply one random affine transform to image (linear interpolation)
        and label (nearest, to keep labels integral)."""
        augmented_data = dict(zip(['image', 'label'], layers_2d.random_affine_augment([self.data['image'], self.data['label']], interp_methods=['linear', 'nearest'], **self.kwargs)))
        return augmented_data

    def load_data_numpy(self, name, dtype=np.float32, expand=True):
        """Load a NIfTI volume; optionally add batch and channel dims.

        Returns (array, affine, header).
        """
        img = nib.load(name)
        image = np.asarray(img.get_fdata(), dtype)
        if expand:
            image = np.expand_dims(image, 0)
            image = np.expand_dims(image, (- 1))
        return (image, img.affine, img.header)

    @staticmethod
    def save_into_nii(array, save_path, save_name, **kwargs):
        """Squeeze batch/channel dims and save `array` as an int16 NIfTI file.

        Fix(review): restored the ``@staticmethod`` decorator — the method
        had no ``self`` parameter but was invoked as
        ``self.save_into_nii(...)``, which raised ``TypeError``.
        """
        image = np.squeeze(array, (0, (- 1)))
        affine = kwargs.pop('affine', np.eye(4))
        header = kwargs.pop('header', None)
        img = nib.Nifti1Image(image.astype(np.int16), affine=affine, header=header)
        nib.save(img, os.path.join(save_path, save_name))

    def augment(self, num_samples=1, save_path='./augmented_data'):
        """Write ``num_samples`` augmented copies of every image/label pair."""
        if (not os.path.exists(save_path)):
            logging.info(("Allocating '%s'" % os.path.abspath(save_path)))
            os.makedirs(save_path)
        # NOTE(review): `config` is not defined in this chunk — presumably a
        # module-level tf.ConfigProto; confirm it exists before running.
        with tf.Session(config=config) as sess:
            if self.affine_augment:
                self._init()
            for image_name in self.image_names:
                label_name = image_name.replace(self.image_suffix, self.label_suffix)
                (image, affine, header) = self.load_data_numpy(image_name)
                label = self.load_data_numpy(label_name)[0]
                logging.info(('Augmenting data: %s' % os.path.basename(image_name)))
                for i in range(num_samples):
                    if self.affine_augment:
                        (image_affine, label_affine) = sess.run((self.augmented_data['image'], self.augmented_data['label']), feed_dict={self.data['image']: image, self.data['label']: label})
                        self.save_into_nii(image_affine, save_path, save_name=(('aug%s_affine_' % i) + os.path.basename(image_name)), affine=affine, header=header)
                        self.save_into_nii(label_affine, save_path, save_name=(('aug%s_affine_' % i) + os.path.basename(label_name)), affine=affine, header=header)
                    self.save_into_nii(randomFilter(image), save_path, save_name=(('aug%s_random_' % i) + os.path.basename(image_name)), affine=affine, header=header)
        logging.info('Augmentation Finished!')
class FixedCategorical(torch.distributions.Categorical):
    """Categorical distribution whose tensors carry a trailing action
    dimension of size 1, matching the (batch, 1) action convention used by
    policy-gradient code."""

    def sample(self):
        # Draw from the base distribution, then append the action dim.
        drawn = super().sample()
        return drawn.unsqueeze(-1)

    def log_probs(self, actions):
        """Summed log-probability of *actions*, shape (batch, 1)."""
        flat_actions = actions.squeeze(-1)
        per_action = super().log_prob(flat_actions)
        summed = per_action.view(actions.size(0), -1).sum(-1)
        return summed.unsqueeze(-1)

    def mode(self):
        """Most probable action per batch element, shape (batch, 1)."""
        return self.probs.argmax(dim=-1, keepdim=True)
def help():
    """Print this package's description, then delegate to every submodule's
    ``help()`` in execution order, unloading each module afterwards so a
    later import re-executes it fresh."""
    package_dir = os.path.dirname(__file__)
    print(os.path.basename(package_dir) + ' : ' + __description__)
    for module_name in order_of_module_execution:
        qualified = __package__ + '.' + module_name
        submodule = importlib.import_module(qualified)
        submodule.help()
        # Drop the cached module so state does not leak between help calls.
        del sys.modules[qualified]
def do_evaluation_atac_from_atac(spliced_net, sc_dual_full_dataset, gene_names: str, atac_names: str, outdir: str, ext: str, marker_genes: List[str], prefix: str=''):
    """Run ATAC->ATAC inference, write the predictions to an .h5ad file, and
    (when raw data and an image extension are available) plot an AUROC curve
    against the raw ATAC matrix."""
    logging.info('Inferring ATAC from ATAC')
    predictions = spliced_net.translate_2_to_2(sc_dual_full_dataset)
    obs_copy = sc_dual_full_dataset.dataset_y.data_raw.obs.copy(deep=True)
    predictions_anndata = sc.AnnData(predictions, obs=obs_copy)
    predictions_anndata.var_names = atac_names
    logging.info('Writing ATAC from ATAC')
    predictions_anndata.write(os.path.join(outdir, f'{prefix}_atac_atac_adata.h5ad'.strip('_')))
    can_plot = hasattr(sc_dual_full_dataset.dataset_y, 'data_raw') and ext is not None
    if can_plot:
        logging.info('Plotting ATAC from ATAC')
        truth_flat = utils.ensure_arr(sc_dual_full_dataset.dataset_y.data_raw.X).flatten()
        preds_flat = utils.ensure_arr(predictions).flatten()
        plot_utils.plot_auroc(
            truth_flat,
            preds_flat,
            title_prefix=f'{DATASET_NAME} ATAC > ATAC'.strip(),
            fname=os.path.join(outdir, f'{prefix}_atac_atac_auroc.{ext}'.strip('_')),
        )
    # Free the large prediction arrays eagerly.
    del predictions
    del predictions_anndata
class TestMissinGenericParameters(TestNameCheckVisitorBase):
    """Checks the visitor's handling of bare/parameterized generic annotations.

    NOTE(review): the bare ``_passes()`` / ``_before(...)`` calls below look
    like decorators (e.g. ``@assert_passes()``, ``@skip_before(...)``) whose
    ``@`` prefixes were lost in extraction — confirm against the upstream
    test suite before running this file.
    """
    _passes()
    def test(self):
        from typing import List, Set, Dict

        # Mixed bare (list, List) and parameterized (List[int], ...) usages.
        def capybara(x: list, y: List, z: List[int], a: Set[list], b: Dict[(str, list)]) -> set:
            return {1}
    _before((3, 9))
    _passes()
    def test_with_pep_585(self):
        # PEP 585 builtin generics (set[...], dict[...]).
        def capybara(a: set[list], b: dict[(str, list)]) -> None:
            pass
    _before((3, 10))
    _passes()
    def test_union_or(self):
        # PEP 604 union syntax with bare generic members.
        def capybara(x: (list | int), y: (str | list), z: ((float | bool) | set)) -> None:
            pass
class Relationships(Dict[(str, '_Relationship')]):
def __init__(self, baseURI: str):
super(Relationships, self).__init__()
self._baseURI = baseURI
self._target_parts_by_rId: Dict[(str, Any)] = {}
def add_relationship(self, reltype: str, target: (str | Any), rId: str, is_external: bool=False) -> '_Relationship':
rel = _Relationship(rId, reltype, target, self._baseURI, is_external)
self[rId] = rel
if (not is_external):
self._target_parts_by_rId[rId] = target
return rel
def get_or_add(self, reltype, target_part):
rel = self._get_matching(reltype, target_part)
if (rel is None):
rId = self._next_rId
rel = self.add_relationship(reltype, target_part, rId)
return rel
def get_or_add_ext_rel(self, reltype, target_ref):
rel = self._get_matching(reltype, target_ref, is_external=True)
if (rel is None):
rId = self._next_rId
rel = self.add_relationship(reltype, target_ref, rId, is_external=True)
return rel.rId
def part_with_reltype(self, reltype):
rel = self._get_rel_of_type(reltype)
return rel.target_part
def related_parts(self):
return self._target_parts_by_rId
def xml(self):
rels_elm = CT_Relationships.new()
for rel in self.values():
rels_elm.add_rel(rel.rId, rel.reltype, rel.target_ref, rel.is_external)
return rels_elm.xml
def _get_matching(self, reltype, target, is_external=False):
def matches(rel, reltype, target, is_external):
if (rel.reltype != reltype):
return False
if (rel.is_external != is_external):
return False
rel_target = (rel.target_ref if rel.is_external else rel.target_part)
if (rel_target != target):
return False
return True
for rel in self.values():
if matches(rel, reltype, target, is_external):
return rel
return None
def _get_rel_of_type(self, reltype):
matching = [rel for rel in self.values() if (rel.reltype == reltype)]
if (len(matching) == 0):
tmpl = "no relationship of type '%s' in collection"
raise KeyError((tmpl % reltype))
if (len(matching) > 1):
tmpl = "multiple relationships of type '%s' in collection"
raise ValueError((tmpl % reltype))
return matching[0]
def _next_rId(self):
for n in range(1, (len(self) + 2)):
rId_candidate = ('rId%d' % n)
if (rId_candidate not in self):
return rId_candidate |
class ZeroShotGenerator():
    """Generates reward-function code from a natural-language instruction via
    a single (zero-shot) LLM chain call."""

    def __init__(self, info_prompt: PromptTemplate, model_name='gpt-4', **kwargs) -> None:
        # Route to the backend matching the model name: OpenAI chat models or
        # locally hosted HuggingFace models.
        if (model_name in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0613', 'gpt-4', 'gpt-4-0314', 'gpt-4-0613']):
            self.chain = LLMChain(prompt=info_prompt, llm=ChatOpenAI(model_name=model_name, **kwargs))
        elif (model_name in ['codellama_34b', 'llama_2_70b']):
            self.chain = LLMChain(prompt=info_prompt, llm=HuggingFaceLLM(name=model_name, **kwargs))
        else:
            raise ValueError(f'Model name {model_name} not supported!')

    def generate_code(self, instruction: str, map_dict: dict) -> Tuple[(str, str)]:
        """Query the LLM until the response contains a fenced code block.

        Returns ``(general_code, specific_code)`` where the specific form has
        placeholder names rewritten via *map_dict*.

        NOTE(review): this retries forever if the model never emits a code
        fence — consider adding a retry cap.
        """
        code_content = ''
        while True:
            response = self.chain.run(**{'instruction': instruction})
            # Prefer a ```python fence; otherwise accept a bare ``` fence.
            pattern = ('\\```python\\n(.+?)\\n```' if ('```python' in response) else '\\```\\n(.+?)\\n```')
            match = re.search(pattern, response, re.DOTALL)
            if match:
                code_content = match.group(1)
                break
            else:
                print(response)
                # Brief back-off before re-querying the model.
                time.sleep(5)
                print('No match!')
                continue
        general_code = code_content
        converter = RewardFunctionConverter(map_dict)
        specific_code = converter.general_to_specific(general_code)
        return (general_code, specific_code)
class Loss(metrics.Loss):
    """Running loss metric weighted by target length.

    Each batch's scalar average loss is re-weighted by ``sum(tgt_len)``
    (presumably the number of target tokens — confirm against the caller),
    so the final value is a per-token rather than per-batch average.
    """

    def update(self, output: Dict) -> None:
        tgt_len = output['tgt_len']
        average_loss = self._loss_fn(output).detach()
        # Guard: the configured loss_fn must return a 0-dim (scalar) tensor.
        if (len(average_loss.shape) != 0):
            raise ValueError('loss_fn did not return the average loss.')
        # Weight the batch-average loss by the total target length.
        n = torch.sum(tgt_len)
        self._sum += (average_loss.to(self._device) * n)
        self._num_examples += n
    _grad()
    # NOTE(review): the bare `_grad()` above looks like a decorator whose `@`
    # prefix (e.g. `@torch.no_grad()`) was lost in extraction — confirm.
    def iteration_completed(self, engine: Engine) -> None:
        output = self._output_transform(engine.state.output)
        self.update(output)
def test_dh_parameter_numbers_equality():
    """DHParameterNumbers equality depends on p, g and q, and comparing with
    an unrelated type is unequal rather than an error."""
    make = dh.DHParameterNumbers
    # Identical parameters compare equal, with and without q.
    assert make(P_1536, 2) == make(P_1536, 2)
    assert make(P_1536, 7, 12345) == make(P_1536, 7, 12345)
    # Changing the prime, q, or generator breaks equality.
    assert make(P_1536 + 2, 2) != make(P_1536, 2)
    assert make(P_1536, 2, 123) != make(P_1536, 2, 456)
    assert make(P_1536, 5) != make(P_1536, 2)
    # Foreign types are simply unequal.
    assert make(P_1536, 2) != object()
class InitWeights_He(object):
    """Weight initializer: He (Kaiming) normal init for conv weights and zero
    biases. Pass an instance to ``nn.Module.apply``."""

    def __init__(self, neg_slope=0.01):
        # Negative slope parameter forwarded to kaiming_normal_ (its `a`).
        self.neg_slope = neg_slope

    def __call__(self, module):
        conv_types = (nn.Conv3d, nn.Conv2d, nn.ConvTranspose2d, nn.ConvTranspose3d)
        if isinstance(module, conv_types):
            # kaiming_normal_ mutates in place and returns the same parameter,
            # so the reassignment keeps the original Parameter object.
            module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
class DAQmx():
    """Thin wrapper around NI-DAQmx analog input/output tasks.

    Holds one AI task and one AO task handle; every driver return code is
    routed through ``CHK``, which raises on both errors and warnings.
    """

    def __init__(self, name, *args, **kwargs):
        super().__init__()
        self.resourceName = name  # device name, e.g. 'Dev1'
        self.numChannels = 0
        self.numSamples = 0
        self.dataBuffer = 0
        self.taskHandleAI = TaskHandle(0)
        self.taskHandleAO = TaskHandle(0)
        self.terminated = False

    def setup_analog_voltage_in(self, channelList, numSamples, sampleRate=10000, scale=3.0):
        """Configure finite-sample voltage acquisition on the given AI channels.

        Bug fix: the physical-channel string previously used the enumeration
        index (``num``) instead of the channel number from ``channelList``,
        so any non-contiguous channel selection was silently wrong.
        """
        resourceString = ', '.join(
            self.resourceName + '/ai' + str(channel) for channel in channelList)
        self.numChannels = len(channelList)
        self.numSamples = numSamples
        self.taskHandleAI = TaskHandle(0)
        self.dataBuffer = np.zeros((self.numSamples, self.numChannels), dtype=np.float64)
        self.CHK(nidaq.DAQmxCreateTask('', ctypes.byref(self.taskHandleAI)))
        self.CHK(nidaq.DAQmxCreateAIVoltageChan(self.taskHandleAI, resourceString, '', DAQmx_Val_Cfg_Default, float64(-scale), float64(scale), DAQmx_Val_Volts, None))
        self.CHK(nidaq.DAQmxCfgSampClkTiming(self.taskHandleAI, '', float64(sampleRate), DAQmx_Val_Rising, DAQmx_Val_FiniteSamps, uInt64(self.numSamples)))

    def setup_analog_voltage_out(self, channel=0):
        """Configure a single AO channel with a fixed +/-10 V range."""
        resourceString = self.resourceName + '/ao' + str(channel)
        self.taskHandleAO = TaskHandle(0)
        self.CHK(nidaq.DAQmxCreateTask('', ctypes.byref(self.taskHandleAO)))
        self.CHK(nidaq.DAQmxCreateAOVoltageChan(self.taskHandleAO, resourceString, '', float64(-10.0), float64(10.0), DAQmx_Val_Volts, None))

    def setup_analog_voltage_out_multiple_channels(self, channelList):
        """Configure several AO channels (+/-10 V range) in a single task.

        Bug fix: as in ``setup_analog_voltage_in``, the channel number from
        ``channelList`` is now used instead of the loop index.
        """
        resourceString = ', '.join(
            self.resourceName + '/ao' + str(channel) for channel in channelList)
        self.taskHandleAO = TaskHandle(0)
        self.CHK(nidaq.DAQmxCreateTask('', ctypes.byref(self.taskHandleAO)))
        self.CHK(nidaq.DAQmxCreateAOVoltageChan(self.taskHandleAO, resourceString, '', float64(-10.0), float64(10.0), DAQmx_Val_Volts, None))

    def write_analog_voltage(self, value):
        """Write one scalar voltage; timeout of -1 waits indefinitely."""
        timeout = -1.0
        self.CHK(nidaq.DAQmxWriteAnalogScalarF64(self.taskHandleAO, 1, float64(timeout), float64(value), None))

    def write_analog_voltage_multiple_channels(self, values):
        """Write one sample per configured AO channel (grouped by channel)."""
        timeout = -1.0
        self.CHK(nidaq.DAQmxWriteAnalogF64(self.taskHandleAO, 1, 1, float64(timeout), DAQmx_Val_GroupByChannel, np.array(values).ctypes.data, None, None))

    def acquire(self):
        """Block until numSamples are read; return shape (channels, samples)."""
        read = int32()
        self.CHK(nidaq.DAQmxReadAnalogF64(self.taskHandleAI, self.numSamples, float64(10.0), DAQmx_Val_GroupByChannel, self.dataBuffer.ctypes.data, (self.numChannels * self.numSamples), ctypes.byref(read), None))
        return self.dataBuffer.transpose()

    def acquire_average(self):
        """Per-channel mean of one acquisition; zeros once terminated."""
        if not self.terminated:
            return np.mean(self.acquire(), axis=1)
        # NOTE(review): the fixed length 3 assumes a three-channel setup.
        return np.zeros(3)

    def stop(self):
        """Stop and clear whichever tasks were created."""
        if self.taskHandleAI.value != 0:
            nidaq.DAQmxStopTask(self.taskHandleAI)
            nidaq.DAQmxClearTask(self.taskHandleAI)
        if self.taskHandleAO.value != 0:
            nidaq.DAQmxStopTask(self.taskHandleAO)
            nidaq.DAQmxClearTask(self.taskHandleAO)

    def CHK(self, err):
        """Raise RuntimeError for any non-zero NI-DAQmx status code.

        Bug fix: ``create_string_buffer`` was given a str ('\\x00' * n),
        which raises TypeError on Python 3; it now receives the buffer size.
        """
        if err < 0:
            buf_size = 100
            buf = ctypes.create_string_buffer(buf_size)
            nidaq.DAQmxGetErrorString(err, ctypes.byref(buf), buf_size)
            raise RuntimeError('nidaq call failed with error %d: %s' % (err, repr(buf.value)))
        if err > 0:
            buf_size = 100
            buf = ctypes.create_string_buffer(buf_size)
            nidaq.DAQmxGetErrorString(err, ctypes.byref(buf), buf_size)
            raise RuntimeError('nidaq generated warning %d: %s' % (err, repr(buf.value)))

    def shutdown(self):
        """Stop hardware tasks and mark this instance terminated.

        NOTE(review): relies on a cooperating base class in the real MRO;
        plain ``object`` has no ``shutdown()``.
        """
        self.stop()
        self.terminated = True
        super().shutdown()
def get_scores(task, mols):
    """Score molecules with the chemprop model for *task*, loading and caching
    the model in the module-level ``models`` dict on first use.

    Raises NotImplementedError for an unknown task name.
    """
    model = models.get(task)
    if model is None:
        # Map each supported task to its checkpoint directory.
        checkpoint_dirs = {
            'chemprop_ecoli': 'chemprop_ckpt/ecoli',
            'chemprop_sars': 'chemprop_ckpt/sars_balanced',
            'chemprop_sars_cov_2': 'chemprop_ckpt/sars_cov_2',
        }
        if task not in checkpoint_dirs:
            raise NotImplementedError
        model = chemprop_model(os.path.join(ROOT_DIR, checkpoint_dirs[task]))
        models[task] = model
    smiles = [Chem.MolToSmiles(mol) for mol in mols]
    return model(smiles).tolist()
def test_skip():
    """With an all-inclusive build_config, skip_config removes pp36-*,
    cp3?-manylinux_i686, cp36 on Windows, and every 32-bit Windows build."""
    build_selector = BuildSelector(build_config='*', skip_config='pp36-* cp3?-manylinux_i686 cp36-win* *-win32')
    accepted = [
        'pp37-manylinux_x86_64',
        'pp38-manylinux_x86_64',
        'pp37-manylinux_i686',
        'pp38-manylinux_i686',
        'cp36-manylinux_x86_64',
        'cp37-manylinux_x86_64',
        'pp37-macosx_10_6_intel',
        'cp36-macosx_10_6_intel',
        'cp37-macosx_10_6_intel',
        'cp37-win_amd64',
    ]
    rejected = [
        'pp36-manylinux_x86_64',
        'cp36-manylinux_i686',
        'cp37-manylinux_i686',
        'pp36-macosx_10_6_intel',
        'cp36-win32',
        'cp37-win32',
        'cp36-win_amd64',
    ]
    for identifier in accepted:
        assert build_selector(identifier)
    for identifier in rejected:
        assert not build_selector(identifier)
class SimpleTransparencyPass(BasePass):
    """Render pass that blends transparent fragments with premultiplied-alpha
    'over' blending, testing but never writing depth, and producing no pick
    information."""
    # NOTE(review): mask value 2 presumably selects transparent objects —
    # confirm against BasePass.
    render_mask = 2
    # This pass does not contribute to the pick buffer.
    write_pick = False

    def get_color_descriptors(self, blender):
        # (one, one-minus-src-alpha) on both color and alpha is the 'over'
        # operator for premultiplied colors.
        (bf, bo) = (wgpu.BlendFactor, wgpu.BlendOperation)
        return [{'format': blender.color_format, 'blend': {'alpha': (bf.one, bf.one_minus_src_alpha, bo.add), 'color': (bf.one, bf.one_minus_src_alpha, bo.add)}, 'write_mask': wgpu.ColorWrite.ALL}]

    def get_color_attachments(self, blender):
        # Clear at most once per frame: consume the blender's pending flag.
        color_load_op = wgpu.LoadOp.load
        if blender.color_clear:
            blender.color_clear = False
            color_load_op = wgpu.LoadOp.clear
        return [{'view': blender.color_view, 'resolve_target': None, 'load_op': color_load_op, 'store_op': wgpu.StoreOp.store}]

    def get_depth_descriptor(self, blender):
        # Depth is compared (less) but not written by transparent fragments.
        return {'format': blender.depth_format, 'depth_write_enabled': False, 'depth_compare': wgpu.CompareFunction.less}

    def get_depth_attachment(self, blender):
        # Same consume-once clear logic as the color attachment.
        depth_load_op = wgpu.LoadOp.load
        if blender.depth_clear:
            blender.depth_clear = False
            depth_load_op = wgpu.LoadOp.clear
        return {'view': blender.depth_view, 'depth_load_op': depth_load_op, 'depth_store_op': wgpu.StoreOp.store}

    def get_shader_code(self, blender):
        # WGSL snippet: discard near-invisible fragments, then emit the
        # premultiplied-alpha color.
        return '\n struct FragmentOutput {\n (0) color: vec4<f32>,\n };\n fn get_fragment_output(depth: f32, color: vec4<f32>) -> FragmentOutput {\n if (color.a <= alpha_compare_epsilon) { discard; }\n var out : FragmentOutput;\n out.color = vec4<f32>(color.rgb * color.a, color.a);\n return out;\n }\n '
def launch_experiments(variant_generator, args):
    """Pick the variant for this seed, apply the CLI hyperparameter overrides,
    and launch the SAC experiment."""
    variants = [unflatten(v, separator='.') for v in variant_generator.variants()]
    print('Launching seed={} experiment.'.format(args.seed))
    # Seeds are 1-based on the command line.
    variant = variants[args.seed - 1]
    # Copy CLI overrides into the variant, one key per identically-named arg.
    override_keys = (
        'lr', 'tau', 'l1regpi', 'l2regpi', 'l1regvf', 'l2regvf',
        'wclippi', 'wclipvf', 'dropoutpi', 'dropoutvf', 'ent_coef',
        'batchnormpi', 'batchnormvf', 'reward_scale', 'num_hidden',
        'policypath', 'valuepath',
    )
    for key in override_keys:
        variant[key] = getattr(args, key)
    print('Variant for this experiment:', variant)
    run_params = variant['run_params']
    algo_params = variant['algorithm_params']  # retained as in the original (unused here)
    experiment_prefix = variant['prefix'] + '/' + args.exp_name
    experiment_name = '{prefix}-{exp_name}-{i:02}'.format(prefix=variant['prefix'], exp_name=args.exp_name, i=args.seed)
    run_sac_experiment(run_experiment, mode=args.mode, variant=variant, exp_prefix=experiment_prefix, exp_name=experiment_name, n_parallel=1, seed=run_params['seed'], terminate_machine=True, log_dir=args.log_dir, snapshot_mode=run_params['snapshot_mode'], snapshot_gap=run_params['snapshot_gap'], sync_s3_pkl=run_params['sync_pkl'])
def sd_gen(ctx, queues):
    """Drain the prompt queue, generating one image per entry and posting it
    back to the Discord channel.

    Each queue entry is a single-item dict ``{mention: prompt}``. A
    'seed...=<int>' clause inside the prompt pins the RNG seed; otherwise a
    random seed is drawn. Recurses until the queue is empty, maintaining the
    global ``blocking`` flag.
    """
    global blocking
    print(queues)
    if len(queues) == 0:
        blocking = False
        return
    blocking = True
    entry = queues.pop(0)
    mention = list(entry.keys())[0]
    prompt = list(entry.values())[0]
    # Stable, filesystem-safe name derived from the prompt text.
    filename = hashlib.sha256(prompt.encode('utf-8')).hexdigest()[:20]
    seed = None
    if 'seed' in prompt.lower():
        try:
            seed = int(prompt.split('seed')[1].split('=')[1].strip())
        except (IndexError, ValueError):
            # Malformed 'seed=' clause: fall back to a random seed below.
            seed = None
        prompt = prompt.split('seed')[0]
    if seed is None:
        # Bug fix: random.randint(0, ) was missing its upper bound and raised
        # TypeError; use the full 32-bit seed range.
        seed = random.randint(0, 2 ** 32 - 1)
    sd_bot.makeimg(prompt, filename, seed)
    save_path = 'C:\\img'
    # NOTE(review): the channel ID argument was missing/redacted — supply it.
    channel = client.get_channel()
    # Bug fix: open the file actually written for this prompt (`filename`)
    # rather than a literal '(unknown)' placeholder path.
    with open(f'{save_path}\\{filename}.png', 'rb') as f:
        pic = discord.File(f)
        asyncio.run_coroutine_threadsafe(channel.send(f'{mention} "{prompt}", seed= {seed}', file=pic), loop)
    # Process the rest of the queue.
    sd_gen(ctx, queues)
def main():
    """Plot a regression chart of a dummy strategy against a dummy benchmark
    over the configured date range."""
    provider = daily_data_provider
    benchmark = provider.get_price(DummyTicker('AAA'), PriceField.Close, start_date, end_date)
    strategy = provider.get_price(DummyTicker('BBB'), PriceField.Close, start_date, end_date)
    chart = RegressionChart(benchmark_tms=benchmark, strategy_tms=strategy)
    chart.plot()
    # Block so the window stays open when run as a script.
    plt.show(block=True)
class TransformerAttentionModule(nn.Module):
    """Multi-head scaled dot-product attention over graph edges (DGL-style
    message-passing ops)."""

    def __init__(self, dim, num_heads, dropout, **kwargs):
        super().__init__()
        _check_dim_and_num_heads_consistency(dim, num_heads)
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        # Independent projections for queries, keys and values, plus output.
        self.attn_query = nn.Linear(in_features=dim, out_features=dim)
        self.attn_key = nn.Linear(in_features=dim, out_features=dim)
        self.attn_value = nn.Linear(in_features=dim, out_features=dim)
        self.output_linear = nn.Linear(in_features=dim, out_features=dim)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, graph, x):
        """Attend over graph neighbourhoods; returns (num_nodes, dim)."""
        def split_heads(t):
            # (N, dim) -> (N, num_heads, head_dim)
            return t.reshape(-1, self.num_heads, self.head_dim)

        q = split_heads(self.attn_query(x))
        k = split_heads(self.attn_key(x))
        v = split_heads(self.attn_value(x))
        # Per-edge scaled dot-product scores, softmax-normalized per node.
        scores = ops.u_dot_v(graph, q, k) / self.head_dim ** 0.5
        probs = edge_softmax(graph, scores)
        attended = ops.u_mul_e_sum(graph, v, probs)
        out = self.output_linear(attended.reshape(-1, self.dim))
        return self.dropout(out)
('/oauth/authorizeapp', methods=['POST'])
_auth_or_cookie
# NOTE(review): the two bare expressions above look like decorators (e.g.
# `@route('/oauth/authorizeapp', methods=['POST'])` and `@_auth_or_cookie`)
# whose '@' prefixes were lost in extraction — confirm before running.
def authorize_application():
    """Handle an OAuth authorization POST: return a token response for the
    implicit flow (response_type == 'token'), otherwise an authorization
    code."""
    if (not get_authenticated_user()):
        abort(401)
        return  # presumably unreachable if abort() raises — kept as written
    client_id = request.form.get('client_id', None)
    whitelist = app.config.get('DIRECT_OAUTH_CLIENTID_WHITELIST', [])
    # Require CSRF verification unless this client is whitelisted for direct
    # basic-auth access.
    if ((client_id not in whitelist) or (not has_basic_auth(get_authenticated_user().username))):
        verify_csrf()
    provider = FlaskAuthorizationProvider()
    redirect_uri = request.form.get('redirect_uri', None)
    response_type = request.form.get('response_type', 'code')
    scope = request.form.get('scope', None)
    state = request.form.get('state', None)
    if (response_type == 'token'):
        return provider.get_token_response(response_type, client_id, redirect_uri, scope=scope, state=state)
    else:
        return provider.get_authorization_code(response_type, client_id, redirect_uri, scope=scope, state=state)
class SmilesRnnMoleculeGenerator():
    """Optimizes molecules by iteratively sampling from a SMILES RNN, scoring
    the samples, and fine-tuning the model on the best molecules seen so far
    (a hill-climbing loop)."""

    def __init__(self, model: SmilesRnn, max_len: int, device: str) -> None:
        self.device = device
        self.model = model
        lr = 0.001
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
        self.criterion = nn.CrossEntropyLoss()
        self.sampler = SmilesRnnSampler(device=self.device, batch_size=512)
        # Maximum SMILES sequence length for sampling and training.
        self.max_len = max_len
        self.trainer = SmilesRnnTrainer(model=self.model, criteria=[self.criterion], optimizer=self.optimizer, device=self.device)

    def optimise(self, objective: ScoringFunction, start_population, keep_top, n_epochs, mols_to_sample, optimize_n_epochs, optimize_batch_size, pretrain_n_epochs) -> List[OptResult]:
        """Run the sample/score/fine-tune loop for ``n_epochs`` generations.

        Returns every kept result, sorted best-first.
        """
        int_results = self.pretrain_on_initial_population(objective, start_population, pretrain_epochs=pretrain_n_epochs)
        results: List[OptResult] = []
        seen: Set[str] = set()
        # Deduplicate the pretraining results by SMILES string.
        for k in int_results:
            if (k.smiles not in seen):
                results.append(k)
                seen.add(k.smiles)
        for epoch in range(1, (1 + n_epochs)):
            t0 = time.time()
            samples = self.sampler.sample(self.model, mols_to_sample, max_seq_len=self.max_len)
            t1 = time.time()
            # Only score canonical SMILES that have not been scored before.
            canonicalized_samples = set(canonicalize_list(samples, include_stereocenters=True))
            payload = list(canonicalized_samples.difference(seen))
            payload.sort()  # determinism: set iteration order is arbitrary
            seen.update(canonicalized_samples)
            scores = objective.score_list(payload)
            int_results = [OptResult(smiles=smiles, score=score) for (smiles, score) in zip(payload, scores)]
            t2 = time.time()
            # Keep a global top-k list, then fine-tune on a shuffled 3:1
            # train/validation split of the current best molecules.
            results.extend(sorted(int_results, reverse=True)[0:keep_top])
            results.sort(reverse=True)
            subset = [i.smiles for i in results][0:keep_top]
            np.random.shuffle(subset)
            sub_train = subset[0:int(((3 * len(subset)) / 4))]
            sub_test = subset[int(((3 * len(subset)) / 4)):]
            (train_seqs, _) = load_smiles_from_list(sub_train, max_len=self.max_len)
            (valid_seqs, _) = load_smiles_from_list(sub_test, max_len=self.max_len)
            train_set = get_tensor_dataset(train_seqs)
            valid_set = get_tensor_dataset(valid_seqs)
            opt_batch_size = min(len(sub_train), optimize_batch_size)
            print_every = int((len(sub_train) / opt_batch_size))
            if (optimize_n_epochs > 0):
                self.trainer.fit(train_set, valid_set, n_epochs=optimize_n_epochs, batch_size=opt_batch_size, print_every=print_every, valid_every=print_every)
            t3 = time.time()
            logger.info(f'Generation {epoch} --- timings: sample: {(t1 - t0):.3f} s, score: {(t2 - t1):.3f} s, finetune: {(t3 - t2):.3f} s')
            top4 = '\n'.join((f' {result.score:.3f}: {result.smiles}' for result in results[:4]))
            logger.info(f'''Top 4:
{top4}''')
        return sorted(results, reverse=True)

    def sample(self, num_mols) -> List[str]:
        """Sample ``num_mols`` SMILES strings from the current model."""
        return self.sampler.sample(self.model, num_to_sample=num_mols, max_seq_len=self.max_len)

    def pretrain_on_initial_population(self, scoring_function: ScoringFunction, start_population, pretrain_epochs) -> List[OptResult]:
        """Score the starting population and fine-tune the model on it.

        Returns the scored population; empty if no valid molecules remain
        after canonicalization.
        """
        seed: List[OptResult] = []
        start_population_size = len(start_population)
        training = canonicalize_list(start_population, include_stereocenters=True)
        if (len(training) != start_population_size):
            logger.warning('Some entries for the start population are invalid or duplicated')
            start_population_size = len(training)
        if (start_population_size == 0):
            return seed
        logger.info('finetuning with {} molecules for {} epochs'.format(start_population_size, pretrain_epochs))
        scores = scoring_function.score_list(training)
        seed.extend((OptResult(smiles=smiles, score=score) for (smiles, score) in zip(training, scores)))
        (train_seqs, _) = load_smiles_from_list(training, max_len=self.max_len)
        train_set = get_tensor_dataset(train_seqs)
        batch_size = min(int(len(training)), 32)
        # NOTE(review): unlike `optimise`, print_every is a float here (no
        # int()) — confirm the trainer accepts that.
        print_every = (len(training) / batch_size)
        losses = self.trainer.fit(train_set, train_set, batch_size=batch_size, n_epochs=pretrain_epochs, print_every=print_every, valid_every=print_every)
        logger.info(losses)
        return seed
def test_enrichments_in_features_for():
    """features_for() output embedded in a SecurityInformation must match the
    securityinformation_withenrichments.json golden fixture."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    vuln_report_filename = os.path.join(base_dir, 'vulnerabilityreport_withenrichments.json')
    security_info_filename = os.path.join(base_dir, 'securityinformation_withenrichments.json')
    with open(vuln_report_filename) as vuln_report_file:
        vuln_report = json.load(vuln_report_file)
    with open(security_info_filename) as security_info_file:
        expected = json.load(security_info_file)
    generated = SecurityInformation(Layer('sha256:4b42c2e36b0bedf017e14dc270f315e627a2a0030f453687a06375fa', '', '', 4, features_for(vuln_report))).to_dict()
    # Feature order is not guaranteed; sort both sides by name before
    # comparing. (Fix: the original sorted `expected` twice — once is enough.)
    expected['Layer']['Features'].sort(key=lambda d: d['Name'])
    generated['Layer']['Features'].sort(key=lambda d: d['Name'])
    assert generated == expected
def gpu_init(vmin, Nv, dv, dxG, dxL, v0, da, na, S0, El, gamma_arr, iso, Mm_arr, Q_intp_list, verbose=0, backend='gpu-cuda'):
    """Initialize the single, global GPU context for spectrum synthesis.

    Selects the CUDA or CPU-emulation backend, loads the pre-built kernels,
    fills and uploads the init parameter struct, configures kernel launch
    grids, allocates device buffers, and plans the FFTs. Returns the
    populated ``init_h`` struct.

    NOTE(review): several globals (``gpu_mod``, ``init_h``, constants such as
    ``k``, ``c``, ``N_A``, helpers ``init_Q``/``init_G_params``/
    ``init_L_params``) are defined elsewhere in this module.
    """
    global gpu_mod
    # Enforce a single GPU context per process.
    if (gpu_mod is not None):
        warn('Only a single GPU context allowed; please call gpu_exit() first.')
        return
    # Backend selection: forced CPU emulation, or CUDA with CPU fallback.
    if (backend == 'cpu-cuda'):
        from radis.gpu.cuda.emulate import CuContext as GPUContext
        ctx = GPUContext.Open(verbose=verbose)
        import radis.gpu.cuda.emulate as backend_module
    else:
        from radis.gpu.cuda.driver import CuContext as GPUContext
        ctx = GPUContext.Open(verbose=verbose)
        if (ctx is None):
            warn(NoGPUWarning(((('Failed to load CUDA context, this happened either because' + 'CUDA is not installed properly, or you have no NVIDIA GPU. ') + 'Continuing with emulated GPU on CPU...') + 'This means *NO* GPU acceleration!')))
            from radis.gpu.cuda.emulate import CuContext as GPUContext
            ctx = GPUContext.Open(verbose=verbose)
            import radis.gpu.cuda.emulate as backend_module
        else:
            import radis.gpu.cuda.driver as backend_module
    (GPUContext, GPUModule, GPUArray, GPUFFT, GPUTimer) = backend_module.getClasses()
    if verbose:
        print('Number of lines loaded: {0}'.format(len(v0)))
        print()
    # Load the pre-built PTX kernel module from the project tree.
    ptx_path = os.path.join(getProjectRoot(), 'gpu', 'cuda', 'build', 'kernels.ptx')
    if (not os.path.exists(ptx_path)):
        raise FileNotFoundError(ptx_path)
    gpu_mod = GPUModule(ctx, ptx_path)
    if verbose:
        print('mode:', gpu_mod.getMode())
    if (verbose >= 2):
        print('Copying initialization parameters to device memory...')
    # Fill the init parameter struct: spectral axis, FFT sizes, line counts.
    init_h.v_min = vmin
    init_h.dv = dv
    init_h.N_v = Nv
    # Zero-padded FFT length (fast size >= 2*N_v) and its real-FFT width.
    init_h.N_v_FT = next_fast_len((2 * init_h.N_v))
    init_h.N_x_FT = ((init_h.N_v_FT // 2) + 1)
    init_h.dxG = dxG
    init_h.dxL = dxL
    init_h.N_lines = int(len(v0))
    init_h.N_collision_partners = gamma_arr.shape[0]
    # Per-isotope log(c2*Mm) terms (index 0 is a placeholder).
    log_c2Mm_arr = np.array(([0] + [(0.5 * np.log((((8 * k) * np.log(2)) / ((((c ** 2) * Mm) * 0.001) / N_A)))) for Mm in Mm_arr[1:]]))
    for i in range(len(log_c2Mm_arr)):
        init_h.log_c2Mm[i] = log_c2Mm_arr[i]
    init_Q(Q_intp_list)
    log_2vMm = (np.log(v0) + log_c2Mm_arr.take(iso))
    gpu_mod.setConstant('init_d', init_h)
    init_G_params(log_2vMm.astype(np.float32), verbose)
    init_L_params(na, gamma_arr, verbose)
    if (verbose >= 2):
        print('done!')
    if (verbose >= 2):
        print('Allocating device memory and copying data...')
    NvFT = init_h.N_v_FT
    NxFT = ((NvFT // 2) + 1)
    Ntpb = ctx.getMaxThreadsPerBlock()
    Nli = init_h.N_lines
    # One thread per element; enough blocks to cover each kernel's work size.
    threads = (Ntpb, 1, 1)
    gpu_mod.fillLDM.setGrid((((Nli // Ntpb) + 1), 1, 1), threads)
    gpu_mod.applyLineshapes.setGrid((((NxFT // Ntpb) + 1), 1, 1), threads)
    gpu_mod.calcTransmittanceNoslit.setGrid((((NvFT // Ntpb) + 1), 1, 1), threads)
    gpu_mod.applyGaussianSlit.setGrid((((NxFT // Ntpb) + 1), 1, 1), threads)
    # Device buffers; grow_only arrays are resized lazily as needed.
    S_klm_d = GPUArray(0, dtype=np.float32, grow_only=True)
    S_klm_FT_d = GPUArray(0, dtype=np.complex64, grow_only=True)
    spectrum_in_d = GPUArray(NxFT, dtype=np.complex64)
    spectrum_out_d = GPUArray(NvFT, dtype=np.float32)
    transmittance_noslit_d = GPUArray(NvFT, dtype=np.float32)
    transmittance_noslit_FT_d = GPUArray(NxFT, dtype=np.complex64)
    transmittance_FT_d = GPUArray(NxFT, dtype=np.complex64)
    transmittance_d = GPUArray(NvFT, dtype=np.float32)
    # Bind kernel arguments; line-database arrays are uploaded here.
    gpu_mod.fillLDM.setArgs(GPUArray.fromArray(iso), GPUArray.fromArray(v0), GPUArray.fromArray(da), GPUArray.fromArray(S0), GPUArray.fromArray(El), GPUArray.fromArray(gamma_arr), GPUArray.fromArray(na), S_klm_d)
    gpu_mod.applyLineshapes.setArgs(S_klm_FT_d, spectrum_in_d)
    gpu_mod.calcTransmittanceNoslit.setArgs(spectrum_out_d, transmittance_noslit_d)
    gpu_mod.applyGaussianSlit.setArgs(transmittance_noslit_FT_d, transmittance_FT_d)
    # All four FFT plans share one lazily-grown scratch buffer.
    workarea_d = GPUArray(0, dtype=np.byte, grow_only=True)
    gpu_mod.fft_fwd = GPUFFT(S_klm_d, S_klm_FT_d, workarea=workarea_d, direction='fwd')
    gpu_mod.fft_rev = GPUFFT(spectrum_in_d, spectrum_out_d, workarea=workarea_d, direction='rev')
    gpu_mod.fft_fwd2 = GPUFFT(transmittance_noslit_d, transmittance_noslit_FT_d, workarea=workarea_d, direction='fwd')
    gpu_mod.fft_rev2 = GPUFFT(transmittance_FT_d, transmittance_d, workarea=workarea_d, direction='rev')
    gpu_mod.timer = GPUTimer()
    if (verbose >= 2):
        print('done!')
    return init_h
def _extract_episode_num(name):
    """Extract the episode number from a filename-like string.

    Returns None when the name matches an exclusion pattern or when no
    extractor regex matches.
    """
    debug(f'Extracting episode number from "{name}"')
    # Names matching any excludor carry no episode number.
    if any(ex.search(name) is not None for ex in _excludors):
        return None
    for regex in _num_extractors:
        match = regex.match(name)
        if match is not None:
            num = int(match.group(1))
            debug(f' Match found, num={num}')
            return num
    debug(' No match found')
    # Bug fix: was `return none` (lowercase), which raised NameError on the
    # no-match path instead of returning None.
    return None
_fixtures(WebFixture, DynamicExampleFixture)
# NOTE(review): the bare `_fixtures(...)` above looks like a decorator (e.g.
# reahl's `@with_fixtures(...)`) whose '@' prefix was lost in extraction.
def test_example(web_fixture, dynamic_example_fixture):
    """End-to-end check of the dynamic allocation form: totals update live, a
    total != 100 raises a domain exception on submit, and fixing the total
    clears the error."""
    fixture = dynamic_example_fixture
    wsgi_application = web_fixture.new_wsgi_app(site_root=DynamicUI, enable_js=True)
    web_fixture.reahl_server.set_app(wsgi_application)
    browser = fixture.browser
    browser.open('/')
    # Allocate 80% + 80% = 160%: submission must show the domain error alert.
    browser.type(fixture.percentage_input_for('Fund A'), '80')
    browser.wait_for(fixture.percentage_total_is, '80')
    browser.type(fixture.percentage_input_for('Fund B'), '80')
    browser.wait_for(fixture.percentage_total_is, '160')
    browser.click(XPath.button_labelled('Submit'))
    browser.wait_for_element_visible(fixture.domain_exception_alert)
    # Correct the second allocation to reach 100%: the alert must disappear.
    browser.type(fixture.percentage_input_for('Fund B'), '20')
    browser.wait_for(fixture.percentage_total_is, '100')
    browser.click(XPath.button_labelled('Submit'))
    browser.wait_for_element_not_visible(fixture.domain_exception_alert)
class GetDependenciesSuite(DataSuite):
    """Data-driven suite: builds each test case and compares the computed
    fine-grained dependency map against the expected output lines."""
    files = find_test_files(pattern='deps*.test')

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        src = '\n'.join(testcase.input)
        # A '# __dump_all__' marker requests dependencies for every module.
        dump_all = ('# __dump_all__' in src)
        options = parse_options(src, testcase, incremental_step=1)
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.cache_dir = os.devnull
        options.export_types = True
        options.preserve_asts = True
        options.allow_empty_bodies = True
        (messages, files, type_map) = self.build(src, options)
        a = messages
        if ((files is None) or (type_map is None)):
            # Build failed: surface a placeholder when no message was emitted.
            if (not a):
                a = ['Unknown compile error (likely syntax error in test case or fixture)']
        else:
            # Union the per-module dependency maps into one source -> targets map.
            deps: defaultdict[(str, set[str])] = defaultdict(set)
            for (module, file) in files.items():
                # NOTE(review): `dumped_modules` is not defined in this chunk;
                # presumably a module-level constant — confirm.
                if (((module in dumped_modules) or dump_all) and (module in testcase.test_modules)):
                    new_deps = get_dependencies(file, type_map, options.python_version, options)
                    for source in new_deps:
                        deps[source].update(new_deps[source])
            type_state.add_all_protocol_deps(deps)
            for (source, targets) in sorted(deps.items()):
                # Skip noisy internal triggers from fixtures and stdlib shims.
                if source.startswith(('<enum', '<typing', '<mypy', '<_typeshed.')):
                    continue
                line = f"{source} -> {', '.join(sorted(targets))}"
                # The expected-output files abbreviate __main__ as 'm'.
                line = line.replace('__main__', 'm')
                a.append(line)
        assert_string_arrays_equal(testcase.output, a, f'Invalid output ({testcase.file}, line {testcase.line})')

    def build(self, source: str, options: Options) -> tuple[(list[str], (dict[(str, MypyFile)] | None), (dict[(Expression, Type)] | None))]:
        """Build *source*; return (messages, files, types), with Nones for
        files/types on a compile error."""
        try:
            result = build.build(sources=[BuildSource('main', None, source)], options=options, alt_lib_path=test_temp_dir)
        except CompileError as e:
            return (e.messages, None, None)
        return (result.errors, result.files, result.types)
# NOTE(review): this bare `_module()` call looks like a registry decorator
# (e.g. `@LOSSES.register_module()`) whose '@' prefix was lost in extraction.
_module()
class GHMC(nn.Module):
    """Gradient Harmonizing Mechanism classification loss (GHM-C).

    Re-weights sigmoid binary cross-entropy by the inverse density of the
    gradient magnitude, so over-represented easy (and very hard) examples
    contribute less.
    """

    def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Bin edges over gradient magnitude g in [0, 1]; the last edge is
        # nudged so g == 1 falls inside the final bin.
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        self.edges[-1] += 1e-06
        if momentum > 0:
            # EMA of the per-bin example counts.
            self.register_buffer('acc_sum', torch.zeros(bins))
        self.use_sigmoid = use_sigmoid
        if not self.use_sigmoid:
            raise NotImplementedError
        self.loss_weight = loss_weight

    def forward(self, pred, target, label_weight, *args, **kwargs):
        """Density-weighted BCE-with-logits; label_weight > 0 marks valid
        entries."""
        if pred.dim() != target.dim():
            (target, label_weight) = _expand_onehot_labels(target, label_weight, pred.size(-1))
        target = target.float()
        label_weight = label_weight.float()
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)
        # Gradient magnitude of the sigmoid-BCE loss w.r.t. the logit.
        g = torch.abs(pred.sigmoid().detach() - target)
        valid = label_weight > 0
        tot = max(valid.float().sum().item(), 1.0)
        nonempty_bins = 0
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin <= 0:
                continue
            if mmt > 0:
                # Momentum-smoothed bin count for stabler weights.
                self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt) * num_in_bin
                weights[inds] = tot / self.acc_sum[i]
            else:
                weights[inds] = tot / num_in_bin
            nonempty_bins += 1
        if nonempty_bins > 0:
            weights = weights / nonempty_bins
        loss = F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot
        return loss * self.loss_weight
class Effect1060(BaseEffect):
    """Heavy Assault Cruiser elite bonus: falloff boost for medium
    projectile turrets (attribute ``eliteBonusHeavyGunship1``)."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        bonus = ship.getModifiedItemAttr('eliteBonusHeavyGunship1')

        def uses_medium_projectiles(mod):
            # Only modules requiring the Medium Projectile Turret skill.
            return mod.item.requiresSkill('Medium Projectile Turret')

        fit.modules.filteredItemBoost(uses_medium_projectiles, 'falloff', bonus, skill='Heavy Assault Cruisers', **kwargs)
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Build a ResNet-18 model for re-identification.

    Args:
        num_classes: number of identity classes.
        loss: loss type forwarded to the ResNet constructor.
        pretrained: when True, load ImageNet weights into the backbone.

    Returns:
        The constructed ResNet model.
    """
    net = ResNet(
        num_classes=num_classes,
        loss=loss,
        block=BasicBlock,
        layers=[2, 2, 2, 2],
        last_stride=2,
        fc_dims=None,
        dropout_p=None,
        **kwargs,
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet18'])
    return net
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OWL-ViT text encoder.

    Stores the hyper-parameters of the text tower (vocabulary size,
    hidden/intermediate sizes, attention layout, initializer scales and
    the special token ids) and plugs into the ``PretrainedConfig``
    serialization machinery.
    """

    model_type = 'owlvit_text_model'

    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    # BUG FIX: this alternate constructor takes `cls` and is invoked as
    # OwlViTTextConfig.from_pretrained(path); without @classmethod the path
    # would be bound to `cls` and the call would fail.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        """Load this text config from a checkpoint or config file.

        When the checkpoint holds a full ``owlvit`` config, the nested
        ``text_config`` sub-dict is extracted first.
        """
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == 'owlvit':
            config_dict = config_dict['text_config']
        # Warn on a model-type mismatch; instantiation still proceeds.
        if ('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def load_modules_from_path(path):
    """Import every ``*.py`` module found directly inside *path*.

    The directory is appended to ``sys.path`` first, so the modules remain
    importable by name afterwards. Modules are imported purely for their
    side effects; nothing is returned.

    Args:
        path: directory to scan (a trailing '/' is added if missing).

    Raises:
        OSError: if the directory does not exist.
    """
    # Normalize to a trailing slash; the error message below keeps it,
    # matching the historical behavior.
    if not path.endswith('/'):
        path += '/'
    if not os.path.exists(path):
        raise OSError('Directory does not exist: %s' % path)
    sys.path.append(path)
    for name in os.listdir(path):
        # Accept '*.py' but not a file named exactly '.py' (len check).
        if len(name) > 3 and name.endswith('.py'):
            __import__(name[:-3], globals(), locals(), ['*'])
_module()
class PascalContextDataset(CustomDataset):
    """PASCAL Context segmentation dataset (60 classes incl. background).

    Images use the '.jpg' suffix and segmentation maps the '.png' suffix;
    samples are selected via the required ``split`` file.
    ``reduce_zero_label`` is False, so label 0 ('background') is kept as a
    regular class rather than mapped to ignore.
    """
    # The 60 semantic category names, index-aligned with PALETTE below.
    CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'table', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor', 'bag', 'bed', 'bench', 'book', 'building', 'cabinet', 'ceiling', 'cloth', 'computer', 'cup', 'door', 'fence', 'floor', 'flower', 'food', 'grass', 'ground', 'keyboard', 'light', 'mountain', 'mouse', 'curtain', 'platform', 'sign', 'plate', 'road', 'rock', 'shelves', 'sidewalk', 'sky', 'snow', 'bedclothes', 'track', 'tree', 'truck', 'wall', 'water', 'window', 'wood')
    # One RGB color per class, used for visualizing predictions.
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
    def __init__(self, split, **kwargs):
        """Initialize the dataset; ``split`` selects the sample subset."""
        super(PascalContextDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='.png', split=split, reduce_zero_label=False, **kwargs)
        # Sanity check: the image directory must exist and a split is required.
        assert (osp.exists(self.img_dir) and (self.split is not None))
def _get_room_node(state: EnvironmentState, node: Node):
    """Return the room node that contains *node*, or None if none is found.

    A node whose own category is 'Rooms' is returned directly. When the
    node has several direct INSIDE parents, only those direct parents are
    inspected; with a single-parent chain, containment is followed upward
    transitively.
    """
    if node.category == 'Rooms':
        return node

    parents = state.get_nodes_from(node, Relation.INSIDE)
    if len(parents) > 1:
        # Multiple direct containers: check only the immediate level.
        for candidate in state.get_nodes_from(node, Relation.INSIDE):
            if candidate.category == 'Rooms':
                return candidate
    else:
        # Single-parent chain: walk upward until a room (or nothing) is hit.
        while parents:
            current = parents[0]
            if current.category == 'Rooms':
                return current
            parents = state.get_nodes_from(current, Relation.INSIDE)
    return None
class Migration(migrations.Migration):
    """Initial migration: creates the conference ``Page`` model."""
    # First migration of this app.
    initial = True
    # The Conference model must already exist (FK target below).
    dependencies = [('conferences', '0007_auto__1953')]
    # Page: timestamped (created/modified), sluggable, publishable content
    # page with an optional image, unique per (slug, conference) and
    # ordered by '-published'.
    operations = [migrations.CreateModel(name='Page', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('title', models.CharField(max_length=200, verbose_name='title')), ('slug', models.SlugField(blank=True, max_length=200, verbose_name='slug')), ('content', models.TextField(verbose_name='content')), ('published', models.BooleanField(default=False, verbose_name='published')), ('image', models.ImageField(blank=True, null=True, upload_to='pages', verbose_name='image')), ('conference', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages', to='conferences.Conference', verbose_name='conference'))], options={'ordering': ['-published'], 'unique_together': {('slug', 'conference')}})]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.