code stringlengths 101 5.91M |
|---|
class SuperResIDWE2K7(SuperResIDWEXKX):
    """Searchable residual inverted-depthwise block preset: kernel size 7, expansion 2.0.

    Thin convenience subclass that pins ``kernel_size``/``expension`` and
    forwards every other argument to the generic ``SuperResIDWEXKX`` block.
    """

    def __init__(self, in_channels=None, out_channels=None, stride=None, bottleneck_channels=None, sub_layers=None, no_create=False, **kwargs):
        # NOTE: 'expension' (sic) is the keyword the base class expects; do not "fix" the spelling.
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bottleneck_channels=bottleneck_channels,
            sub_layers=sub_layers,
            kernel_size=7,
            expension=2.0,
            no_create=no_create,
            **kwargs,
        )
class Statistics(object):
    """Accumulator for training statistics: losses, token counts, and timing.

    Tracks cross-entropy-style losses (main, flow, flow-history, coref vocab,
    coref attention), word/accuracy counts, and wall-clock time.  Supports
    merging statistics gathered across distributed workers via the
    module-level ``all_gather_list`` / ``get_rank`` helpers.
    """

    def __init__(self, loss=0, flow_loss=0, flow_history_loss=0, corefvocab_loss=0, corefattn_loss=0, num_effective_coref=0, n_words=0, n_correct=0):
        self.num_effective_coref = num_effective_coref
        self.loss = loss
        self.flow_loss = flow_loss
        self.flow_history_loss = flow_history_loss
        self.corefvocab_loss = corefvocab_loss
        self.corefattn_loss = corefattn_loss
        self.n_words = n_words
        self.n_correct = n_correct
        # Source-token count is only accumulated on explicit request (see update()).
        self.n_src_words = 0
        self.start_time = time.time()

    @staticmethod
    def all_gather_stats(stat, max_size=4096):
        """Gather a single Statistics object across all distributed processes.

        Fix: restored the ``@staticmethod`` decorators on the two gather
        helpers — they take no ``self``/``cls`` and are invoked as
        ``Statistics.all_gather_stats_list(...)``.
        """
        stats = Statistics.all_gather_stats_list([stat], max_size=max_size)
        return stats[0]

    @staticmethod
    def all_gather_stats_list(stat_list, max_size=4096):
        """Gather a list of Statistics from every process and merge into ours.

        Relies on the module-level distributed helpers ``all_gather_list``
        and ``get_rank``; every other rank's stats are folded into this
        rank's list element-wise.
        """
        all_stats = all_gather_list(stat_list, max_size=max_size)
        our_rank = get_rank()
        our_stats = all_stats[our_rank]
        for (other_rank, stats) in enumerate(all_stats):
            if (other_rank == our_rank):
                continue
            for (i, stat) in enumerate(stats):
                our_stats[i].update(stat, update_n_src_words=True)
        return our_stats

    def update(self, stat, update_n_src_words=False):
        """Accumulate another Statistics object into this one in place."""
        self.loss += stat.loss
        self.flow_loss += stat.flow_loss
        self.flow_history_loss += stat.flow_history_loss
        self.corefvocab_loss += stat.corefvocab_loss
        self.corefattn_loss += stat.corefattn_loss
        self.num_effective_coref += stat.num_effective_coref
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct
        if update_n_src_words:
            self.n_src_words += stat.n_src_words

    def accuracy(self):
        """Token-level accuracy as a percentage."""
        return (100 * (self.n_correct / self.n_words))

    def xent(self):
        """Per-word cross-entropy."""
        return (self.loss / self.n_words)

    def xent_corefvocab(self):
        """Per-coref coref-vocab loss, or -1 when no coref was seen."""
        return ((self.corefvocab_loss / self.num_effective_coref) if self.num_effective_coref else (- 1))

    def xent_corefattn(self):
        """Per-coref coref-attention loss, or -1 when no coref was seen."""
        return ((self.corefattn_loss / self.num_effective_coref) if self.num_effective_coref else (- 1))

    def ppl(self):
        """Perplexity; the exponent is clamped at 100 to avoid overflow."""
        return math.exp(min((self.loss / self.n_words), 100))

    def elapsed_time(self):
        """Seconds since this object was created."""
        return (time.time() - self.start_time)

    def output(self, step, num_steps, learning_rate, start):
        """Log a one-line progress summary (uses module-level ``logger``)."""
        t = self.elapsed_time()
        logger.info(((((('Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; ' + 'corefvocab xent: %4.2f; corefattn xent: %4.2f; ') + 'flow loss: %4.2f; ') + 'flow history loss: %4.2f;') + 'lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec') % (step, num_steps, self.accuracy(), self.ppl(), self.xent(), self.xent_corefvocab(), self.xent_corefattn(), self.flow_loss, self.flow_history_loss, learning_rate, (self.n_src_words / (t + 1e-05)), (self.n_words / (t + 1e-05)), (time.time() - start))))
        sys.stdout.flush()

    def log_tensorboard(self, prefix, writer, learning_rate, step):
        """Write the main scalars to a TensorBoard-style writer."""
        t = self.elapsed_time()
        writer.add_scalar((prefix + '/xent'), self.xent(), step)
        writer.add_scalar((prefix + '/ppl'), self.ppl(), step)
        writer.add_scalar((prefix + '/accuracy'), self.accuracy(), step)
        writer.add_scalar((prefix + '/tgtper'), (self.n_words / t), step)
        writer.add_scalar((prefix + '/lr'), learning_rate, step)
def get_new_subject_file_split(df: pd.DataFrame, split_method: str, data_testing: dict, random_seed: int, train_frac: float, test_frac: float, path_output: str, balance: str, subject_selection: dict=None) -> (list, list, list):
    """Build a new train/valid/test split of subjects and persist it to disk.

    Optionally subsamples participants per metadata value (``subject_selection``),
    optionally stratifies by a metadata column (``balance``), then delegates the
    actual partitioning to the module-level ``split_dataset`` helper and saves
    the result as ``split_datasets.joblib`` under ``path_output``.

    Returns:
        (train_lst, valid_lst, test_lst): lists of subject identifiers.
    """
    if (subject_selection is not None):
        # The three parallel lists must describe the same number of selections.
        if (not (len(subject_selection['metadata']) == len(subject_selection['n']) == len(subject_selection['value']))):
            raise ValueError('All lists in subject_selection parameter should have the same length.')
        sampled_dfs = []
        # Seed so the random.sample() draws below are reproducible.
        random.seed(random_seed)
        for (m, n, v) in zip(subject_selection['metadata'], subject_selection['n'], subject_selection['value']):
            # Draw n distinct participants whose metadata column m equals v.
            participants = random.sample(df[(df[m] == v)]['participant_id'].unique().tolist(), n)
            for participant in participants:
                sampled_dfs.append(df[(df['participant_id'] == participant)])
        # Only replace df if at least one participant was sampled.
        if (len(sampled_dfs) != 0):
            df = pd.concat(sampled_dfs)
    if balance:
        if (balance in df.keys()):
            # One sub-dataframe per non-NaN value of the balance column, so each
            # stratum is split with the same fractions.
            df_list = [df[(df[balance] == k)] for k in df[balance][df[balance].notna()].unique().tolist()]
        else:
            logger.warning("No column named '{}' was found in 'participants.tsv' file. Not taken into account to split the dataset.".format(balance))
            df_list = [df]
    else:
        df_list = [df]
    (train_lst, valid_lst, test_lst) = ([], [], [])
    for df_tmp in df_list:
        # Split each stratum independently and pool the results.
        (train_tmp, valid_tmp, test_tmp) = split_dataset(df=df_tmp, split_method=split_method, data_testing=data_testing, random_seed=random_seed, train_frac=train_frac, test_frac=test_frac)
        train_lst += train_tmp
        valid_lst += valid_tmp
        test_lst += test_tmp
    split_dct = {'train': train_lst, 'valid': valid_lst, 'test': test_lst}
    # Persist the split so later runs can reload the exact same partition.
    split_path = Path(path_output, 'split_datasets.joblib')
    joblib.dump(split_dct, split_path)
    return (train_lst, valid_lst, test_lst)
class PublisherAgent(ph.Agent):
    """Publisher that requests ad impressions and simulates user clicks.

    Each step it asks the exchange for a random impression; when ads come
    back it samples a Bernoulli click from a per-user, per-theme
    probability table and reports the result to the advertiser.
    """

    # Default click probability per user id and ad theme.
    _USER_CLICK_PROBABILITIES = {1: {'sport': 0.0, 'travel': 1.0, 'science': 0.2, 'tech': 0.8}, 2: {'sport': 1.0, 'travel': 0.0, 'science': 0.7, 'tech': 0.1}}

    def __init__(self, agent_id: str, exchange_id: str, user_click_proba: dict=None):
        super().__init__(agent_id)
        self.exchange_id = exchange_id
        self.user_click_proba = (user_click_proba or self._USER_CLICK_PROBABILITIES)

    def generate_messages(self, ctx: ph.Context):
        """Send a random impression request to the exchange every step."""
        return [(self.exchange_id, ImpressionRequest.generate_random())]

    # Fix: the decorator text was mangled to the bare expression
    # `.msg_handler(Ads)` (a syntax error) by extraction. Restored to the
    # Phantom message-handler registration — TODO confirm the exact
    # registration path against the phantom version in use.
    @ph.agents.msg_handler(Ads)
    def handle_ads(self, _ctx: ph.Context, msg: ph.Message):
        """Simulate a click on the served ads and notify the advertiser."""
        logger.debug('PublisherAgent %s ads: %s', self.id, msg.payload)
        clicked = np.random.binomial(1, self.user_click_proba[msg.payload.user_id][msg.payload.theme])
        return [(msg.payload.advertiser_id, ImpressionResult(clicked=clicked))]
# Fix: the decorator was mangled to `_grad()` by extraction; restored to
# torch.no_grad() — the EMA update must not be tracked by autograd.
@torch.no_grad()
def moment_update(model, model_ema, m):
    """Exponential-moving-average update of `model_ema` toward `model`.

    For each parameter pair: ema = m * ema + (1 - m) * param.
    `m` is the momentum/decay in [0, 1]; m=1 freezes the EMA model.
    """
    for (p1, p2) in zip(model.parameters(), model_ema.parameters()):
        p2.data = ((p2.data * m) + (p1.data * (1 - m)))
# Fix: the decorator was stripped to the bare tuple `(autouse=True,
# scope='package')` by extraction; restored to pytest.fixture, which matches
# the argument names and the yield-based setup/teardown shape.
@pytest.fixture(autouse=True, scope='package')
def orca_context_fixture():
    """Package-scoped auto-use fixture: start an Orca Spark context.

    Registers `to_array` / `flatten` UDFs (Spark Vector -> list[float]),
    yields for the duration of the test package, then stops the context.
    """
    conf = {'spark.python.worker.reuse': 'false'}
    sc = init_orca_context(cores=8, conf=conf)

    def to_array_(v):
        # Spark ML Vector -> plain python list.
        return v.toArray().tolist()

    def flatten_(v):
        # Sequence of Vectors -> single flat python list.
        result = []
        for elem in v:
            result.extend(elem.toArray().tolist())
        return result

    spark = SparkSession(sc)
    spark.udf.register('to_array', to_array_, ArrayType(DoubleType()))
    spark.udf.register('flatten', flatten_, ArrayType(DoubleType()))
    yield
    stop_orca_context()
def main():
    """Entry point: load a reconstruction model, print stats, and visualize it."""
    args = parse_args()
    model = Model()
    model.read_model(args.input_model, ext=args.input_format)
    # Summarize the loaded reconstruction.
    for label, collection in (('num_cameras:', model.cameras),
                              ('num_images:', model.images),
                              ('num_points3D:', model.points3D)):
        print(label, len(collection))
    # Open the viewer and draw the point cloud plus camera frusta.
    model.create_window()
    model.add_points()
    model.add_cameras(scale=0.25)
    model.show()
def predict_by_best_model(args):
    """Evaluate the saved best T5-based classifier on the OOD test set.

    Loads the tokenizer and preprocessed data, restores the best checkpoint's
    weights into a fresh ``Classifier``, wraps it in ``DataParallel`` and
    runs the module-level ``Evaluation``.

    Args:
        args: dict-like config with keys 'checkpoint',
            'per_device_eval_batch_size', 'best_model_dir', 'device'
            (list of CUDA device ids), plus whatever the project helpers
            ``preprocess_data_t5`` / ``ood_dataset`` consume.

    Returns:
        (best_result, dataset_length) tuple.
    """
    tokenizer = T5Tokenizer.from_pretrained(args['checkpoint'])
    data = preprocess_data_t5(args)
    testing_set = ood_dataset(data, tokenizer, args)
    # NOTE(review): unused; len(testing_set) is recomputed in the return below.
    testing_set_length = len(testing_set)
    # NOTE(review): shuffle=True is unusual for an evaluation loader — safe
    # only if Evaluation aggregates over the full set; confirm.
    eval_params = {'batch_size': args['per_device_eval_batch_size'], 'shuffle': True, 'num_workers': 0}
    testing_loader = DataLoader(testing_set, **eval_params)
    best_model_dir = args['best_model_dir']
    # The checkpoint stores a whole DataParallel-wrapped model; extract its
    # underlying state dict and load it into a fresh Classifier.
    loaded_whole_model = torch.load(best_model_dir)
    loaded_model = Classifier(args)
    loaded_model_dict = loaded_whole_model.module.state_dict()
    loaded_model.load_state_dict(loaded_model_dict)
    loaded_model = torch.nn.DataParallel(loaded_model, device_ids=args['device'])
    model = loaded_model.cuda(device=args['device'][0])
    best_result = Evaluation(args, model, testing_loader, tokenizer)
    # Drop the reference so GPU memory can be reclaimed before returning.
    del model
    return (best_result, len(testing_set))
def check_python_script(cmd):
    """Execute a `python script.py ...` command line in-process.

    The command is tokenized with shlex, a leading 'python' token is
    dropped, sys.argv is patched to the remaining tokens, and the script
    is run as `__main__` via runpy.
    """
    argv = split(cmd)
    argv = argv[1:] if argv[0] == 'python' else argv
    with patch.object(sys, 'argv', argv):
        run_path(argv[0], run_name='__main__')
class TestStinespring(ChannelTestCase):
    """Tests for the Stinespring quantum channel representation."""

    def test_init(self):
        """Initialization from unitaries and (left, right) operator pairs."""
        chan = Stinespring(self.UI)
        self.assertAllClose(chan.data, self.UI)
        self.assertEqual(chan.dim, (2, 2))
        chan = Stinespring(self.depol_stine(0.5))
        self.assertAllClose(chan.data, self.depol_stine(0.5))
        self.assertEqual(chan.dim, (2, 2))
        (stine_l, stine_r) = (self.rand_matrix(4, 2), self.rand_matrix(4, 2))
        chan = Stinespring((stine_l, stine_r))
        self.assertAllClose(chan.data, (stine_l, stine_r))
        self.assertEqual(chan.dim, (2, 2))
        # Identical left/right operators collapse to a single operator.
        chan = Stinespring((stine_l, stine_l))
        self.assertAllClose(chan.data, stine_l)
        self.assertEqual(chan.dim, (2, 2))
        self.assertRaises(QiskitError, Stinespring, stine_l, input_dims=4, output_dims=4)

    def test_circuit_init(self):
        """Initialization from a measurement-free circuit."""
        (circuit, target) = self.simple_circuit_no_measure()
        op = Stinespring(circuit)
        target = Stinespring(target)
        self.assertEqual(op, target)

    def test_circuit_init_except(self):
        """Circuits containing measurements must be rejected."""
        circuit = self.simple_circuit_with_measure()
        self.assertRaises(QiskitError, Stinespring, circuit)

    def test_equal(self):
        """Channels built from identical data compare equal."""
        stine = tuple((self.rand_matrix(4, 2) for _ in range(2)))
        self.assertEqual(Stinespring(stine), Stinespring(stine))

    def test_copy(self):
        """copy() must deep-copy the underlying data."""
        mat = np.eye(4)
        orig = Stinespring(mat)
        cpy = orig.copy()
        cpy._data[0][(0, 0)] = 0.0
        self.assertFalse((cpy == orig))

    def test_evolve(self):
        """State evolution for identity, Hadamard-like, and depolarizing channels."""
        input_psi = [0, 1]
        input_rho = [[0, 0], [0, 1]]
        # Identity channel.
        chan = Stinespring(self.UI)
        target_psi = np.array([0, 1])
        self.assertAllClose(chan._evolve(input_psi), target_psi)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_psi)
        target_rho = np.array([[0, 0], [0, 1]])
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)
        # Hadamard channel.
        mat = (np.array([[1, 1], [1, (- 1)]]) / np.sqrt(2))
        chan = Stinespring(mat)
        target_psi = (np.array([1, (- 1)]) / np.sqrt(2))
        self.assertAllClose(chan._evolve(input_psi), target_psi)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_psi)
        target_rho = (np.array([[1, (- 1)], [(- 1), 1]]) / 2)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)
        # Completely depolarizing channel maps everything to I/2.
        chan = Stinespring(self.depol_stine(1))
        target_rho = (np.eye(2) / 2)
        self.assertAllClose(chan._evolve(input_psi), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)

    def test_is_cptp(self):
        """CPTP check: true for physical channels, false for random/invalid data."""
        self.assertTrue(Stinespring(self.depol_stine(0.5)).is_cptp())
        self.assertTrue(Stinespring(self.UX).is_cptp())
        (stine_l, stine_r) = (self.rand_matrix(4, 2), self.rand_matrix(4, 2))
        self.assertFalse(Stinespring((stine_l, stine_r)).is_cptp())
        self.assertFalse(Stinespring((self.UI + self.UX)).is_cptp())

    def test_conjugate(self):
        """conjugate() conjugates the Stinespring operator(s)."""
        (stine_l, stine_r) = (self.rand_matrix(16, 2), self.rand_matrix(16, 2))
        targ = Stinespring(stine_l.conj(), output_dims=4)
        chan1 = Stinespring(stine_l, output_dims=4)
        chan = chan1.conjugate()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (2, 4))
        targ = Stinespring((stine_l.conj(), stine_r.conj()), output_dims=4)
        chan1 = Stinespring((stine_l, stine_r), output_dims=4)
        chan = chan1.conjugate()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (2, 4))

    def test_transpose(self):
        """transpose() transposes the operator(s) and swaps the dimensions."""
        (stine_l, stine_r) = (self.rand_matrix(4, 2), self.rand_matrix(4, 2))
        targ = Stinespring(stine_l.T, 4, 2)
        chan1 = Stinespring(stine_l, 2, 4)
        chan = chan1.transpose()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))
        targ = Stinespring((stine_l.T, stine_r.T), 4, 2)
        chan1 = Stinespring((stine_l, stine_r), 2, 4)
        chan = chan1.transpose()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))

    def test_adjoint(self):
        """adjoint() conjugate-transposes the operator(s) and swaps dimensions."""
        (stine_l, stine_r) = (self.rand_matrix(4, 2), self.rand_matrix(4, 2))
        targ = Stinespring(stine_l.T.conj(), 4, 2)
        chan1 = Stinespring(stine_l, 2, 4)
        chan = chan1.adjoint()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))
        targ = Stinespring((stine_l.T.conj(), stine_r.T.conj()), 4, 2)
        chan1 = Stinespring((stine_l, stine_r), 2, 4)
        chan = chan1.adjoint()
        self.assertEqual(chan, targ)
        self.assertEqual(chan.dim, (4, 2))

    def test_compose_except(self):
        """compose() rejects dimension mismatches and non-channel operands."""
        self.assertRaises(QiskitError, Stinespring(np.eye(2)).compose, Stinespring(np.eye(4)))
        self.assertRaises(QiskitError, Stinespring(np.eye(2)).compose, 2)

    def test_compose(self):
        """Channel composition agrees with sequential evolution."""
        rho = self.rand_rho(2)
        # X then Y equals Z (up to phase, irrelevant for channels).
        chan1 = Stinespring(self.UX)
        chan2 = Stinespring(self.UY)
        chan = chan1.compose(chan2)
        targ = Stinespring(self.UZ)._evolve(rho)
        self.assertAllClose(chan._evolve(rho), targ)
        # Composing depolarizing channels composes their depolarizing strengths.
        chan1 = Stinespring(self.depol_stine(0.5))
        chan = chan1.compose(chan1)
        targ = Stinespring(self.depol_stine(0.75))._evolve(rho)
        self.assertAllClose(chan._evolve(rho), targ)
        # Non-square composition.
        (stine1, stine2) = (self.rand_matrix(16, 2), self.rand_matrix(8, 4))
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=4, output_dims=2)
        targ = chan2._evolve(chan1._evolve(rho))
        chan = chan1.compose(chan2)
        self.assertEqual(chan.dim, (2, 2))
        self.assertAllClose(chan._evolve(rho), targ)
        # Fix: the binary composition operator was lost in extraction
        # (`chan1 chan2` is a syntax error); channels compose with matmul,
        # equivalent to chan1.compose(chan2) asserted above.
        chan = chan1 @ chan2
        self.assertEqual(chan.dim, (2, 2))
        self.assertAllClose(chan._evolve(rho), targ)

    def test_compose_front(self):
        """compose(..., front=True) applies the operand first."""
        rho = self.rand_rho(2)
        chan1 = Stinespring(self.UX)
        chan2 = Stinespring(self.UY)
        chan = chan1.compose(chan2, front=True)
        targ = Stinespring(self.UZ)._evolve(rho)
        self.assertAllClose(chan._evolve(rho), targ)
        chan1 = Stinespring(self.depol_stine(0.5))
        chan = chan1.compose(chan1, front=True)
        targ = Stinespring(self.depol_stine(0.75))._evolve(rho)
        self.assertAllClose(chan._evolve(rho), targ)
        (stine1, stine2) = (self.rand_matrix(16, 2), self.rand_matrix(8, 4))
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=4, output_dims=2)
        targ = chan2._evolve(chan1._evolve(rho))
        chan = chan2.compose(chan1, front=True)
        self.assertEqual(chan.dim, (2, 2))
        self.assertAllClose(chan._evolve(rho), targ)

    def test_expand(self):
        """expand() builds the tensor product with self as the low subsystem."""
        (rho0, rho1) = (np.diag([1, 0]), np.diag([0, 1]))
        rho_init = np.kron(rho0, rho0)
        chan1 = Stinespring(self.UI)
        chan2 = Stinespring(self.UX)
        chan = chan1.expand(chan2)
        rho_targ = np.kron(rho1, rho0)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan = chan2.expand(chan1)
        rho_targ = np.kron(rho0, rho1)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan_dep = Stinespring(self.depol_stine(1))
        chan = chan_dep.expand(chan_dep)
        rho_targ = (np.diag([1, 1, 1, 1]) / 4)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)

    def test_tensor(self):
        """tensor() builds the tensor product with self as the high subsystem."""
        (rho0, rho1) = (np.diag([1, 0]), np.diag([0, 1]))
        rho_init = np.kron(rho0, rho0)
        chan1 = Stinespring(self.UI)
        chan2 = Stinespring(self.UX)
        chan = chan2.tensor(chan1)
        rho_targ = np.kron(rho1, rho0)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan = chan1.tensor(chan2)
        rho_targ = np.kron(rho0, rho1)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan_dep = Stinespring(self.depol_stine(1))
        chan = chan_dep.tensor(chan_dep)
        rho_targ = (np.diag([1, 1, 1, 1]) / 4)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)

    def test_power(self):
        """power(n) equals composing the channel with itself n times."""
        rho = np.diag([1, 0])
        p_id = 0.9
        chan = Stinespring(self.depol_stine((1 - p_id)))
        p_id3 = (p_id ** 3)
        chan3 = chan.power(3)
        targ3a = chan._evolve(chan._evolve(chan._evolve(rho)))
        self.assertAllClose(chan3._evolve(rho), targ3a)
        targ3b = Stinespring(self.depol_stine((1 - p_id3)))._evolve(rho)
        self.assertAllClose(chan3._evolve(rho), targ3b)

    def test_power_except(self):
        """Non-integer powers must be rejected."""
        chan = Stinespring(self.depol_stine(0.9))
        self.assertRaises(QiskitError, chan.power, 0.5)

    def test_add(self):
        """Channel addition is evolution-wise addition."""
        rho = self.rand_rho(2)
        (stine1, stine2) = (self.rand_matrix(16, 2), self.rand_matrix(16, 2))
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=2, output_dims=4)
        targ = (chan1._evolve(rho) + chan2._evolve(rho))
        chan = chan1.add(chan2)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = (chan1 + chan2)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = Stinespring((stine1, stine2))
        targ = (2 * chan._evolve(rho))
        chan = chan.add(chan)
        self.assertAllClose(chan._evolve(rho), targ)

    def test_subtract(self):
        """Channel subtraction is evolution-wise subtraction."""
        rho = self.rand_rho(2)
        (stine1, stine2) = (self.rand_matrix(16, 2), self.rand_matrix(16, 2))
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        chan2 = Stinespring(stine2, input_dims=2, output_dims=4)
        targ = (chan1._evolve(rho) - chan2._evolve(rho))
        chan = chan1.subtract(chan2)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = (chan1 - chan2)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = Stinespring((stine1, stine2))
        targ = (0 * chan._evolve(rho))
        chan = chan.subtract(chan)
        self.assertAllClose(chan._evolve(rho), targ)

    def test_multiply(self):
        """Scalar multiplication scales the evolved state."""
        rho = self.rand_rho(2)
        val = 0.5
        (stine1, stine2) = (self.rand_matrix(16, 2), self.rand_matrix(16, 2))
        chan1 = Stinespring(stine1, input_dims=2, output_dims=4)
        targ = (val * chan1._evolve(rho))
        chan = chan1.multiply(val)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = (val * chan1)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = (chan1 * val)
        self.assertAllClose(chan._evolve(rho), targ)
        chan2 = Stinespring((stine1, stine2), input_dims=2, output_dims=4)
        targ = (val * chan2._evolve(rho))
        chan = chan2.multiply(val)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = (val * chan2)
        self.assertAllClose(chan._evolve(rho), targ)
        chan = (chan2 * val)
        self.assertAllClose(chan._evolve(rho), targ)

    def test_multiply_except(self):
        """Multiplying by a non-scalar must be rejected."""
        chan = Stinespring(self.depol_stine(1))
        self.assertRaises(QiskitError, chan.multiply, 's')
        self.assertRaises(QiskitError, chan.multiply, chan)

    def test_negate(self):
        """Negation flips the sign of the evolved state."""
        rho = np.diag([1, 0])
        targ = np.diag([(- 0.5), (- 0.5)])
        chan = (- Stinespring(self.depol_stine(1)))
        self.assertAllClose(chan._evolve(rho), targ)
def process_hits(response, column_id_pa, column_cit_srprt, column_category_P, column_category_A, column_category_D, column_category_Y, column_category_L, column_category_O, column_category_T, column_category_E, column_category_X):
    """Flatten an Elasticsearch hits response into parallel column lists.

    For every patent-application hit and each of its citation ids, appends
    one row across all column lists: the application id, the citation id,
    and one boolean per citation category (True when the category field is
    present on the citation document).  Queries the `ep_patent_citations`
    index via the module-level `es` client and `query_citation_id` helper.

    All `column_*` arguments are mutated in place.
    """
    # Map each citation-category field to its destination column, replacing
    # nine copy-pasted append blocks.
    category_columns = (
        ('category_P', column_category_P),
        ('category_A', column_category_A),
        ('category_D', column_category_D),
        ('category_Y', column_category_Y),
        ('category_L', column_category_L),
        ('category_O', column_category_O),
        ('category_T', column_category_T),
        ('category_E', column_category_E),
        ('category_X', column_category_X),
    )
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        element_id_pa = element.get('_id')
        for citation_id in element.get('_source').get('citation_ids'):
            column_id_pa.append(element_id_pa)
            column_cit_srprt.append(citation_id)
            response_citation = es.search(index='ep_patent_citations', body=query_citation_id(citation_id), size=10000, filter_path=['hits.total.value', 'hits.hits'])
            response_citation_entry = response_citation.get('hits').get('hits')[0].get('_source')
            for field, column in category_columns:
                # Fix: `!= None` replaced by the idiomatic `is not None`.
                column.append(response_citation_entry.get(field) is not None)
def ComputePriorCounts(args, counts, ref_lexicon, g2p_lexicon, phonetic_decoding_lexicon):
    """Compute per-word prior pronunciation counts across three sources.

    For each word in `counts`, starts from `args.prior_mean` (one entry per
    source: reference, G2P, phonetic-decoding lexicon), zeroes entries for
    sources that do not contain the word, renormalizes, and scales by
    `args.prior_counts_tot`.  Words absent from every lexicon get a warning
    on stderr and all-zero counts.
    """
    prior_counts = defaultdict(list)
    lexicons = (ref_lexicon, g2p_lexicon, phonetic_decoding_lexicon)
    for word in counts:
        # Per-source prior, masked by lexicon membership.
        mean = [args.prior_mean[0], args.prior_mean[1], args.prior_mean[2]]
        for source_idx, lexicon in enumerate(lexicons):
            if word not in lexicon:
                mean[source_idx] = 0
        total = sum(mean)
        try:
            mean = [float(component) / total for component in mean]
        except ZeroDivisionError:
            # Word appears in no lexicon: leave the all-zero mean as-is.
            print('WARNING: word {} appears in train_counts but not in any lexicon.'.format(word), file=sys.stderr)
        prior_counts[word] = [component * args.prior_counts_tot for component in mean]
    return prior_counts
# Fix: the registry decorator was mangled to the bare call `_module()` by
# extraction (a NameError at import time).  MMSegmentation registers this
# backbone via its registry — `MODELS` in mmseg 1.x, `BACKBONES` in 0.x;
# TODO confirm against this file's imports.
@MODELS.register_module()
class MixVisionTransformer(BaseModule):
    """MiT (Mix Vision Transformer) backbone from SegFormer.

    Each of ``num_stages`` stages runs: overlapping patch embedding ->
    ``num_layers[i]`` transformer encoder layers (spatial-reduction
    attention ratio ``sr_ratios[i]``) -> LayerNorm, and returns NCHW
    feature maps for the stages listed in ``out_indices``.
    """

    def __init__(self, in_channels=3, embed_dims=64, num_stages=4, num_layers=[3, 4, 6, 3], num_heads=[1, 2, 4, 8], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], sr_ratios=[8, 4, 2, 1], out_indices=(0, 1, 2, 3), mlp_ratio=4, qkv_bias=True, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', eps=1e-06), pretrained=None, init_cfg=None, with_cp=False):
        # NOTE: the list/dict defaults are mutable but only ever read here.
        super(MixVisionTransformer, self).__init__(init_cfg=init_cfg)
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be set at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is not None):
            raise TypeError('pretrained must be a str or None')
        self.embed_dims = embed_dims
        self.num_stages = num_stages
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.sr_ratios = sr_ratios
        self.with_cp = with_cp
        # All per-stage configuration lists must have one entry per stage.
        assert (num_stages == len(num_layers) == len(num_heads) == len(patch_sizes) == len(strides) == len(sr_ratios))
        self.out_indices = out_indices
        assert (max(out_indices) < self.num_stages)
        # Stochastic-depth rates, linearly increasing over all blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(num_layers))]
        cur = 0
        self.layers = ModuleList()
        for (i, num_layer) in enumerate(num_layers):
            embed_dims_i = (embed_dims * num_heads[i])
            patch_embed = PatchEmbed(in_channels=in_channels, embed_dims=embed_dims_i, kernel_size=patch_sizes[i], stride=strides[i], padding=(patch_sizes[i] // 2), norm_cfg=norm_cfg)
            layer = ModuleList([TransformerEncoderLayer(embed_dims=embed_dims_i, num_heads=num_heads[i], feedforward_channels=(mlp_ratio * embed_dims_i), drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=dpr[(cur + idx)], qkv_bias=qkv_bias, act_cfg=act_cfg, norm_cfg=norm_cfg, with_cp=with_cp, sr_ratio=sr_ratios[i]) for idx in range(num_layer)])
            # Next stage consumes this stage's channel width.
            in_channels = embed_dims_i
            norm = build_norm_layer(norm_cfg, embed_dims_i)[1]
            self.layers.append(ModuleList([patch_embed, layer, norm]))
            cur += num_layer

    def init_weights(self):
        """Default init (trunc-normal linears, unit LN, fan-out convs) unless init_cfg is set."""
        if (self.init_cfg is None):
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    trunc_normal_init(m, std=0.02, bias=0.0)
                elif isinstance(m, nn.LayerNorm):
                    constant_init(m, val=1.0, bias=0.0)
                elif isinstance(m, nn.Conv2d):
                    # He-style init scaled by the conv's fan-out.
                    fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                    fan_out //= m.groups
                    normal_init(m, mean=0, std=math.sqrt((2.0 / fan_out)), bias=0)
        else:
            super(MixVisionTransformer, self).init_weights()

    def forward(self, x):
        """Return the list of NCHW feature maps for ``out_indices`` stages."""
        outs = []
        for (i, layer) in enumerate(self.layers):
            # layer = [patch_embed, transformer blocks, norm]
            (x, hw_shape) = layer[0](x)
            for block in layer[1]:
                x = block(x, hw_shape)
            x = layer[2](x)
            x = nlc_to_nchw(x, hw_shape)
            if (i in self.out_indices):
                outs.append(x)
        return outs
def test_one_time_tracing_func():
    """Regression test: dependency tracing must stay active across calls.

    ``f`` reads ``x`` on one branch and ``y`` on the other; after the second
    call to ``f`` (the True branch), ``z`` depends on ``x`` only.
    """
    run_cell('x = 0')
    run_cell('y = 1')
    run_cell('\n def f(p):\n if p:\n return x\n else:\n return y\n ')
    # z is last computed via f(True), which reads x.
    run_cell('z = f(False) + 1\nz = f(True) + 1')
    run_cell('y = 2')
    run_cell('logging.info(z)')
    # y is no longer an active dependency of z, so nothing should be flagged.
    assert_not_detected()
    run_cell('x = 3')
    run_cell('logging.info(z)')
    # x *is* a dependency; missing this would mean tracing was disabled after
    # the first traced call.
    assert_detected('tracing should not be disabled')
class BatchFlattenWrapper(VerifiableWrapper):
    """Verifiable wrapper restricted to ``snt.BatchFlatten`` modules."""

    def __init__(self, module):
        # Reject anything that is not a Sonnet BatchFlatten up front.
        if isinstance(module, snt.BatchFlatten):
            super(BatchFlattenWrapper, self).__init__(module)
        else:
            raise ValueError('Cannot wrap {} with a BatchFlattenWrapper.'.format(module))
def TrainDataLoader(imgDir, nbImg, transform, batchSize):
    """Build a shuffling training DataLoader over an ImageFolder dataset.

    Drops the last incomplete batch and uses a single worker process.
    """
    dataset = ImageFolder(imgDir, nbImg, transform)
    return data.DataLoader(
        dataset=dataset,
        batch_size=batchSize,
        shuffle=True,
        num_workers=1,
        drop_last=True,
    )
class MegDistributedDataParallel(nn.Module):
    """Minimal distributed data-parallel wrapper.

    On construction, broadcasts the wrapped module's parameters (and,
    optionally, buffers) from rank 0 to every process in coalesced flat
    buckets, so all workers start from identical weights.  Forward runs on
    the current CUDA device only.
    """

    def __init__(self, module, dim=0, broadcast_buffers=True, bucket_cap_mb=25):
        super(MegDistributedDataParallel, self).__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers
        # Bucket capacity in bytes.
        self.broadcast_bucket_size = ((bucket_cap_mb * 1024) * 1024)
        self._sync_params()

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        """Broadcast `tensors` from rank 0, coalescing into flat buckets.

        Fix: the bucket loop variable used to shadow the `tensors`
        parameter, which worked only by accident; it now has its own name.
        """
        for bucket in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(bucket)
            dist.broadcast(flat_tensors, 0)
            for (tensor, synced) in zip(bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
                tensor.copy_(synced)

    def _sync_params(self):
        """Push rank-0 parameters (and buffers, if enabled) to all ranks."""
        module_states = list(self.module.state_dict().values())
        if (len(module_states) > 0):
            self._dist_broadcast_coalesced(module_states, self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # NOTE(review): lexicographic version comparison is fragile (e.g.
            # '10.0' < '1.0' is False only by accident of character order);
            # kept for behavioural parity with the original.
            if (torch.__version__ < '1.0'):
                buffers = [b.data for b in self.module._all_buffers()]
            else:
                buffers = [b.data for b in self.module.buffers()]
            if (len(buffers) > 0):
                self._dist_broadcast_coalesced(buffers, self.broadcast_bucket_size)

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter positional/keyword inputs across `device_ids` along `self.dim`."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        # Single-device path: scatter onto the current device and run the module.
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])
def EmbedWord2Vec(walks, dimension):
    """Train a skip-gram Word2Vec over random-walk sequences.

    Uses the gensim<4 keyword API (``size``/``iter``).  Returns the vocabulary
    node ids and the corresponding embedding matrix.
    """
    started = time.time()
    print('Creating embeddings.')
    model = Word2Vec(walks, size=dimension, window=5, min_count=0, sg=1, workers=32, iter=1)
    ids = model.wv.index2word
    vectors = model.wv.vectors
    print('Embedding generation runtime: ', (time.time() - started))
    return (ids, vectors)
def _register_on_step_begin(model):
def hook(module, input):
for pruning in module.prunings:
pruning.on_step_begin()
hook_handle = model.register_forward_pre_hook(hook)
return hook_handle |
class DHCF(nn.Module):
    """Dual-channel Hypergraph Collaborative Filtering model.

    Learns user/item embeddings refined by ``num_layers`` rounds of HGNN
    smoothing over a user-side and an item-side hypergraph, combining a
    sum ("graph conv") branch and an element-wise bilinear branch per layer.
    """

    def __init__(self, num_users: int, num_items: int, emb_dim: int, num_layers: int=3, drop_rate: float=0.5) -> None:
        super().__init__()
        (self.num_users, self.num_items) = (num_users, num_items)
        self.num_layers = num_layers
        self.drop_rate = drop_rate
        # Free embedding tables for users and items.
        self.u_embedding = nn.Embedding(num_users, emb_dim)
        self.i_embedding = nn.Embedding(num_items, emb_dim)
        # Per-layer linear maps: W_gc for the sum branch, W_bi for the bilinear branch.
        (self.W_gc, self.W_bi) = (nn.ModuleList(), nn.ModuleList())
        for _ in range(self.num_layers):
            self.W_gc.append(nn.Linear(emb_dim, emb_dim))
            self.W_bi.append(nn.Linear(emb_dim, emb_dim))
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize embeddings and layer weights; zero the biases."""
        nn.init.xavier_uniform_(self.u_embedding.weight)
        nn.init.xavier_uniform_(self.i_embedding.weight)
        for (W_gc, W_bi) in zip(self.W_gc, self.W_bi):
            nn.init.xavier_uniform_(W_gc.weight)
            nn.init.xavier_uniform_(W_bi.weight)
            nn.init.constant_(W_gc.bias, 0)
            nn.init.constant_(W_bi.bias, 0)

    def forward(self, hg_ui: Hypergraph, hg_iu: Hypergraph) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Propagate embeddings through the hypergraphs.

        Args:
            hg_ui: user-side hypergraph (smooths the user embeddings).
            hg_iu: item-side hypergraph (smooths the item embeddings).

        Returns:
            (user_embeddings, item_embeddings), each averaged over the
            initial embeddings and every layer's output.
        """
        u_embs = self.u_embedding.weight
        i_embs = self.i_embedding.weight
        # Users and items share one stacked embedding matrix.
        all_embs = torch.cat([u_embs, i_embs], dim=0)
        embs_list = [all_embs]
        for _idx in range(self.num_layers):
            (u_embs, i_embs) = torch.split(all_embs, [self.num_users, self.num_items], dim=0)
            # HGNN smoothing on each side of the bipartite interaction.
            u_embs = hg_ui.smoothing_with_HGNN(u_embs)
            i_embs = hg_iu.smoothing_with_HGNN(i_embs)
            g_embs = torch.cat([u_embs, i_embs], dim=0)
            # Sum branch (graph-conv style with a residual on all_embs) ...
            sum_embs = F.leaky_relu((self.W_gc[_idx](g_embs) + all_embs), negative_slope=0.2)
            # ... plus an element-wise bilinear interaction branch.
            bi_embs = (all_embs * g_embs)
            bi_embs = F.leaky_relu(self.W_bi[_idx](bi_embs), negative_slope=0.2)
            all_embs = (sum_embs + bi_embs)
            all_embs = F.dropout(all_embs, p=self.drop_rate, training=self.training)
            # L2-normalise before collecting this layer's output.
            all_embs = F.normalize(all_embs, p=2, dim=1)
            embs_list.append(all_embs)
        # Average the initial and per-layer embeddings.
        embs = torch.stack(embs_list, dim=1)
        embs = torch.mean(embs, dim=1)
        (u_embs, i_embs) = torch.split(embs, [self.num_users, self.num_items], dim=0)
        return (u_embs, i_embs)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block with dilation support.

    With ``expand_ratio == 1`` the block is depthwise-conv -> pointwise
    projection; otherwise a pointwise expansion comes first.  A residual
    connection is used when stride is 1 and channel counts match.
    """

    def __init__(self, inp, oup, stride, dilate, expand_ratio):
        super().__init__()
        assert stride in [1, 2]
        self.stride = stride
        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        if expand_ratio == 1:
            modules = [
                # Depthwise 3x3.  NOTE: padding=dilate while dilation stays at
                # its default, exactly as in the original implementation.
                nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, stride=stride, padding=dilate, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # Pointwise linear projection.
                nn.Conv2d(in_channels=hidden_dim, out_channels=oup, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=False),
                nn.BatchNorm2d(oup),
            ]
        else:
            modules = [
                # Pointwise expansion.
                nn.Conv2d(in_channels=inp, out_channels=hidden_dim, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # Depthwise 3x3 with dilation.
                nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, stride=stride, padding=dilate, dilation=dilate, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # Pointwise linear projection.
                nn.Conv2d(in_channels=hidden_dim, out_channels=oup, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=False),
                nn.BatchNorm2d(oup),
            ]
        self.conv = nn.Sequential(*modules)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class AverageInKspaceLayer(MergeLayer):
    """Lasagne layer that averages temporal neighbours in k-space.

    Pipeline: 2-D FFT over (x, y) -> fill/average neighbouring frames at
    each temporal distance in ``frame_dist`` (optionally clipped and/or
    divided by the neighbour count) -> inverse FFT.  Takes
    ``incomings = (data, mask)`` with data shaped (n, nc, nx, ny, nt).
    """

    # NOTE(review): `frame_dist=[1, 3, 5]` is a mutable default argument; it
    # is only read here, so it is safe, but a tuple would be more defensive.
    def __init__(self, incomings, data_shape, frame_dist=[1, 3, 5], divide_by_n=False, clipped=False, **kwargs):
        if ('name' not in kwargs):
            kwargs['name'] = 'kspace_averaging_layer'
        super(AverageInKspaceLayer, self).__init__(incomings, **kwargs)
        (data, mask) = incomings
        (n, nc, nx, ny, nt) = data_shape
        # One averaged copy per frame distance plus the original, times 2 for
        # the real/imaginary channels.
        nc_new = ((len(frame_dist) + 1) * 2)
        self.data = data
        self.mask = mask
        self.frame_dist = frame_dist
        self.divide_by_n = divide_by_n
        # Forward FFT into k-space.
        self.dft2 = FFT2Layer(data, data_shape, name='kavg_dft2')
        if clipped:
            self.kavg = KspaceFillNeighbourLayer_Clipped([self.dft2, mask], nt, frame_dist, divide_by_n, name='kavg_avg')
        else:
            self.kavg = KspaceFillNeighbourLayer([self.dft2, mask], frame_dist, divide_by_n, name='kavg_avg')
        # Reshape so the inverse FFT sees 2-channel (re/im) volumes, then
        # restore the expanded channel dimension on the output.
        self.kavg_tmp = lasagne.layers.reshape(self.kavg, ((- 1), 2, nx, ny, nt))
        self.idft2 = FFT2Layer(self.kavg_tmp, data_shape, inv=True, name='kavg_idft2')
        self.out = lasagne.layers.reshape(self.idft2, ((- 1), nc_new, nx, ny, nt))

    def get_output_for(self, inputs, **kwargs):
        """Run the internal sub-graph on the (data, mask) inputs."""
        x = inputs[0]
        mask = inputs[1]
        res = get_output(self.out, {self.data: x, self.mask: mask})
        return res

    def get_output_shape_for(self, input_shapes, **kwargs):
        # The output shape is determined by the averaging sub-layer.
        return self.kavg.get_output_shape_for(input_shapes)
def get_impact_point_direction(state: X, impact_point: Point) -> float:
    """Bearing from the car's heading to the impact point, in radians.

    Computes the absolute angle of the vector state -> impact_point and
    subtracts the car's heading ``psi``.  The result is NOT wrapped to
    [-pi, pi].
    """
    dy = impact_point.y - state.y
    dx = impact_point.x - state.x
    bearing = np.arctan2(dy, dx)
    heading: float = state.psi
    return bearing - heading
def _render_databricks(js):
import inspect
if (_render_databricks.displayHTML is None):
found = False
for frame in inspect.getouterframes(inspect.currentframe()):
global_names = set(frame.frame.f_globals)
target_names = {'displayHTML', 'display', 'spark'}
if target_names.issubset(global_names):
_render_databricks.displayHTML = frame.frame.f_globals['displayHTML']
found = True
break
if (not found):
msg = "Could not find DataBrick's displayHTML function"
_log.error(msg)
raise RuntimeError(msg)
_render_databricks.displayHTML(js) |
class Logged(Timed):
    """Mixin adding a configurable, nestable logger on top of Timed."""

    def __init__(self, **kwargs):
        # Default to a silent (NullHandler) logger unless the caller overrides.
        kwargs.setdefault('silent', True)
        # NOTE(review): no super().__init__() call; Timed state appears to be
        # created lazily inside setup_logger via setup_timer — confirm.
        self.setup_logger(**kwargs)
def timeenv(self, **kwargs):
kw = dict(kwargs)
message = kw.pop('message', None)
timing = kw.pop('timing', True)
timer = kw.setdefault('timer', None)
self.setup_logger(**kw)
(yield timer)
self.close_logger(message=message, timing=timing)
def logenv(self, **kwargs):
kw = dict(kwargs)
message = kw.pop('message', None)
timing = kw.pop('timing', False)
self.setup_logger(**kw)
(yield)
self.close_logger(message=message, timing=timing)
def logger_count(self):
try:
count = self._logger_count
except AttributeError:
count = 0
self._logger_count = count
return count
_count.setter
def logger_count(self, count):
self._logger_count = count
_count.deleter
def logger_count(self):
del self._logger_count
def _info(self, message):
if (self.logger_count <= 0):
with self.logenv(silent=False, timing=False):
self.logger.info(message)
else:
self.logger.info(message)
def setup_logger(self, silent=None, logfile=None, level=None, format=None, timer=None, filename=False, pathname=False, funcname=False, linenumber=False, process=False, name=None, classname=None):
self.logger_count += 1
self.logger_logfile = None
if (self.logger_count == 1):
if (not ('_timers' in self.__dict__)):
self.setup_timer()
self._log_timers = []
if (not (silent in (None, True, False))):
level = silent
silent = False
if (silent is None):
if (level is None):
try:
silent = self.logger_silent
except AttributeError:
silent = True
else:
silent = False
self.logger_silent = silent
if (level is None):
try:
level = self.logger_level
except AttributeError:
level = logging.INFO
self.logger_level = level
self.logger = logging.getLogger(self.__class__.__name__)
root_logger = logging.getLogger()
if ((len(root_logger.handlers) == 0) and (len(self.logger.handlers) == 0)):
if (format is None):
info = ''
if (name is not None):
info = name
if (classname is True):
info = ('%(name)s-' + info)
elif (classname is not False):
info = '%(name)s'
if funcname:
info += '.%(funcName)s'
if filename:
info = ('%(filename)s.' + info)
elif pathname:
info = ('%(pathname)s.' + info)
if linenumber:
info += ':%(lineno)d'
if process:
info = ('%(process)d:' + info)
formatter = logging.Formatter(' [{}] %(message)s'.format(info))
elif (format is 'UTC'):
formatter = utils.UTCFormatter('%(asctime)s%(msecs)03d %(nameb)12-s %(levelname)s: %(message)s', datefmt='%Y%m%d%H%M%S')
else:
raise Exception('Logger Format Not Recognized')
if (silent is True):
self.logger_handler = logging.NullHandler()
elif (logfile is not None):
self.logger_logfile = logfile
self.logger_handler = logging.FileHandler(logfile, 'w')
else:
self.logger_handler = logging.StreamHandler()
self.logger_handler.setFormatter(formatter)
self.logger.addHandler(self.logger_handler)
self.logger.setLevel(level)
else:
self.logger.setLevel(level)
self.logger_handler = None
else:
self._log_timers += [(self._log_timer, self._timers[self._log_timer])]
self.add_timer(timer)
self._log_timer = timer
def _format_timing(timing, time):
if (timing is False):
return None
elif (timing in (True, None)):
timing = 'Runtime:'
stime = time2human(time)
if (len(re.findall('({.*})', timing)) == 1):
s = timing.format(stime)
else:
s = '{:s} {:s}.'.format(timing, stime)
return s
def logger_timing(self, timing=None, timer=None, finish=False):
if ((timer is not None) and (timing is None)):
timing = (timer + ':')
timing = self._format_timing(timing, self.get_timer(timer))
if (timing is not None):
self.logger.info(timing)
if (finish and (timer is not None)):
self.finish_timer(timer)
def close_logger(self, timing=None, message=None):
if (timing is not None):
self.logger_timing(timing=timing)
if (message is not None):
self.logger.info(message)
self.logger_count -= 1
assert (self.logger_count >= 0), 'logger count is {}'.format(self.logger_count)
if (self.logger_count == 0):
if (self.logger_handler is not None):
self.logger.removeHandler(self.logger_handler)
if (self.logger_logfile is not None):
logging.shutdown()
else:
self.finish_timer(self._log_timer)
(self._log_timer, timer) = self._log_timers.pop()
self.replace_timer(name=self._log_timer, timer=timer)
def close_timer(self, timer=None, timing=None):
try:
log_timers = self._log_timers
except:
log_timers = []
if (self._timers[timer] in log_timers):
self.logger.error('Cannot close logger timer {}'.format(timer))
time = self.get_timer(timer)
return time
try:
time = self.finish_timer(timer)
except KeyError:
time = 0.0
if (timing is not None):
self.logger.info(self._format_timing(timing, time))
return time
def logger_file_info(self, f):
if isinstance(f, FortranReader):
filename = f.filename
filesize = f.filesize
self.logger.info('Loading {:s} ({:s})'.format(filename, byte2human(filesize)))
if f.compressed:
filesize = f.stat.st_size
self.logger.info('Compressed file size: {:s} (modulo {:s} for gz)'.format(byte2human(filesize), byte2human((2 ** 32))))
elif isinstance(f, io.IOBase):
stat = os.fstat(f.fileno())
filename = f.name
filesize = stat.st_size
self.logger.info('Loading {:s} ({:s})'.format(filename, byte2human(filesize)))
def logger_load_info(self, nvers, ncyc0, ncyc1, nmodels, time=None):
if (time is None):
time = self.get_timer()
self.logger.info(' version {:>9s}'.format(version2human(nvers)))
self.logger.info('first model read {:>9d}'.format(int(ncyc0)))
self.logger.info(' last model read {:>9d}'.format(int(ncyc1)))
self.logger.info(' num models read {:>9d}'.format(int(nmodels)))
self.logger.info(' data loaded in {:>9s}'.format(time2human(time))) |
# NOTE(review): this bare string expression is almost certainly a config
# registration decorator whose leading '@' was lost — confirm upstream.
('cond-affine')
def cond_affine(dataset, model, use_baseline):
    """Return the hyperparameter schema for the 'cond-affine' density model.

    Baseline models are not supported for this configuration.
    """
    assert (not use_baseline), 'Cannot use baseline model for this config'
    config = {
        'schema_type': 'cond-affine',
        'num_density_layers': 10,
        'batch_norm': False,
        'st_nets': [128] * 2,
        'p_nets': [128] * 2,
        'q_nets': GridParams([10] * 2, [100] * 4),
    }
    return config
def evaluate_metrics_from_lists(predictions: List[str], ground_truths: List[List[str]], ids: Union[(List[int], None)]=None) -> Tuple[(Dict[(str, float)], Dict[(int, Dict[(str, float)])])]:
    """Score captioning predictions against five references each.

    Writes temporary COCO-format JSON files under ./tmp, runs the file-based
    evaluator, removes the temp files, and returns (overall metrics,
    per-file metrics keyed by id).

    :param predictions: one predicted caption per item.
    :param ground_truths: five reference captions per item.
    :param ids: optional item ids; defaults to 0..len(predictions)-1.
    """
    assert (len(predictions) == len(ground_truths))
    assert all([(len(i) == 5) for i in ground_truths])
    if (ids is None):
        ids = range(len(predictions))
    (pred, ref) = reformat_to_coco(predictions, ground_truths, ids)
    tmp_dir = Path('tmp')
    # BUG FIX: exist_ok avoids the is_dir()/mkdir() race between processes.
    tmp_dir.mkdir(parents=True, exist_ok=True)
    # BUG FIX: random.randint requires integer bounds; the float 1000000.0
    # raises on Python >= 3.12 (deprecated since 3.10).
    unique_id = f'{random.randint(0, 1000000)}_{datetime.now()}'
    ref_file = tmp_dir.joinpath(f'{unique_id}_ref.json')
    pred_file = tmp_dir.joinpath(f'{unique_id}_pred.json')
    try:
        write_json(ref, ref_file)
        write_json(pred, pred_file)
        (metrics, per_file_metrics) = evaluate_metrics_from_files(pred_file, ref_file)
    finally:
        # BUG FIX: don't leak temp files when writing/scoring raises.
        if ref_file.exists():
            ref_file.unlink()
        if pred_file.exists():
            pred_file.unlink()
    return (metrics, per_file_metrics)
class ExtensionsWidget():
    """Sidebar widget listing optional viewer extensions (imgui-based).

    Each checkbox maps to a toggle method that adds/removes the matching
    widget on ``viz.widgets``; load failures surface via an error popup.
    """
    tag = 'extensions'
    description = 'Extensions'
    # Button artwork shipped alongside the package.
    icon = join(dirname(abspath(__file__)), '..', 'gui', 'buttons', 'button_extensions.png')
    icon_highlighted = join(dirname(abspath(__file__)), '..', 'gui', 'buttons', 'button_extensions_highlighted.png')

    def __init__(self, viz):
        """Record the viewer handle and detect which extensions are already loaded."""
        self.viz = viz
        self._show_err_popup = False
        # Initial checkbox states mirror widgets already registered on the viewer.
        self.stylegan = any([(w.tag == 'stylegan') for w in viz.widgets])
        self.mosaic = any([(w.tag == 'mosaic') for w in viz.widgets])
        self.segment = any([(w.tag == 'segment') for w in viz.widgets])
        self.mil = any([(w.tag == 'mil') for w in viz.widgets])
        _off_path = join(dirname(abspath(__file__)), '..', 'gui', 'buttons', 'small_button_verified.png')
        # Badge texture shown next to officially supported extensions.
        self._official_tex = gl_utils.Texture(image=np.array(Image.open(_off_path)), bilinear=True, mipmap=True)

    def toggle_stylegan(self):
        """Add the StyleGAN widget if absent, otherwise remove it."""
        viz = self.viz
        from ..widgets.stylegan import StyleGANWidget
        if (not any((isinstance(w, StyleGANWidget) for w in viz.widgets))):
            viz.add_widgets(StyleGANWidget)
        else:
            viz.remove_widget(StyleGANWidget)

    def toggle_mosaic(self):
        """Add the Mosaic widget if absent, otherwise remove it."""
        viz = self.viz
        from ..widgets.mosaic import MosaicWidget
        if (not any((isinstance(w, MosaicWidget) for w in viz.widgets))):
            viz.add_widgets(MosaicWidget)
        else:
            viz.remove_widget(MosaicWidget)

    def toggle_segment(self):
        """Add the cell segmentation widget if absent, otherwise remove it."""
        viz = self.viz
        from ..widgets.segment import SegmentWidget
        if (not any((isinstance(w, SegmentWidget) for w in viz.widgets))):
            viz.add_widgets(SegmentWidget)
        else:
            viz.remove_widget(SegmentWidget)

    def toggle_mil(self):
        """Add the multiple-instance-learning widget if absent, otherwise remove it."""
        viz = self.viz
        from ..widgets.mil import MILWidget
        if (not any((isinstance(w, MILWidget) for w in viz.widgets))):
            viz.add_widgets(MILWidget)
        else:
            viz.remove_widget(MILWidget)

    def extension_checkbox(self, title, description, check_value, official=False):
        """Draw one extension entry (title, description, optional 'Official'
        badge, right-aligned checkbox); return the imgui checkbox result."""
        viz = self.viz
        height = (imgui.get_text_line_height_with_spacing() * 3)
        imgui.begin_child(f'##{title}', height=height)
        with viz.bold_font():
            imgui.text(title)
        imgui.text_colored(description, *viz.theme.dim)
        if official:
            imgui.image(self._official_tex.gl_id, viz.font_size, viz.font_size)
            imgui.same_line((viz.font_size + (viz.spacing / 2)))
            imgui.text('Official')
        else:
            imgui.text('')
        # Right-align the checkbox within the child region.
        imgui.same_line(((imgui.get_content_region_max()[0] - viz.font_size) - (viz.spacing * 1.5)))
        result = imgui.checkbox(f'##{title}_checkbox', check_value)
        imgui.end_child()
        return result

    def show_extension_error(self, message, full_trace=None):
        """Queue an error popup; print the full traceback when available."""
        self._show_err_popup = True
        self._err_msg = message
        if full_trace:
            print(full_trace)
        else:
            print(message)

    def draw_error_popup(self):
        """Render the modal error window until the user closes it."""
        wrapped = textwrap.wrap(self._err_msg, width=45)
        lh = imgui.get_text_line_height_with_spacing()
        window_size = ((self.viz.font_size * 18), ((lh * len(wrapped)) + (self.viz.font_size * 4)))
        self.viz.center_next_window(*window_size)
        imgui.set_next_window_size(*window_size)
        (_, opened) = imgui.begin('Error loading extension', closable=True, flags=imgui.WINDOW_NO_RESIZE)
        if (not opened):
            self._show_err_popup = False
        for line in wrapped:
            imgui.text(line)
        if self.viz.sidebar.full_button('OK', width=(- 1)):
            self._show_err_popup = False
        imgui.end()

    # NOTE(review): this bare expression is almost certainly a decorator whose
    # leading '@' was lost ("@_utils.scoped_by_object_id") — confirm upstream.
    _utils.scoped_by_object_id
    def __call__(self, show=True):
        """Draw the extensions panel; toggling a checkbox (de)activates the
        extension, with errors routed to the popup."""
        viz = self.viz
        if show:
            viz.header('Extensions')
            (_c2, self.mosaic) = self.extension_checkbox('Mosaic Maps', description='Open and interact with Mosaic Maps.', check_value=self.mosaic, official=True)
            if _c2:
                self.toggle_mosaic()
            imgui.separator()
            (_c1, self.stylegan) = self.extension_checkbox('StyleGAN', description='Generate images with StyleGAN.', check_value=self.stylegan, official=True)
            if _c1:
                try:
                    self.toggle_stylegan()
                except Exception as e:
                    self.show_extension_error(str(e), traceback.format_exc())
                    self.stylegan = False
            imgui.separator()
            (_c3, self.segment) = self.extension_checkbox('Cell Segmentation', description='Segment cells with Cellpose.', check_value=self.segment, official=True)
            if _c3:
                try:
                    self.toggle_segment()
                except ImportError as e:
                    self.show_extension_error('Cellpose is not installed. Cellpose can be installed with "pip install cellpose"')
                    self.segment = False
                except Exception as e:
                    self.show_extension_error(str(e), traceback.format_exc())
                    self.segment = False
            (_c4, self.mil) = self.extension_checkbox('Multiple-Instance Learning', description='MIL support with attention heatmaps.', check_value=self.mil, official=True)
            if _c4:
                try:
                    self.toggle_mil()
                except Exception as e:
                    self.show_extension_error(str(e), traceback.format_exc())
                    self.mil = False
        if self._show_err_popup:
            self.draw_error_popup()
class DistModel(BaseModel):
    """LPIPS-style perceptual distance model.

    Wraps several distance metrics ('net-lin', 'net', 'L2', 'SSIM') behind a
    single initialize/forward/train interface.
    """

    def name(self):
        # Human-readable identifier, e.g. "net-lin [alex]".
        return self.model_name

    def initialize(self, model='net-lin', net='alex', vgg_blocks=[1, 2, 3, 4, 5], colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None, use_gpu=True, printNet=False, spatial=False, is_train=False, lr=0.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
        """Build the requested distance network and, when training, its
        BCE ranking loss + Adam optimizer.

        NOTE(review): mutable default arguments (vgg_blocks, gpu_ids) are kept
        for interface compatibility; they are not mutated in this method.
        """
        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)
        self.model = model
        self.net = net
        self.is_train = is_train
        self.spatial = spatial
        self.gpu_ids = gpu_ids
        self.model_name = ('%s [%s]' % (model, net))
        if (self.model == 'net-lin'):
            # Learned linear calibration on top of pretrained network features.
            self.net = netw.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, use_dropout=True, spatial=spatial, version=version, lpips=True, vgg_blocks=vgg_blocks)
            kw = {}
            if (not use_gpu):
                kw['map_location'] = 'cpu'
            if (model_path is None):
                # Default weights live next to this module: weights/v<version>/<net>.pth
                import inspect
                model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', ('weights/v%s/%s.pth' % (version, net))))
            if (not is_train):
                print(('Loading model from: %s' % model_path))
                self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
        elif (self.model == 'net'):
            # Uncalibrated network features.
            self.net = netw.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
        elif (self.model in ['L2', 'l2']):
            self.net = netw.L2(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'L2'
        elif (self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']):
            self.net = netw.DSSIM(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'SSIM'
        else:
            raise ValueError(('Model [%s] not recognized.' % self.model))
        self.parameters = list(self.net.parameters())
        if self.is_train:
            # Train the ranking head jointly with the calibration weights.
            self.rankLoss = netw.BCERankingLoss()
            self.parameters += list(self.rankLoss.net.parameters())
            self.lr = lr
            self.old_lr = lr
            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
        else:
            self.net.eval()
        if use_gpu:
            self.net.to(gpu_ids[0])
            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
            if self.is_train:
                self.rankLoss = self.rankLoss.to(device=gpu_ids[0])
        if printNet:
            print(' Networks initialized ')
            netw.print_network(self.net)
            print('')

    def forward(self, in0, in1, mask=None, retPerLayer=False):
        """Return the perceptual distance between image batches in0 and in1."""
        return self.net.forward(in0, in1, mask=mask, retPerLayer=retPerLayer)

    def optimize_parameters(self):
        """One training step: forward, backprop ranking loss, clamp weights."""
        self.forward_train()
        self.optimizer_net.zero_grad()
        self.backward_train()
        self.optimizer_net.step()
        self.clamp_weights()

    def clamp_weights(self):
        # Keep the learned 1x1 calibration weights non-negative.
        for module in self.net.modules():
            if (hasattr(module, 'weight') and (module.kernel_size == (1, 1))):
                module.weight.data = torch.clamp(module.weight.data, min=0)

    def set_input(self, data):
        """Load a 2AFC triplet (ref, p0, p1, human judge) onto the device."""
        self.input_ref = data['ref']
        self.input_p0 = data['p0']
        self.input_p1 = data['p1']
        self.input_judge = data['judge']
        if self.use_gpu:
            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
        self.var_ref = Variable(self.input_ref, requires_grad=True)
        self.var_p0 = Variable(self.input_p0, requires_grad=True)
        self.var_p1 = Variable(self.input_p1, requires_grad=True)

    def forward_train(self):
        """Compute d(ref,p0), d(ref,p1), accuracy, and the ranking loss."""
        self.d0 = self.forward(self.var_ref, self.var_p0)
        self.d1 = self.forward(self.var_ref, self.var_p1)
        self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)
        self.var_judge = Variable((1.0 * self.input_judge)).view(self.d0.size())
        # Map judge from [0,1] to [-1,1] for the ranking loss.
        self.loss_total = self.rankLoss.forward(self.d0, self.d1, ((self.var_judge * 2.0) - 1.0))
        return self.loss_total

    def backward_train(self):
        torch.mean(self.loss_total).backward()

    def compute_accuracy(self, d0, d1, judge):
        """d0, d1 are distances; judge is the fraction of humans preferring p1."""
        d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
        judge_per = judge.cpu().numpy().flatten()
        return ((d1_lt_d0 * judge_per) + ((1 - d1_lt_d0) * (1 - judge_per)))

    def get_current_errors(self):
        """Return mean loss and accuracy for logging."""
        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()), ('acc_r', self.acc_r)])
        for key in retDict.keys():
            retDict[key] = np.mean(retDict[key])
        return retDict

    def get_current_visuals(self):
        """Return ref/p0/p1 images upscaled to 256px for visualization."""
        zoom_factor = (256 / self.var_ref.data.size()[2])
        ref_img = util.tensor2im(self.var_ref.data)
        p0_img = util.tensor2im(self.var_p0.data)
        p1_img = util.tensor2im(self.var_p1.data)
        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)
        return OrderedDict([('ref', ref_img_vis), ('p0', p0_img_vis), ('p1', p1_img_vis)])

    def save(self, path, label):
        """Save the distance net and the ranking-loss net checkpoints."""
        if self.use_gpu:
            self.save_network(self.net.module, path, '', label)
        else:
            self.save_network(self.net, path, '', label)
        self.save_network(self.rankLoss.net, path, 'rank', label)

    def update_learning_rate(self, nepoch_decay):
        """Linearly decay the learning rate by lr/nepoch_decay per call."""
        lrd = (self.lr / nepoch_decay)
        lr = (self.old_lr - lrd)
        for param_group in self.optimizer_net.param_groups:
            param_group['lr'] = lr
        # NOTE(review): '%s' % type formats the *builtin* `type` class, not a
        # label; this quirk exists in upstream LPIPS too — confirm before changing.
        print(('update lr [%s] decay: %f -> %f' % (type, self.old_lr, lr)))
        self.old_lr = lr
def dsrla_mobilenetv2_k24():
    """Build a dsRLA-MobileNetV2 with a 24-channel recurrent layer aggregation."""
    print('Constructing dsrla_mobilenetv2_k24......')
    return dsRLA_MobileNetV2(rla_channel=24)
def mlp_architecture(n_pc_points, bneck_size, bneck_post_mlp=False, check_n_pc_points=True):
    """Default MLP autoencoder architecture for point clouds.

    :param n_pc_points: points per cloud (must be 2048 unless the check is disabled).
    :param bneck_size: bottleneck width.
    :param bneck_post_mlp: place the bottleneck after an extra MLP — the last
        encoder filter is dropped and the decoder input becomes bneck_size.
    :param check_n_pc_points: enforce the 2048-point constraint.
    :returns: (encoder_fn, decoder_fn, encoder_args, decoder_args)
    :raises ValueError: if the point-count check fails.
    """
    if (check_n_pc_points and (n_pc_points != 2048)):
        # BUG FIX: the original raised a bare ValueError with no message.
        raise ValueError('n_pc_points must be 2048 when check_n_pc_points is True, got {}'.format(n_pc_points))
    encoder = encoder_with_convs_and_symmetry
    decoder = decoder_with_fc_only
    n_input = [n_pc_points, 3]
    encoder_args = {'n_filters': [64, 128, 128, 256, bneck_size],
                    'filter_sizes': [1],
                    'strides': [1],
                    'b_norm': True,
                    'verbose': True}
    decoder_args = {'layer_sizes': [256, 256, np.prod(n_input)],
                    'b_norm': False,
                    'b_norm_finish': False,
                    'verbose': True}
    if bneck_post_mlp:
        encoder_args['n_filters'].pop()
        decoder_args['layer_sizes'][0] = bneck_size
    return (encoder, decoder, encoder_args, decoder_args)
def now():
    """Return the local time as 'YYYYMMDDHHM' (minutes truncated to tens)."""
    from datetime import datetime
    stamp = datetime.now().strftime('%Y%m%d%H%M')
    return stamp[:-1]
def draw_points_on_image(image, points, curr_point=None, highlight_all=True, radius_scale=0.01):
    """Overlay drag-point markers on *image* and return an RGB copy.

    Each entry of *points* may hold a 'start' (drawn red; 'start_temp' takes
    precedence) and a 'target' (drawn blue); a yellow line links the pair.
    Unselected points are semi-transparent unless *highlight_all* is set, and
    the currently selected point gets 'p'/'t' text labels.
    """
    overlay = Image.new('RGBA', image.size, 0)
    draw = ImageDraw.Draw(overlay)
    for key, point in points.items():
        selected = (curr_point is not None) and (curr_point == key)
        if selected or highlight_all:
            start_color = (255, 0, 0)
            target_color = (0, 0, 255)
        else:
            start_color = (255, 0, 0, 35)
            target_color = (0, 0, 255, 35)
        radius = int(image.size[0] * radius_scale)
        start = point.get('start_temp', point['start'])
        target = point['target']
        if (start is not None) and (target is not None):
            sx, sy = int(start[0]), int(start[1])
            tx, ty = int(target[0]), int(target[1])
            draw.line((sx, sy, tx, ty), fill=(255, 255, 0), width=2)
        if start is not None:
            sx, sy = int(start[0]), int(start[1])
            draw.ellipse((sx - radius, sy - radius, sx + radius, sy + radius), fill=start_color)
            if selected:
                draw.text((sx, sy), 'p', align='center', fill=(0, 0, 0))
        if target is not None:
            tx, ty = int(target[0]), int(target[1])
            draw.ellipse((tx - radius, ty - radius, tx + radius, ty + radius), fill=target_color)
            if selected:
                draw.text((tx, ty), 't', align='center', fill=(0, 0, 0))
    return Image.alpha_composite(image.convert('RGBA'), overlay).convert('RGB')
class CbamBlock(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by
    spatial attention applied to the input feature map."""

    def __init__(self, channels, reduction_ratio=16):
        super(CbamBlock, self).__init__()
        self.ch_gate = ChannelGate(channels=channels, reduction_ratio=reduction_ratio)
        self.sp_gate = SpatialGate()

    def forward(self, x):
        # Apply channel attention first, then spatial attention.
        return self.sp_gate(self.ch_gate(x))
def main():
    """Build and launch the training command for the selected run mode.

    Modes: 'single' (one process), 'multiple' (torch.distributed on one node
    with a random master port), 'multi_node' (explicit addr/port/rank).
    """
    args = parse_args()
    if (args.mode == 'single'):
        train_cmd = ('python lib/train/run_training.py --script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --distill %d --script_teacher %s --config_teacher %s --use_wandb %d' % (args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.distill, args.script_teacher, args.config_teacher, args.use_wandb))
    elif (args.mode == 'multiple'):
        train_cmd = ('python -m torch.distributed.launch --nproc_per_node %d --master_port %d lib/train/run_training.py --script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d --distill %d --script_teacher %s --config_teacher %s' % (args.nproc_per_node, random.randint(10000, 50000), args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb, args.distill, args.script_teacher, args.config_teacher))
    elif (args.mode == 'multi_node'):
        train_cmd = ('python -m torch.distributed.launch --nproc_per_node %d --master_addr %s --master_port %d --nnodes %d --node_rank %d lib/train/run_training.py --script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d --distill %d --script_teacher %s --config_teacher %s' % (args.nproc_per_node, args.ip, args.port, args.world_size, args.rank, args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb, args.distill, args.script_teacher, args.config_teacher))
    else:
        # BUG FIX: the message omitted the supported 'multi_node' mode.
        raise ValueError("mode should be 'single', 'multiple' or 'multi_node'.")
    # NOTE(review): command string is built from CLI arguments and executed
    # through a shell — acceptable for a local launcher, not for untrusted input.
    os.system(train_cmd)
class video_show():
    """ROS node helper: displays and/or records frames from an image topic."""

    def __init__(self):
        # Behavior flags come from the private ROS parameter namespace.
        self.show_output = rospy.get_param('~show_output', True)
        self.save_output = rospy.get_param('~save_output', False)
        self.output_video_file = rospy.get_param('~output_video_file', 'result.mp4')
        self.bridge = CvBridge()
        # BUG FIX: this flag was read in callback() but never initialized,
        # raising AttributeError on the first frame when save_output is True.
        self.video_writer_init = False
        self.image_sub = rospy.Subscriber('image_topic', Image, self.callback)

    def callback(self, data):
        """Convert an incoming ROS image and show/record it as configured."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data)
        except CvBridgeError as e:
            print(e)
            return
        if (cv_image.size == 0):
            # Ignore empty frames.
            return
        rospy.loginfo('Listener_original: Received new frame')
        cv_image = cv_image.astype('uint8')
        if (self.show_output == True):
            cv2.imshow('video_show_orig', cv_image)
            cv2.waitKey(10)
        if (self.save_output == True):
            if (self.video_writer_init == False):
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                self.out = cv2.VideoWriter(self.output_video_file, fourcc, 25, (cv_image.shape[1], cv_image.shape[0]))
                # BUG FIX: mark the writer as created, otherwise it was
                # re-created (and the file truncated) on every frame.
                self.video_writer_init = True
            self.out.write(cv_image)
def compare_models(model_1, model_2):
    """Compare two models' state dicts tensor-by-tensor.

    Prints the key of every differing tensor and a success message when all
    match. Raises if the two state dicts' keys do not line up pairwise.
    """
    models_differ = 0
    for (key_item_1, key_item_2) in zip(model_1.state_dict().items(), model_2.state_dict().items()):
        if not torch.equal(key_item_1[1], key_item_2[1]):
            models_differ += 1
            if (key_item_1[0] == key_item_2[0]):
                # BUG FIX: typo "Mismtach" in the user-facing message.
                print('Mismatch found at', key_item_1[0])
            else:
                # BUG FIX: bare `raise Exception` carried no context.
                raise Exception('State dict keys do not match: {!r} vs {!r}'.format(key_item_1[0], key_item_2[0]))
    if (models_differ == 0):
        print('Models match perfectly! :)')
def cmpGraphs(g1, g2):
    """Assert that two graphs are structurally identical.

    Checks vertex/edge counts, per-vertex id/degree, and the source/target ids
    of every incident edge and every graph edge, in iteration order.
    """
    assert g1.numVertices == g2.numVertices
    assert g1.numEdges == g2.numEdges
    assert len(list(g1.vertices)) == len(list(g2.vertices))
    assert len(list(g1.edges)) == len(list(g2.edges))
    for va, vb in zip(g1.vertices, g2.vertices):
        assert va.id == vb.id
        assert va.degree == vb.degree
        assert len(list(va.incidentEdges)) == len(list(vb.incidentEdges))
        for ea, eb in zip(va.incidentEdges, vb.incidentEdges):
            assert (ea.source.id, ea.target.id) == (eb.source.id, eb.target.id)
    for ea, eb in zip(g1.edges, g2.edges):
        assert (ea.source.id, ea.target.id) == (eb.source.id, eb.target.id)
class ExperimentPlannerPoolBasedOnSpacing(ExperimentPlanner):
    """nnU-Net experiment planner whose pooling is derived from voxel spacing.

    Overrides plan-file naming and the per-stage property computation; the
    patch-size search below is order-sensitive, so the code is left untouched.
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlannerPoolBasedOnSpacing, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = 'nnUNetData_poolBasedOnSpacing'
        self.plans_fname = join(self.preprocessed_output_folder, ('nnUNetPlans' + 'poolBasedOnSpacing_plans_3D.pkl'))

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases, num_modalities, num_classes):
        """Derive batch size, patch size, and pool/conv kernels for one
        resolution stage, shrinking the patch until it fits the VRAM budget."""
        # Median shape after resampling to the target spacing.
        new_median_shape = np.round(((original_spacing / current_spacing) * original_shape)).astype(int)
        dataset_num_voxels = (np.prod(new_median_shape) * num_cases)
        # Initial patch: inversely proportional to spacing, normalized so the
        # smallest axis gets 512 voxels, then clipped to the median shape.
        input_patch_size = (1 / np.array(current_spacing))
        input_patch_size /= input_patch_size.mean()
        input_patch_size *= ((1 / min(input_patch_size)) * 512)
        input_patch_size = np.round(input_patch_size).astype(int)
        input_patch_size = [min(i, j) for (i, j) in zip(input_patch_size, new_median_shape)]
        (network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, shape_must_be_divisible_by) = get_pool_and_conv_props(current_spacing, input_patch_size, self.unet_featuremap_min_edge_length, self.unet_max_numpool)
        ref = Generic_UNet.use_this_for_batch_size_computation_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis, self.unet_base_num_features, self.unet_max_num_filters, num_modalities, num_classes, pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        # Iteratively shrink the axis that is largest relative to the median
        # shape until the estimated VRAM fits the reference budget.
        while (here > ref):
            axis_to_be_reduced = np.argsort((new_shp / new_median_shape))[(- 1)]
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            (_, _, _, _, shape_must_be_divisible_by_new) = get_pool_and_conv_props(current_spacing, tmp, self.unet_featuremap_min_edge_length, self.unet_max_numpool)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
            # Recompute pooling/conv properties for the reduced shape.
            (network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, shape_must_be_divisible_by) = get_pool_and_conv_props(current_spacing, new_shp, self.unet_featuremap_min_edge_length, self.unet_max_numpool)
            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis, self.unet_base_num_features, self.unet_max_num_filters, num_modalities, num_classes, pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        print(new_shp)
        input_patch_size = new_shp
        # Batch size: scale the default by leftover VRAM, cap by dataset
        # coverage, and never go below the minimum.
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D
        batch_size = int(np.floor((max((ref / here), 1) * batch_size)))
        max_batch_size = np.round(((self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels) / np.prod(input_patch_size, dtype=np.int64))).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)
        # Use 2D augmentation when the patch is strongly anisotropic along axis 0.
        do_dummy_2D_data_aug = ((max(input_patch_size) / input_patch_size[0]) > self.anisotropy_threshold)
        plan = {'batch_size': batch_size, 'num_pool_per_axis': network_num_pool_per_axis, 'patch_size': input_patch_size, 'median_patient_size_in_voxels': new_median_shape, 'current_spacing': current_spacing, 'original_spacing': original_spacing, 'do_dummy_2D_data_aug': do_dummy_2D_data_aug, 'pool_op_kernel_sizes': pool_op_kernel_sizes, 'conv_kernel_sizes': conv_kernel_sizes}
        return plan
def _match_array_semantics(sym_model):
    """Convert a (symbol, args, auxs) triple to numpy-array semantics when
    MXNet >= 2.0 is running in numpy mode; otherwise return it unchanged.

    The args/auxs dicts are updated in place (callers may hold references).
    """
    if check_mx_version('2.0.0') and mx.util.is_np_array():
        symnet, args, auxs = sym_model
        symnet = symnet.as_np_ndarray()
        for params in (args, auxs):
            for key, value in params.items():
                params[key] = value.as_np_ndarray()
        sym_model = (symnet, args, auxs)
    return sym_model
class VREPGraspVisualization(object):
    """Connects to a V-REP remote API server to visualize grasp datasets.

    Connection parameters and defaults come from FLAGS; per-call overrides are
    consolidated in ``_apply_overrides`` (the original repeated the same
    if-not-None boilerplate in three methods).
    """

    def __init__(self):
        print('VREPGraspVisualization: Object started, attempting to connect to V-REP')
        # Close any stale connections before opening a new one.
        vrep.vrep.simxFinish((- 1))
        self.client_id = vrep.vrep.simxStart(FLAGS.vrepConnectionAddress, FLAGS.vrepConnectionPort, FLAGS.vrepWaitUntilConnected, FLAGS.vrepDoNotReconnectOnceDisconnected, FLAGS.vrepTimeOutInMs, FLAGS.vrepCommThreadCycleInMs)
        self.dataset = FLAGS.grasp_dataset
        self.parent_name = FLAGS.vrepParentName
        self.visualization_pipeline = FLAGS.vrepVisualizationPipeline
        self.visualization_dir = FLAGS.visualization_dir
        if (self.client_id != (- 1)):
            print('VREPGraspVisualization: Connected to remote API server')
        else:
            print('VREPGraspVisualization: Error connecting to remote API server')
            return

    def _apply_overrides(self, dataset=None, parent_name=None, visualization_pipeline=None, visualization_dir=None):
        """Update configuration attributes for every argument that is not None."""
        if (dataset is not None):
            self.dataset = dataset
        if (parent_name is not None):
            self.parent_name = parent_name
        if (visualization_pipeline is not None):
            self.visualization_pipeline = visualization_pipeline
        if (visualization_dir is not None):
            self.visualization_dir = visualization_dir

    def visualize(self, tf_session=None, dataset=None, batch_size=1, parent_name=None, visualization_pipeline=None, visualization_dir=None):
        """Dispatch to the python or tensorflow visualization pipeline."""
        self._apply_overrides(dataset, parent_name, visualization_pipeline, visualization_dir)
        if (self.visualization_pipeline == 'python'):
            self.visualize_python(tf_session, self.dataset, batch_size, self.parent_name)
        elif (self.visualization_pipeline == 'tensorflow'):
            self.visualize_tensorflow(tf_session, self.dataset, batch_size, self.parent_name)
        else:
            raise ValueError(('VREPGraspVisualization.visualize(): unsupported vrepVisualizationPipeline: ' + str(self.visualization_pipeline)))

    def visualize_tensorflow(self, tf_session=None, dataset=None, batch_size=1, parent_name=None, visualization_pipeline=None, visualization_dir=None, verbose=0):
        """TensorFlow-based pipeline (not yet implemented)."""
        self._apply_overrides(dataset, parent_name, visualization_pipeline, visualization_dir)
        raise NotImplementedError

    def visualize_python(self, tf_session=None, dataset=None, batch_size=1, parent_name=None, visualization_pipeline=None, visualization_dir=None):
        """Pure-python pipeline (not yet implemented)."""
        self._apply_overrides(dataset, parent_name, visualization_pipeline, visualization_dir)
        raise NotImplementedError

    def __del__(self):
        # Close all remote API connections on garbage collection.
        vrep.vrep.simxFinish((- 1))
class DictTensorOutputModel1(nn.Module):
    """Toy two-branch model returning (dict of branch outputs, fused head).

    Each 784-dim input passes through its own 12-unit linear layer; the
    concatenated branches feed a scalar head.
    """

    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(28 * 28, 12)
        self.layer_2 = nn.Linear(28 * 28, 12)
        self.layer_3 = nn.Linear(24, 1)

    def forward(self, x1, x2):
        branch_a = self.layer_1(x1)
        branch_b = self.layer_2(x2)
        fused = self.layer_3(torch.cat([branch_a, branch_b], axis=1))
        return ({'x1': branch_a, 'x2': branch_b}, fused)
class MBart50TokenizerFast(PreTrainedTokenizerFast):
    """Fast mBART-50 tokenizer (HuggingFace *tokenizers* backend).

    Source sequences are formatted ``[src_lang_code] X [eos]`` and target
    sequences ``[tgt_lang_code] X [eos]``.

    BUG FIX: the ``src_lang`` property decorators in this copy had lost their
    leading '@' (a bare ``_lang.setter`` expression, which is a NameError at
    class-body execution); ``@property`` / ``@src_lang.setter`` are restored.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBart50Tokenizer
    # Tokens wrapped around every sequence; set by the language-switch helpers.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, src_lang=None, tgt_lang=None, tokenizer_file=None, eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        # The mask token behaves like a normal word (includes the preceding space).
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', [])
        kwargs['additional_special_tokens'] += [code for code in FAIRSEQ_LANGUAGE_CODES if (code not in kwargs['additional_special_tokens'])]
        super().__init__(vocab_file, src_lang=src_lang, tgt_lang=tgt_lang, tokenizer_file=tokenizer_file, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = (False if (not self.vocab_file) else True)
        self.lang_code_to_id = {lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES}
        self._src_lang = (src_lang if (src_lang is not None) else 'en_XX')
        self.tgt_lang = tgt_lang
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``en_XX``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Wrap one or two sequences with the current prefix/suffix tokens."""
        if (token_ids_1 is None):
            return ((self.prefix_tokens + token_ids_0) + self.suffix_tokens)
        return (((self.prefix_tokens + token_ids_0) + token_ids_1) + self.suffix_tokens)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str='en_XX', tgt_texts: Optional[List[str]]=None, tgt_lang: str='ro_RO', **kwargs) -> BatchEncoding:
        """Set source/target languages, then delegate to the parent batcher."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Use format [src_lang_code] X [eos] and rebuild the post-processor."""
        self.cur_lang_code_id = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(single=((prefix_tokens_str + ['$A']) + suffix_tokens_str), pair=((prefix_tokens_str + ['$A', '$B']) + suffix_tokens_str), special_tokens=list(zip((prefix_tokens_str + suffix_tokens_str), (self.prefix_tokens + self.suffix_tokens))))

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Use format [tgt_lang_code] X [eos] and rebuild the post-processor."""
        self.cur_lang_code_id = self.convert_tokens_to_ids(tgt_lang)
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(single=((prefix_tokens_str + ['$A']) + suffix_tokens_str), pair=((prefix_tokens_str + ['$A', '$B']) + suffix_tokens_str), special_tokens=list(zip((prefix_tokens_str + suffix_tokens_str), (self.prefix_tokens + self.suffix_tokens))))

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Tokenize for translation and force the BOS token to the target language."""
        if ((src_lang is None) or (tgt_lang is None)):
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into *save_directory*."""
        if (not self.can_save_slow_tokenizer):
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def localize(loc_trainer, classify_evaluator, iter_items, k=5):
    """Run the localization model on one batch and attach the top-k predicted
    tokens/indices to every datapoint.

    Each dp in the batch gets `topn_list` (k token strings, '' as padding) and
    `topn_index` (k indices, -1 as padding). Predictions are also fed to
    `classify_evaluator` for metric accumulation.
    """
    (batch_dict, dp_list) = iter_items
    (probs, labels, _loss) = loc_trainer.model(batch_dict, 'test', loss_fn=None)
    classify_evaluator.add_metric_data(probs.cpu().tolist(), labels)
    # Never ask topk for more entries than the distribution has.
    topk = min(k, probs.shape[-1])
    (_values, ranked) = torch.topk(probs, k=topk, dim=1)
    ranked = ranked.cpu().tolist()
    for (i, dp) in enumerate(dp_list):
        sorted_idxs = ranked[i]
        tokens = []
        indices = []
        for rank in range(k):
            token, index = '', -1
            if rank < len(sorted_idxs):
                cand = sorted_idxs[rank]
                # Guard against indices that fall outside this dp's token list.
                if cand < len(dp.buggy_context_tk_list):
                    token, index = dp.buggy_context_tk_list[cand], cand
            tokens.append(token)
            indices.append(index)
        dp.topn_list = tokens
        dp.topn_index = indices
def load_optimized_vae_decoder(cache_dir, accelerator='openvino', ipex=True, precision='float32', device='CPU', low_memory=False):
    """Load a pre-optimized nano VAE decoder from `<cache_dir>/decoder`.

    Raises:
        Exception: when no optimized model exists at the expected path.
    """
    start = time.perf_counter()
    decoder_path = os.path.join(cache_dir, 'decoder')
    (nano_vae_decoder, cache_dir) = try_load_existing_model({}, decoder_path, accelerator=accelerator, ipex=ipex, precision=precision, low_memory=low_memory, device=device)
    elapsed = time.perf_counter() - start
    if nano_vae_decoder is None:
        raise Exception(f'You have to download the optimized nano vae decoder models. Expected path: {cache_dir}')
    print(f'Load vae decoder in {elapsed}s')
    return nano_vae_decoder
def get_buffer(config, game) -> (ChessEnv, list):
    """Replay one PGN game through a ChessEnv and collect supervised data.

    Returns the finished env and the white/black move records interleaved in
    play order. Elo-based weights scale each side's policy targets.
    """
    env = ChessEnv().reset()
    white = ChessPlayer(config, dummy=True)
    black = ChessPlayer(config, dummy=True)
    result = game.headers['Result']
    white_weight = clip_elo_policy(config, int(game.headers['WhiteElo']))
    black_weight = clip_elo_policy(config, int(game.headers['BlackElo']))
    # Walk the mainline once to pull out the UCI move list.
    actions = []
    while not game.is_end():
        game = game.variation(0)
        actions.append(game.move.uci())
    # Replay the moves until the game (or the env) ends.
    for uci in actions:
        if env.done:
            break
        if env.white_to_move:
            mover, weight = white, white_weight
        else:
            mover, weight = black, black_weight
        action = mover.sl_action(env.observation, uci, weight=weight)
        env.step(action, False)
    # A decisive result without a board-level game-over means a resignation.
    if (not env.board.is_game_over()) and result != '1/2-1/2':
        env.resigned = True
    if result == '1-0':
        env.winner = Winner.white
        black_win = -1
    elif result == '0-1':
        env.winner = Winner.black
        black_win = 1
    else:
        env.winner = Winner.draw
        black_win = 0
    black.finish_game(black_win)
    white.finish_game(-black_win)
    # Interleave the per-ply records: white move, then black reply (if any).
    data = []
    for i, white_move in enumerate(white.moves):
        data.append(white_move)
        if i < len(black.moves):
            data.append(black.moves[i])
    return (env, data)
_cache(None)
def _infer_backed_cached(pool_class):
    """Infer the parallel-backend name from an executor/pool class.

    NOTE(review): the bare `_cache(None)` line above looks like a decorator
    whose leading `@` was stripped during preprocessing — confirm upstream.
    """
    if pool_class.__name__ == 'RayExecutor':
        return 'ray'
    root = pool_class.__module__.split('.')[0]
    # Map well-known top-level modules onto their backend names; otherwise the
    # module root itself is the backend.
    known_backends = {'concurrent': 'concurrent.futures', 'joblib': 'loky', 'distributed': 'dask'}
    return known_backends.get(root, root)
def test_bytes(doc):
    """Bytes returned from C++ decode to the expected strings, and the
    generated signature advertises a bytes return type."""
    expectations = (
        (m.bytes_from_char_ssize_t, 'green'),
        (m.bytes_from_char_size_t, 'purple'),
        (m.bytes_from_string, 'foo'),
        (m.bytes_from_str, 'bar'),
    )
    for fn, expected in expectations:
        assert fn().decode() == expected
    assert doc(m.bytes_from_str) == 'bytes_from_str() -> bytes'
def load_data(path: PathOrStr, file: PathLikeOrBinaryStream='data_save.pkl', bs: int=64, val_bs: int=None, num_workers: int=defaults.cpus, dl_tfms: Optional[Collection[Callable]]=None, device: torch.device=None, collate_fn: Callable=data_collate, no_check: bool=False, **kwargs) -> DataBunch:
    """Load a saved `LabelLists` from `path/file` (or a stream) and turn it
    into a `DataBunch` with the given loader settings."""
    if is_pathlike(file):
        source = Path(path) / file
    else:
        source = file
    # Force CPU deserialization when the default device is CPU so GPU-saved
    # tensors still load.
    if defaults.device == torch.device('cpu'):
        label_lists = torch.load(source, map_location='cpu')
    else:
        label_lists = torch.load(source)
    return label_lists.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, dl_tfms=dl_tfms, device=device, collate_fn=collate_fn, no_check=no_check, **kwargs)
class CorrelationMetrics():
    """Thin wrappers around scipy.stats correlation measures, returning
    plain (coefficient, p-value) tuples."""

    def __init__(self):
        pass

    def pearson_cor(self, data1, data2):
        """Pearson correlation coefficient of two samples."""
        (coeff, pvalue) = stats.pearsonr(data1, data2)
        return (coeff, pvalue)

    def spearman_cor(self, data1, data2=None):
        """Spearman rank correlation; data2 is optional, matching
        scipy.stats.spearmanr's interface."""
        (coeff, pvalue) = stats.spearmanr(data1, data2)
        return (coeff, pvalue)
def test_evolveddiskdf_setup_roAsQuantity_oddunits():
    """evolveddiskdf should convert an astropy Quantity `ro` given in
    light-years into internal kpc units."""
    from galpy.df import dehnendf, evolveddiskdf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential
    halo = LogarithmicHaloPotential(normalize=1.0)
    elliptical = EllipticalDiskPotential(twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0)
    ro = 7000.0
    warm_df = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=ro * units.lyr)
    evolved = evolveddiskdf(warm_df, [halo, elliptical], to=-150.0)
    assert numpy.fabs(evolved._ro - ro * units.lyr.to(units.kpc)) < 10.0 ** -10.0, 'ro in evolveddiskdf setup as Quantity does not work as expected'
    return None
def get_parser():
    """Build the CLI argument parser for merging JSON files."""
    parser = argparse.ArgumentParser(
        description='merge json files',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # The three list-valued options share the same shape: repeatable, 1+ files.
    for flag, help_text in (
            ('--input-jsons', 'Json files for the inputs'),
            ('--output-jsons', 'Json files for the outputs'),
            ('--jsons', 'The json files except for the input and outputs')):
        parser.add_argument(flag, type=str, nargs='+', action='append', default=[], help=help_text)
    parser.add_argument('--verbose', '-V', default=0, type=int, help='Verbose option')
    parser.add_argument('-O', dest='output', type=str, help='Output json file')
    return parser
class DDPG(object):
    """Deep Deterministic Policy Gradient agent (TensorFlow 1 graph mode).

    The whole training graph is built at construction time: transition
    placeholders, actor/critic networks with slowly-updated target copies,
    optional observation/return normalization (with pop-art output rescaling),
    optional action- and parameter-space exploration noise, and MPI-synced
    Adam optimizers. Call ``initialize(sess)`` once before ``pi``/``train``.

    NOTE(review): ``adaptive_param_noise`` and
    ``adaptive_param_noise_policy_threshold`` are accepted but never read in
    this class — presumably consumed elsewhere; confirm before removing.
    """

    def __init__(self, actor, critic, memory, observation_shape, action_shape, param_noise=None, action_noise=None, gamma=0.99, tau=0.001, normalize_returns=False, enable_popart=False, normalize_observations=True, batch_size=128, observation_range=((- 5.0), 5.0), action_range=((- 1.0), 1.0), return_range=((- np.inf), np.inf), adaptive_param_noise=True, adaptive_param_noise_policy_threshold=0.1, critic_l2_reg=0.0, actor_lr=0.0001, critic_lr=0.001, clip_norm=None, reward_scale=1.0):
        """Build the full DDPG graph.

        Args:
            actor, critic: callable network builders (called on placeholders).
            memory: replay buffer providing ``sample``/``append``.
            observation_shape, action_shape: env tensor shapes (no batch dim).
            gamma: discount factor; tau: soft target-update rate.
        """
        # Placeholders for transitions sampled from the replay memory.
        self.obs0 = tf.placeholder(tf.float32, shape=((None,) + observation_shape), name='obs0')
        self.obs1 = tf.placeholder(tf.float32, shape=((None,) + observation_shape), name='obs1')
        self.terminals1 = tf.placeholder(tf.float32, shape=(None, 1), name='terminals1')
        self.rewards = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
        self.actions = tf.placeholder(tf.float32, shape=((None,) + action_shape), name='actions')
        self.critic_target = tf.placeholder(tf.float32, shape=(None, 1), name='critic_target')
        self.param_noise_stddev = tf.placeholder(tf.float32, shape=(), name='param_noise_stddev')
        # Hyper-parameters and collaborators.
        self.gamma = gamma
        self.tau = tau
        self.memory = memory
        self.normalize_observations = normalize_observations
        self.normalize_returns = normalize_returns
        self.action_noise = action_noise
        self.param_noise = param_noise
        self.action_range = action_range
        self.return_range = return_range
        self.observation_range = observation_range
        self.critic = critic
        self.actor = actor
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.clip_norm = clip_norm
        self.enable_popart = enable_popart
        self.reward_scale = reward_scale
        self.batch_size = batch_size
        self.stats_sample = None
        self.critic_l2_reg = critic_l2_reg
        # Running mean/std of observations, used to whiten network inputs.
        if self.normalize_observations:
            with tf.variable_scope('obs_rms'):
                self.obs_rms = RunningMeanStd(shape=observation_shape)
        else:
            self.obs_rms = None
        normalized_obs0 = tf.clip_by_value(normalize(self.obs0, self.obs_rms), self.observation_range[0], self.observation_range[1])
        normalized_obs1 = tf.clip_by_value(normalize(self.obs1, self.obs_rms), self.observation_range[0], self.observation_range[1])
        # Running mean/std of returns, used to normalize critic targets.
        if self.normalize_returns:
            with tf.variable_scope('ret_rms'):
                self.ret_rms = RunningMeanStd()
        else:
            self.ret_rms = None
        # Target networks are structural copies updated slowly toward the
        # main networks (see setup_target_network_updates).
        target_actor = copy(actor)
        target_actor.name = 'target_actor'
        self.target_actor = target_actor
        target_critic = copy(critic)
        target_critic.name = 'target_critic'
        self.target_critic = target_critic
        # Main graph: critic on stored actions and on the actor's own action
        # (the latter reuses the critic variables).
        self.actor_tf = actor(normalized_obs0)
        self.normalized_critic_tf = critic(normalized_obs0, self.actions)
        self.critic_tf = denormalize(tf.clip_by_value(self.normalized_critic_tf, self.return_range[0], self.return_range[1]), self.ret_rms)
        self.normalized_critic_with_actor_tf = critic(normalized_obs0, self.actor_tf, reuse=True)
        self.critic_with_actor_tf = denormalize(tf.clip_by_value(self.normalized_critic_with_actor_tf, self.return_range[0], self.return_range[1]), self.ret_rms)
        # Bellman target: r + gamma * (1 - done) * Q_target(s', pi_target(s')).
        Q_obs1 = denormalize(target_critic(normalized_obs1, target_actor(normalized_obs1)), self.ret_rms)
        self.target_Q = (self.rewards + (((1.0 - self.terminals1) * gamma) * Q_obs1))
        # Optional machinery, then optimizers, stats and target-update ops.
        if (self.param_noise is not None):
            self.setup_param_noise(normalized_obs0)
        self.setup_actor_optimizer()
        self.setup_critic_optimizer()
        if (self.normalize_returns and self.enable_popart):
            self.setup_popart()
        self.setup_stats()
        self.setup_target_network_updates()

    def setup_target_network_updates(self):
        """Create hard-init and soft (tau-weighted) target update ops."""
        (actor_init_updates, actor_soft_updates) = get_target_updates(self.actor.vars, self.target_actor.vars, self.tau)
        (critic_init_updates, critic_soft_updates) = get_target_updates(self.critic.vars, self.target_critic.vars, self.tau)
        self.target_init_updates = [actor_init_updates, critic_init_updates]
        self.target_soft_updates = [actor_soft_updates, critic_soft_updates]

    def setup_param_noise(self, normalized_obs0):
        """Build the perturbed actor (for acting) and a second adaptively
        perturbed actor used only to measure the action-space distance that
        drives noise-stddev adaptation."""
        assert (self.param_noise is not None)
        param_noise_actor = copy(self.actor)
        param_noise_actor.name = 'param_noise_actor'
        self.perturbed_actor_tf = param_noise_actor(normalized_obs0)
        logger.info('setting up param noise')
        self.perturb_policy_ops = get_perturbed_actor_updates(self.actor, param_noise_actor, self.param_noise_stddev)
        adaptive_param_noise_actor = copy(self.actor)
        adaptive_param_noise_actor.name = 'adaptive_param_noise_actor'
        adaptive_actor_tf = adaptive_param_noise_actor(normalized_obs0)
        self.perturb_adaptive_policy_ops = get_perturbed_actor_updates(self.actor, adaptive_param_noise_actor, self.param_noise_stddev)
        # RMS distance between clean and perturbed policies.
        self.adaptive_policy_distance = tf.sqrt(tf.reduce_mean(tf.square((self.actor_tf - adaptive_actor_tf))))

    def setup_actor_optimizer(self):
        """Actor loss = -mean Q(s, pi(s)); gradients flattened for MpiAdam."""
        logger.info('setting up actor optimizer')
        self.actor_loss = (- tf.reduce_mean(self.critic_with_actor_tf))
        actor_shapes = [var.get_shape().as_list() for var in self.actor.trainable_vars]
        actor_nb_params = sum([reduce((lambda x, y: (x * y)), shape) for shape in actor_shapes])
        logger.info('  actor shapes: {}'.format(actor_shapes))
        logger.info('  actor params: {}'.format(actor_nb_params))
        self.actor_grads = U.flatgrad(self.actor_loss, self.actor.trainable_vars, clip_norm=self.clip_norm)
        self.actor_optimizer = MpiAdam(var_list=self.actor.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08)

    def setup_critic_optimizer(self):
        """Critic loss = MSE to (normalized) Bellman targets, plus optional L2
        regularization on non-output kernel weights."""
        logger.info('setting up critic optimizer')
        normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
        self.critic_loss = tf.reduce_mean(tf.square((self.normalized_critic_tf - normalized_critic_target_tf)))
        if (self.critic_l2_reg > 0.0):
            critic_reg_vars = [var for var in self.critic.trainable_vars if (('kernel' in var.name) and ('output' not in var.name))]
            for var in critic_reg_vars:
                logger.info('  regularizing: {}'.format(var.name))
            logger.info('  applying l2 regularization with {}'.format(self.critic_l2_reg))
            critic_reg = tc.layers.apply_regularization(tc.layers.l2_regularizer(self.critic_l2_reg), weights_list=critic_reg_vars)
            self.critic_loss += critic_reg
        critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
        critic_nb_params = sum([reduce((lambda x, y: (x * y)), shape) for shape in critic_shapes])
        logger.info('  critic shapes: {}'.format(critic_shapes))
        logger.info('  critic params: {}'.format(critic_nb_params))
        self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
        self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08)

    def setup_popart(self):
        """Pop-art (arXiv:1602.07714): rescale the critic output layers so
        predictions stay consistent when the return statistics change."""
        self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std')
        new_std = self.ret_rms.std
        self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean')
        new_mean = self.ret_rms.mean
        self.renormalize_Q_outputs_op = []
        for vs in [self.critic.output_vars, self.target_critic.output_vars]:
            # Expect exactly one (kernel, bias) pair with scalar output.
            assert (len(vs) == 2)
            (M, b) = vs
            assert ('kernel' in M.name)
            assert ('bias' in b.name)
            assert (M.get_shape()[(- 1)] == 1)
            assert (b.get_shape()[(- 1)] == 1)
            self.renormalize_Q_outputs_op += [M.assign(((M * self.old_std) / new_std))]
            self.renormalize_Q_outputs_op += [b.assign(((((b * self.old_std) + self.old_mean) - new_mean) / new_std))]

    def setup_stats(self):
        """Collect scalar diagnostic ops (means/stds of Q values, actions and
        normalizer statistics) with matching names."""
        ops = []
        names = []
        if self.normalize_returns:
            ops += [self.ret_rms.mean, self.ret_rms.std]
            names += ['ret_rms_mean', 'ret_rms_std']
        if self.normalize_observations:
            ops += [tf.reduce_mean(self.obs_rms.mean), tf.reduce_mean(self.obs_rms.std)]
            names += ['obs_rms_mean', 'obs_rms_std']
        ops += [tf.reduce_mean(self.critic_tf)]
        names += ['reference_Q_mean']
        ops += [reduce_std(self.critic_tf)]
        names += ['reference_Q_std']
        ops += [tf.reduce_mean(self.critic_with_actor_tf)]
        names += ['reference_actor_Q_mean']
        ops += [reduce_std(self.critic_with_actor_tf)]
        names += ['reference_actor_Q_std']
        ops += [tf.reduce_mean(self.actor_tf)]
        names += ['reference_action_mean']
        ops += [reduce_std(self.actor_tf)]
        names += ['reference_action_std']
        if self.param_noise:
            ops += [tf.reduce_mean(self.perturbed_actor_tf)]
            names += ['reference_perturbed_action_mean']
            ops += [reduce_std(self.perturbed_actor_tf)]
            names += ['reference_perturbed_action_std']
        self.stats_ops = ops
        self.stats_names = names

    def pi(self, obs, apply_noise=True, compute_Q=True):
        """Select an action for a single observation.

        Uses the parameter-noise-perturbed actor when available, optionally
        adds action noise, clips to the action range, and (optionally) also
        returns the critic's Q estimate.
        """
        if ((self.param_noise is not None) and apply_noise):
            actor_tf = self.perturbed_actor_tf
        else:
            actor_tf = self.actor_tf
        feed_dict = {self.obs0: [obs]}
        if compute_Q:
            (action, q) = self.sess.run([actor_tf, self.critic_with_actor_tf], feed_dict=feed_dict)
        else:
            action = self.sess.run(actor_tf, feed_dict=feed_dict)
            q = None
        action = action.flatten()
        if ((self.action_noise is not None) and apply_noise):
            noise = self.action_noise()
            assert (noise.shape == action.shape)
            action += noise
        action = np.clip(action, self.action_range[0], self.action_range[1])
        return (action, q)

    def store_transition(self, obs0, action, reward, obs1, terminal1):
        """Append a (scaled-reward) transition to memory and update the
        observation normalizer."""
        reward *= self.reward_scale
        self.memory.append(obs0, action, reward, obs1, terminal1)
        if self.normalize_observations:
            self.obs_rms.update(np.array([obs0]))

    def train(self):
        """Run one optimization step on a sampled minibatch.

        Returns (critic_loss, actor_loss). With pop-art enabled, the return
        normalizer is updated and the critic output layer rescaled first.
        """
        batch = self.memory.sample(batch_size=self.batch_size)
        if (self.normalize_returns and self.enable_popart):
            (old_mean, old_std, target_Q) = self.sess.run([self.ret_rms.mean, self.ret_rms.std, self.target_Q], feed_dict={self.obs1: batch['obs1'], self.rewards: batch['rewards'], self.terminals1: batch['terminals1'].astype('float32')})
            self.ret_rms.update(target_Q.flatten())
            self.sess.run(self.renormalize_Q_outputs_op, feed_dict={self.old_std: np.array([old_std]), self.old_mean: np.array([old_mean])})
        else:
            target_Q = self.sess.run(self.target_Q, feed_dict={self.obs1: batch['obs1'], self.rewards: batch['rewards'], self.terminals1: batch['terminals1'].astype('float32')})
        # Compute gradients in-graph, apply them with the MPI optimizers.
        ops = [self.actor_grads, self.actor_loss, self.critic_grads, self.critic_loss]
        (actor_grads, actor_loss, critic_grads, critic_loss) = self.sess.run(ops, feed_dict={self.obs0: batch['obs0'], self.actions: batch['actions'], self.critic_target: target_Q})
        self.actor_optimizer.update(actor_grads, stepsize=self.actor_lr)
        self.critic_optimizer.update(critic_grads, stepsize=self.critic_lr)
        return (critic_loss, actor_loss)

    def initialize(self, sess):
        """Bind the session, init variables, sync optimizers across MPI ranks
        and hard-copy main networks into the targets."""
        self.sess = sess
        self.sess.run(tf.global_variables_initializer())
        self.actor_optimizer.sync()
        self.critic_optimizer.sync()
        self.sess.run(self.target_init_updates)

    def update_target_net(self):
        """Soft-update target networks toward the main networks (rate tau)."""
        self.sess.run(self.target_soft_updates)

    def get_stats(self):
        """Evaluate the diagnostic ops on a fixed sample (cached on first call
        so stats are comparable across training)."""
        if (self.stats_sample is None):
            self.stats_sample = self.memory.sample(batch_size=self.batch_size)
        values = self.sess.run(self.stats_ops, feed_dict={self.obs0: self.stats_sample['obs0'], self.actions: self.stats_sample['actions']})
        names = self.stats_names[:]
        assert (len(names) == len(values))
        stats = dict(zip(names, values))
        if (self.param_noise is not None):
            stats = {**stats, **self.param_noise.get_stats()}
        return stats

    def adapt_param_noise(self):
        """Perturb the adaptive actor, measure its policy distance on a batch
        (averaged over MPI ranks) and let param_noise adapt its stddev."""
        if (self.param_noise is None):
            return 0.0
        batch = self.memory.sample(batch_size=self.batch_size)
        self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={self.param_noise_stddev: self.param_noise.current_stddev})
        distance = self.sess.run(self.adaptive_policy_distance, feed_dict={self.obs0: batch['obs0'], self.param_noise_stddev: self.param_noise.current_stddev})
        mean_distance = (MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size())
        self.param_noise.adapt(mean_distance)
        return mean_distance

    def reset(self):
        """Reset exploration noise at episode boundaries; re-perturb the
        acting actor when parameter noise is used."""
        if (self.action_noise is not None):
            self.action_noise.reset()
        if (self.param_noise is not None):
            self.sess.run(self.perturb_policy_ops, feed_dict={self.param_noise_stddev: self.param_noise.current_stddev})
def register_task(name, dataclass=None):
    """Decorator factory that registers a FairseqTask subclass under `name`,
    optionally associating a FairseqDataclass and storing a Hydra config node.

    Re-registering an existing name is a no-op that returns the already
    registered class.
    """
    def register_task_cls(cls):
        if name in TASK_REGISTRY:
            return TASK_REGISTRY[name]
        if not issubclass(cls, FairseqTask):
            raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
        if cls.__name__ in TASK_CLASS_NAMES:
            raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
        TASK_REGISTRY[name] = cls
        TASK_CLASS_NAMES.add(cls.__name__)
        if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
            raise ValueError('Dataclass {} must extend FairseqDataclass'.format(dataclass))
        cls.__dataclass = dataclass
        if dataclass is not None:
            TASK_DATACLASS_REGISTRY[name] = dataclass
            # Expose the task config to Hydra under the 'task' group.
            cs = ConfigStore.instance()
            node = dataclass()
            node._name = name
            cs.store(name=name, group='task', node=node, provider='fairseq')
        return cls
    return register_task_cls
class EncodeTest(tf.test.TestCase):
    """Tests for encode.encode codeword assignment."""

    def testBasic(self):
        """Each embedding should map to the nearest codeword per codebook."""
        with self.test_session():
            embeddings = tf.constant([
                [0.1, 0.4, -0.51, -0.9],
                [0.2, 0.4, -0.2, -0.2],
                [0.1, 0.7, -0.4, -0.8],
                [0.6, 0.4, -0.8, -0.3],
                [0.9, 0.6, -0.2, -0.3],
            ])
            codebook = tf.constant([
                [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]],
                [[0.0, 0.0], [0.0, -1.0], [-1.0, 0.0], [-1.0, -1.0]],
            ])
            codes = encode.encode(embeddings, codebook)
            self.assertAllEqual(codes.eval(), [[0, 3], [0, 0], [1, 1], [2, 2], [3, 0]])
def oPNBI_torch(pred, true, mask_value=None):
    """Overall Percentage of Normalized Bias Indicator for torch tensors.

    Computes mean((true + pred) / (2 * true)) elementwise; values of 1.0
    indicate unbiased predictions. When `mask_value` is given, only positions
    where true > mask_value are included.

    Args:
        pred: predicted values tensor.
        true: ground-truth tensor (same shape as pred); entries of 0 in the
            kept positions produce inf/nan — callers should mask them out.
        mask_value: optional threshold; keep only elements with true > mask_value.

    Returns:
        Scalar tensor with the mean bias.
    """
    # Use identity comparison for None (was `!= None`, a PEP 8 violation that
    # also breaks on tensors overriding __ne__).
    if mask_value is not None:
        mask = torch.gt(true, mask_value)
        pred = torch.masked_select(pred, mask)
        true = torch.masked_select(true, mask)
    bias = (true + pred) / (2 * true)
    return bias.mean()
class NeuralNet(object):
    """Wraps an SRNET model with optional DataParallel, an MSE loss and an
    SGD optimizer; prints the model and its parameter count on construction."""

    def __init__(self, device, ngpu):
        self.device = device
        self.ngpu = ngpu
        self.model = SRNET(self.ngpu).to(self.device)
        # Spread across GPUs only when CUDA is in use and GPUs were requested.
        if (self.device.type == 'cuda') and (self.model.ngpu > 0):
            self.model = nn.DataParallel(self.model, list(range(self.model.ngpu)))
        num_params = sum(p.numel() for p in self.model.parameters())
        print(self.model)
        print('The number of parameters: {}'.format(num_params))
        self.mse = nn.MSELoss()
        self.optimizer = optim.SGD(self.model.parameters(), lr=0.001)
# NOTE(review): `_task('span_bert')` below looks like a decorator whose
# leading '@' was stripped during preprocessing (likely
# `@register_task('span_bert')` upstream) — confirm and restore.
_task('span_bert')
class SpanBertTask(FairseqTask):
    """Fairseq task for SpanBERT-style masked-span pretraining: loads block
    (pair) datasets, configures span-masking hyper-parameters via CLI args."""

    # NOTE(review): defined without `self` — presumably `@staticmethod` in the
    # original source (decorator appears stripped); confirm upstream.
    def add_args(parser):
        """Register SpanBERT-specific command-line arguments."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')
        parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset')
        parser.add_argument('--break-mode', default='doc', type=str, help='mode for breaking sentence')
        parser.add_argument('--schemes', default='["random"]', type=str, help='list of masking schemes')
        parser.add_argument('--span-lower', default=1, type=int, help='lower bound on the number of words in a span')
        parser.add_argument('--span-upper', default=10, type=int, help='upper bound on the number of words in a span')
        parser.add_argument('--max-pair-targets', default=20, type=int, help='max word pieces b/w a pair')
        parser.add_argument('--mask-ratio', default=0.15, type=float, help='proportion of words to be masked')
        parser.add_argument('--geometric-p', default=0.3, type=float, help='p for the geometric distribution used in span masking. -1 is uniform')
        parser.add_argument('--pair-loss-weight', default=0.0, type=float, help='weight for pair2/SBO loss')
        parser.add_argument('--tagged-anchor-prob', default=0.0, type=float, help='prob of selecting an anchor according to the tag bitmap')
        parser.add_argument('--short-seq-prob', default=0.1, type=float)
        parser.add_argument('--pair-target-layer', default=(- 1), type=int)
        parser.add_argument('--pair-positional-embedding-size', default=200, type=int)
        parser.add_argument('--ner-masking-prob', default=0.5, type=float)
        parser.add_argument('--replacement-method', default='word_piece')
        parser.add_argument('--return-only-spans', default=False, action='store_true')
        parser.add_argument('--shuffle-instance', default=False, action='store_true')
        parser.add_argument('--no-nsp', default=False, action='store_true')
        parser.add_argument('--endpoints', default='external', type=str)
        parser.add_argument('--skip-validation', default=False, action='store_true')
        parser.add_argument('--tag-bitmap-file-prefix', default=None, help='file containing bitmap of verb tokens')

    def __init__(self, args, dictionary):
        """Store the dictionary and cache frequently used hyper-parameters."""
        super().__init__(args)
        self.dictionary = dictionary
        # Models read the vocab size from args.
        args.vocab_size = len(dictionary)
        self.seed = args.seed
        self.no_nsp = args.no_nsp
        self.short_seq_prob = args.short_seq_prob

    # NOTE(review): may have been a @property upstream; confirm against the
    # FairseqTask interface.
    def target_dictionary(self):
        """Return the output (target) dictionary."""
        return self.dictionary

    # NOTE(review): takes `cls` but has no decorator — presumably
    # `@classmethod` in the original source; confirm upstream.
    def setup_task(cls, args, **kwargs):
        """Load the BERT dictionary from the data directory and build the task."""
        dictionary = BertDictionary.load(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)

    def load_dataset(self, split, combine=False):
        """Load one split (plus numbered continuation shards when `combine`)
        and wrap it in a (NoNSP)SpanBertDataset."""
        loaded_datasets = []
        # Shards are named split, split1, split2, ... — stop at the first gap.
        for k in itertools.count():
            split_k = (split + (str(k) if (k > 0) else ''))
            path = os.path.join(self.args.data, split_k)
            if (self.args.raw_text and IndexedRawTextDataset.exists(path)):
                ds = IndexedRawTextDataset(path, self.dictionary)
                tokens = [t for l in ds.tokens_list for t in l]
            elif ((not self.args.raw_text) and IndexedInMemoryDataset.exists(path)):
                ds = IndexedInMemoryDataset(path, fix_lua_indexing=False)
                tokens = ds.buffer
            elif (k > 0):
                break
            else:
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, self.args.data))
            # Optional per-token tag bitmap (e.g. verb positions) for anchoring.
            tag_map = None
            if (self.args.tag_bitmap_file_prefix is not None):
                tag_map = bitarray()
                tag_map.fromfile(open((self.args.tag_bitmap_file_prefix + split), 'rb'))
            # Sentence-pair blocks for NSP, plain blocks otherwise.
            block_cls = (BlockPairDataset if (not self.no_nsp) else BlockDataset)
            with data_utils.numpy_seed((self.seed + k)):
                loaded_datasets.append(block_cls(tokens, ds.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), cls=self.dictionary.cls(), mask=self.dictionary.mask(), sep=self.dictionary.sep(), break_mode=self.args.break_mode, short_seq_prob=self.short_seq_prob, tag_map=tag_map))
            print('| {} {} {} examples'.format(self.args.data, split_k, len(loaded_datasets[(- 1)])))
            if (not combine):
                break
        if (len(loaded_datasets) == 1):
            dataset = loaded_datasets[0]
            sizes = dataset.sizes
        else:
            dataset = ConcatDataset(loaded_datasets)
            sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
        dataset_cls = (SpanBertDataset if (not self.no_nsp) else NoNSPSpanBertDataset)
        self.datasets[split] = dataset_cls(dataset, sizes, self.dictionary, shuffle=self.args.shuffle_instance, seed=self.seed, args=self.args)
def remove_symbols_and_diacritics(s: str, keep=''):
    """NFKD-normalize `s`, then drop combining marks, replace symbols and
    punctuation with spaces, and apply ADDITIONAL_DIACRITICS substitutions.

    Characters listed in `keep` are always passed through unchanged.
    """
    pieces = []
    for ch in unicodedata.normalize('NFKD', s):
        if ch in keep:
            pieces.append(ch)
        elif ch in ADDITIONAL_DIACRITICS:
            pieces.append(ADDITIONAL_DIACRITICS[ch])
        elif unicodedata.category(ch) == 'Mn':
            # Drop combining marks (the accents split off by NFKD).
            continue
        elif unicodedata.category(ch)[0] in 'MSP':
            # Other marks, symbols and punctuation become spaces.
            pieces.append(' ')
        else:
            pieces.append(ch)
    return ''.join(pieces)
# NOTE(review): the two lines below appear to be pytest decorators whose
# '@pytest.mark' prefix was stripped during preprocessing; as written they are
# not valid Python. Restore '@pytest.mark.skipif(...)' and
# '@pytest.mark.parametrize(...)' — confirm against the upstream test file.
.skipif((not torch.cuda.is_available()), reason='requires cuda')
.parametrize('cfg_file', ['../configs/kie/sdmgr/sdmgr_unet16_60e_wildreceipt.py'])
def test_single_gpu_test_kie(cfg_file):
    """Smoke-test single_gpu_test on the SDMGR KIE config (CUDA only)."""
    # Resolve the config path relative to the test directory's parent.
    curr_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    config_file = os.path.join(curr_dir, cfg_file)
    cfg = Config.fromfile(config_file)
    with tempfile.TemporaryDirectory() as tmpdirname:
        out_dir = osp.join(tmpdirname, 'tmp')
        (model, data_loader) = gene_sdmgr_model_dataloader(cfg, out_dir, curr_dir)
        results = single_gpu_test(model, data_loader, out_dir=out_dir, is_kie=True)
        # Every per-sample result should be a dict of KIE outputs.
        assert check_argument.is_type_list(results, dict)
class DropoutParameter(_message.Message):
    """Generated protocol-buffer message class for Caffe's DropoutParameter.

    NOTE(review): auto-generated protobuf binding using the legacy (Python 2
    era) metaclass API — do not edit by hand; regenerate from the .proto file.
    """
    # Legacy protobuf hook: the metaclass fills in fields from DESCRIPTOR.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    # Compiled descriptor for the DropoutParameter message type.
    DESCRIPTOR = _DROPOUTPARAMETER
def load_data(root_path, src, tar, batch_size):
    """Build training loaders for the source and target domains, plus a test
    loader for the target domain."""
    loader_kwargs = {'num_workers': 1, 'pin_memory': True}
    source_loader = data_loader.load_training(root_path, src, batch_size, loader_kwargs)
    target_loader = data_loader.load_training(root_path, tar, batch_size, loader_kwargs)
    target_test_loader = data_loader.load_testing(root_path, tar, batch_size, loader_kwargs)
    return (source_loader, target_loader, target_test_loader)
class Linear(gpy.means.Mean):
    """Batched linear mean function: m(X) = X @ a + b, with `a` initialized
    from a seeded standard normal and `b` at zero."""

    def __init__(self, input_dim, output_dim) -> None:
        super(Linear, self).__init__()
        # Seed NumPy so the random weight init is reproducible across runs.
        numpy.random.seed(cg.seed)
        init_weight = numpy.random.randn(output_dim, input_dim, 1)
        self.a = nn.Parameter(torch.tensor(init_weight, dtype=cg.dtype))
        self.b = nn.Parameter(torch.zeros(output_dim, 1, 1, dtype=cg.dtype))

    def __call__(self, X) -> torch.tensor:
        # Batched matmul over the output dimension, plus broadcast bias.
        return torch.bmm(X, self.a) + self.b
def preprocess(x, dset):
    """Scale CIFAR10-C inputs from [0, 255] to [0, 1]; pass other datasets
    through unchanged."""
    return (x / 255.0) if dset == 'CIFAR10-C' else x
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist a training checkpoint under runs/<model_type>/.

    Relies on the module-level `args` namespace for `model_type`. When
    `is_best` is set, the checkpoint is also copied to model_best.pth.tar so
    the best model keeps a stable path.

    Args:
        state: serializable checkpoint payload for torch.save.
        is_best: whether this checkpoint is the best seen so far.
        filename: checkpoint file name within the run directory.
    """
    directory = 'runs/%s/' % args.model_type
    # exist_ok avoids the check-then-create (TOCTOU) race the old
    # `if not exists: makedirs` pattern had when several processes save.
    os.makedirs(directory, exist_ok=True)
    filename = directory + filename
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, directory + 'model_best.pth.tar')
def get_cluster_info(cluster, gold_doc):
    """Aggregate NER labels and pronoun properties over a coreference cluster.

    Returns (ner, number, person, gender) as sets of the non-'unknown'
    property values observed across the cluster's mentions.
    """
    text = gold_doc['text']
    gold_ner = gold_doc['ner']
    ner = set()
    numbers = set()
    persons = set()
    genders = set()
    for mention in cluster:
        mention_text = coreference_rendering.mention_text(text, mention).lower()
        (m_gender, m_number, m_person) = coreference.pronoun_properties_text(mention_text)
        # Collect only determinate property values.
        for value, bucket in ((m_gender, genders), (m_number, numbers), (m_person, persons)):
            if value != 'unknown':
                bucket.add(value)
        if mention in gold_ner:
            ner.add(gold_ner[mention])
    return (ner, numbers, persons, genders)
class HRSACAgent():
    """History-aware SAC-style agent: recurrent actor, critic with a slowly
    updated target copy, and optional automatic entropy-coefficient tuning."""

    def __init__(self, in_actor, hidden_in_actor, hidden_out_actor, out_actor, in_critic, hidden_in_critic, hidden_out_critic, rnn_num_layers, rnn_hidden_size_actor, rnn_hidden_size_critic, lr_actor=0.01, lr_critic=0.01, weight_decay=1e-05, device='cpu', rnn=True, alpha=0.2, automatic_entropy_tuning=True):
        super(HRSACAgent, self).__init__()
        # Policy network plus critic and its target copy.
        self.actor = Network(in_actor, hidden_in_actor, hidden_out_actor, out_actor, rnn_num_layers, rnn_hidden_size_actor, device, actor=True, rnn=rnn).to(device)
        self.critic = Network(in_critic, hidden_in_critic, hidden_out_critic, 1, rnn_num_layers, rnn_hidden_size_critic, device, rnn=rnn).to(device)
        self.target_critic = Network(in_critic, hidden_in_critic, hidden_out_critic, 1, rnn_num_layers, rnn_hidden_size_critic, device, rnn=rnn).to(device)
        self.noise = OUNoise(out_actor, scale=1.0)
        self.device = device
        # Start the target critic as an exact copy of the critic.
        hard_update(self.target_critic, self.critic)
        self.actor_optimizer = Adam(self.actor.parameters(), lr=lr_actor)
        self.critic_optimizer = Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay)
        self.automatic_entropy_tuning = automatic_entropy_tuning
        self.alpha = alpha
        if self.automatic_entropy_tuning is True:
            # Standard SAC heuristic: target entropy = -|A|; optimize log(alpha)
            # so the coefficient stays positive.
            self.target_entropy = -torch.prod(torch.Tensor(out_actor).to(self.device)).item()
            initial_log_alpha = torch.zeros(1, requires_grad=True, device=self.device) + np.log(self.alpha)
            self.log_alpha = initial_log_alpha.detach().requires_grad_(True)
            self.alpha_optimizer = Adam([self.log_alpha], lr=lr_actor)

    def act(self, his, obs, noise=0.0):
        """Return an action clamped to [-1, 1]; stochastic when noise > 0."""
        his = his.to(self.device)
        obs = obs.to(self.device)
        sampler = self.actor.sample_normal if noise > 0.0 else self.actor.forward
        (action, _) = sampler(his, obs)
        return action.cpu().clamp(-1, 1).cpu()

    def act_prob(self, his, obs, noise=0.0):
        """Like act(), but also return the actor's log-probabilities."""
        his = his.to(self.device)
        obs = obs.to(self.device)
        sampler = self.actor.sample_normal if noise > 0.0 else self.actor.forward
        (action, log_probs) = sampler(his, obs)
        clamped = action.cpu().clamp(-1, 1)
        return (clamped.cpu(), log_probs)
def readFragmentScores(name='resources/fpscores'):
    """Load the fragment-score pickle into the module-level `_fscores` dict.

    The pickle holds rows of [score, fragment_id, fragment_id, ...]; the
    result maps every fragment id to its float score.
    """
    import gzip
    global _fscores
    # Special-case the bare name by resolving it next to this module.
    if name == 'fpscores':
        name = op.join(op.dirname(__file__), name)
    _fscores = cPickle.load(gzip.open('%s.pkl.gz' % name))
    score_map = {}
    for entry in _fscores:
        score = float(entry[0])
        for fragment_id in entry[1:]:
            score_map[fragment_id] = score
    _fscores = score_map
def DoubleConv3x3BnReLU(filters, use_batchnorm, name=None):
    """Return a callable applying two stacked Conv3x3BnReLU blocks.

    When `name` is given, the sub-blocks are named '<name>a' and '<name>b'.
    """
    if name is None:
        first_name = second_name = None
    else:
        first_name = name + 'a'
        second_name = name + 'b'

    def wrapper(input_tensor):
        hidden = Conv3x3BnReLU(filters, use_batchnorm, name=first_name)(input_tensor)
        return Conv3x3BnReLU(filters, use_batchnorm, name=second_name)(hidden)
    return wrapper
def make_array_list_fn_sign_covariant(fn: Callable[([ArrayList], Array)], axis: int=(- 2)) -> Callable[([ArrayList], Array)]:
    """Wrap `fn` so its output is sign-covariant along `axis`, by averaging
    over the sign orbit with the matching sign applied to each term."""
    get_sign_orbit = functools.partial(_get_sign_orbit_array_list, axis=axis)
    apply_sign = functools.partial(_multiply_sign_along_axis, axis=axis)
    reduce_orbit = functools.partial(jnp.sum, axis=axis)
    return apply_sign_symmetry_to_fn(fn, get_sign_orbit, apply_sign, reduce_orbit)
class PassLogTfIntermediate(NodeTransformerWithPrePost):
    """AST pass that, after every assignment to a simple name, appends a
    snippet re-binding tensor-valued targets through tf.identity so
    intermediate tensors get logged/named in the graph."""

    # Monotonically increasing counter used to build unique temp-var names.
    __tempId = 0

    def __init__(self) -> None:
        self.nestedCall = False

    def reset(self) -> None:
        """Restart temp-variable numbering for a fresh traversal."""
        self.__tempId = 0

    def newTempVar(self, lval: str) -> str:
        """Return a fresh unique temp-variable name derived from `lval`."""
        self.__tempId += 1
        return 'PassLogTfIntermediateTempVar{}_{}'.format(self.__tempId, lval)

    # Fix: this method was defined without `self` but invoked as
    # self.getSinppet(a, b), which raised TypeError (3 args for 2 params).
    # Consistent with this file's stripped-decorator pattern, restore
    # @staticmethod so the bound call works.
    @staticmethod
    def getSinppet(lval: str, rval: str) -> ast.AST:
        """Build the `if Tensor: lval = tf.identity(rval)` snippet AST."""
        return ast.parse('\nif "Tensor" in str(type({})):\n    {} = tf.identity({})'.format(rval, lval, rval)).body[0]

    def visit_Assign(self, node: ast.Assign) -> ast.AST:
        """Append a logging snippet (into node.post) for every Name target,
        including names inside tuple-unpacking targets."""
        self.generic_visit(node)
        for lval in node.targets:
            if isinstance(lval, ast.Tuple):
                for elem in lval.elts:
                    # Only simple names can be re-bound via the snippet.
                    if (not isinstance(elem, ast.Name)):
                        continue
                    name: str = astunparse.unparse(elem)
                    name = name.strip('\n')
                    node.post.append(self.getSinppet(self.newTempVar(name), name))
            else:
                if (not isinstance(lval, ast.Name)):
                    continue
                name: str = astunparse.unparse(lval)
                name = name.strip('\n')
                node.post.append(self.getSinppet(self.newTempVar(name), name))
        return node
def _is_valid_sub_path(path, parent_paths):
if (not parent_paths):
return True
for parent_path in parent_paths:
if (path[:len(parent_path)] == parent_path):
return True
return False |
class ReplayBuffer():
    """Replay buffer of consecutive experiences with a geometric sampling
    bias that favors more recent (higher-index) entries."""

    def __init__(self, start_index, end_index, batch_size, is_permed, coin_number, sample_bias=1.0):
        self.__coin_number = coin_number
        self.__experiences = [Experience(i) for i in range(start_index, end_index)]
        self.__is_permed = is_permed
        self.__batch_size = batch_size
        self.__sample_bias = sample_bias
        logging.debug('buffer_bias is %f' % sample_bias)

    def append_experience(self, state_index):
        """Add one new experience for `state_index` at the end of the buffer."""
        self.__experiences.append(Experience(state_index))
        logging.debug('a new experience, indexed by %d, was appended' % state_index)

    def __sample(self, start, end, bias):
        """Draw an index in [start, end) whose distance from `end` follows a
        (truncated) geometric distribution — recent indices are likelier."""
        while True:
            draw = np.random.geometric(bias)
            if draw <= (end - start):
                break
        return end - draw

    def next_experience_batch(self):
        """Return the next training batch: independent biased draws when
        permed, otherwise one contiguous slice with a biased start."""
        if self.__is_permed:
            return [
                self.__experiences[self.__sample(self.__experiences[0].state_index, self.__experiences[(- 1)].state_index, self.__sample_bias)]
                for _ in range(self.__batch_size)
            ]
        batch_start = self.__sample(0, len(self.__experiences) - self.__batch_size, self.__sample_bias)
        return self.__experiences[batch_start:batch_start + self.__batch_size]
def set_num_threads(num_threads=2):
    """Cap the thread count of the usual numeric backends via environment variables.

    Must run before the backing libraries (MKL, OpenBLAS, numexpr, OpenMP,
    vecLib, numba) are first imported for the limits to take effect.
    """
    value = ('%s' % num_threads)
    for var in ('MKL_NUM_THREADS', 'NUMEXPR_NUM_THREADS', 'OMP_NUM_THREADS',
                'OPENBLAS_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS', 'NUMBA_NUM_THREADS'):
        os.environ[var] = value
def _return_handle(x):
handle = x.v_handle
if (not isinstance(handle, ctypes.c_void_p)):
handle = ctypes.c_void_p(handle)
return handle |
class Cutpaste_Dataset(Dataset):
    """Dataset yielding CutPaste-augmented crops for self-supervised anomaly training.

    Each item is the list returned by the CutPaste transform (original crop plus
    augmented variant(s)), converted to tensors and optionally re-centered.
    """

    def __init__(self, files: np.ndarray, config: Namespace):
        self.files = files
        # When True, rescale tensors from [0, 1] to [-1, 1] in __getitem__.
        self.center = config.center
        self.cutpaste_transform = CutPaste(type=config.cutpaste_type)
        # 32x32 patches for localization; otherwise the full configured image size.
        self.crop_size = ((32, 32) if config.localization else (config.image_size, config.image_size))
        self.crop = T.RandomCrop(self.crop_size)
        self.transforms = T.Compose([T.ToTensor()])

    def __len__(self) -> int:
        return len(self.files)

    def __getitem__(self, idx) -> list:
        # assumes files[idx] is a single-channel float array in [0, 1] — TODO confirm.
        img = self.files[idx]
        # Replicate the channel to get a 3-channel image, then HWC uint8 for PIL.
        img = np.tile(img, (3, 1, 1))
        img = (img.transpose(1, 2, 0) * 255)
        img = Image.fromarray(img.astype(np.uint8)).convert('RGB')
        img_cropped = self.crop(img)
        cutpaste_list = self.cutpaste_transform(img_cropped)
        cutpaste_list = [self.transforms(i) for i in cutpaste_list]
        if self.center:
            # Map [0, 1] -> [-1, 1].
            cutpaste_list = [((i - 0.5) * 2) for i in cutpaste_list]
        return cutpaste_list
def blend_images_np(image, image2, alpha=0.5):
    """Alpha-blend *image2* onto *image* in place and return *image*.

    Both arrays must be uint8, and image2's shape must equal image's spatial
    dimensions. The blended result is written back into *image* as RGB.
    """
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if image2.dtype != np.uint8:
        raise ValueError('`image2` not of type np.uint8')
    if image.shape[:2] != image2.shape:
        raise ValueError('The image has spatial dimensions %s but the image2 has dimensions %s' % (image.shape[:2], image2.shape))
    blended = Image.blend(Image.fromarray(image), Image.fromarray(image2), alpha)
    # Write the blend back into the caller's buffer (in-place update).
    np.copyto(image, np.array(blended.convert('RGB')))
    return image
def break_up_expressions(pred, label2idx):
    """Split a token-level label array into per-expression binary masks and polarities.

    ``pred`` holds one label index per token (0 = outside). Each maximal run of
    non-zero labels becomes one binary mask over the full sequence; its polarity
    is taken from the label of the run's first token (text after the last '-').
    Returns ``(masks, polarities)``; an all-zero input yields its own list as the
    single mask with no polarities.
    """
    if pred.sum() == 0:
        return ([pred.tolist()], [])
    if pred.all() > 0:
        # Whole sequence is one expression.
        polarity = label2idx.idx2label['expressions'][int(pred[0])].split('-')[-1]
        return ([[1] * len(pred)], [polarity])
    spans = []
    begin = None
    polarity = None
    for pos, value in enumerate(pred):
        if value > 0 and begin is None:
            # Run opens: remember start and polarity of the first label.
            begin = pos
            polarity = label2idx.idx2label['expressions'][int(value)].split('-')[-1]
        if value == 0 and begin is not None:
            spans.append((begin, pos, polarity))
            begin = None
            polarity = None
        if pos == (len(pred) - 1) and begin is not None:
            # Flush a run still open at the end of the sequence.
            spans.append((begin, pos + 1, polarity))
    masks = []
    polarities = []
    for begin, stop, polarity in spans:
        masks.append([1 if begin <= k < stop else 0 for k in range(len(pred))])
        polarities.append(polarity)
    return (masks, polarities)
# NOTE(review): the bare "(num_cpus=4)" below is almost certainly the argument list of
# a decorator stripped by text extraction (presumably "@ray.remote(num_cpus=4)") — as
# written it is not valid Python; recover the decorator from the original source.
(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    """Send *content* to the gpt-4 chat endpoint and return the reply text.

    Retries forever: rate-limit errors retry immediately, any other error is
    printed and retried after a 1-second sleep.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(model='gpt-4', messages=[{'role': 'system', 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'}, {'role': 'user', 'content': content}], temperature=0.2, max_tokens=max_tokens)
            break
        except openai.error.RateLimitError:
            # NOTE(review): no backoff here — rate-limited calls busy-spin; consider a sleep.
            pass
        except Exception as e:
            print(e)
            time.sleep(1)
    print('success!')
    return response['choices'][0]['message']['content']
class SAConv2d(ConvAWS2d):
    """Switchable convolution: blends a small-receptive-field and a large
    (3x dilated) convolution via a learned per-position switch, with global
    context added before and after — appears to implement SAC (Switchable
    Atrous Convolution, DetectoRS); confirm against the paper/project docs.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, use_deform=False):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.use_deform = use_deform
        # 1x1 conv producing the per-position mixing weight; init to constant 1.
        self.switch = torch.nn.Conv2d(self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
        self.switch.weight.data.fill_(0)
        self.switch.bias.data.fill_(1)
        # Learned delta added to the shared weight for the large-dilation branch.
        self.weight_diff = torch.nn.Parameter(torch.Tensor(self.weight.size()))
        self.weight_diff.data.zero_()
        # Global-context 1x1 convs (pre/post), zero-initialized so they start as no-ops.
        self.pre_context = torch.nn.Conv2d(self.in_channels, self.in_channels, kernel_size=1, bias=True)
        self.pre_context.weight.data.fill_(0)
        self.pre_context.bias.data.fill_(0)
        self.post_context = torch.nn.Conv2d(self.out_channels, self.out_channels, kernel_size=1, bias=True)
        self.post_context.weight.data.fill_(0)
        self.post_context.bias.data.fill_(0)
        if self.use_deform:
            # Offset predictors for deformable conv (18 = 2 offsets x 3x3 kernel).
            self.offset_s = torch.nn.Conv2d(self.in_channels, 18, kernel_size=3, padding=1, stride=stride, bias=True)
            self.offset_l = torch.nn.Conv2d(self.in_channels, 18, kernel_size=3, padding=1, stride=stride, bias=True)
            self.offset_s.weight.data.fill_(0)
            self.offset_s.bias.data.fill_(0)
            self.offset_l.weight.data.fill_(0)
            self.offset_l.bias.data.fill_(0)

    def forward(self, x):
        # Add global (average-pooled) context to the input.
        avg_x = torch.nn.functional.adaptive_avg_pool2d(x, output_size=1)
        avg_x = self.pre_context(avg_x)
        avg_x = avg_x.expand_as(x)
        x = (x + avg_x)
        # Local 5x5 average (reflect-padded) feeds the switch / offset predictors.
        avg_x = torch.nn.functional.pad(x, pad=(2, 2, 2, 2), mode='reflect')
        avg_x = torch.nn.functional.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
        switch = self.switch(avg_x)
        weight = self._get_weight(self.weight)
        # Small-dilation branch.
        if self.use_deform:
            offset = self.offset_s(avg_x)
            out_s = deform_conv(x, offset, weight, self.stride, self.padding, self.dilation, self.groups, 1)
        else:
            out_s = super().conv2d_forward(x, weight)
        # Temporarily triple padding/dilation for the large-receptive-field branch;
        # restored below, so concurrent calls on the same module are not safe.
        ori_p = self.padding
        ori_d = self.dilation
        self.padding = tuple(((3 * p) for p in self.padding))
        self.dilation = tuple(((3 * d) for d in self.dilation))
        weight = (weight + self.weight_diff)
        if self.use_deform:
            offset = self.offset_l(avg_x)
            out_l = deform_conv(x, offset, weight, self.stride, self.padding, self.dilation, self.groups, 1)
        else:
            out_l = super().conv2d_forward(x, weight)
        # Per-position convex combination of the two branches.
        out = ((switch * out_s) + ((1 - switch) * out_l))
        self.padding = ori_p
        self.dilation = ori_d
        # Add global context to the output as well.
        avg_x = torch.nn.functional.adaptive_avg_pool2d(out, output_size=1)
        avg_x = self.post_context(avg_x)
        avg_x = avg_x.expand_as(out)
        out = (out + avg_x)
        return out
class ResNet(nn.Module):
    """ResNet with a 3x3 stride-1 stem (CIFAR-style) and 3 or 4 residual stages.

    ``layer_channels`` gives the block count per stage; ``channels`` the width of
    each stage. With only three entries in ``layer_channels``, ``layer4`` is
    omitted and the classifier is sized from ``channels[2]``.
    """

    def __init__(self, block, layer_channels, channels, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.zero_init_residual = zero_init_residual
        self.inplanes = channels[0]
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per striding stage: replace its stride-2 with dilation.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # 3x3/stride-1 stem (ImageNet ResNets use 7x7/stride-2 instead).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        # NOTE(review): self.maxpool is constructed but never used in _forward_impl.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, self.inplanes, layer_channels[0])
        self.layer2 = self._make_layer(block, channels[1], layer_channels[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, channels[2], layer_channels[2], stride=2, dilate=replace_stride_with_dilation[1])
        if (len(layer_channels) == 4):
            self.layer4 = self._make_layer(block, channels[3], layer_channels[3], stride=2, dilate=replace_stride_with_dilation[2])
            self.fc = nn.Linear((channels[3] * block.expansion), num_classes)
        else:
            self.fc = nn.Linear((channels[2] * block.expansion), num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # NOTE(review): _initialize_weights() is not invoked here — presumably the
        # caller runs it after construction; confirm, otherwise PyTorch defaults apply.

    def _initialize_weights(self):
        """Kaiming-init convs, normal-init linears; optionally zero the last BN of each block."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
        if self.zero_init_residual:
            # Zero the final BN so each residual branch starts as identity.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of *blocks* blocks, downsampling the first if needed."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stage's stride for increased dilation.
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut for shape-changing first block.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
            name = 'ds_block'
        else:
            name = 'n_block'
        layers = OrderedDict()
        layers[(name + '0')] = block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer)
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers[('n_block' + str(i))] = block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer)
        return nn.Sequential(layers)

    def _forward_impl(self, x):
        # Stem -> stages -> global average pool -> classifier.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if hasattr(self, 'layer4'):
            x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
def _full_conv(input, weights, bias, i, ops, net):
    """Add fully-connected-as-convolution layer *i* (VALID padding, unit stride).

    Creates weight/bias variables from the given arrays, registers them in
    ``ops`` and ``net`` (keys ``weights<i>`` / ``b<i>``), and returns the
    bias-added convolution output.
    """
    suffix = str(i)
    w = tf.Variable(weights, name=('w' + suffix), dtype='float32')
    b = tf.Variable(bias, name=('bias' + suffix), dtype='float32')
    ops.extend([w, b])
    net[('weights' + suffix)] = w
    net[('b' + suffix)] = b
    conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='VALID', name=('fc' + suffix))
    return tf.nn.bias_add(conv, b, name=('add' + suffix))
class ImageSet(JavaValue):
    """Python wrapper around a JVM-side ImageSet (local or distributed).

    Delegates to a LocalImageSet or DistributedImageSet facade chosen at
    construction time based on the underlying JVM object.
    """

    def __init__(self, jvalue, bigdl_type='float'):
        self.value = jvalue
        self.bigdl_type = bigdl_type
        # Pick the matching facade for the JVM object's locality.
        if self.is_local():
            self.image_set = LocalImageSet(jvalue=self.value)
        else:
            self.image_set = DistributedImageSet(jvalue=self.value)

    def is_local(self):
        """True when the backing ImageSet lives on the driver."""
        return callZooFunc(self.bigdl_type, 'isLocalImageSet', self.value)

    def is_distributed(self):
        """True when the backing ImageSet is an RDD-backed set."""
        return callZooFunc(self.bigdl_type, 'isDistributedImageSet', self.value)

    def label_map(self):
        """Return the JVM-side label-name -> index mapping."""
        return callZooFunc(self.bigdl_type, 'imageSetGetLabelMap', self.value)

    # NOTE(review): read / from_image_frame / from_rdds take `cls` but carry no
    # @classmethod decorator here — presumably stripped during extraction; confirm
    # against the original source.
    def read(cls, path, sc=None, min_partitions=1, resize_height=(- 1), resize_width=(- 1), image_codec=(- 1), with_label=False, one_based_label=True, bigdl_type='float'):
        """Read images from *path* (local or via SparkContext *sc*) into an ImageSet."""
        return ImageSet(jvalue=callZooFunc(bigdl_type, 'readImageSet', path, sc, min_partitions, resize_height, resize_width, image_codec, with_label, one_based_label))

    def from_image_frame(cls, image_frame, bigdl_type='float'):
        """Convert a BigDL ImageFrame into an ImageSet."""
        return ImageSet(jvalue=callZooFunc(bigdl_type, 'imageFrameToImageSet', image_frame))

    def from_rdds(cls, image_rdd, label_rdd=None, bigdl_type='float'):
        """Build a distributed ImageSet from ndarray RDDs (labels optional)."""
        image_rdd = image_rdd.map((lambda x: JTensor.from_ndarray(x)))
        if (label_rdd is not None):
            label_rdd = label_rdd.map((lambda x: JTensor.from_ndarray(x)))
        return ImageSet(jvalue=callZooFunc(bigdl_type, 'createDistributedImageSet', image_rdd, label_rdd), bigdl_type=bigdl_type)

    def transform(self, transformer):
        """Apply *transformer* JVM-side, returning a new ImageSet."""
        return ImageSet(callZooFunc(self.bigdl_type, 'transformImageSet', transformer, self.value), self.bigdl_type)

    def get_image(self, key='floats', to_chw=True):
        """Fetch image data from the underlying (local/distributed) set."""
        return self.image_set.get_image(key, to_chw)

    def get_label(self):
        """Fetch labels from the underlying set."""
        return self.image_set.get_label()

    def get_predict(self, key='predict'):
        """Fetch prediction results from the underlying set."""
        return self.image_set.get_predict(key)

    def to_image_frame(self, bigdl_type='float'):
        """Convert this ImageSet back to a BigDL ImageFrame."""
        return ImageFrame(callZooFunc(bigdl_type, 'imageSetToImageFrame', self.value), bigdl_type)
class ViltFeatureExtractionTester(unittest.TestCase):
    """Config holder + expected-size oracle for ViLT feature-extractor tests."""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=30, size_divisor=2, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        # NOTE(review): mutable list defaults are shared across instances; harmless
        # here only because they are never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.size_divisor = size_divisor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to build the feature extractor under test."""
        return {'image_mean': self.image_mean, 'image_std': self.image_std, 'do_normalize': self.do_normalize, 'do_resize': self.do_resize, 'size': self.size, 'size_divisor': self.size_divisor}

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the extractor should resize to.

        Single image: shorter side scaled to ``self.size``, longer side capped at
        ``1333/800 * size``, then both floored to a multiple of ``size_divisor``.
        Batched: per-image values computed recursively, then the max height and
        max width across the batch (padding target).
        """
        if (not batched):
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                (w, h) = image.size
            else:
                # assumes channel-first arrays/tensors: (C, H, W) — TODO confirm.
                (h, w) = (image.shape[1], image.shape[2])
            # Scale so the shorter side becomes self.size.
            scale = (self.size / min(w, h))
            if (h < w):
                (newh, neww) = (self.size, (scale * w))
            else:
                (newh, neww) = ((scale * h), self.size)
            # Cap the longer side (same 1333:800 ratio as detection-style resizing).
            max_size = int(((1333 / 800) * self.size))
            if (max(newh, neww) > max_size):
                scale = (max_size / max(newh, neww))
                newh = (newh * scale)
                neww = (neww * scale)
            # Round to nearest int, then floor to a multiple of size_divisor.
            (newh, neww) = (int((newh + 0.5)), int((neww + 0.5)))
            (expected_height, expected_width) = (((newh // self.size_divisor) * self.size_divisor), ((neww // self.size_divisor) * self.size_divisor))
        else:
            expected_values = []
            for image in image_inputs:
                (expected_height, expected_width) = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=(lambda item: item[0]))[0]
            expected_width = max(expected_values, key=(lambda item: item[1]))[1]
        return (expected_height, expected_width)
def resnet34(pretrained=False, **kwargs):
    """Construct a ResNet-34 model; optionally load ImageNet weights (non-strict)."""
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
    return net
def run(rank, size, G):
    """Federated-training worker entry point for process *rank* of *size*.

    Sets up result folders (rank 0 only), seeds, data loaders, model/optimizer,
    then runs the federated epoch loop; rank 0 also evaluates and persists
    test accuracy after each epoch. *G* is passed through to train()
    (presumably the communication topology/graph — confirm).
    """
    save_path = f'./results_v0/{args.experiment_name}/'
    if (rank == 0):
        if (not os.path.exists(save_path)):
            try:
                os.makedirs(save_path)
            except OSError:
                # Another process may have created it concurrently; ignore.
                pass
    folder_name = ((save_path + args.name) + '/')
    if ((rank == 0) and (os.path.isdir(folder_name) == False) and args.save):
        os.makedirs(folder_name)
    else:
        # Give rank 0 time to create the folder before everyone proceeds.
        time.sleep(5)
    dist.barrier()
    # Fixed seeds + deterministic cudnn for reproducibility.
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    torch.backends.cudnn.deterministic = True
    (train_loader_list, test_loader_list, path_device_idxs, max_len) = Get_TrainLoader(args)
    ue_list_epoches = util_1.Load_communicate_user_list(args, path_device_idxs)
    model = Get_Model(args)
    criterion = Get_Criterion(args)
    optimizer = Get_Optimizer(args, model, size=size, lr=args.lr)
    if (args.fast == 0):
        fast = False
    else:
        fast = True
    scheduler = Get_Scheduler(args, optimizer, warmup_epoch=args.warmup_epoch, fast=fast)
    batch_meter = util.Meter(ptag='Time')
    comm_meter = util.Meter(ptag='Time')
    print('Now train the model')
    Fed_training = True
    # Per-user weight-divergence tracking: (user, epoch, iteration+1).
    user_weight_diff_array = np.zeros((args.size, args.epoch, (args.iteration + 1)))
    if Fed_training:
        if (args.epoch_resume == 0):
            start_epoch = 0
            if ((not args.eval_grad) and (rank == 0)):
                util_1.init_files(args, save_path, rank, prefix='Test_Acc')
                Fed_acc_list = []
        else:
            # Resuming: continue after the checkpointed epoch and reload history.
            start_epoch = (args.epoch_resume + 1)
            if (rank == 0):
                Fed_acc_list = util_1.get_acc(args, save_path, rank, prefix='Test_Acc')
        if args.eval_grad:
            args.iteration = 1
        if (args.ue_loss == 'SF'):
            args.iteration = (max_len // args.bs)
        for epoch in range(start_epoch, args.epoch):
            begin_time = time.time()
            if ((args.epoch_resume > 0) and (epoch == (args.epoch_resume + 1))):
                # First epoch after resume: load the saved averaged model weights.
                print('Loading saved averaged model ... epoch=', epoch, args.epoch_resume)
                checkpoint_weights = util_1.Load_Avg_model_checkpoint(args.experiment_folder, args.experiment_name, epoch, prefix='after')
                model.load_state_dict(checkpoint_weights, strict=False)
            if ((not args.eval_grad) or ((epoch % args.epoch_interval) == 0)):
                (user_id, WD_list, user_weight_diff_array) = train(rank, model, criterion, optimizer, scheduler, batch_meter, comm_meter, train_loader_list, test_loader_list, epoch, device, ue_list_epoches, G, user_weight_diff_array)
            if (rank == 0):
                # Rank 0 evaluates on the first test loader and logs accuracy.
                test_acc = evaluate(model, test_loader_list[0])
                test_acc = round(test_acc, 2)
                print('test acc', epoch, test_acc, (time.time() - begin_time))
                if (not args.eval_grad):
                    Fed_acc_list.append(test_acc)
                    util_1.Save_acc_file(args, save_path, rank, prefix='Test_Acc', acc_list=Fed_acc_list)
def rms(x, name=None):
    """Return the root-mean-square of tensor *x* as a scalar op named *name*.

    When *name* is omitted it is derived from the tensor's op name
    (``<op>/rms``). Fix: the original had a second, unreachable duplicate
    return statement after the ``with`` block; it has been removed.
    """
    if (name is None):
        name = (x.op.name + '/rms')
    # name_scope(None) escapes the current scope so the derived name is used verbatim.
    with tf.name_scope(None):
        return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
def _split_on_proportions_and_save(name, proportion_dict, logger, depth_and_tree_tuples):
    """Split *depth_and_tree_tuples* into contiguous subsets per *proportion_dict*,
    pickle each subset, and log a per-depth frequency table.

    Returns ``(index_dict, trees_dict)`` mapping subset name to the indices used
    and to the selected (depth, tree) tuples respectively.
    """
    length_all_data = len(depth_and_tree_tuples)
    # NOTE(review): exact float equality on a sum of proportions is fragile
    # (e.g. 0.1*3 != 0.3); consider math.isclose. Also `assert` disappears under -O.
    assert (sum(proportion_dict.values()) == 1.0), 'proportions should sum to one'
    used_so_far = 0
    out_dict = {}
    out_trees_dict = {}
    for (subset_name, proportion) in proportion_dict.items():
        # Ceil so no subset is rounded down to empty; clamp to the data length.
        number_to_use = int(np.ceil((proportion * length_all_data)))
        end_indx = min((used_so_far + number_to_use), length_all_data)
        indices = list(range(used_so_far, end_indx))
        depth_and_trees_for_subset = [depth_and_tree_tuples[i] for i in indices]
        out_trees_dict[subset_name] = depth_and_trees_for_subset
        misc.to_pickle(depth_and_trees_for_subset, path.join(PATH, f'{name}-{subset_name}-depth_and_tree_tuples.pick'))
        # Tabulate depth frequencies for the log.
        depths = collections.Counter([el[0] for el in depth_and_trees_for_subset])
        out_table = tabulate.tabulate((([('Number of levels', 'Freq')] + sorted(list(depths.items()))) + [('Total', len(depth_and_trees_for_subset))]))
        logger.info(f'''For name: {name}, subset: {subset_name}, the tree levels are:
{out_table}''')
        out_dict[subset_name] = indices
        used_so_far = end_indx
    return (out_dict, out_trees_dict)
class InceptionA(nn.Module):
    """Inception 'A' mixing block: four parallel branches (1x1, 5x5, double-3x3,
    pooled 1x1) concatenated along the channel axis; spatial size is preserved.
    """

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, 1)
        self.branch5x5_1 = BasicConv2d(in_channels, 48, 1)
        self.branch5x5_2 = BasicConv2d(48, 64, 5, padding=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, 1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, 3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, 3, padding=1)
        self.branch_pool = BasicConv2d(in_channels, pool_features, 1)

    def forward(self, x):
        # Each branch keeps H and W; only channel counts differ.
        b_1x1 = self.branch1x1(x)
        b_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        b_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        b_pool = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return torch.cat([b_1x1, b_5x5, b_3x3dbl, b_pool], 1)
def build_fake_yaml():
    """Write a minimal tuning config to ./fake_yaml.yaml for tests.

    The embedded YAML describes a TensorFlow model with a top-1 accuracy metric
    and a 'basic' tuning strategy. Fix: dropped the redundant ``f.close()`` —
    the ``with`` block already closes the file on exit.
    """
    fake_yaml = '\n    model:\n      name: fake_yaml\n      framework: tensorflow\n      inputs: x\n      outputs: op2_to_store\n    device: cpu\n    evaluation:\n      accuracy:\n        metric:\n          topk: 1\n    tuning:\n      strategy:\n        name: basic\n      accuracy_criterion:\n        relative: 0.01\n      workspace:\n        path: saved\n    '
    # Round-trip through the YAML parser so the dumped file is normalized.
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
def count_chunks(true_seqs, pred_seqs):
    """conlleval-style chunk counting over parallel gold/predicted tag sequences.

    Returns six defaultdict(int) tallies:
    (correct_chunks, true_chunks, pred_chunks, correct_counts, true_counts,
    pred_counts) — per-type chunk matches/gold/predicted plus per-tag token counts.
    Relies on the module-level split_tag / is_chunk_end / is_chunk_start helpers.
    """
    correct_chunks = defaultdict(int)
    true_chunks = defaultdict(int)
    pred_chunks = defaultdict(int)
    correct_counts = defaultdict(int)
    true_counts = defaultdict(int)
    pred_counts = defaultdict(int)
    (prev_true_tag, prev_pred_tag) = ('O', 'O')
    # Type of the chunk currently matching in both sequences, or None.
    correct_chunk = None
    for (true_tag, pred_tag) in zip(true_seqs, pred_seqs):
        if (true_tag == pred_tag):
            correct_counts[true_tag] += 1
        true_counts[true_tag] += 1
        pred_counts[pred_tag] += 1
        (_, true_type) = split_tag(true_tag)
        (_, pred_type) = split_tag(pred_tag)
        if (correct_chunk is not None):
            true_end = is_chunk_end(prev_true_tag, true_tag)
            pred_end = is_chunk_end(prev_pred_tag, pred_tag)
            if (pred_end and true_end):
                # Both chunks ended at the same position: a full match.
                correct_chunks[correct_chunk] += 1
                correct_chunk = None
            elif ((pred_end != true_end) or (true_type != pred_type)):
                # Boundaries or types diverged: abandon the running match.
                correct_chunk = None
        true_start = is_chunk_start(prev_true_tag, true_tag)
        pred_start = is_chunk_start(prev_pred_tag, pred_tag)
        if (true_start and pred_start and (true_type == pred_type)):
            # Both sequences open a chunk of the same type: start tracking a match.
            correct_chunk = true_type
        if true_start:
            true_chunks[true_type] += 1
        if pred_start:
            pred_chunks[pred_type] += 1
        (prev_true_tag, prev_pred_tag) = (true_tag, pred_tag)
    if (correct_chunk is not None):
        # Flush a match still open at the end of the sequences.
        correct_chunks[correct_chunk] += 1
    return (correct_chunks, true_chunks, pred_chunks, correct_counts, true_counts, pred_counts)
def get_inceptionv4(model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Instantiate InceptionV4, optionally downloading pretrained weights into *root*.

    Raises ValueError when *pretrained* is requested without a usable *model_name*.
    """
    net = InceptionV4(**kwargs)
    if pretrained:
        # `not model_name` already covers both None and the empty string.
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class P9(GenericPenaltyLagrangian):
    # NOTE(review): this block is corrupted — several identifiers (presumably
    # non-ASCII names such as Greek letters for penalty parameters) were stripped
    # by text extraction, leaving invalid Python (e.g. "y: Tensor, : Tensor").
    # Recover the real parameter/attribute names from the original source before use.
    def __call__(self, y: Tensor, : Tensor, : Tensor) -> Tensor:
        _adjusted = torch.max(, (2 * ))
        tilde_x = self..tilde(, _adjusted)
        return (self.(((_adjusted * y) + tilde_x)) - self.(tilde_x))
def test_nulticlass_task():
    """measure_interactions on a 3-class synthetic set returns C(10,2)=45 pairs.

    (Function name keeps its original typo — 'nulticlass' — so test collection
    and any references stay stable.)
    """
    from sklearn.datasets import make_classification
    features, target = make_classification(n_samples=100, n_features=10, n_informative=3, n_classes=3, random_state=2022)
    ranked_strengths = measure_interactions(features, target)
    assert len(ranked_strengths) == 45
def get_datasets(logdir, condition=None):
    """Walk *logdir* and load every ``progress.txt`` into a labeled DataFrame.

    Each experiment directory gets Unit/Condition1/Condition2 columns plus
    normalized Reward/Cost performance columns. Uses the module-level globals
    ``exp_idx`` and ``units`` to number experiments per condition.

    Fixes vs. original: config.json was opened without ever being closed
    (file-handle leak) — now read via a context manager; two bare ``except:``
    clauses narrowed to ``except Exception`` so KeyboardInterrupt/SystemExit
    are no longer swallowed.
    """
    global exp_idx
    global units
    datasets = []
    for (root, _, files) in os.walk(logdir):
        if ('progress.txt' not in files):
            continue
        exp_name = None
        try:
            # Context manager fixes the original's leaked file handle.
            with open(os.path.join(root, 'config.json')) as config_file:
                config = json.load(config_file)
            if ('exp_name' in config):
                exp_name = config['exp_name']
        except Exception:
            print('No file named config.json')
        condition1 = (condition or exp_name or 'exp')
        condition2 = ((condition1 + '-') + str(exp_idx))
        exp_idx += 1
        if (condition1 not in units):
            units[condition1] = 0
        unit = units[condition1]
        units[condition1] += 1
        try:
            exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
        except Exception:
            print(('Could not read from %s' % os.path.join(root, 'progress.txt')))
            continue
        # Column names differ between logger versions; prefer the short forms.
        reward_performance = ('EpRet' if ('EpRet' in exp_data) else 'AverageEpRet')
        cost_performance = ('EpCost' if ('EpCost' in exp_data) else 'AverageEpCost')
        cost_rate_performance = ('AverageTestCostRate' if ('AverageTestCostRate' in exp_data) else 'CostRate')
        exp_data.insert(len(exp_data.columns), 'Unit', unit)
        exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
        exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
        exp_data.insert(len(exp_data.columns), 'Reward_Performance', exp_data[reward_performance])
        if (cost_performance in exp_data):
            exp_data.insert(len(exp_data.columns), 'Cost_Performance', exp_data[cost_performance])
        if (cost_rate_performance in exp_data):
            exp_data.insert(len(exp_data.columns), 'Cost_Rate_Performance', exp_data[cost_rate_performance])
        datasets.append(exp_data)
    return datasets
def ExtractCam(gall_img):
    """Extract the camera id encoded at position -10 of each gallery filename.

    Returns a numpy array of ints, one per input path.
    """
    return np.array([int(img_path[-10]) for img_path in gall_img])
# NOTE(review): the bare "(scope='function')" below is almost certainly the argument
# list of a stripped decorator — presumably "@pytest.fixture(scope='function')" lost
# in extraction; as written it is not valid as a decorator. Confirm against the
# original file.
(scope='function')
def ray_local_session_fixture():
    """Yield a local-mode Ray session for one test, shutting Ray down afterwards."""
    if (not ray.is_initialized()):
        ray.init(local_mode=True, ignore_reinit_error=True, log_to_driver=False, include_webui=False)
    (yield)
    # Only shut down if a session is actually live (a test may have stopped it).
    if ray.is_initialized():
        ray.shutdown()
def test_ade_double_double_track():
    """Interactively exercise (tuned) double-double path tracking on cyclic-3.

    Prompts whether to tune path parameters first; prints each tracked solution.
    """
    c3, c3q, c3qsols = cyclic3homotopy()
    ans = input('Tune the path parameters ? (y/n) ')
    if ans == 'y':
        from phcpy.tuning import tune_path_parameters as tune
        pars = tune(32)
        sols = ade_tuned_double_double_track(c3, c3q, c3qsols, pars)
    else:
        sols = ade_double_double_track(c3, c3q, c3qsols)
    for sol in sols:
        print(sol)
def rename_cols(df, outcome, *, y_true=None, y_pred=None, uncertainty=None):
    """Rename *df*'s outcome-specific columns to generic 'y_true'/'y_pred'/'uncertainty'.

    When a source column name is not supplied, it is derived via the
    *_header(outcome, ...) helpers, preferring the underscore variant if that
    spelling is actually present in df.columns. Mutates *df* in place; returns None.
    """
    if (y_true is None):
        y_true = y_true_header(outcome, underscore=(y_true_header(outcome, underscore=True) in df.columns))
        # NOTE(review): nesting assumed — this fallback is taken to apply only when
        # y_true was derived above (indentation was ambiguous in the reviewed copy);
        # confirm against the original source.
        if (y_true not in df.columns):
            y_true = (str(outcome) + '-y_true')
    if (y_pred is None):
        y_pred = y_pred_header(outcome, underscore=(y_pred_header(outcome, underscore=True) in df.columns))
    if (uncertainty is None):
        uncertainty = uncertainty_header(outcome, underscore=(uncertainty_header(outcome, underscore=True) in df.columns))
    new_cols = {y_true: 'y_true', y_pred: 'y_pred', uncertainty: 'uncertainty'}
    df.rename(columns=new_cols, inplace=True)
# NOTE(review): the bare "_module()" below looks like the tail of a stripped decorator
# (presumably "@DATASETS.register_module()") lost in extraction; confirm against the
# original file.
_module()
class OrgUDADataset(object):
    """Paired source/target dataset for unsupervised domain adaptation,
    optionally using Rare Class Sampling (RCS) on the source domain.
    """

    def __init__(self, source, target, cfg):
        self.source = source
        self.target = target
        self.ignore_index = target.ignore_index
        self.CLASSES = target.CLASSES
        self.PALETTE = target.PALETTE
        # Source and target must share the label space exactly.
        assert (target.ignore_index == source.ignore_index)
        assert (target.CLASSES == source.CLASSES)
        assert (target.PALETTE == source.PALETTE)
        rcs_cfg = cfg.get('rare_class_sampling')
        self.rcs_enabled = (rcs_cfg is not None)
        if self.rcs_enabled:
            # Temperature controls how strongly rare classes are over-sampled.
            self.rcs_class_temp = rcs_cfg['class_temp']
            self.rcs_min_crop_ratio = rcs_cfg['min_crop_ratio']
            self.rcs_min_pixels = rcs_cfg['min_pixels']
            (self.rcs_classes, self.rcs_classprob) = get_rcs_class_probs(cfg['source']['data_root'], self.rcs_class_temp)
            mmcv.print_log(f'RCS Classes: {self.rcs_classes}', 'mmseg')
            mmcv.print_log(f'RCS ClassProb: {self.rcs_classprob}', 'mmseg')
            # Map each class to source files containing enough of its pixels.
            with open(osp.join(cfg['source']['data_root'], 'samples_with_class.json'), 'r') as of:
                samples_with_class_and_n = json.load(of)
            samples_with_class_and_n = {int(k): v for (k, v) in samples_with_class_and_n.items() if (int(k) in self.rcs_classes)}
            self.samples_with_class = {}
            for c in self.rcs_classes:
                self.samples_with_class[c] = []
                for (file, pixels) in samples_with_class_and_n[c]:
                    if (pixels > self.rcs_min_pixels):
                        self.samples_with_class[c].append(file.split('/')[(- 1)])
                assert (len(self.samples_with_class[c]) > 0)
            # Map annotation filename -> index into the source dataset.
            self.file_to_idx = {}
            for (i, dic) in enumerate(self.source.img_infos):
                file = dic['ann']['seg_map']
                if isinstance(self.source, CityscapesDataset):
                    file = file.split('/')[(- 1)]
                self.file_to_idx[file] = i

    def get_rare_class_sample(self):
        """Sample a source item biased toward rare classes, paired with a random target item."""
        c = np.random.choice(self.rcs_classes, p=self.rcs_classprob)
        f1 = np.random.choice(self.samples_with_class[c])
        i1 = self.file_to_idx[f1]
        s1 = self.source[i1]
        if (self.rcs_min_crop_ratio > 0):
            # Re-crop up to 10 times until the sampled class is sufficiently present.
            for j in range(10):
                n_class = torch.sum((s1['gt_semantic_seg'].data == c))
                if (n_class > (self.rcs_min_pixels * self.rcs_min_crop_ratio)):
                    break
                s1 = self.source[i1]
        i2 = np.random.choice(range(len(self.target)))
        s2 = self.target[i2]
        output = {**s1, 'target_img_metas': s2['img_metas'], 'target_img': s2['img']}
        return output

    def __getitem__(self, idx):
        if self.rcs_enabled:
            return self.get_rare_class_sample()
        else:
            # Deterministic pairing: idx enumerates the source x target product.
            s1 = self.source[(idx // len(self.target))]
            s2 = self.target[(idx % len(self.target))]
            output = {**s1, 'target_img_metas': s2['img_metas'], 'target_img': s2['img']}
            return output

    def __len__(self):
        # Cartesian product of the two datasets.
        return (len(self.source) * len(self.target))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.