code stringlengths 101 5.91M |
|---|
class QuotientsCategory(RegressiveCovariantConstructionCategory):
    """Functorial construction category for quotients (Sage-style).

    The ``Quotients()`` construction on a category produces this category;
    quotients of objects are in particular subquotients, which is what
    ``default_super_categories`` encodes.
    """

    # Name of the functorial construction this category implements.
    _functor_category = 'Quotients'

    # NOTE(review): first parameter is `cls`, so this looks like it is meant
    # to be a classmethod — the decorator may live outside this view; confirm.
    def default_super_categories(cls, category):
        """Return the default super categories of ``category.Quotients()``:
        the join of ``category.Subquotients()`` with the construction's
        generic super categories."""
        return Category.join([category.Subquotients(), super().default_super_categories(category)])
class TestAllFindings():
    """Tests for AllFindingsFilterTask: the task should return the run's
    findings unchanged, truncated to the configured limit."""

    def setup(self):
        # Fresh stub detector/run per test, and a task with a generous limit.
        self.detector = StubDetector()
        self.misuse = create_misuse('-m1-')
        self.misuses = [self.misuse, create_misuse('-m2-')]
        self.detector_run = MagicMock()
        self.detector_run.detector = self.detector
        self.uut = AllFindingsFilterTask(50)

    def test_returns_all_findings(self):
        """With the limit not reached, all findings pass through unchanged."""
        expected = [Finding({'rank': '1', 'misuse': 'finding-0', 'file': ''}),
                    Finding({'rank': '2', 'misuse': 'finding-1', 'file': ''})]
        self.detector_run.findings = expected
        actual = self.uut.run(self.detector_run)
        assert_equals(expected, actual.findings)

    def test_limits_number_of_findings(self):
        """Lowering the limit truncates the result to that many findings."""
        # Renamed from `all` to avoid shadowing the builtin.
        findings = [Finding({'rank': '1'}), Finding({'rank': '2'})]
        self.detector_run.findings = findings
        self.uut.limit = 1
        actual = self.uut.run(self.detector_run)
        assert_equals(1, len(actual.findings))
def _seg_34():
    """Return one segment of a generated lookup table.

    Each entry is ``(id, tag)`` or ``(id, tag, text)``; tag ``'M'`` entries
    carry a unit/abbreviation string (possibly empty), tag ``'X'`` entries
    carry none.  Presumably generated data split into ``_seg_NN`` chunks to
    keep individual functions small — confirm against sibling segments.
    """
    return [(13170, 'M', u'da'), (13171, 'M', u'au'), (13172, 'M', u'bar'), (13173, 'M', u'ov'), (13174, 'M', u'pc'), (13175, 'M', u'dm'), (13176, 'M', u'dm2'), (13177, 'M', u'dm3'), (13178, 'M', u'iu'), (13179, 'M', u''), (13180, 'M', u''), (13181, 'M', u''), (13182, 'M', u''), (13183, 'M', u''), (13184, 'M', u'pa'), (13185, 'M', u'na'), (13186, 'M', u'a'), (13187, 'M', u'ma'), (13188, 'M', u'ka'), (13189, 'M', u'kb'), (13190, 'M', u'mb'), (13191, 'M', u'gb'), (13192, 'M', u'cal'), (13193, 'M', u'kcal'), (13194, 'M', u'pf'), (13195, 'M', u'nf'), (13196, 'M', u'f'), (13197, 'M', u'g'), (13198, 'M', u'mg'), (13199, 'M', u'kg'), (13200, 'M', u'hz'), (13201, 'M', u'khz'), (13202, 'M', u'mhz'), (13203, 'M', u'ghz'), (13204, 'M', u'thz'), (13205, 'M', u'l'), (13206, 'M', u'ml'), (13207, 'M', u'dl'), (13208, 'M', u'kl'), (13209, 'M', u'fm'), (13210, 'M', u'nm'), (13211, 'M', u'm'), (13212, 'M', u'mm'), (13213, 'M', u'cm'), (13214, 'M', u'km'), (13215, 'M', u'mm2'), (13216, 'M', u'cm2'), (13217, 'M', u'm2'), (13218, 'M', u'km2'), (13219, 'M', u'mm3'), (13220, 'M', u'cm3'), (13221, 'M', u'm3'), (13222, 'M', u'km3'), (13223, 'M', u'ms'), (13224, 'M', u'ms2'), (13225, 'M', u'pa'), (13226, 'M', u'kpa'), (13227, 'M', u'mpa'), (13228, 'M', u'gpa'), (13229, 'M', u'rad'), (13230, 'M', u'rads'), (13231, 'M', u'rads2'), (13232, 'M', u'ps'), (13233, 'M', u'ns'), (13234, 'M', u's'), (13235, 'M', u'ms'), (13236, 'M', u'pv'), (13237, 'M', u'nv'), (13238, 'M', u'v'), (13239, 'M', u'mv'), (13240, 'M', u'kv'), (13241, 'M', u'mv'), (13242, 'M', u'pw'), (13243, 'M', u'nw'), (13244, 'M', u'w'), (13245, 'M', u'mw'), (13246, 'M', u'kw'), (13247, 'M', u'mw'), (13248, 'M', u'k'), (13249, 'M', u'm'), (13250, 'X'), (13251, 'M', u'bq'), (13252, 'M', u'cc'), (13253, 'M', u'cd'), (13254, 'M', u'ckg'), (13255, 'X'), (13256, 'M', u'db'), (13257, 'M', u'gy'), (13258, 'M', u'ha'), (13259, 'M', u'hp'), (13260, 'M', u'in'), (13261, 'M', u'kk'), (13262, 'M', u'km'), (13263, 'M', u'kt'), (13264, 'M', u'lm'),
            (13265, 'M', u'ln'), (13266, 'M', u'log'), (13267, 'M', u'lx'), (13268, 'M', u'mb'), (13269, 'M', u'mil')]
class OpenImagesCfg():
    """Configuration record for an OpenImages dataset variant.

    Attribute defaults are placeholders; ``variant``, ``num_classes`` and
    ``splits`` are presumably filled in by concrete subclasses or factory
    code elsewhere — confirm against callers.
    """

    # Dataset variant identifier (e.g. a version/challenge name).
    variant: str = None
    # Name of the parser implementation used to read annotations.
    parser: str = 'openimages'
    # Number of classes in this variant.
    num_classes: int = None
    # printf-style pattern mapping an image id to its file name.
    img_filename: str = '%s.jpg'
    # Per-split settings keyed by split name (normalized from Dict[(str, dict)]).
    splits: Dict[str, dict] = None
def write_lst(lst, output_file):
    """Write the given lines to *output_file*.

    Args:
        lst: Iterable of strings, written verbatim (no newlines are added).
        output_file: Path of the file to create/overwrite.
    """
    print('Writing lines to file...')
    # `with` guarantees the file is flushed and closed even if writing fails;
    # the original leaked the handle on exception.
    with open(output_file, 'w') as out_f:
        out_f.writelines(lst)
    print('Lines written to files')
class BernoulliTS(BaseContextFreePolicy):
    """Bernoulli Thompson Sampling policy.

    Maintains a Beta(alpha + successes, beta + failures) posterior per
    action, samples one reward estimate per action, and ranks actions by
    the sampled values.  Field-with-default layout plus ``__post_init__``
    indicates dataclass-style initialization via the base class — confirm.
    """

    # Prior hyperparameters of the per-action Beta posterior.
    alpha: Optional[np.ndarray] = None
    beta: Optional[np.ndarray] = None
    # When True, load hard-coded production priors keyed by `campaign`.
    is_zozotown_prior: bool = False
    campaign: Optional[str] = None
    policy_name: str = 'bts'

    def __post_init__(self) -> None:
        """Initialize priors: production values for ZOZOTOWN, else all-ones."""
        super().__post_init__()
        if self.is_zozotown_prior:
            if (self.campaign is None):
                raise Exception('`campaign` must be specified when `is_zozotown_prior` is True.')
            self.alpha = production_prior_for_bts[self.campaign]['alpha']
            self.beta = production_prior_for_bts[self.campaign]['beta']
        else:
            # Default to a uniform Beta(1, 1) prior per action.
            self.alpha = (np.ones(self.n_actions) if (self.alpha is None) else self.alpha)
            self.beta = (np.ones(self.n_actions) if (self.beta is None) else self.beta)

    def select_action(self) -> np.ndarray:
        """Sample posterior rewards and return the top `len_list` action ids,
        best first (descending sampled reward)."""
        predicted_rewards = self.random_.beta(a=(self.reward_counts + self.alpha), b=((self.action_counts - self.reward_counts) + self.beta))
        return predicted_rewards.argsort()[::(- 1)][:self.len_list]

    def update_params(self, action: int, reward: float) -> None:
        """Record one (action, reward) observation.

        Counts accumulate in `*_temp` buffers and are published to the
        posteriors only every `batch_size` trials (batched updates).
        """
        self.n_trial += 1
        self.action_counts_temp[action] += 1
        self.reward_counts_temp[action] += reward
        if ((self.n_trial % self.batch_size) == 0):
            self.action_counts = np.copy(self.action_counts_temp)
            self.reward_counts = np.copy(self.reward_counts_temp)

    def compute_batch_action_dist(self, n_rounds: int=1, n_sim: int=100000) -> np.ndarray:
        """Estimate the action-choice distribution by Monte Carlo.

        Runs `n_sim` draws of `select_action` and returns the empirical
        frequency of each (action, position) pair, tiled to shape
        (n_rounds, n_actions, len_list).
        """
        action_count = np.zeros((self.n_actions, self.len_list))
        for _ in np.arange(n_sim):
            selected_actions = self.select_action()
            for pos in np.arange(self.len_list):
                action_count[(selected_actions[pos], pos)] += 1
        action_dist = np.tile((action_count / n_sim), (n_rounds, 1, 1))
        return action_dist
def ComplexIntervalField(prec=53, names=None):
    """Return the complex interval field with the given bit precision.

    Fields are cached per precision through weak references, so repeated
    calls with the same ``prec`` return the same object while it is still
    alive.  ``names`` is accepted for interface compatibility and unused.
    """
    global cache
    existing = cache.get(prec)
    if existing is not None:
        field = existing()          # dereference the weakref
        if field is not None:
            return field
    field = ComplexIntervalField_class(prec)
    cache[prec] = weakref.ref(field)
    return field
def starts_with(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
    """Return True if the two token sequences agree pairwise on their overlap.

    Comparison stops at the length of the shorter sequence; any trailing
    tokens of the longer one are ignored (so an empty sequence matches
    anything).
    """
    paired = zip(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
    return all(ana == ante for ana, ante in paired)
def params_and_buffers(module):
    """Return all parameters of *module* followed by all its buffers, as one list."""
    assert isinstance(module, torch.nn.Module)
    tensors = list(module.parameters())
    tensors.extend(module.buffers())
    return tensors
def demo_heuristic_lander(env, w, seed=None):
    """Roll out one episode of the lander env under a heuristic controller.

    Args:
        env: A gym-style environment (wrapped here with a video Monitor).
        w: Controller weights passed to `heuristic_Controller`.
        seed: Optional RNG seed for the environment.

    Returns:
        Total (undiscounted) episode reward; on timeout, the accumulated
        reward minus TIMEOUT_REWARD.
    """
    total_reward = 0
    steps = 0
    # NOTE(review): wrappers.Monitor records to the current directory;
    # `force=True` overwrites previous recordings.
    env = wrappers.Monitor(env, './', force=True)
    # NOTE(review): reset is called twice — the first seeds the env, the
    # second produces the initial observation from the seeded RNG. Confirm
    # this matches the gym API version in use (newer gym returns the
    # observation from the seeded reset directly).
    env.reset(seed=seed)
    s = env.reset()
    while True:
        if (steps > STEPS_LIMIT):
            # Abort over-long episodes with a timeout penalty.
            total_reward -= TIMEOUT_REWARD
            return total_reward
        a = heuristic_Controller(s, w)
        (s, r, done, info) = env.step(a)
        total_reward += r
        steps += 1
        if done:
            break
    return total_reward
def get_display_profile(handle=None):
    """Return the ICC color profile of the current display, or None.

    Only implemented on Windows; every other platform yields None.
    ``handle`` may be an ``ImageWin.HDC`` wrapper or a raw window handle
    (0 is used when omitted).
    """
    if sys.platform != 'win32':
        return None
    from PIL import ImageWin
    if isinstance(handle, ImageWin.HDC):
        raw_profile = core.get_display_profile_win32(handle, 1)
    else:
        raw_profile = core.get_display_profile_win32(handle or 0)
    if raw_profile is None:
        return None
    return ImageCmsProfile(raw_profile)
class TypeSpec():
    """Registry of named types for a Tyrell spec.

    Maps type names to `Type` objects and rejects duplicate definitions.
    """

    _types: Dict[(str, Type)]

    def __init__(self):
        self._types = dict()

    def get_type(self, name: str) -> Optional[Type]:
        """Return the type registered under *name*, or None if absent."""
        return self._types.get(name)

    def get_type_or_raise(self, name: str) -> Type:
        """Return the type registered under *name*; raises KeyError if absent."""
        return self._types[name]

    def define_type(self, ty: Type) -> Type:
        """Register *ty* under its own name and return it.

        Raises:
            ValueError: if a type with the same name is already defined.
        """
        name = ty.name
        if (name in self._types):
            raise ValueError('The type has already been defined in the Tyrell spec: {}'.format(ty))
        else:
            self._types[name] = ty
            return ty

    def types(self) -> Iterable[Type]:
        """Return a view over all registered types (insertion order)."""
        return self._types.values()

    def num_types(self) -> int:
        """Return the number of registered types."""
        return len(self._types)

    def __repr__(self) -> str:
        # Fixed: was `__repr`, which Python name-mangles to a private method
        # instead of the repr hook, so repr(spec) never used this code.
        return 'TypeSpec({})'.format([str(x) for x in self._types.values()])
class NOPaxosClient(AppConfig):
    """App config that launches the NOPaxos benchmark client on a node."""

    def __init__(self) -> None:
        super().__init__()
        # IPs of the NOPaxos servers, each pinged before the client starts.
        self.server_ips: tp.List[str] = []
        # Last client exits after a short sleep; others sleep forever.
        self.is_last = False
        # Whether to enable the end-host sequencer flag (-e).
        self.use_ehseq = False

    def run_cmds(self, node: NodeConfig) -> tp.List[str]:
        """Return the shell command sequence to execute on *node*."""
        commands = [('ping -c 2 ' + ip) for ip in self.server_ips]
        client_cmd = ('/root/nopaxos/bench/client -c /root/nopaxos.config ' + '-m nopaxos -u 2 -h ') + node.ip
        if self.use_ehseq:
            client_cmd += ' -e'
        commands.append(client_cmd)
        commands.append('sleep 1' if self.is_last else 'sleep infinity')
        return commands
def create_exp_name(exp_prefix, exp_id=0, seed=0):
    """Build a unique experiment name from prefix, local timestamp, id and seed.

    Format: ``<prefix>_<YYYY_MM_DD_HH_MM_SS>_<id padded to 4>--s-<seed>``.
    """
    timestamp = datetime.datetime.now(dateutil.tz.tzlocal()).strftime('%Y_%m_%d_%H_%M_%S')
    return '%s_%s_%04d--s-%d' % (exp_prefix, timestamp, exp_id, seed)
def configure_gpu(use_gpu: bool, which_gpu: int) -> torch.device:
    """Select the torch device and configure the CUDA environment.

    Args:
        use_gpu: Whether to run on CUDA.
        which_gpu: Index of the GPU to expose (ignored on the CPU path).

    Returns:
        ``torch.device('cuda')`` or ``torch.device('cpu')``.
    """
    if use_gpu:
        # Set the environment variables *before* touching CUDA: once the
        # CUDA context has been initialized, CUDA_VISIBLE_DEVICES changes
        # are ignored. The original created the device first.
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = str(which_gpu)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    return device
class TestJsonIO(object):
    """Regression tests for JsonIO corpus loading.

    Each test reads one NER / relation-extraction corpus from disk and pins
    exact dataset statistics: example counts, chunk/relation counts, maximum
    span widths, nesting counts, and overlap levels.  All tests require the
    corresponding data files to be present under ``data/``.
    """

    def test_ace2004(self):
        """ACE2004 nested-NER corpus: sizes, chunk counts, span widths, nesting."""
        io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end')
        train_data = io.read('data/ace-lu2015emnlp/ACE2004/train.json')
        dev_data = io.read('data/ace-lu2015emnlp/ACE2004/dev.json')
        test_data = io.read('data/ace-lu2015emnlp/ACE2004/test.json')
        assert (len(train_data) == 6799)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 22207)
        assert (max(((ck[2] - ck[1]) for ex in train_data for ck in ex['chunks'])) == 57)
        assert (len(dev_data) == 829)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 2511)
        assert (max(((ck[2] - ck[1]) for ex in dev_data for ck in ex['chunks'])) == 35)
        assert (len(test_data) == 879)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 3031)
        assert (max(((ck[2] - ck[1]) for ex in test_data for ck in ex['chunks'])) == 43)
        assert (sum((count_nested(ex['chunks']) for ex in ((train_data + dev_data) + test_data))) == 7832)
        assert (max(((end - start) for ex in ((train_data + dev_data) + test_data) for (_, start, end) in ex['chunks'])) == 57)

    def test_ace2005(self):
        """ACE2005 nested-NER corpus: sizes, chunk counts, span widths, nesting."""
        io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end')
        train_data = io.read('data/ace-lu2015emnlp/ACE2005/train.json')
        dev_data = io.read('data/ace-lu2015emnlp/ACE2005/dev.json')
        test_data = io.read('data/ace-lu2015emnlp/ACE2005/test.json')
        assert (len(train_data) == 7336)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 24687)
        assert (max(((ck[2] - ck[1]) for ex in train_data for ck in ex['chunks'])) == 49)
        assert (len(dev_data) == 958)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 3217)
        assert (max(((ck[2] - ck[1]) for ex in dev_data for ck in ex['chunks'])) == 30)
        assert (len(test_data) == 1047)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 3027)
        assert (max(((ck[2] - ck[1]) for ex in test_data for ck in ex['chunks'])) == 27)
        assert (sum((count_nested(ex['chunks']) for ex in ((train_data + dev_data) + test_data))) == 7460)
        assert (max(((end - start) for ex in ((train_data + dev_data) + test_data) for (_, start, end) in ex['chunks'])) == 49)

    def test_genia(self):
        """GENIA term corpus: counts, nesting, unique spans, document keys."""
        io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end', retain_keys=['doc_key', 'bibliomisc'])
        train_data = io.read('data/genia/term.train.json')
        dev_data = io.read('data/genia/term.dev.json')
        test_data = io.read('data/genia/term.test.json')
        assert (len(train_data) == 15023)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 46164)
        assert (len(dev_data) == 1669)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 4371)
        assert (len(test_data) == 1854)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 5511)
        assert (sum((count_nested(ex['chunks']) for ex in ((train_data + dev_data) + test_data))) == 5431)
        assert (sum((len(set([(s, e) for (_, s, e) in ex['chunks']])) for ex in ((train_data + dev_data) + test_data))) == 56015)
        assert (max(((end - start) for ex in ((train_data + dev_data) + test_data) for (_, start, end) in ex['chunks'])) == 18)
        assert (len(set([ex['doc_key'] for ex in ((train_data + dev_data) + test_data)])) == 2000)

    def test_genia_yu2020acl(self):
        """GENIA split from Yu et al. 2020: counts and max span width."""
        io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end')
        train_data = io.read('data/genia-yu2020acl/train_dev.json')
        test_data = io.read('data/genia-yu2020acl/test.json')
        assert (len(train_data) == 16692)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 50509)
        assert (len(test_data) == 1854)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 5506)
        assert (max(((end - start) for ex in (train_data + test_data) for (_, start, end) in ex['chunks'])) == 18)

    def test_kbp2017_shen2022acl(self):
        """KBP2017 split from Shen et al. 2022: counts, nesting, doc ids."""
        io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end', retain_keys=['org_id'])
        train_data = io.read('data/kbp2017-shen2022acl/kbp17_train_context.json')
        dev_data = io.read('data/kbp2017-shen2022acl/kbp17_dev_context.json')
        test_data = io.read('data/kbp2017-shen2022acl/kbp17_test_context.json')
        assert (len(train_data) == 10546)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 31235)
        assert (len(dev_data) == 545)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 1879)
        assert (len(test_data) == 4267)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 12600)
        assert (sum((count_nested(ex['chunks']) for ex in ((train_data + dev_data) + test_data))) == 7479)
        assert (max(((end - start) for ex in ((train_data + dev_data) + test_data) for (_, start, end) in ex['chunks'])) == 49)
        assert (len(set([ex['org_id'] for ex in ((train_data + dev_data) + test_data)])) == 619)

    def test_nne_ringland2019acl(self):
        """NNE corpus (Ringland et al. 2019): counts and duplicate-span stats."""
        io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end')
        train_data = io.read('data/nne-ringland2019acl/train.json')
        dev_data = io.read('data/nne-ringland2019acl/dev.json')
        test_data = io.read('data/nne-ringland2019acl/test.json')
        assert (len(train_data) == 43457)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 248136)
        assert (len(dev_data) == 1989)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 10463)
        assert (len(test_data) == 3762)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 21196)
        # Count chunks that share a (start, end) span with another chunk.
        assert (sum([(len(ex['chunks']) - len(set(((s, e) for (_, s, e) in ex['chunks'])))) for ex in train_data]) == 16996)
        assert (sum([(len(ex['chunks']) - len(set(((s, e) for (_, s, e) in ex['chunks'])))) for ex in dev_data]) == 801)
        assert (sum([(len(ex['chunks']) - len(set(((s, e) for (_, s, e) in ex['chunks'])))) for ex in test_data]) == 1612)
        assert (max(((end - start) for ex in ((train_data + dev_data) + test_data) for (_, start, end) in ex['chunks'])) == 16)

    def test_nne_shen2022acl(self):
        """NNE split from Shen et al. 2022: counts and max span width."""
        io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end', retain_keys=['org_id'])
        train_data = io.read('data/nne-shen2022acl/nne_train_context.json')
        dev_data = io.read('data/nne-shen2022acl/nne_dev_context.json')
        test_data = io.read('data/nne-shen2022acl/nne_test_context.json')
        assert (len(train_data) == 43457)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 248136)
        assert (len(dev_data) == 1989)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 10463)
        assert (len(test_data) == 3762)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 21196)
        assert (max(((end - start) for ex in ((train_data + dev_data) + test_data) for (_, start, end) in ex['chunks'])) == 16)

    def test_ace2004_rel(self):
        """ACE2004 with relations (Luan et al. 2019 CV split 0)."""
        io = JsonIO(relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail')
        train_data = io.read('data/ace-luan2019naacl/ace04/cv0.train.json')
        test_data = io.read('data/ace-luan2019naacl/ace04/cv0.test.json')
        assert (len(train_data) == 6898)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 18065)
        assert (sum((len(ex['relations']) for ex in train_data)) == 3292)
        assert (len(test_data) == 1785)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 4670)
        assert (sum((len(ex['relations']) for ex in test_data)) == 795)

    def test_ace2005_rel(self):
        """ACE2005 with relations (Luan et al. 2019 split)."""
        io = JsonIO(relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail')
        train_data = io.read('data/ace-luan2019naacl/ace05/train.json')
        dev_data = io.read('data/ace-luan2019naacl/ace05/dev.json')
        test_data = io.read('data/ace-luan2019naacl/ace05/test.json')
        assert (len(train_data) == 10051)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 26473)
        assert (sum((len(ex['relations']) for ex in train_data)) == 4788)
        assert (len(dev_data) == 2424)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 6338)
        assert (sum((len(ex['relations']) for ex in dev_data)) == 1131)
        assert (len(test_data) == 2050)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 5476)
        assert (sum((len(ex['relations']) for ex in test_data)) == 1151)

    def test_conll2004(self):
        """CoNLL2004 joint entity/relation corpus; chunks must be FLAT."""
        json_io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end', relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail')
        train_data = json_io.read('data/conll2004/conll04_train.json')
        dev_data = json_io.read('data/conll2004/conll04_dev.json')
        test_data = json_io.read('data/conll2004/conll04_test.json')
        assert (len(train_data) == 922)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 3377)
        assert (sum((len(ex['relations']) for ex in train_data)) == 1283)
        assert (len(dev_data) == 231)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 893)
        assert (sum((len(ex['relations']) for ex in dev_data)) == 343)
        assert (len(test_data) == 288)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 1079)
        assert (sum((len(ex['relations']) for ex in test_data)) == 422)
        assert (max((detect_overlapping_level(ex['chunks']) for ex in ((train_data + dev_data) + test_data))) == FLAT)
        assert all(((filter_clashed_by_priority(ex['chunks'], allow_level=FLAT) == ex['chunks']) for ex in ((train_data + dev_data) + test_data)))

    def test_SciERC(self):
        """SciERC joint corpus; chunks may be NESTED but not clash beyond that."""
        json_io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end', relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail')
        train_data = json_io.read('data/SciERC/scierc_train.json')
        dev_data = json_io.read('data/SciERC/scierc_dev.json')
        test_data = json_io.read('data/SciERC/scierc_test.json')
        assert (len(train_data) == 1861)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 5598)
        assert (sum((len(ex['relations']) for ex in train_data)) == 3215)
        assert (len(dev_data) == 275)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 811)
        assert (sum((len(ex['relations']) for ex in dev_data)) == 455)
        assert (len(test_data) == 551)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 1685)
        assert (sum((len(ex['relations']) for ex in test_data)) == 974)
        assert (max((detect_overlapping_level(ex['chunks']) for ex in ((train_data + dev_data) + test_data))) == NESTED)
        assert all(((filter_clashed_by_priority(ex['chunks'], allow_level=NESTED) == ex['chunks']) for ex in ((train_data + dev_data) + test_data)))

    def test_ADE(self):
        """ADE corpus (single file, no splits); NESTED chunks allowed."""
        json_io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end', relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail')
        data = json_io.read('data/ADE/ade_full.json')
        assert (len(data) == 4272)
        assert (sum((len(ex['chunks']) for ex in data)) == 10839)
        assert (sum((len(ex['relations']) for ex in data)) == 6821)
        assert (max((detect_overlapping_level(ex['chunks']) for ex in data)) == NESTED)
        assert all(((filter_clashed_by_priority(ex['chunks'], allow_level=NESTED) == ex['chunks']) for ex in data))

    def test_yidu_s4k(self):
        """Yidu-S4K (Chinese, char-tokenized): counts plus zero read errors."""
        io = JsonIO(is_tokenized=False, tokenize_callback='char', text_key='originalText', chunk_key='entities', chunk_type_key='label_type', chunk_start_key='start_pos', chunk_end_key='end_pos', is_whole_piece=False, encoding='utf-8-sig')
        (train_data1, train_errors1, train_mismatches1) = io.read('data/yidu_s4k/subtask1_training_part1.txt', return_errors=True)
        (train_data2, train_errors2, train_mismatches2) = io.read('data/yidu_s4k/subtask1_training_part2.txt', return_errors=True)
        (train_data, train_errors, train_mismatches) = ((train_data1 + train_data2), (train_errors1 + train_errors2), (train_mismatches1 + train_mismatches2))
        (test_data, test_errors, test_mismatches) = io.read('data/yidu_s4k/subtask1_test_set_with_answer.json', return_errors=True)
        assert (len(train_data) == 1000)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 17653)
        assert (len(train_errors) == 0)
        assert (len(train_mismatches) == 0)
        assert (len(test_data) == 379)
        assert (sum((len(ex['chunks']) for ex in test_data)) == 6002)
        assert (len(test_errors) == 0)
        assert (len(test_mismatches) == 0)
        assert (max((detect_overlapping_level(ex['chunks']) for ex in (train_data + test_data))) == FLAT)
        assert all(((filter_clashed_by_priority(ex['chunks'], allow_level=FLAT) == ex['chunks']) for ex in (train_data + test_data)))

    def test_cmeee(self):
        """CMeEE (Chinese, char-tokenized): counts; ARBITRARY overlap allowed."""
        io = JsonIO(is_tokenized=False, tokenize_callback='char', text_key='text', chunk_key='entities', chunk_type_key='type', chunk_start_key='start_idx', chunk_end_key='end_idx', encoding='utf-8')
        (train_data, train_errors, train_mismatches) = io.read('data/cblue/CMeEE/CMeEE_train_vz.json', return_errors=True)
        (dev_data, dev_errors, dev_mismatches) = io.read('data/cblue/CMeEE/CMeEE_dev_vz.json', return_errors=True)
        test_data = io.read('data/cblue/CMeEE/CMeEE_test_vz.json')
        assert (len(train_data) == 15000)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 61796)
        assert (len(train_errors) == 0)
        assert (len(train_mismatches) == 0)
        assert (len(dev_data) == 5000)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 20300)
        assert (len(dev_errors) == 0)
        assert (len(dev_mismatches) == 0)
        assert (len(test_data) == 3000)
        assert (max((detect_overlapping_level(ex['chunks']) for ex in (train_data + dev_data))) == ARBITRARY)
        assert all(((filter_clashed_by_priority(ex['chunks'], allow_level=ARBITRARY) == ex['chunks']) for ex in (train_data + dev_data)))

    def test_cmeie(self):
        """CMeIE (Chinese, char-tokenized, joint entities/relations): counts."""
        io = JsonIO(is_tokenized=False, tokenize_callback='char', text_key='text', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end', relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail', encoding='utf-8')
        train_data = io.read('data/cblue/CMeIE/CMeIE_train_vz.json')
        dev_data = io.read('data/cblue/CMeIE/CMeIE_dev_vz.json')
        test_data = io.read('data/cblue/CMeIE/CMeIE_test_vz.json')
        assert (len(train_data) == 14339)
        assert (sum((len(ex['chunks']) for ex in train_data)) == 57880)
        assert (sum((len(ex['relations']) for ex in train_data)) == 43629)
        assert (len(dev_data) == 3585)
        assert (sum((len(ex['chunks']) for ex in dev_data)) == 14167)
        assert (sum((len(ex['relations']) for ex in dev_data)) == 10613)
        assert (len(test_data) == 4482)
class Inference():
    """Face-recognition inference pipeline: detect/align a face, then embed it."""

    def __init__(self, model: str, checkpoint: str, det_model: str, det_checkpoint: str) -> None:
        """Build the embedding model and the detector/aligner.

        Args:
            model: Name of the model class to instantiate (evaluated by name).
            checkpoint: Path to the embedding model weights.
            det_model: Detector model identifier passed to FaceDetectAlign.
            det_checkpoint: Detector weights passed to FaceDetectAlign.
        """
        self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        # HACK: the class is resolved with eval() on the `model` string —
        # only safe when `model` comes from trusted configuration, never
        # from user input. 112 is the input resolution the model expects.
        self.model = eval(model)(112)
        # strict=False tolerates missing/extra keys in the checkpoint.
        self.model.load_state_dict(torch.load(checkpoint, map_location='cpu'), strict=False)
        self.model = self.model.to(self.device)
        self.model.eval()
        self.align = FaceDetectAlign(det_model, det_checkpoint)
        # Scale uint8 [0,255] -> [0,1], normalize to [-1,1], add batch dim.
        self.preprocess = T.Compose([T.Lambda((lambda x: (x / 255))), T.Normalize([0.5], [0.5]), T.Lambda((lambda x: x.unsqueeze(0)))])

    def __call__(self, img_path: str):
        """Return the embedding (CPU tensor) of the first face found in the image."""
        # [0][0]: first face of the first returned group — assumes at least
        # one face is detected; raises IndexError otherwise.
        face = self.align.detect_and_align_faces(img_path, (112, 112))[0][0]
        # HWC -> CHW before preprocessing.
        pface = self.preprocess(face.permute(2, 0, 1)).to(self.device)
        with torch.inference_mode():
            feature = self.model(pface)
        return feature.detach().cpu()
def register_Ns3MmWaveMacCschedSapUserCschedUeReleaseCnfParameters_methods(root_module, cls):
    """Register Python-binding constructors and attributes for
    ns3::MmWaveMacCschedSapUser::CschedUeReleaseCnfParameters.

    Generated PyBindGen-style registration code: adds the default and copy
    constructors plus the struct's public member attributes.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::MmWaveMacCschedSapUser::CschedUeReleaseCnfParameters const &', 'arg0')])
    cls.add_instance_attribute('m_result', 'ns3::Result_e', is_const=False)
    cls.add_instance_attribute('m_rnti', 'uint16_t', is_const=False)
    cls.add_instance_attribute('m_vendorSpecificList', 'std::vector< ns3::VendorSpecificListElement_s >', is_const=False)
    return
def test_toarrow_NumpyArray_2():
    """to_arrow on a 2-D NumpyArray yields a pyarrow Array with equal values."""
    values = [[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]]
    layout = ak.contents.NumpyArray(np.array(values))
    arrow_array = layout.to_arrow()
    assert isinstance(arrow_array, pyarrow.lib.Array)
    assert arrow_array.to_pylist() == values
def test_get_last_mutatable_statement_max(test_case_chromosome_with_test):
    """With exactly one statement, the last mutatable statement index is 0."""
    chromosome, test_case = test_case_chromosome_with_test
    statement = IntPrimitiveStatement(test_case, 5)
    test_case.add_statement(statement)
    assert chromosome.get_last_mutatable_statement() == 0
def add_log_to_file(log_path):
    """Attach a file handler to the module LOGGER.

    Uses the module-level _LOG_FMT / _DATE_FMT format strings so file output
    matches the existing console format.

    Args:
        log_path: Path of the log file to append to.
    """
    fh = logging.FileHandler(log_path)
    formatter = logging.Formatter(_LOG_FMT, datefmt=_DATE_FMT)
    fh.setFormatter(formatter)
    LOGGER.addHandler(fh)
class DModel(nn.Module):
    """Small MLP: 2 -> 32 -> 64 -> 32 -> 2 with ReLU between linear layers."""

    def __init__(self, opt):
        super(DModel, self).__init__()
        self.opt = opt
        layers = [
            nn.Linear(2, 32), nn.ReLU(),
            nn.Linear(32, 64), nn.ReLU(),
            nn.Linear(64, 32), nn.ReLU(),
            nn.Linear(32, 2),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, data):
        """Apply the MLP; *data* must have 2 features in its last dimension."""
        return self.fc(data)
def adjust_sigmoid(image, cutoff=0.5, gain=10, inv=False):
    """Apply sigmoid (or inverse sigmoid) contrast adjustment to an image.

    Output is ``1 / (1 + exp(gain * (cutoff - I)))`` on the image rescaled
    to [0, 1], then scaled back to the input dtype's range.

    Args:
        image: Input image; must be non-negative.
        cutoff: Center of the sigmoid on the normalized intensity axis.
        gain: Steepness of the sigmoid.
        inv: If True, return the negative (inverted) sigmoid correction.

    Returns:
        Adjusted image with the same dtype as the input.
    """
    _assert_non_negative(image)
    dtype = image.dtype.type
    # Full dynamic range of the input dtype (clipped to image range).
    scale = float((dtype_limits(image, True)[1] - dtype_limits(image, True)[0]))
    if inv:
        out = ((1 - (1 / (1 + np.exp((gain * (cutoff - (image / scale))))))) * scale)
        # NOTE(review): this branch converts via dtype(out) while the normal
        # branch uses out.astype(dtype) — results should match for ndarray
        # inputs, but the asymmetry looks accidental; confirm upstream.
        return dtype(out)
    out = ((1 / (1 + np.exp((gain * (cutoff - (image / scale)))))) * scale)
    return out.astype(dtype)
def resize(in_dict, cfg):
    """Resize the 'img' and 'mask' PIL images in *in_dict* in place.

    The image is resized bilinearly to (cfg.width, cfg.height); the mask is
    resized with nearest-neighbor to (cfg.width_mask, cfg.height_mask) and
    re-wrapped as an 8-bit grayscale ('L') PIL image.
    """
    in_dict['img'] = Image.fromarray(
        cv2.resize(np.array(in_dict['img']), (cfg.width, cfg.height),
                   interpolation=cv2.INTER_LINEAR))
    # Fixed: cv2.resize's third positional parameter is `dst`, not
    # `interpolation`, so INTER_NEAREST was being passed as the output
    # buffer and the mask was resized with the default (bilinear)
    # interpolation — wrong for label masks. Pass it by keyword.
    in_dict['mask'] = Image.fromarray(
        cv2.resize(np.array(in_dict['mask']), (cfg.width_mask, cfg.height_mask),
                   interpolation=cv2.INTER_NEAREST), mode='L')
def test_action_space_0():
    """Without message bits, the action space is one Discrete(len(Action))
    per agent, and sampling from it produces a steppable action."""
    env = Warehouse(shelf_columns=1, column_height=3, shelf_rows=3, n_agents=2, msg_bits=0, sensor_range=1, request_queue_size=5, max_inactivity_steps=None, max_steps=None, reward_type=RewardType.GLOBAL)
    env.reset()
    # Tuple of n_agents identical Discrete spaces.
    assert (env.action_space == spaces.Tuple((2 * (spaces.Discrete(len(Action)),))))
    env.step(env.action_space.sample())
def test_open_api_verbose_name(openapi_30):
    """An OpenAPI 3.0.0 schema reports the expected verbose name and version."""
    assert (openapi_30.verbose_name == 'Open API 3.0.0')
    assert (openapi_30.spec_version == '3.0.0')
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if (len(batch) == batch_size):
(yield batch)
batch = []
if (len(batch) > 0):
(yield batch) |
def _check_pickleable(obj):
    """Raise (via pickle) if *obj* contains anything that cannot be pickled.

    Containers are walked recursively; leaves that are known-safe (basic
    scalars, numpy/torch tensors, and "persistent" objects per the
    module's `is_persistent` helper) are replaced with None so only the
    questionable leaves are actually pickled.
    """
    def recurse(obj):
        if isinstance(obj, (list, tuple, set)):
            return [recurse(x) for x in obj]
        if isinstance(obj, dict):
            # Keep key/value pairs but recurse into both.
            return [[recurse(x), recurse(y)] for (x, y) in obj.items()]
        if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
            return None
        # Checked by qualified name to avoid importing numpy/torch here.
        if (f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']):
            return None
        # Presumably objects using the project's persistence mechanism are
        # pickleable by construction — confirm against is_persistent's docs.
        if is_persistent(obj):
            return None
        return obj
    with io.BytesIO() as f:
        # Any unpicklable leaf makes pickle.dump raise here.
        pickle.dump(recurse(obj), f)
def eval_ndcg_at_k(inference_model, device, df_valid, valid_loader, batch_size, k_list, gain_type, phase='Eval'):
    """Evaluate mean NDCG@k over validation queries for each k in *k_list*.

    Scores every query batch with the model, groups results per query id,
    and averages per-query NDCG@k, skipping queries whose ideal DCG is 0
    (no relevant documents).

    Args:
        inference_model: Ranking model exposing `to_eval()` and `forward`.
        device: Torch device to score on.
        df_valid: Validation dataframe consumed by the loader.
        valid_loader: Loader yielding (qid, rel, features) query batches.
        batch_size: Batch size for query generation.
        k_list: Cutoffs k at which to compute NDCG.
        gain_type: Gain function name forwarded to the NDCG metric.
        phase: Label used in the printed summary.

    Returns:
        Dict mapping k -> mean NDCG@k across scorable queries.
    """
    ndcg_metrics = {k: NDCG(k, gain_type) for k in k_list}
    (qids, rels, scores) = ([], [], [])
    inference_model.to_eval()
    with torch.no_grad():
        for (qid, rel, x) in valid_loader.generate_query_batch(df_valid, batch_size):
            # Skip empty batches defensively.
            if ((x is None) or (x.shape[0] == 0)):
                continue
            (_, y_tensor) = inference_model.forward(torch.Tensor(x).to(device))
            scores.append(y_tensor.cpu().numpy().squeeze())
            qids.append(qid)
            rels.append(rel)
    qids = np.hstack(qids)
    rels = np.hstack(rels)
    scores = np.hstack(scores)
    result_df = pd.DataFrame({'qid': qids, 'rel': rels, 'score': scores})
    session_ndcgs = defaultdict(list)
    for qid in result_df.qid.unique():
        # Rank this query's documents by predicted score, best first.
        result_qid = result_df[(result_df.qid == qid)].sort_values('score', ascending=False)
        rel_rank = result_qid.rel.values
        for (k, ndcg) in ndcg_metrics.items():
            # Queries with zero ideal DCG are not scorable at this k.
            if (ndcg.maxDCG(rel_rank) == 0):
                continue
            ndcg_k = ndcg.evaluate(rel_rank)
            if (not np.isnan(ndcg_k)):
                session_ndcgs[k].append(ndcg_k)
    ndcg_result = {k: np.mean(session_ndcgs[k]) for k in k_list}
    ndcg_result_print = ', '.join(['{}: {:.5f}'.format(k, ndcg_result[k]) for k in k_list])
    print(get_time(), '{} Phase evaluate {}'.format(phase, ndcg_result_print))
    return ndcg_result
def GetNodeInDegV_PNGraph(Graph, NIdInDegV):
    """Thin wrapper over the SNAP C extension: fill *NIdInDegV* with
    (node id, in-degree) pairs for every node of the directed graph."""
    return _snap.GetNodeInDegV_PNGraph(Graph, NIdInDegV)
class DataDimLoops(util.ContentHashClass):
    """An ordered, deduplicated set of LoopEnum values tied to a data dimension."""

    def __init__(self, *lpe_list):
        for lpe in lpe_list:
            if lpe not in range(le.NUM):
                raise ValueError('DataDimLoops: arguments must be LoopEnum.')
        # Canonical form: sorted tuple without duplicates.
        self.lpe_tuple = tuple(sorted(set(lpe_list)))

    def loops(self):
        """Return the loop enums as a sorted tuple."""
        return self.lpe_tuple

    def take(self, lpe_indexed):
        """Select from *lpe_indexed* the entries at this instance's loop enums."""
        return [lpe_indexed[index] for index in self.lpe_tuple]

    def drop(self, lpe_indexed):
        """Select from *lpe_indexed* the entries at all other loop enums."""
        kept = set(self.lpe_tuple)
        return [lpe_indexed[index] for index in range(le.NUM) if index not in kept]

    def __repr__(self):
        args = ', '.join(repr(lpe) for lpe in self.lpe_tuple)
        return '{}({})'.format(self.__class__.__name__, args)
class Optimizer():
    """Wraps a torch optimizer plus a StepLR scheduler, configured from an
    argparse-style options dict."""

    @staticmethod
    def init_parser(parser: argparse.ArgumentParser):
        """Register the optimization command-line arguments on *parser*.

        Declared static (it never used an instance); `Optimizer.init_parser(p)`
        keeps working exactly as before.
        """
        parser_group = parser.add_argument_group('Optimization')
        parser_group.add_argument('-o', '--optimizer', default='Adam', type=str, help="The optimizer class, 'torch.optim.XXX'")
        parser_group.add_argument('-lr', default=0.01, type=float, help='The learning rate')
        parser_group.add_argument('-i', '--epochs', default=50, type=int, help='The number of iterations in the training')
        parser_group.add_argument('--lr_gamma', type=float, default=0.5, help='The learning rate decays every lrStep-epochs by this factor')
        parser_group.add_argument('--lr_step', type=int, default=500, help='The learning rate decays every lrStep-epochs (this parameter) by lrGamma factor')
        parser_group.add_argument('--optim_params', default='{}', type=str, help='Additional optimizer parameters parsed as json')

    def __init__(self, opt: dict, parameters, dtype, device):
        """Build the optimizer named by opt['optimizer'] over *parameters*.

        `dtype` and `device` are accepted for interface compatibility and
        currently unused.  opt['optim_params'] is a JSON string of extra
        keyword arguments; opt['lr'] always overrides any lr given there.
        """
        self._opt = opt
        self._optimizer_class = getattr(torch.optim, opt['optimizer'])
        self._optimizer_parameters = json.loads(opt['optim_params'])
        self._optimizer_parameters['lr'] = opt['lr']
        self._optimizer = self._optimizer_class(parameters, **self._optimizer_parameters)
        self._scheduler = torch.optim.lr_scheduler.StepLR(self._optimizer, opt['lr_step'], opt['lr_gamma'])
        self._num_epochs = opt['epochs']

    def reset(self, parameters):
        """Recreate optimizer and scheduler for a new parameter set."""
        self._optimizer = self._optimizer_class(parameters, **self._optimizer_parameters)
        self._scheduler = torch.optim.lr_scheduler.StepLR(self._optimizer, self._opt['lr_step'], self._opt['lr_gamma'])

    def num_epochs(self):
        """Return the configured number of training epochs."""
        return self._num_epochs

    def zero_grad(self):
        self._optimizer.zero_grad()

    def step(self, closure):
        self._optimizer.step(closure)

    def post_epoch(self):
        """Advance the LR scheduler; call once per epoch."""
        self._scheduler.step()

    def get_lr(self):
        """Return the scheduler's current learning rate(s) as a list."""
        return self._scheduler.get_last_lr()
def check_tolerance(ftol, xtol, gtol, method):
    """Normalize and validate the three optimization tolerances.

    None becomes 0; a sub-epsilon tolerance triggers a user warning.  For
    method 'lm' every tolerance must be at least machine epsilon, otherwise
    at least one of them must be.

    Returns:
        Tuple (ftol, xtol, gtol) of normalized tolerances.
    """
    def check(tol, name):
        if tol is None:
            return 0
        if tol < EPS:
            warn(f'Setting `{name}` below the machine epsilon ({EPS:.2e}) effectively disables the corresponding termination condition.', stacklevel=3)
        return tol

    ftol = check(ftol, 'ftol')
    xtol = check(xtol, 'xtol')
    gtol = check(gtol, 'gtol')

    any_disabled = ftol < EPS or xtol < EPS or gtol < EPS
    all_disabled = ftol < EPS and xtol < EPS and gtol < EPS
    if method == 'lm' and any_disabled:
        raise ValueError(f"All tolerances must be higher than machine epsilon ({EPS:.2e}) for method 'lm'.")
    if all_disabled:
        raise ValueError(f'At least one of the tolerances must be higher than machine epsilon ({EPS:.2e}).')
    return (ftol, xtol, gtol)
class MapRelativeToAbsoluteNumberField(NumberFieldIsomorphism):
    """Isomorphism from a relative number field R to its absolutization A."""

    def __init__(self, R, A):
        """Set up the morphism with domain R (relative) and codomain A (absolute)."""
        NumberFieldIsomorphism.__init__(self, Hom(R, A))

    def _call_(self, x):
        """Map *x* by re-interpreting its underlying polynomial over A.

        Presumably both fields share the same defining polynomial basis so
        the coefficient vector carries over directly — confirm against the
        inverse map's implementation.
        """
        A = self.codomain()
        f = x.polynomial()
        return A._element_class(A, f)
def get_data_augmentation_with_wikisql_tag(args):
    """Return the tag 'wikisql.' when WikiSQL augmentation is enabled, else ''."""
    if args.augment_with_wikisql:
        return 'wikisql.'
    return ''
def get_generic_path_information(paths, stat_prefix=''):
    """Aggregate diagnostic statistics over a list of rollout paths.

    Args:
        paths: List of dicts each containing at least 'rewards' and
            'actions' arrays (and optionally 'q_preds').
        stat_prefix: Prefix prepended to every statistic name.

    Returns:
        OrderedDict of summary statistics (rewards, returns, actions,
        optional Q-value diagnostics, path count, average return).
    """
    statistics = OrderedDict()
    returns = [sum(path['rewards']) for path in paths]
    rewards = np.vstack([path['rewards'] for path in paths])
    # Q-value diagnostics only when the sampler recorded predictions.
    if ('q_preds' in paths[0]):
        (q_preds, q_trues, q_pred_true_gaps) = get_q_pred_true_gaps(paths)
        statistics.update(create_stats_ordered_dict('Q pred', q_preds, stat_prefix=stat_prefix))
        statistics.update(create_stats_ordered_dict('Q true', q_trues, stat_prefix=stat_prefix))
        statistics.update(create_stats_ordered_dict('Q pred-true gap', q_pred_true_gaps, stat_prefix=stat_prefix))
    statistics.update(create_stats_ordered_dict('Rewards', rewards, stat_prefix=stat_prefix))
    statistics.update(create_stats_ordered_dict('Returns', returns, stat_prefix=stat_prefix))
    actions = [path['actions'] for path in paths]
    # 1-D actions (discrete) concatenate flat; otherwise stack row-wise.
    if (len(actions[0].shape) == 1):
        actions = np.hstack([path['actions'] for path in paths])
    else:
        actions = np.vstack([path['actions'] for path in paths])
    statistics.update(create_stats_ordered_dict('Actions', actions, stat_prefix=stat_prefix))
    statistics['Num Paths'] = len(paths)
    statistics[(stat_prefix + 'Average Returns')] = get_average_returns(paths)
    return statistics
def get_just_x_or_y_train_dev_dataset(just, DATA_DIR, **kw):
    """Build GLUE train/dev datasets reduced to inputs, labels, or selected keys,
    plus a hook that wires GLUE evaluation into a trainer.

    Args:
        just: ``'x'`` (inputs only), ``'y'`` (labels only), or a list of keys
            handled by ``make_just_by_ds``.
        DATA_DIR: root directory containing the per-task GLUE data folders.
        **kw: must supply ``tokenizer``, ``task_name``, ``max_seq_length``,
            ``overwrite_cache`` and ``precompute_attention_mask``; may supply
            ``is_last_partition``.

    Returns:
        ``(train_ds, dev_ds, set_eval)`` where ``set_eval(trainer)`` installs the
        GLUE loss and an ``evaluate_glue`` method on the trainer's statistics.

    Raises:
        NotImplementedError: if ``just`` is neither 'x', 'y', nor a list.
    """
    tokenizer = kw['tokenizer']
    task_name = kw['task_name']
    max_seq_length = kw['max_seq_length']
    overwrite_cache = kw['overwrite_cache']
    is_last_partition = kw.get('is_last_partition')
    precompute_attention_mask = kw['precompute_attention_mask']
    data_dir = os.path.join(DATA_DIR, TASK_NAME_TO_DATA_DIR[task_name])
    args = GlueDataTrainingArguments(task_name=task_name, data_dir=data_dir, max_seq_length=max_seq_length, overwrite_cache=overwrite_cache)
    print('-I- creating datasets...')
    train_ds = GlueDataset(args, tokenizer, mode='train')
    dev_ds = GlueDataset(args, tokenizer, mode='dev')
    # Choose the projection applied to every example.
    if just == 'x':
        just_f = make_just_x
    elif just == 'y':
        just_f = make_just_y
    elif isinstance(just, list):
        just_f = make_just_by_ds
    else:
        raise NotImplementedError()
    train_ds = just_f(train_ds, just=just, precompute_attention_mask=precompute_attention_mask, is_last_partition=is_last_partition)
    dev_ds = just_f(dev_ds, just=just, precompute_attention_mask=precompute_attention_mask, is_last_partition=is_last_partition)
    print('-I- done creating datasets')
    partial_evaluate = build_compute_metrics_fn(task_name)
    num_labels = glue_tasks_num_labels[task_name]

    def evaluate_glue(self):
        """Compute GLUE metrics from accumulated predictions; bound onto trainer.statistics."""
        global_step = self.fit_res.num_epochs
        print('Evaluating Glue on CPU')
        predictions = torch.cat(self.predictions, dim=0).cpu().numpy()
        label_ids = torch.cat(self.label_ids, dim=0).cpu().numpy()
        self.predictions.clear()
        self.label_ids.clear()
        ep = EvalPrediction(predictions, label_ids)
        result = partial_evaluate(ep)
        try:
            print(result)
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallows
            # SystemExit/KeyboardInterrupt; only printing failures are expected here.
            print('evaluate_glue: failed to print result')
        if not hasattr(self.fit_res, 'glue_results'):
            self.fit_res.glue_results = dict()
        # NOTE(review): ``getitem`` is presumably a project helper that unwraps the
        # metrics dict (not operator.getitem, which needs two args) — confirm.
        self.fit_res.glue_results[global_step] = getitem(result)

    def set_eval(trainer):
        # Install the GLUE loss and bind evaluate_glue onto the trainer's statistics.
        trainer.loss_fn = GlueLoss(num_labels)
        trainer.statistics.evaluate_glue = types.MethodType(evaluate_glue, trainer.statistics)
        trainer.statistics.set_glue_task(task_name)

    return (train_ds, dev_ds, set_eval)
def _get_type_candidates(context: MutationContext, schema: Schema) -> set[str]:
    """Return the JSON types the schema could be mutated to.

    Starts from a fixed pool of type names (a narrower pool for path
    locations), removes the schema's current types, and drops 'number'
    when 'integer' is already present.
    """
    current = set(get_type(schema))
    pool = {'string', 'integer', 'number', 'boolean', 'null'}
    if not context.is_path_location:
        # Non-path locations additionally allow structured types.
        pool |= {'object', 'array'}
    candidates = pool - current
    if 'integer' in current:
        # Presumably because integer values also satisfy 'number', so that
        # mutation would not change validity — confirm against the validator.
        candidates.discard('number')
    return candidates
def agg_dict_list(dict_list):
    """Aggregate a list of per-run metric dicts into one dict of mean/std values.

    The 'epoch' entry of the first dict is carried over unchanged; every other
    key yields ``key`` (mean) and ``key_std`` (standard deviation), rounded to
    ``cfg.round`` decimals.

    Args:
        dict_list: non-empty list of dicts sharing the same keys.

    Returns:
        dict with 'epoch' plus mean/std entries per metric key.
    """
    dict_agg = {'epoch': dict_list[0]['epoch']}
    for key in dict_list[0]:
        if key != 'epoch':
            # Bug fix: the loop variable used to be named ``dict``, shadowing the builtin.
            values = np.array([entry[key] for entry in dict_list])
            dict_agg[key] = np.mean(values).round(cfg.round)
            dict_agg['{}_std'.format(key)] = np.std(values).round(cfg.round)
    return dict_agg
class SelfConsciousDialogueTeacher(FixedDialogTeacher):
    """Fixed-dialog teacher serving ConvAI2 or DNLI evaluation episodes,
    optionally augmented with distractor personas ('world_cardinality')."""

    def __init__(self, opt, shared=None):
        """Load episodes from disk, or reuse them from ``shared`` when forked."""
        super().__init__(opt, shared)
        self.opt = opt
        (datapath, datatype) = _path(opt)
        if (not shared):
            self.episodes = []
            self.num_exs = 0
            self._setup_data(datapath, datatype)
        else:
            # Forked copies share the already-loaded episodes.
            self.episodes = shared['episodes']
            self.num_exs = sum((len(e) for e in self.episodes))
        self.id = 'self_conscious_dialogue'
        self.reset()

    # NOTE(review): takes no ``self`` — this is likely meant to be a
    # @staticmethod (decorator possibly lost); confirm against ParlAI usage.
    def add_cmdline_args(argparser):
        """Register teacher-specific command-line arguments."""
        agent = argparser.add_argument_group('Self Conscious Dialogue Teacher arguments')
        agent.add_argument('--eval-type', type=str, choices=['convai2', 'dnli'], default='dnli', help='Which validation data to use')

    def _setup_data(self, path, datatype):
        """Populate ``self.episodes``/``self.num_exs`` for the selected eval type,
        then attach distractor personas when world_cardinality > 0."""
        # Fixed seed so distractor-persona sampling is reproducible across runs.
        random.seed(46)
        print(f'[Loading ParlAI text data: {path}]')
        convai2_datapath = make_path(self.opt, f'{datatype}_both_original.txt')
        convai2_episodes = self._load_convai2_data(convai2_datapath)
        (all_personas, persona_to_idx) = self._get_persona_pool(self.opt)
        # NOTE(review): sorted_personas is computed but not used below — confirm
        # whether it is needed here or can be removed.
        sorted_personas = self._get_sorted_persona_pool(datatype)
        if (self.opt['eval_type'] == 'convai2'):
            # Parse the ParlAI text file into episodes of messages.
            self.episodes = []
            self.num_exs = 0
            eps = []
            with open(path) as read:
                for line in read:
                    msg = str_to_msg(line.rstrip('\n'))
                    if msg:
                        self.num_exs += 1
                        eps.append(msg)
                        if msg.get('episode_done', False):
                            self.episodes.append(eps)
                            eps = []
            if (len(eps) > 0):
                # Close a trailing episode that never saw an episode_done flag.
                eps[(- 1)].force_set('episode_done', True)
                self.episodes.append(eps)
            # Enrich each turn with candidates and split personas from the
            # parallel 'both_original' ConvAI2 data.
            for (episode_idx, episode) in enumerate(self.episodes):
                for (turn_idx, turn) in enumerate(episode):
                    convai2_turn = convai2_episodes[episode_idx][turn_idx]
                    convai2_text = convai2_turn[0]
                    label_candidates = convai2_turn[3]
                    turn['label_candidates'] = label_candidates
                    if (turn_idx == 0):
                        (my_persona, partner_persona, _) = _split_personas_and_context(convai2_text)
                        turn['partner_persona'] = partner_persona
                        turn['my_persona'] = my_persona
                    else:
                        # Later turns inherit the personas from the first turn.
                        turn['partner_persona'] = episode[0]['partner_persona']
                        turn['my_persona'] = episode[0]['my_persona']
        elif (self.opt['eval_type'] == 'dnli'):
            # Each DNLI jsonl line becomes a single-turn episode.
            self.episodes = []
            self.num_exs = 0
            for eval_set in ['attributes', 'havenot', 'likedislike']:
                datapath = make_path(self.opt, f'{datatype}_{eval_set}.jsonl')
                with open(datapath, 'r') as fp:
                    for line in fp:
                        msg = json.loads(line)
                        msg['eval_set'] = eval_set
                        msg['episode_done'] = True
                        persona_lines = [f'your persona: {x[:(- 2)]}.' for x in msg['persona']]
                        utts = msg['prefix']
                        (p1_token, p2_token) = (TorchAgent.P1_TOKEN, TorchAgent.P2_TOKEN)
                        lines = persona_lines
                        # Alternate speaker tokens: even turns are P1, odd are P2.
                        for (i, utt) in enumerate(utts):
                            if ((i % 2) == 0):
                                lines.append(f'{p1_token} {utt}')
                            else:
                                lines.append(f'{p2_token} {utt}')
                        text = '\n'.join(lines)
                        msg['text'] = text
                        cands = msg['candidates']
                        # Candidate list = gold label + 10 each of negative/similar/random.
                        msg['label_candidates'] = (((cands['label'] + cands['neg'][:10]) + cands['similar'][:10]) + cands['rand'][:10])
                        del msg['persona']
                        del msg['prefix']
                        del msg['triple']
                        del msg['relevant_persona_sentence']
                        del msg['candidates']
                        self.episodes.append([msg])
                        self.num_exs += 1
        if (self.opt['world_cardinality'] > 0):
            # Attach (world_cardinality - 1) distractor personas per episode,
            # resampling until the ground-truth persona is excluded.
            num_all_personas = len(all_personas)
            persona_indices = list(range(num_all_personas))
            world_cardinality = self.opt['world_cardinality']
            for episode in self.episodes:
                (gt_persona, first_context) = _split_persona_and_context(episode[0]['text'], self.opt['eval_type'])
                gt_persona_idx = persona_to_idx.get(gt_persona, (- 1))
                distractor_indices = random.sample(persona_indices, (world_cardinality - 1))
                while (gt_persona_idx in distractor_indices):
                    distractor_indices = random.sample(persona_indices, (world_cardinality - 1))
                distractor_personas = itemgetter(*distractor_indices)(all_personas)
                distractor_personas = list(distractor_personas)
                for (turn_idx, turn) in enumerate(episode):
                    if (turn_idx == 0):
                        # First turn: prepend each candidate persona to the context.
                        turn['distractor_text'] = ['\n'.join([persona, first_context]) for persona in ([gt_persona] + distractor_personas)]
                    else:
                        turn['distractor_text'] = ([turn['text']] * world_cardinality)

    def _get_persona_pool(self, opt, remove_duplicate=True):
        """Build the pool of training personas and a persona -> index mapping.

        With ``remove_duplicate`` the pool is collapsed via a precomputed
        persona map, keeping the longest variant of each persona group.
        """
        print('[loading persona pool from convai2 training data]')
        datapath = make_path(opt, 'train.txt')
        episodes = []
        eps = []
        with open(datapath) as read:
            for line in read:
                msg = str_to_msg(line.rstrip('\n'))
                if msg:
                    eps.append(msg)
                    if msg.get('episode_done', False):
                        episodes.append(eps)
                        eps = []
        if (len(eps) > 0):
            eps[(- 1)].force_set('episode_done', True)
            episodes.append(eps)
        persona_set = OrderedSet()
        for episode in episodes:
            first_turn = episode[0]
            text = first_turn['text']
            (persona, _) = _split_persona_and_context(text)
            persona_set.add(persona)
        if remove_duplicate:
            # Collapse near-duplicate personas using the precomputed grouping,
            # keeping the longest phrasing in each group.
            train_persona_fname = os.path.join(__PATH__, 'train_persona_map.pkl')
            with open(train_persona_fname, 'rb') as fp:
                _train_personas = pickle.load(fp)
            train_personas = []
            for personas in _train_personas.values():
                longest_idx = 0
                longest_length = (- 1)
                for (idx, persona) in enumerate(personas):
                    if (len(persona) > longest_length):
                        longest_idx = idx
                        longest_length = len(persona)
                selected_persona = map((lambda x: f'your persona: {x}.'), personas[longest_idx])
                selected_persona = '\n'.join(selected_persona)
                train_personas.append(selected_persona)
            persona_set = OrderedSet()
            for train_persona in train_personas:
                persona_set.add(train_persona)
        all_personas = []
        persona_to_idx = {}
        for (i, persona) in enumerate(persona_set):
            all_personas.append(persona)
            persona_to_idx[persona] = i
        print(f'Total {len(all_personas)} personas in dataset')
        return (all_personas, persona_to_idx)

    def _get_sorted_persona_pool(self, datatype):
        """Load the precomputed sorted persona pool for the active eval type and
        derive idx2persona / persona2idx lookups."""
        print('[loading sorted persona pool from convai2 training data]')
        eval_type = self.opt['eval_type']
        if (eval_type == 'convai2'):
            datapath = make_path(self.opt, 'valid_sorted_50_personas.json')
        elif (eval_type == 'dnli'):
            datapath = make_path(self.opt, 'dnli_sorted_50_personas.json')
        else:
            raise ValueError('eval_set must be one of convai2 and dnli')
        with open(datapath, 'r') as fp:
            sorted_personas = json.load(fp)
        sorted_personas['idx2persona'] = sorted_personas['train_personas']
        sorted_personas['persona2idx'] = {}
        for (idx, persona) in enumerate(sorted_personas['train_personas']):
            sorted_personas['persona2idx'][persona] = idx
        return sorted_personas

    def _load_convai2_data(self, datapath):
        """Read the parallel ConvAI2 file via the deprecated Fb dialog loader."""
        # FbDeprecatedDialogTeacher.setup_data reads self.cloze; fake it temporarily.
        self.cloze = False
        convai2_dataloader = FbDeprecatedDialogTeacher.setup_data(self, datapath)
        convai2_episodes = []
        for episode in DialogData._read_episode(self, convai2_dataloader):
            convai2_episodes.append(episode)
        del self.cloze
        return convai2_episodes

    def share(self):
        """Expose loaded episodes to forked teacher copies."""
        shared = super().share()
        shared['episodes'] = self.episodes
        return shared

    def num_examples(self):
        """Total number of examples across all episodes."""
        return self.num_exs

    def num_episodes(self):
        """Number of loaded episodes."""
        return len(self.episodes)

    def get(self, episode_idx, entry_idx=None):
        """Return the message at (episode_idx, entry_idx)."""
        # NOTE(review): entry_idx=None would raise here; callers appear to
        # always pass an index — confirm.
        return self.episodes[episode_idx][entry_idx]
def tensor2depth(input_depth, imtype=np.int32):
    """Convert a single-channel depth tensor (first batch item) to a 2-D numpy
    array of ``imtype``.

    Non-tensor inputs are returned unchanged.
    """
    if not isinstance(input_depth, torch.Tensor):
        return input_depth
    raw = input_depth.data[0].cpu().float().numpy()
    height, width = raw.shape[1], raw.shape[2]
    return raw.reshape((height, width)).astype(imtype)
# NOTE(review): the bare name below looks like a mangled decorator — most
# likely ``@ti.func`` was stripped to ``_func`` during extraction; confirm
# against the original source. As written this line raises NameError.
_func
def sample3(qf: ti.types.ndarray(ndim=2), u: int, v: int) -> vec3:
    # Delegate to the shared sampling implementation at integer coordinates (u, v).
    return sample_impl(qf, u, v)
def run_tests():
    """Build (via waf) and run the ns-3 test suites and examples.

    Driven entirely by the module-level ``options`` and waf-configuration
    globals. Builds the requested targets, queues one Job per selected test
    suite / C++ example / Python example onto a pool of worker threads,
    aggregates their statuses into an XML results file (optionally translated
    to HTML/text), and prints a summary.

    Returns:
        0 when every non-skipped test passed, 1 otherwise, or waf's return
        code if the build step fails. Exits the process (status 2) when the
        build-status file is missing or an unknown suite is requested.
    """
    read_waf_config()
    # Binaries carry a suffix for non-release build profiles.
    global BUILD_PROFILE_SUFFIX
    if (BUILD_PROFILE == 'release'):
        BUILD_PROFILE_SUFFIX = ''
    else:
        BUILD_PROFILE_SUFFIX = ('-' + BUILD_PROFILE)
    test_runner_name = ('%s%s-%s%s' % (APPNAME, VERSION, 'test-runner', BUILD_PROFILE_SUFFIX))
    # Unless suppressed, (re)build only the targets this invocation needs.
    if (not options.nowaf):
        if (options.kinds or options.list or (len(options.constrain) and (options.constrain in core_kinds))):
            if (sys.platform == 'win32'):
                waf_cmd = (sys.executable + ' waf --target=test-runner')
            else:
                waf_cmd = (sys.executable + ' waf --target=test-runner')
        elif len(options.example):
            if (sys.platform == 'win32'):
                waf_cmd = (sys.executable + (' waf --target=%s' % os.path.basename(options.example)))
            else:
                waf_cmd = (sys.executable + (' waf --target=%s' % os.path.basename(options.example)))
        elif (sys.platform == 'win32'):
            waf_cmd = (sys.executable + ' waf')
        else:
            waf_cmd = (sys.executable + ' waf')
        if options.verbose:
            print(('Building: %s' % waf_cmd))
        proc = subprocess.Popen(waf_cmd, shell=True)
        proc.communicate()
        if proc.returncode:
            print('Waf died. Not running tests', file=sys.stderr)
            return proc.returncode
    make_paths()
    # The build step records what is runnable; without it we cannot proceed.
    build_status_file = os.path.join(NS3_BUILDDIR, 'build-status.py')
    if os.path.exists(build_status_file):
        ns3_runnable_programs = get_list_from_file(build_status_file, 'ns3_runnable_programs')
        ns3_runnable_scripts = get_list_from_file(build_status_file, 'ns3_runnable_scripts')
    else:
        print('The build status file was not found. You must do waf build before running test.py.', file=sys.stderr)
        sys.exit(2)
    ns3_runnable_programs_dictionary = {}
    for program in ns3_runnable_programs:
        program_name = os.path.basename(program)
        ns3_runnable_programs_dictionary[program_name] = program
    # Gather examples-to-run metadata from top-level, src/ and contrib/ modules.
    example_tests = []
    example_names_original = []
    python_tests = []
    for directory in EXAMPLE_DIRECTORIES:
        example_directory = os.path.join('examples', directory)
        examples_to_run_path = os.path.join(example_directory, 'examples-to-run.py')
        cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
        python_script_dir = os.path.join(example_directory)
        parse_examples_to_run_file(examples_to_run_path, cpp_executable_dir, python_script_dir, example_tests, example_names_original, python_tests)
    for module in NS3_ENABLED_MODULES:
        module = module[len('ns3-'):]
        module_directory = os.path.join('src', module)
        example_directory = os.path.join(module_directory, 'examples')
        examples_to_run_path = os.path.join(module_directory, 'test', 'examples-to-run.py')
        cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
        python_script_dir = os.path.join(example_directory)
        parse_examples_to_run_file(examples_to_run_path, cpp_executable_dir, python_script_dir, example_tests, example_names_original, python_tests)
    for module in NS3_ENABLED_CONTRIBUTED_MODULES:
        module = module[len('ns3-'):]
        module_directory = os.path.join('contrib', module)
        example_directory = os.path.join(module_directory, 'examples')
        examples_to_run_path = os.path.join(module_directory, 'test', 'examples-to-run.py')
        cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
        python_script_dir = os.path.join(example_directory)
        parse_examples_to_run_file(examples_to_run_path, cpp_executable_dir, python_script_dir, example_tests, example_names_original, python_tests)
    # Keep test output deterministic regardless of the user's environment.
    os.environ['NS_LOG'] = ''
    if options.kinds:
        path_cmd = os.path.join('utils', (test_runner_name + ' --print-test-type-list'))
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        print(standard_out.decode())
    if options.list:
        if len(options.constrain):
            path_cmd = os.path.join('utils', (test_runner_name + (' --print-test-name-list --print-test-types --test-type=%s' % options.constrain)))
        else:
            path_cmd = os.path.join('utils', (test_runner_name + ' --print-test-name-list --print-test-types'))
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if (rc != 0):
            print('test.py error: test-runner return code returned {}'.format(rc))
            print('To debug, try running {}\n'.format('\'./waf --run "test-runner --print-test-name-list"\''))
            return
        if isinstance(standard_out, bytes):
            standard_out = standard_out.decode()
        list_items = standard_out.split('\n')
        list_items.sort()
        print('Test Type Test Name')
        print(' ')
        for item in list_items:
            if len(item.strip()):
                print(item)
        example_names_original.sort()
        for item in example_names_original:
            print('example ', item)
        print()
    if (options.kinds or options.list):
        return
    # Create a timestamped output directory and the XML report skeleton.
    date_and_time = time.strftime('%Y-%m-%d-%H-%M-%S-CUT', time.gmtime())
    if (not os.path.exists(TMP_OUTPUT_DIR)):
        os.makedirs(TMP_OUTPUT_DIR)
    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time)
    if (not os.path.exists(testpy_output_dir)):
        os.makedirs(testpy_output_dir)
    xml_results_file = os.path.join(testpy_output_dir, 'results.xml')
    f = open(xml_results_file, 'w')
    f.write('<?xml version="1.0"?>\n')
    f.write('<Results>\n')
    f.close()
    # Determine which suites to run: a single named suite, a constrained set,
    # or everything (nothing, when only examples were requested).
    single_suite = False
    if len(options.suite):
        path_cmd = os.path.join('utils', (test_runner_name + ' --print-test-name-list'))
        (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if isinstance(suites, bytes):
            suites = suites.decode()
        if (options.suite in suites.split('\n')):
            suites = (options.suite + '\n')
            single_suite = True
        else:
            print('The test suite was not run because an unknown test suite name was requested.', file=sys.stderr)
            sys.exit(2)
    elif ((len(options.example) == 0) and (len(options.pyexample) == 0)):
        if len(options.constrain):
            path_cmd = os.path.join('utils', (test_runner_name + (' --print-test-name-list --test-type=%s' % options.constrain)))
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        else:
            path_cmd = os.path.join('utils', (test_runner_name + ' --print-test-name-list'))
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
    else:
        suites = ''
    if isinstance(suites, bytes):
        suites = suites.decode()
    suite_list = suites.split('\n')
    # Performance tests only run when explicitly constrained to 'performance'.
    if ((not single_suite) and (options.constrain != 'performance')):
        path_cmd = os.path.join('utils', (test_runner_name + (' --print-test-name-list --test-type=%s' % 'performance')))
        (rc, performance_tests, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if isinstance(performance_tests, bytes):
            performance_tests = performance_tests.decode()
        performance_test_list = performance_tests.split('\n')
        for performance_test in performance_test_list:
            if (performance_test in suite_list):
                suite_list.remove(performance_test)
    # Spin up one worker thread per processor.
    input_queue = queue.Queue(0)
    output_queue = queue.Queue(0)
    jobs = 0
    threads = []
    processors = 1
    if (sys.platform != 'win32'):
        if ('SC_NPROCESSORS_ONLN' in os.sysconf_names):
            processors = os.sysconf('SC_NPROCESSORS_ONLN')
        else:
            # Fall back to sysctl (e.g. on BSD/macOS without that sysconf name).
            proc = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (stdout_results, stderr_results) = proc.communicate()
            stdout_results = stdout_results.decode()
            stderr_results = stderr_results.decode()
            if (len(stderr_results) == 0):
                processors = int(stdout_results)
    for i in range(processors):
        thread = worker_thread(input_queue, output_queue)
        threads.append(thread)
        thread.start()
    # Queue the selected test suites.
    total_tests = 0
    skipped_tests = 0
    skipped_testnames = []
    for test in suite_list:
        test = test.strip()
        if len(test):
            job = Job()
            job.set_is_example(False)
            job.set_is_pyexample(False)
            job.set_display_name(test)
            job.set_tmp_file_name(os.path.join(testpy_output_dir, ('%s.xml' % test)))
            job.set_cwd(os.getcwd())
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            if options.multiple:
                multiple = ''
            else:
                multiple = ' --stop-on-failure'
            if len(options.fullness):
                fullness = options.fullness.upper()
                fullness = (' --fullness=%s' % fullness)
            else:
                fullness = ' --fullness=QUICK'
            path_cmd = os.path.join('utils', (test_runner_name + (' --test-name=%s%s%s' % (test, multiple, fullness))))
            job.set_shell_command(path_cmd)
            if (options.valgrind and (test in core_valgrind_skip_tests)):
                job.set_is_skip(True)
            if ((not NSC_ENABLED) and (test in core_nsc_missing_skip_tests)):
                job.set_is_skip(True)
            if options.verbose:
                print(('Queue %s' % test))
            input_queue.put(job)
            jobs = (jobs + 1)
            total_tests = (total_tests + 1)
    # Queue C++ examples: either all runnable ones, or the single one requested.
    if ((len(options.suite) == 0) and (len(options.example) == 0) and (len(options.pyexample) == 0)):
        if ((len(options.constrain) == 0) or (options.constrain == 'example')):
            if ENABLE_EXAMPLES:
                for (name, test, do_run, do_valgrind_run) in example_tests:
                    test_name = test.split(' ', 1)[0]
                    test_name = os.path.basename(test_name)
                    if (test_name in ns3_runnable_programs_dictionary):
                        if eval(do_run):
                            job = Job()
                            job.set_is_example(True)
                            job.set_is_pyexample(False)
                            job.set_display_name(name)
                            job.set_tmp_file_name('')
                            job.set_cwd(testpy_output_dir)
                            job.set_basedir(os.getcwd())
                            job.set_tempdir(testpy_output_dir)
                            job.set_shell_command(test)
                            job.set_build_path(options.buildpath)
                            if (options.valgrind and (not eval(do_valgrind_run))):
                                job.set_is_skip(True)
                            if options.verbose:
                                print(('Queue %s' % test))
                            input_queue.put(job)
                            jobs = (jobs + 1)
                            total_tests = (total_tests + 1)
    elif len(options.example):
        example_name = ('%s%s-%s%s' % (APPNAME, VERSION, options.example, BUILD_PROFILE_SUFFIX))
        if (example_name not in ns3_runnable_programs_dictionary):
            print(('Example %s is not runnable.' % example_name))
        else:
            example_path = ns3_runnable_programs_dictionary[example_name]
            example_path = os.path.abspath(example_path)
            job = Job()
            job.set_is_example(True)
            job.set_is_pyexample(False)
            job.set_display_name(example_path)
            job.set_tmp_file_name('')
            job.set_cwd(testpy_output_dir)
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            job.set_shell_command(example_path)
            job.set_build_path(options.buildpath)
            if options.verbose:
                print(('Queue %s' % example_name))
            input_queue.put(job)
            jobs = (jobs + 1)
            total_tests = (total_tests + 1)
    # Queue Python examples analogously.
    if ((len(options.suite) == 0) and (len(options.example) == 0) and (len(options.pyexample) == 0)):
        if ((len(options.constrain) == 0) or (options.constrain == 'pyexample')):
            if ENABLE_EXAMPLES:
                for (test, do_run) in python_tests:
                    test_name = test.split(' ', 1)[0]
                    test_name = os.path.basename(test_name)
                    if (test_name in ns3_runnable_scripts):
                        if eval(do_run):
                            job = Job()
                            job.set_is_example(False)
                            job.set_is_pyexample(True)
                            job.set_display_name(test)
                            job.set_tmp_file_name('')
                            job.set_cwd(testpy_output_dir)
                            job.set_basedir(os.getcwd())
                            job.set_tempdir(testpy_output_dir)
                            job.set_shell_command(test)
                            job.set_build_path('')
                            # Python examples cannot run under valgrind or without bindings.
                            if options.valgrind:
                                job.set_is_skip(True)
                            if (not ENABLE_PYTHON_BINDINGS):
                                job.set_is_skip(True)
                            if options.verbose:
                                print(('Queue %s' % test))
                            input_queue.put(job)
                            jobs = (jobs + 1)
                            total_tests = (total_tests + 1)
    elif len(options.pyexample):
        example_name = os.path.basename(options.pyexample)
        if (example_name not in ns3_runnable_scripts):
            print(('Example %s is not runnable.' % example_name))
        else:
            job = Job()
            job.set_is_pyexample(True)
            job.set_display_name(options.pyexample)
            job.set_tmp_file_name('')
            job.set_cwd(testpy_output_dir)
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            job.set_shell_command(options.pyexample)
            job.set_build_path('')
            if options.verbose:
                print(('Queue %s' % options.pyexample))
            input_queue.put(job)
            jobs = (jobs + 1)
            total_tests = (total_tests + 1)
    # One sentinel 'break' job per worker so every thread terminates.
    for i in range(processors):
        job = Job()
        job.set_is_break(True)
        input_queue.put(job)
    # Drain the output queue, tallying statuses and appending to the XML report.
    passed_tests = 0
    failed_tests = 0
    failed_testnames = []
    crashed_tests = 0
    crashed_testnames = []
    valgrind_errors = 0
    valgrind_testnames = []
    for i in range(jobs):
        job = output_queue.get()
        if job.is_break:
            continue
        if (job.is_example or job.is_pyexample):
            kind = 'Example'
        else:
            kind = 'TestSuite'
        # Return-code convention: 0 pass, 1 fail, 2 valgrind error, other crash.
        if job.is_skip:
            status = 'SKIP'
            skipped_tests = (skipped_tests + 1)
            skipped_testnames.append(job.display_name)
        elif (job.returncode == 0):
            status = 'PASS'
            passed_tests = (passed_tests + 1)
        elif (job.returncode == 1):
            failed_tests = (failed_tests + 1)
            failed_testnames.append(job.display_name)
            status = 'FAIL'
        elif (job.returncode == 2):
            valgrind_errors = (valgrind_errors + 1)
            valgrind_testnames.append(job.display_name)
            status = 'VALGR'
        else:
            crashed_tests = (crashed_tests + 1)
            crashed_testnames.append(job.display_name)
            status = 'CRASH'
        if (options.duration or (options.constrain == 'performance')):
            print(('%s (%.3f): %s %s' % (status, job.elapsed_time, kind, job.display_name)))
        else:
            print(('%s: %s %s' % (status, kind, job.display_name)))
        if (job.is_example or job.is_pyexample):
            f = open(xml_results_file, 'a')
            f.write('<Example>\n')
            example_name = (' <Name>%s</Name>\n' % job.display_name)
            f.write(example_name)
            if (status == 'PASS'):
                f.write(' <Result>PASS</Result>\n')
            elif (status == 'FAIL'):
                f.write(' <Result>FAIL</Result>\n')
            elif (status == 'VALGR'):
                f.write(' <Result>VALGR</Result>\n')
            elif (status == 'SKIP'):
                f.write(' <Result>SKIP</Result>\n')
            else:
                f.write(' <Result>CRASH</Result>\n')
            f.write((' <Time real="%.3f"/>\n' % job.elapsed_time))
            f.write('</Example>\n')
            f.close()
        elif job.is_skip:
            f = open(xml_results_file, 'a')
            f.write('<Test>\n')
            f.write((' <Name>%s</Name>\n' % job.display_name))
            f.write(' <Result>SKIP</Result>\n')
            f.write('</Test>\n')
            f.close()
        elif ((job.returncode == 0) or (job.returncode == 1) or (job.returncode == 2)):
            # Suites write their own XML fragment; splice it into the report.
            f_to = open(xml_results_file, 'a')
            f_from = open(job.tmp_file_name)
            f_to.write(f_from.read())
            f_to.close()
            f_from.close()
        else:
            f = open(xml_results_file, 'a')
            f.write('<Test>\n')
            f.write((' <Name>%s</Name>\n' % job.display_name))
            # Bug fix: closing tag was '</Suite>', producing malformed XML.
            f.write(' <Result>CRASH</Result>\n')
            f.write('</Test>\n')
            f.close()
        if (job.returncode == 2):
            f = open(xml_results_file, 'a')
            f.write('<Test>\n')
            f.write((' <Name>%s</Name>\n' % job.display_name))
            f.write(' <Result>VALGR</Result>\n')
            f.write('</Test>\n')
            f.close()
    for thread in threads:
        thread.join()
    f = open(xml_results_file, 'a')
    f.write('</Results>\n')
    f.close()
    # Summarize, emit optional HTML/text/XML copies, and clean up.
    print(('%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)' % (passed_tests, total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors)))
    if skipped_testnames:
        skipped_testnames.sort()
        print(('List of SKIPped tests:\n %s' % '\n '.join(map(str, skipped_testnames))))
    if failed_testnames:
        failed_testnames.sort()
        print(('List of FAILed tests:\n %s' % '\n '.join(map(str, failed_testnames))))
    if crashed_testnames:
        crashed_testnames.sort()
        print(('List of CRASHed tests:\n %s' % '\n '.join(map(str, crashed_testnames))))
    if valgrind_testnames:
        valgrind_testnames.sort()
        print(('List of VALGR failures:\n %s' % '\n '.join(map(str, valgrind_testnames))))
    if len(options.html):
        translate_to_html(xml_results_file, options.html)
    if len(options.text):
        translate_to_text(xml_results_file, options.text)
    if len(options.xml):
        shutil.copyfile(xml_results_file, options.xml)
    if ((not ENABLE_TESTS) or (not ENABLE_EXAMPLES)):
        print()
        if (not ENABLE_TESTS):
            print('*** Note: ns-3 tests are currently disabled. Enable them by adding')
            print('*** "--enable-tests" to ./waf configure or modifying your .ns3rc file.')
            print()
        if (not ENABLE_EXAMPLES):
            print('*** Note: ns-3 examples are currently disabled. Enable them by adding')
            print('*** "--enable-examples" to ./waf configure or modifying your .ns3rc file.')
            print()
    if (options.valgrind and (not VALGRIND_FOUND)):
        print()
        print('*** Note: you are trying to use valgrind, but valgrind could not be found')
        print('*** on your machine. All tests and examples will crash or be skipped.')
        print()
    if (not options.retain):
        shutil.rmtree(testpy_output_dir)
    if ((passed_tests + skipped_tests) == total_tests):
        return 0
    else:
        return 1
def process(passageIDs, response):
    """Format passage ids and their score lists as tab-separated lines.

    Each output line is ``<passage_id>\\t<score> <score> ... \\n`` (note the
    trailing space after every score). ``response[i]`` holds the scores for
    ``passageIDs[i]``.
    """
    rows = []
    for idx, passage_id in enumerate(passageIDs):
        scores = ''.join('{} '.format(value) for value in response[idx])
        rows.append('{}\t{}\n'.format(passage_id, scores))
    return ''.join(rows)
class ConstructorStatement(ParametrizedStatement):
    """Statement that instantiates an object by calling one of its constructors."""

    def clone(self, test_case: tc.TestCase, memo: dict[(vr.VariableReference, vr.VariableReference)]) -> Statement:
        """Return a copy of this statement bound to ``test_case``, remapping
        argument variable references via ``memo``."""
        return ConstructorStatement(test_case, self.accessible_object(), self._clone_args(memo))

    def accept(self, visitor: StatementVisitor) -> None:
        """Dispatch to the visitor's constructor-statement handler."""
        visitor.visit_constructor_statement(self)

    def accessible_object(self) -> gao.GenericConstructor:
        """Return the constructor this statement invokes."""
        return cast(gao.GenericConstructor, self._generic_callable)

    def __repr__(self) -> str:
        # Bug fix: the closing parenthesis of 'ConstructorStatement(...)' was missing.
        return f'ConstructorStatement({self._test_case}, {self._generic_callable}(args={self._args}))'

    def __str__(self) -> str:
        return (f'{self._generic_callable}(args={self._args})' + '-> None')
def res2net101_v1b_26w_4s(pretrained=False, **kwargs):
    """Construct a Res2Net-101 v1b model (base width 26, scale 4).

    Args:
        pretrained: when True, load the published pretrained weights.
        **kwargs: forwarded to the ``Res2Net`` constructor.

    Returns:
        The constructed ``Res2Net`` model.
    """
    net = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['res2net101_v1b_26w_4s'])
        net.load_state_dict(weights)
    return net
class InvalidSDFGNodeError(InvalidSDFGError):
    """Validation error pinpointing a specific node inside an SDFG state."""

    def __init__(self, message: str, sdfg: 'SDFG', state_id: int, node_id: int):
        self.message = message
        self.sdfg = sdfg
        self.state_id = state_id
        self.node_id = node_id
        # May be set later to the path of an SDFG file saved for inspection.
        self.path = None

    def to_json(self):
        """Serialize the message and error location to a JSON-ready dict."""
        return {
            'message': self.message,
            'sdfg_id': self.sdfg.sdfg_id,
            'state_id': self.state_id,
            'node_id': self.node_id,
        }

    def __str__(self):
        state = self.sdfg.node(self.state_id)
        if self.node_id is None:
            nodestr = ''
            locinfo = self._getlineinfo(state)
        else:
            from dace.sdfg.nodes import Node
            node: Node = state.node(self.node_id)
            nodestr = f', node {node}'
            locinfo = self._getlineinfo(node)
        if locinfo:
            locinfo = '\nOriginating from source code at ' + locinfo
        if self.path:
            locinfo += f'\nInvalid SDFG saved for inspection in {os.path.abspath(self.path)}'
        return f'{self.message} (at state {state.label}{nodestr}){locinfo}'
class BrokenPicklingConjugateGradientOptimizer(ConjugateGradientOptimizer):
    """Variant whose pickled state is deliberately empty.

    Bug fix: the two ``state`` methods were defined without decorators, so the
    second definition silently overwrote the first. The call to
    ``ConjugateGradientOptimizer.state.fset`` shows the parent attribute is a
    property, so this was clearly a property/setter pair whose decorators were
    lost — restored below.
    """

    @property
    def state(self):
        # Report no state, breaking round-trip pickling on purpose.
        return dict()

    @state.setter
    def state(self, state):
        # Delegate actual state restoration to the parent property's setter.
        ConjugateGradientOptimizer.state.fset(self, state)
# NOTE(review): the bare name below looks like a mangled decorator — likely
# ``@register_model`` was stripped to ``_model`` during extraction; confirm
# against the original source. As written this line raises NameError.
_model
def ens_adv_inception_resnet_v2(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build an ensemble-adversarially-trained Inception-ResNet-v2 model,
    optionally loading its pretrained weights per the default config."""
    default_cfg = default_cfgs['ens_adv_inception_resnet_v2']
    model = InceptionResnetV2(num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
class BackpackGpt2Embeddings(eqx.Module):
    """Token + learned position embeddings (with dropout) for a Backpack GPT-2
    model; the token table is also reused to unembed back to vocab logits."""

    # Static (non-trainable) configuration fields.
    Vocab: Axis = eqx.static_field()
    config: Gpt2Config = eqx.static_field()
    # Learned parameters.
    token_embeddings: NamedArray
    position_embeddings: NamedArray
    dropout: hnn.Dropout

    # NOTE(review): no ``self`` parameter — likely meant to be a @staticmethod
    # (decorator possibly lost); confirm against the original source.
    def init(Vocab: Axis, config: Gpt2Config, *, key) -> 'BackpackGpt2Embeddings':
        """Randomly initialize the embedding tables and dropout from ``key``."""
        (k_wte, k_wpe, k_out) = jrandom.split(key, 3)
        token_embeddings = (hax.random.normal(k_wte, (Vocab, config.Embed)) * config.initializer_range)
        # Position table uses half the token initializer scale.
        position_embeddings = (hax.random.normal(k_wpe, (config.Pos, config.Embed)) * (config.initializer_range / 2))
        dropout = hnn.Dropout(pdrop=config.embed_pdrop)
        return BackpackGpt2Embeddings(Vocab, config, token_embeddings, position_embeddings, dropout)

    # NOTE(review): the bare '_call' below looks like a mangled decorator
    # (possibly @hax.named_call) stripped during extraction — confirm.
    _call
    def embed_input_ids(self, input_ids: NamedArray) -> NamedArray:
        """Look up token embeddings only (no positions, no dropout)."""
        return self.token_embeddings.take('vocab', input_ids)

    # NOTE(review): same mangled-decorator suspicion as above.
    _call
    def embed(self, input_ids, *, key):
        """Token + position embeddings followed by dropout."""
        input_embeds = self.token_embeddings.take('vocab', input_ids)
        position_embeds = self.position_embeddings
        # Slice the position table to the current sequence length.
        input_len = input_ids.resolve_axis('position').size
        x = (input_embeds + position_embeds[('position', hax.dslice(0, input_len))])
        x = self.dropout(x, key=key)
        return x

    def unembed(self, x: NamedArray):
        """Project hidden states back to vocabulary logits via the tied token table."""
        return hax.dot('embed', x, self.token_embeddings)

    def _state_dict_key_map(self) -> Dict[(str, Optional[str])]:
        """Map our field names to the HF GPT-2 checkpoint key names."""
        return {'token_embeddings': 'wte.weight', 'position_embeddings': 'wpe.weight'}

    def resize_embeddings(self, new_size: int, key: Optional[PRNGKeyArray]=None):
        """Return a copy with the vocab axis resized to ``new_size`` (new rows
        initialized from ``key``)."""
        new_weights = hax.tree_util.resize_axis(self.token_embeddings, self.Vocab, new_size, key=key)
        return dataclasses.replace(self, Vocab=self.Vocab.resize(new_size), token_embeddings=new_weights)
class TestGridworld(unittest.TestCase):
def setUp(self):
self.base_mdp = OvercookedGridworld.from_layout_name('mdp_test', **{'cook_time': 5, 'start_order_list': ['onion', 'any']})
def test_constructor_invalid_inputs(self):
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid(['X', 'X', 'X'])
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid([['X', 'X', 'X']])
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid(['XOSX', 'P D', ' 21 '])
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid(['XXPXX', 'O 2XX', 'X1 3 X', 'XDXSXX'])
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid(['XXPXX', 'O 3O', 'X1 X', 'XDXSX'])
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid(['XXPXX', 'O 1O', 'X1 X', 'XDXSX'])
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid(['XBPXX', 'O 2O', 'X1 X', 'XDXSX'])
def test_start_positions(self):
expected_start_state = OvercookedState([PlayerState((1, 2), Direction.NORTH), PlayerState((3, 1), Direction.NORTH)], {}, order_list=['onion', 'any'])
actual_start_state = self.base_mdp.get_standard_start_state()
self.assertEqual(actual_start_state, expected_start_state, ((('\n' + str(actual_start_state)) + '\n') + str(expected_start_state)))
def test_file_constructor(self):
mdp = OvercookedGridworld.from_layout_name('corridor')
expected_start_state = OvercookedState([PlayerState((3, 1), Direction.NORTH), PlayerState((10, 1), Direction.NORTH)], {}, order_list=None)
actual_start_state = mdp.get_standard_start_state()
self.assertEqual(actual_start_state, expected_start_state, ((('\n' + str(actual_start_state)) + '\n') + str(expected_start_state)))
def test_actions(self):
bad_state = OvercookedState([PlayerState((0, 0), Direction.NORTH), PlayerState((3, 1), Direction.NORTH)], {}, order_list=['any'])
with self.assertRaises(AssertionError):
self.base_mdp.get_actions(bad_state)
self.assertEqual(self.base_mdp.get_actions(self.base_mdp.get_standard_start_state()), [Action.ALL_ACTIONS, Action.ALL_ACTIONS])
def test_transitions_and_environment(self):
    """MDP transitions and env steps agree, and invalid states are rejected."""
    invalid_state = OvercookedState([P((0, 0), s), P((3, 1), s)], {}, order_list=[])
    with self.assertRaises(AssertionError):
        self.base_mdp.get_state_transition(invalid_state, stay)

    env = OvercookedEnv(self.base_mdp)
    env.state.order_list = ['onion', 'any']

    def check_transition(action, expected_state, expected_reward=0):
        # The MDP's predicted transition must match the env's actual step.
        state = env.state
        pred_state, sparse_reward, _dense_reward = self.base_mdp.get_state_transition(state, action)
        self.assertEqual(pred_state, expected_state, f'\n{pred_state}\n{expected_state}')
        new_state, sparse_reward, _, _ = env.step(action)
        self.assertEqual(new_state, expected_state)
        self.assertEqual(sparse_reward, expected_reward)

    check_transition(
        [n, e],
        OvercookedState([P((1, 1), n), P((3, 1), e)], {}, order_list=['onion', 'any']),
    )
def test_common_mdp_jsons(self):
    """Every shared trajectory-test JSON loads and passes trajectory checks."""
    for test_json_path in iterate_over_files_in_dir('../common_tests/trajectory_tests/'):
        trajectory = AgentEvaluator.load_traj_from_json(test_json_path)
        try:
            AgentEvaluator.check_trajectories(trajectory)
        except AssertionError as e:
            # Surface the offending file name in the test failure.
            self.fail('File {} failed with error:\n{}'.format(test_json_path, e))
def test_four_player_mdp(self):
    """Loading a >2-player layout may fail, but only with an AssertionError.

    Any other exception type would propagate and fail the test.
    """
    try:
        OvercookedGridworld.from_layout_name('multiplayer_schelling')
    except AssertionError as e:
        # Best-effort: multi-player support may not be implemented; just log it.
        print('Loading > 2 player map failed with error:', e)
def test_ufuncs_on_records_1439_without_warning():
    """np.absolute dispatches to the behavior registered for 'Overload' records."""
    def record_magnitude(rec):
        # |(x, y)| = sqrt(x^2 + y^2)
        return np.sqrt(rec.x ** 2 + rec.y ** 2)

    # Map (ufunc, record name) -> overload implementation.
    behavior = {(np.absolute, 'Overload'): record_magnitude}
    array = ak.Array(
        [
            [{'x': 4, 'y': 3}, {'x': 6, 'y': 8}, {'x': 5, 'y': 12}],
            [],
            [{'x': 9, 'y': 12}, {'x': 15, 'y': 20}],
        ],
        with_name='Overload',
        behavior=behavior,
    )
    assert np.absolute(array).tolist() == [[5.0, 10.0, 13.0], [], [15.0, 25.0]]
def test_validator_combine_objectives_bad_obj_results():
    """combine_objectives rejects None, non-list, and non-numeric obj_results."""
    v = Validator(model, dataloader, metrics, objectives)
    bad_inputs = [
        (None, 'Argument: obj_results must be set.'),
        ('Results', 'Argument: obj_results must be a list.'),
        ([1, 2.5, 'number'], 'All elements of argument: obj_results must be of type int or float.'),
    ]
    for obj_results, message in bad_inputs:
        with pytest.raises(TypeError, match=message):
            v.combine_objectives(obj_results, alphas, max_normalization)
class WindowsLibtorchConfigNode(ConfigNode):
    """Config-tree node keyed by a libtorch configuration variant."""

    def __init__(self, parent, libtorch_config_variant):
        # The node label encodes the variant, e.g. "LIBTORCH_CONFIG_VARIANT=debug".
        label = 'LIBTORCH_CONFIG_VARIANT=' + str(libtorch_config_variant)
        super(WindowsLibtorchConfigNode, self).__init__(parent, label)
        self.props['libtorch_config_variant'] = libtorch_config_variant

    def get_children(self):
        """Return one ArchConfigNode child per configured GPU version."""
        children = []
        for gpu_version in self.find_prop('gpu_versions'):
            children.append(ArchConfigNode(self, gpu_version))
        return children
def test_fields_in_90pct_credible_region(bench, random_fields, random_sky_map):
    """Benchmark counting distinct fields overlapping the sky map's 90% credible region."""
    # Running cumulative probability over tiles, densest first.
    cum_prob = (
        sa.func.sum(SkymapTile.probdensity * SkymapTile.hpx.area)
        .over(order_by=SkymapTile.probdensity.desc())
        .label('cum_prob')
    )
    ranked_tiles = (
        sa.select(SkymapTile.probdensity, cum_prob)
        .filter(SkymapTile.id == 1)
        .subquery()
    )
    # Density threshold of the 90% credible region.
    min_probdensity = (
        sa.select(sa.func.min(ranked_tiles.columns.probdensity))
        .filter(ranked_tiles.columns.cum_prob <= 0.9)
        .scalar_subquery()
    )
    query = sa.select(sa.func.count(FieldTile.id.distinct())).filter(
        SkymapTile.hpx.overlaps(FieldTile.hpx),
        SkymapTile.probdensity >= min_probdensity,
    )
    bench(query)
class SkewPolynomialRing_finite_field(SkewPolynomialRing_finite_order):
    """Skew polynomial ring whose base ring is a finite field.

    Adds a retraction map (a linear projection onto the fixed subfield of the
    twisting morphism) used by element arithmetic.
    """

    def __init__(self, base_ring, morphism, derivation, names, sparse, category=None):
        """Construct the ring and lazily bind the concrete element class."""
        if self.Element is None:
            # Import here to avoid a circular import at module load time.
            import sage.rings.polynomial.skew_polynomial_finite_field
            self.Element = sage.rings.polynomial.skew_polynomial_finite_field.SkewPolynomial_finite_field_dense
        SkewPolynomialRing_finite_order.__init__(self, base_ring, morphism, derivation, names, sparse, category)
        # Built on demand by _new_retraction_map.
        self._matrix_retraction = None

    def _new_retraction_map(self, seed=None):
        """Build a (random, seed-dependent) retraction matrix onto the constants.

        seed -- element of the base ring used to randomize the map; a random
        element is drawn when it is None.
        """
        k = self.base_ring()
        section = self._embed_constants.section()
        if seed is None:
            seed = k.random_element()
        self._seed_retraction = seed
        # For each basis element seed * gen^i, record the trace over the orbit
        # of the twisting morphism (orbit length self._order).
        trace = []
        elt = seed
        for _ in range(k.degree()):
            x = elt
            tr = elt
            for _ in range(1, self._order):
                x = self._morphism(x)
                tr += x
            elt *= k.gen()
            trace.append(section(tr))
        self._matrix_retraction = MatrixSpace(self._constants, 1, k.degree())(trace)

    def _retraction(self, x, newmap=False, seed=None):
        """Apply the retraction map to x, rebuilding the map if requested.

        Bug fix: the caller-supplied seed was previously ignored — the map was
        rebuilt with a fresh random seed. It is now forwarded, so passing
        seed=None preserves the old behavior exactly.
        """
        if newmap or (seed is not None) or (self._matrix_retraction is None):
            self._new_retraction_map(seed)
        return (self._matrix_retraction * self.base_ring()(x)._vector_())[0]
def test_counting_with_frequentist_calculator():
    """Discovery on the counting loss with frequentist toys stays below 2 sigma."""
    loss, n_sig = create_loss_counting()
    calculator = FrequentistCalculator(loss, Minuit(), ntoysnull=1000)
    null_poi = POI(n_sig, 0)
    # Only the significance matters for this check; the p-value is discarded.
    _pnull, significance = Discovery(calculator, null_poi).result()
    assert significance < 2
class Printer(Visitor, Text):
    """Printer combining the Visitor and Text base-class behaviors."""

    def __init__(self, factor_prefixes=False, c2_syntax=True):
        # NOTE(review): super(Visitor, self).__init__() starts the MRO lookup
        # *after* Visitor, and super(Text, self).__init__() after Text. With
        # MRO [Printer, Visitor, Text, object] this runs Text.__init__ and then
        # object.__init__ — Visitor.__init__ is never called. Confirm against
        # the base classes whether that is intentional before changing.
        super(Visitor, self).__init__()
        super(Text, self).__init__()
        # Whether to factor common prefixes in the printed output — presumably;
        # verify against the printing methods (not visible here).
        self.factor_prefixes = factor_prefixes
        # Whether to emit C2-style syntax.
        self.c2_syntax = c2_syntax
        # Net name for C2 output; left unset until populated elsewhere.
        self.c2_net_name = None
class CondConvResidual(InvertedResidual):
    """Inverted residual block with CondConv (conditionally parameterized conv) routing.

    A per-sample routing head produces mixing weights over ``num_experts``
    expert kernels, which are passed to each routed convolution.
    """

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='', noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=tf.keras.layers.ReLU, norm_layer=tf.keras.layers.BatchNormalization, se_layer=None, num_experts=0, drop_path_rate=0.0, name=None):
        self.num_experts = num_experts
        # The parent builds the routed convolutions; num_experts reaches them
        # through conv_kwargs.
        conv_kwargs = dict(num_experts=self.num_experts)
        super(CondConvResidual, self).__init__(in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, group_size=group_size, pad_type=pad_type, act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs, drop_path_rate=drop_path_rate)
        # Linear routing head: one logit per expert.
        self.routing_fn = tf.keras.layers.Dense(self.num_experts)

    def __call__(self, x):
        # NOTE(review): this method mixes torch APIs (F.adaptive_avg_pool2d,
        # torch.sigmoid) with the tf.keras layers built in __init__ — it looks
        # like a partially ported block; confirm which framework is intended.
        shortcut = x
        # Global average pool -> per-sample expert routing weights in (0, 1).
        pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
        # Pointwise expansion, depthwise, squeeze-excite, pointwise-linear —
        # each routed conv mixes expert kernels by routing_weights.
        x = self.conv_pw(x, routing_weights)
        x = self.bn1(x)
        x = self.conv_dw(x, routing_weights)
        x = self.bn2(x)
        x = self.se(x)
        x = self.conv_pwl(x, routing_weights)
        x = self.bn3(x)
        if self.has_skip:
            # Residual connection with stochastic depth on the transformed path.
            x = (self.drop_path(x) + shortcut)
        return x
def violin(df_dfc):
    """Horizontal violin plot of the top-10 features by mean absolute contribution."""
    fig, ax = plt.subplots(1, 1, figsize=(15, 9))
    mean_abs_contrib = df_dfc.abs().mean()
    top_n = 10
    # Ascending order so the largest contributor ends up at the top of the axis.
    top_features = mean_abs_contrib.abs().sort_values()[-top_n:].index
    positions = np.arange(len(top_features))
    parts = ax.violinplot(
        [df_dfc[feature] for feature in top_features],
        vert=False,
        showextrema=False,
        showmeans=False,
        showmedians=False,
        widths=0.7,
        positions=positions,
    )
    plt.setp(parts['bodies'], facecolor='darkblue', edgecolor='black')
    ax.set_yticks(positions)
    ax.set_yticklabels(top_features, size=16)
    ax.set_xlabel('Contribution to predicted probability', size=18)
    # Vertical grid lines only.
    ax.grid(False, axis='y')
    ax.grid(True, axis='x')
def getObjsFromPrepositions(deps):
    """Collect object tokens hanging off prepositional dependents.

    For every dependent that is an adposition with dep label 'prep', gather its
    right children whose dependency label is in OBJECTS.
    """
    return [
        tok
        for dep in deps
        if dep.pos_ == 'ADP' and dep.dep_ == 'prep'
        for tok in dep.rights
        if tok.dep_ in OBJECTS
    ]
class SymforceCCSymTest(TestCase):
    """Tests for the cc_sym Python bindings (Key, Values, Factor, Optimizer).

    Fix: the helper functions defined without ``self`` (``pi_residual``,
    ``sparse_pi_residual``, ``compare_linearizations``,
    ``compare_optimization_stats``) are now decorated with ``@staticmethod``.
    Without the decorator, bound calls such as
    ``self.compare_optimization_stats(a, b)`` pass the instance as an extra
    positional argument and raise TypeError at runtime.
    """

    def test_key(self) -> None:
        """Exercise the cc_sym.Key bindings: fields, equality, ordering, pickling."""
        with self.subTest(msg='static member fields were wrapped'):
            self.assertIsInstance(cc_sym.Key.INVALID_LETTER, str)
            self.assertIsInstance(cc_sym.Key.INVALID_SUB, int)
            self.assertIsInstance(cc_sym.Key.INVALID_SUPER, int)
        with self.subTest(msg='Two keys with the same fields are equal'):
            self.assertEqual(cc_sym.Key('a'), cc_sym.Key('a'))
            self.assertEqual(cc_sym.Key('a', 1), cc_sym.Key('a', 1))
            self.assertEqual(cc_sym.Key('a', 1, 2), cc_sym.Key('a', 1, 2))
        with self.subTest(msg='Two keys with different fields are not equal'):
            self.assertNotEqual(cc_sym.Key('a'), cc_sym.Key('b'))
        with self.subTest(msg='A key is not equal to instances of other types'):
            self.assertNotEqual(cc_sym.Key('a'), 1)
            self.assertNotEqual('a', cc_sym.Key('a'))
        with self.subTest(msg='A key can be specified with keyword arguments'):
            self.assertEqual(cc_sym.Key('a'), cc_sym.Key(letter='a'))
            self.assertEqual(cc_sym.Key('a', 1), cc_sym.Key(letter='a', sub=1))
            self.assertEqual(cc_sym.Key('a', 1, 2), cc_sym.Key(letter='a', sub=1, super=2))
        with self.subTest(msg='Accessors correctly return the fields'):
            key = cc_sym.Key('a', 1, 2)
            self.assertEqual(key.letter, 'a')
            self.assertEqual(key.sub, 1)
            self.assertEqual(key.super, 2)
        with self.subTest(msg='Method with_letter works as intended'):
            key = cc_sym.Key(letter='a', sub=1, super=2)
            new_letter = 'b'
            new_key = key.with_letter(letter=new_letter)
            self.assertEqual(new_key.letter, new_letter)
            self.assertEqual(new_key.sub, key.sub)
            self.assertEqual(new_key.super, key.super)
        with self.subTest(msg='Method with_sub works as intended'):
            key = cc_sym.Key(letter='a', sub=1, super=2)
            new_sub = 3
            new_key = key.with_sub(sub=new_sub)
            self.assertEqual(new_key.letter, key.letter)
            self.assertEqual(new_key.sub, new_sub)
            self.assertEqual(new_key.super, key.super)
        with self.subTest(msg='Method with_super works as intended'):
            key = cc_sym.Key(letter='a', sub=1, super=2)
            new_super = 4
            new_key = key.with_super(super=new_super)
            self.assertEqual(new_key.letter, key.letter)
            self.assertEqual(new_key.sub, key.sub)
            self.assertEqual(new_key.super, new_super)
        # Samples of every (letter,), (letter, sub), (letter, sub, super) combo.
        letter_sub_super_samples: T.List[T.Union[(T.Tuple[str], T.Tuple[(str, int)], T.Tuple[(str, int, int)])]] = []
        for letter in ['a', 'b']:
            letter_sub_super_samples.append((letter,))
            for sub in [1, 2]:
                letter_sub_super_samples.append((letter, sub))
                for sup in [3, 4]:
                    letter_sub_super_samples.append((letter, sub, sup))
        with self.subTest(msg='inequality operators match that of tuples'):
            for tuple1 in letter_sub_super_samples:
                for tuple2 in letter_sub_super_samples:
                    self.assertEqual(cc_sym.Key(*tuple1).lexical_less_than(cc_sym.Key(*tuple2)), (tuple1 < tuple2))
        with self.subTest(msg='cc_sym.Key.__hash__ is defined'):
            hash(cc_sym.Key('a'))
        with self.subTest(msg='cc_sym.Key.get_lcm_type returns a key_t'):
            self.assertIsInstance(cc_sym.Key('a').get_lcm_type(), key_t)
        with self.subTest(msg='cc_sym.Key is pickleable'):
            for key in [cc_sym.Key('a'), cc_sym.Key('a', 1), cc_sym.Key('a', 1, 2), cc_sym.Key('a', cc_sym.Key.INVALID_SUB, 2)]:
                key_dumps = pickle.dumps(key)
                self.assertEqual(key, pickle.loads(key_dumps))

    def test_values(self) -> None:
        """Exercise the cc_sym.Values bindings end to end."""
        supported_types = ([T.Scalar] + [getattr(sym, cls.__name__) for cls in (sf.GEO_TYPES + sf.CAM_TYPES)])

        def instantiate_type(tp: T.Type[T.Any]) -> T.Any:
            # Some wrapped types have no default constructor; fall back to
            # zero-filled storage.
            try:
                return tp()
            except TypeError:
                return tp.from_storage(([0] * tp.storage_dim()))

        for tp in supported_types:
            with self.subTest(msg=f'Can set and retrieve {tp.__name__}'):
                values = cc_sym.Values()
                val: T.Any = instantiate_type(tp)
                values.set(cc_sym.Key('v'), val)
                self.assertEqual(values.at(cc_sym.Key('v')), val)
        with self.subTest(msg='Can set and at 9x9 matrices and smaller'):
            values = cc_sym.Values()
            for rows in range(1, 10):
                for cols in range(1, 10):
                    matrix = np.array(([([0] * cols)] * rows))
                    values.set(cc_sym.Key('l', rows, cols), matrix)
                    values.at(cc_sym.Key('l', rows, cols))
                    values.set(cc_sym.Key('a', rows, cols), np.array(matrix))
                    values.at(cc_sym.Key('a', rows, cols))
        with self.subTest(msg='at raises RuntimeError if no entry exists'):
            with self.assertRaises(RuntimeError):
                cc_sym.Values().at(cc_sym.Key('a'))
        with self.subTest(msg='set returns true no value existed yet for the key'):
            values = cc_sym.Values()
            self.assertTrue(values.set(cc_sym.Key('a'), 1))
            self.assertFalse(values.set(cc_sym.Key('a'), 2))
        with self.subTest(msg='has returns whether or not key is present in Values'):
            values = cc_sym.Values()
            key = cc_sym.Key('a')
            self.assertFalse(values.has(key))
            values.set(key, 1)
            self.assertTrue(values.has(key))
        with self.subTest(msg='test that Remove returns whether or not key to be removed existed'):
            values = cc_sym.Values()
            key = cc_sym.Key('a')
            self.assertFalse(values.remove(key))
            values.set(key, 3)
            self.assertTrue(values.remove(key))
        with self.subTest(msg='Test that remove is consistent with has'):
            values = cc_sym.Values()
            key = cc_sym.Key('a')
            values.set(key, 1)
            values.remove(key=key)
            self.assertFalse(values.has(key))
        with self.subTest(msg='num_entries returns the correct number of entries'):
            values = cc_sym.Values()
            self.assertEqual(values.num_entries(), 0)
            values.set(cc_sym.Key('a'), 1.2)
            self.assertEqual(values.num_entries(), 1)
            values.remove(cc_sym.Key('a'))
            self.assertEqual(values.num_entries(), 0)
        with self.subTest(msg='Values.empty returns true if empty and false otherwise'):
            values = cc_sym.Values()
            self.assertTrue(values.empty())
            values.set(cc_sym.Key('a'), 1)
            self.assertFalse(values.empty())
        with self.subTest('Values.keys works correctly'):
            values = cc_sym.Values()
            a = cc_sym.Key('a')
            a_1 = cc_sym.Key('a', 1)
            b = cc_sym.Key('b')
            values.set(a_1, 1)
            values.set(b, 2)
            values.set(a, 3)
            # Default ordering is by storage offset (i.e. insertion order here).
            self.assertEqual([a_1, b, a], values.keys())
            self.assertEqual([a_1, b, a], values.keys(sort_by_offset=True))
            keys_false = values.keys(sort_by_offset=False)
            self.assertEqual({a, a_1, b}, set(keys_false))
            self.assertEqual(3, len(keys_false))
        with self.subTest('Values.items returns a dict[Key, index_entry_t]'):
            values = cc_sym.Values()
            a = cc_sym.Key('a')
            values.set(a, 1)
            items = values.items()
            self.assertIsInstance(items, dict)
            self.assertIn(a, items)
            self.assertIsInstance(items[a], index_entry_t)
        with self.subTest('Values.items and Values.at [index_entry_t version] work together'):
            values = cc_sym.Values()
            a = cc_sym.Key('a')
            values.set(a, 1)
            items = values.items()
            self.assertEqual(values.at(entry=items[a]), 1)
        with self.subTest('Values.data returns the correct value'):
            values = cc_sym.Values()
            values.set(cc_sym.Key('a'), 1)
            values.set(cc_sym.Key('b'), 2)
            self.assertEqual(values.data(), [1, 2])
        with self.subTest(msg='Values.create_index returns an index_t'):
            values = cc_sym.Values()
            keys = [cc_sym.Key('a', i) for i in range(10)]
            for key in keys:
                values.set(key, key.sub)
            self.assertIsInstance(values.create_index(keys=keys), index_t)
        with self.subTest(msg='Values.update_or_set works as expected'):
            key_a = cc_sym.Key('a')
            key_b = cc_sym.Key('b')
            key_c = cc_sym.Key('c')
            values_1 = cc_sym.Values()
            values_1.set(key_a, 1)
            values_1.set(key_b, 2)
            values_2 = cc_sym.Values()
            values_2.set(key_b, 3)
            values_2.set(key_c, 4)
            values_1.update_or_set(index=values_2.create_index([key_b, key_c]), other=values_2)
            self.assertEqual(values_1.at(key_a), 1)
            self.assertEqual(values_1.at(key_b), 3)
            self.assertEqual(values_1.at(key_c), 4)
        with self.subTest(msg='Values.remove_all leaves a values as empty'):
            values = cc_sym.Values()
            for i in range(4):
                values.set(cc_sym.Key('a', i), i)
            values.remove_all()
            self.assertTrue(values.empty())
        with self.subTest(msg='Test that Values.cleanup is callable and returns correct output'):
            values = cc_sym.Values()
            values.set(cc_sym.Key('a'), 1)
            values.set(cc_sym.Key('b'), 2)
            values.remove(cc_sym.Key('a'))
            self.assertEqual(values.cleanup(), 1)
        for tp in supported_types:
            with self.subTest(msg=f'Can call set as a function of index_entry_t and {tp.__name__}'):
                values = cc_sym.Values()
                a = cc_sym.Key('a')
                values.set(a, instantiate_type(tp))
                values.set(values.items()[a], instantiate_type(tp))
        with self.subTest(msg='Test Values.update (since index overlaod) works as expected'):
            key_a = cc_sym.Key('a')
            key_b = cc_sym.Key('b')
            key_c = cc_sym.Key('c')
            values_1 = cc_sym.Values()
            values_1.set(key_a, 1)
            values_1.set(key_b, 2)
            values_1.set(key_c, 3)
            values_2 = cc_sym.Values()
            values_2.set(key_a, 4)
            values_2.set(key_b, 5)
            values_2.set(key_c, 6)
            values_1.update(index=values_1.create_index([key_b, key_c]), other=values_2)
            self.assertEqual(values_1.at(key_a), 1)
            self.assertEqual(values_1.at(key_b), 5)
            self.assertEqual(values_1.at(key_c), 6)
        with self.subTest(msg='Test Values.update (two index overlaod) works as expected'):
            key_a = cc_sym.Key('a')
            key_b = cc_sym.Key('b')
            key_c = cc_sym.Key('c')
            values_1 = cc_sym.Values()
            values_1.set(key_a, 1)
            values_1.set(key_b, 2)
            values_1.set(key_c, 3)
            values_2 = cc_sym.Values()
            values_2.set(key_b, 4)
            values_2.set(key_c, 5)
            values_1.update(index_this=values_1.create_index([key_b, key_c]), index_other=values_2.create_index([key_b, key_c]), other=values_2)
            self.assertEqual(values_1.at(key_a), 1)
            self.assertEqual(values_1.at(key_b), 4)
            self.assertEqual(values_1.at(key_c), 5)
        with self.subTest(msg='Test that Values.retract works roughly'):
            a = cc_sym.Key('a')
            values_1 = cc_sym.Values()
            values_1.set(a, 0)
            values_2 = cc_sym.Values()
            values_2.set(a, 0)
            values_2.retract(values_2.create_index([a]), [1], 1e-08)
            self.assertNotEqual(values_1, values_2)
        with self.subTest(msg='Test that Values.local_coordinates works roughly'):
            a = cc_sym.Key('a')
            values_1 = cc_sym.Values()
            values_1.set(a, 0)
            values_2 = cc_sym.Values()
            values_2.set(a, 10)
            self.assertEqual(values_2.local_coordinates(values_1, values_1.create_index([a]), 0), 10)
        with self.subTest(msg='Test that Values.get_lcm_type returns a values_t'):
            v = cc_sym.Values()
            v.set(cc_sym.Key('a', 1, 2), 10)
            self.assertIsInstance(v.get_lcm_type(), values_t)
        with self.subTest(msg='Test the initializer from values_t'):
            v = cc_sym.Values()
            a = cc_sym.Key('a')
            v.set(cc_sym.Key('a'), 1)
            v_copy = cc_sym.Values(v.get_lcm_type())
            self.assertTrue(v_copy.has(a))
            self.assertEqual(v_copy.at(a), v.at(a))
        with self.subTest(msg='Can pickle Values'):
            v = cc_sym.Values()
            keys = []
            for (i, tp) in enumerate(supported_types):
                v.set(cc_sym.Key('x', i), instantiate_type(tp))
                keys.append(cc_sym.Key('x', i))
            pickled_v = pickle.loads(pickle.dumps(v))
            for key in keys:
                self.assertEqual(v.at(key), pickled_v.at(key))

    @staticmethod
    def pi_residual(x: T.Scalar) -> T.Tuple[(T.List[T.Scalar], T.List[T.Scalar], T.List[T.Scalar], T.List[T.Scalar])]:
        """Residual cos(x/2) with its jacobian, Gauss-Newton hessian, and rhs.

        The residual is zeroed at x = pi, so optimizing it drives x toward pi.
        """
        x_2 = (x / 2.0)
        cos = math.cos(x_2)
        sin = math.sin(x_2)
        sin_2 = ((1.0 / 2.0) * sin)
        res = [cos]
        jacobian = [(- sin_2)]
        hessian = [((1.0 / 4.0) * (sin ** 2))]
        rhs = [((- cos) * sin_2)]
        return (res, jacobian, hessian, rhs)

    @staticmethod
    def sparse_pi_residual(x: T.Scalar) -> T.Tuple[(T.List[T.Scalar], sparse.csc_matrix, sparse.csc_matrix, T.List[T.Scalar])]:
        """Same as pi_residual, with jacobian and hessian as sparse matrices."""
        (res, jacobian, hessian, rhs) = SymforceCCSymTest.pi_residual(x)
        return (res, sparse.csc_matrix(jacobian), sparse.csc_matrix(hessian), rhs)

    # Adapters matching the (values, index_entries) signatures cc_sym expects.
    dense_pi_hessian = (lambda values, index_entries: SymforceCCSymTest.pi_residual(values.at(index_entries[0])))
    dense_pi_jacobian = (lambda values, index_entries: SymforceCCSymTest.pi_residual(values.at(index_entries[0]))[0:2])
    sparse_pi_hessian = (lambda values, index_entries: SymforceCCSymTest.sparse_pi_residual(values.at(index_entries[0])))
    sparse_pi_jacobian = (lambda values, index_entries: SymforceCCSymTest.sparse_pi_residual(values.at(index_entries[0]))[0:2])

    def test_factor(self) -> None:
        """Exercise the cc_sym.Factor bindings (dense and sparse variants)."""
        pi_key = cc_sym.Key('3', 1, 4)
        pi_factor = cc_sym.Factor(hessian_func=SymforceCCSymTest.dense_pi_hessian, keys=[pi_key])
        pi_jacobian_factor = cc_sym.Factor.jacobian(jacobian_func=SymforceCCSymTest.dense_pi_jacobian, keys=[pi_key])
        sparse_pi_factor = cc_sym.Factor(hessian_func=SymforceCCSymTest.sparse_pi_hessian, keys=[pi_key], sparse=True)
        sparse_pi_jacobian_factor = cc_sym.Factor.jacobian(jacobian_func=SymforceCCSymTest.sparse_pi_jacobian, keys=[pi_key], sparse=True)
        with self.subTest(msg='Test that alternate Factor constructors can be called'):
            cc_sym.Factor(hessian_func=SymforceCCSymTest.dense_pi_hessian, keys_to_func=[pi_key], keys_to_optimize=[pi_key])
            cc_sym.Factor.jacobian(jacobian_func=SymforceCCSymTest.dense_pi_jacobian, keys_to_func=[pi_key], keys_to_optimize=[pi_key], sparse=True)
            cc_sym.Factor(hessian_func=SymforceCCSymTest.sparse_pi_hessian, keys_to_func=[pi_key], keys_to_optimize=[pi_key], sparse=True)
            cc_sym.Factor.jacobian(jacobian_func=SymforceCCSymTest.sparse_pi_jacobian, keys_to_func=[pi_key], keys_to_optimize=[pi_key], sparse=True)
        self.assertFalse(pi_factor.is_sparse())
        self.assertFalse(pi_jacobian_factor.is_sparse())
        self.assertTrue(sparse_pi_factor.is_sparse())
        self.assertTrue(sparse_pi_jacobian_factor.is_sparse())
        with self.subTest(msg='Test that Factor.linearized_factor/linearize are wrapped'):
            pi_values = cc_sym.Values()
            eval_value = 3.0
            pi_values.set(pi_key, eval_value)
            self.assertIsInstance(pi_factor.linearized_factor(pi_values), linearized_dense_factor_t)
            (target_residual, target_jacobian, *_) = SymforceCCSymTest.pi_residual(eval_value)
            for factor in [pi_factor, pi_jacobian_factor, sparse_pi_factor, sparse_pi_jacobian_factor]:
                (residual, jacobian) = factor.linearize(pi_values)
                self.assertAlmostEqual(target_residual[0], residual[0])
                self.assertAlmostEqual(target_jacobian[0], jacobian[(0, 0)])
                if factor.is_sparse():
                    self.assertIsInstance(jacobian, sparse.csc_matrix)
                else:
                    self.assertNotIsInstance(jacobian, sparse.csc_matrix)
        with self.subTest(msg='Test error is raised if mismatch in sparsity of factor and matrix'):
            pi_values = cc_sym.Values()
            pi_values.set(pi_key, 3.0)
            sparse_factor_dense_hessian = cc_sym.Factor(hessian_func=SymforceCCSymTest.dense_pi_hessian, keys=[pi_key], sparse=True)
            with self.assertRaises(ValueError):
                sparse_factor_dense_hessian.linearize(pi_values)
            dense_factor_sparse_hessian = cc_sym.Factor(hessian_func=SymforceCCSymTest.sparse_pi_hessian, keys=[pi_key], sparse=False)
            with self.assertRaises(ValueError):
                dense_factor_sparse_hessian.linearize(pi_values)
            sparse_factor_dense_jacobian = cc_sym.Factor.jacobian(jacobian_func=SymforceCCSymTest.dense_pi_jacobian, keys=[pi_key], sparse=True)
            with self.assertRaises(ValueError):
                sparse_factor_dense_jacobian.linearize(pi_values)
            dense_factor_sparse_jacobian = cc_sym.Factor.jacobian(jacobian_func=SymforceCCSymTest.sparse_pi_jacobian, keys=[pi_key], sparse=False)
            with self.assertRaises(ValueError):
                dense_factor_sparse_jacobian.linearize(pi_values)
        with self.subTest(msg='Test that Factor.all_keys and optimized_keys are wrapped'):
            self.assertEqual(pi_factor.all_keys(), [pi_key])
            self.assertEqual(pi_factor.optimized_keys(), [pi_key])

    @staticmethod
    def compare_linearizations(lin1: cc_sym.Linearization, lin2: cc_sym.Linearization) -> bool:
        """Return True if two Linearizations are field-for-field identical."""
        return ((lin1.residual == lin2.residual).all() and (lin1.hessian_lower.toarray() == lin2.hessian_lower.toarray()).all() and (lin1.jacobian.toarray() == lin2.jacobian.toarray()).all() and (lin1.rhs == lin2.rhs).all() and (lin1.is_initialized() == lin2.is_initialized()))

    @staticmethod
    def compare_optimization_stats(stats1: cc_sym.OptimizationStats, stats2: cc_sym.OptimizationStats) -> bool:
        """Return True if two OptimizationStats are equivalent."""
        TVar = T.TypeVar('TVar')

        def unwrap(option: T.Optional[TVar]) -> TVar:
            assert (option is not None)
            return option
        # Exactly one of the two having a best_linearization means not equal.
        if ((stats1.best_linearization is None) ^ (stats2.best_linearization is None)):
            return False
        return ((stats1.iterations == stats2.iterations) and (stats1.best_index == stats2.best_index) and (stats1.status == stats2.status) and (stats1.failure_reason == stats2.failure_reason) and (((stats1.best_linearization is None) and (stats2.best_linearization is None)) or SymforceCCSymTest.compare_linearizations(unwrap(stats1.best_linearization), unwrap(stats2.best_linearization))))

    def test_optimization_stats(self) -> None:
        """Exercise the cc_sym.OptimizationStats bindings."""
        with self.subTest(msg='Can read and write to iterations field'):
            stats = cc_sym.OptimizationStats()
            self.assertIsInstance(stats.iterations, list)
            stats.iterations = [optimization_iteration_t() for _ in range(5)]
        with self.subTest(msg='Can read and write to best_index and status'):
            stats = cc_sym.OptimizationStats()
            stats.best_index = stats.best_index
            stats.status = stats.status
            stats.failure_reason = stats.failure_reason
        with self.subTest(msg='Can read and write to best_linearization'):
            stats = cc_sym.OptimizationStats()
            stats.best_linearization = None
            self.assertIsNone(stats.best_linearization)
            stats.best_linearization = cc_sym.Linearization()
            self.assertIsInstance(stats.best_linearization, cc_sym.Linearization)
        with self.subTest(msg='get_lcm_type is wrapped'):
            stats = cc_sym.OptimizationStats()
            self.assertIsInstance(stats.get_lcm_type(), optimization_stats_t)
        with self.subTest(msg='Can pickle cc_sym.OptimizationStats'):
            stats = cc_sym.OptimizationStats()
            stats.iterations = [optimization_iteration_t(iteration=i) for i in range(4)]
            stats.best_index = 1
            stats.status = optimization_status_t.SUCCESS
            stats.failure_reason = levenberg_marquardt_solver_failure_reason_t.INVALID.value
            stats.best_linearization = None
            self.assertTrue(self.compare_optimization_stats(stats, pickle.loads(pickle.dumps(stats))))
            linearization = cc_sym.Linearization()
            linearization.residual = np.array([1, 2, 3])
            stats.best_linearization = linearization
            self.assertTrue(self.compare_optimization_stats(stats, pickle.loads(pickle.dumps(stats))))

    def test_optimizer(self) -> None:
        """Exercise the cc_sym.Optimizer and cc_sym.Linearization bindings."""
        with self.subTest(msg='Test that default_optimizer_params is wrapped'):
            self.assertIsInstance(cc_sym.default_optimizer_params(), optimizer_params_t)
        pi_key = cc_sym.Key('3', 1, 4)
        pi_factor = cc_sym.Factor(hessian_func=(lambda values, index_entries: SymforceCCSymTest.pi_residual(values.at(index_entries[0]))), keys=[pi_key])
        with self.subTest(msg='Can construct an Optimizer with or without default arguments'):
            cc_sym.Optimizer(params=cc_sym.default_optimizer_params(), factors=[pi_factor])
            cc_sym.Optimizer(params=cc_sym.default_optimizer_params(), factors=[pi_factor], epsilon=1e-06, name='OptimizeTest', keys=[], debug_stats=False, check_derivatives=False)
        make_opt = (lambda : cc_sym.Optimizer(params=cc_sym.default_optimizer_params(), factors=[pi_factor], debug_stats=False, include_jacobians=True))
        with self.subTest(msg='Optimizer.factors has been wrapped'):
            opt = make_opt()
            self.assertEqual(1, len(opt.factors()))
            self.assertEqual(opt.factors()[0].all_keys(), pi_factor.all_keys())
        with self.subTest(msg='Optimizer.optimize has been wrapped'):
            values = cc_sym.Values()
            values.set(pi_key, 3.0)
            opt = make_opt()
            # Every overload of optimize: with/without iteration count,
            # best-linearization flag, and caller-owned stats object.
            self.assertIsInstance(opt.optimize(values=values), cc_sym.OptimizationStats)
            self.assertAlmostEqual(values.at(pi_key), math.pi)
            self.assertIsInstance(opt.optimize(values=values, num_iterations=2), cc_sym.OptimizationStats)
            self.assertIsInstance(opt.optimize(values=values, populate_best_linearization=True), cc_sym.OptimizationStats)
            self.assertIsInstance(opt.optimize(values, 2, True), cc_sym.OptimizationStats)
            self.assertIsNone(opt.optimize(values=values, num_iterations=2, populate_best_linearization=False, stats=cc_sym.OptimizationStats()))
            self.assertIsNone(opt.optimize(values, 2, False, cc_sym.OptimizationStats()))
            self.assertIsNone(opt.optimize(values=values, num_iterations=2, stats=cc_sym.OptimizationStats()))
            self.assertIsNone(opt.optimize(values, 2, cc_sym.OptimizationStats()))
            self.assertIsNone(opt.optimize(values=values, stats=cc_sym.OptimizationStats()))
            self.assertIsNone(opt.optimize(values, cc_sym.OptimizationStats()))
            stats = cc_sym.OptimizationStats()
            self.assertEqual(len(stats.iterations), 0)
            opt.optimize(values=values, stats=stats)
            self.assertNotEqual(len(stats.iterations), 0)
        with self.subTest(msg='Optimizer.linearize has been wrapped'):
            values = cc_sym.Values()
            values.set(pi_key, 2.0)
            opt = make_opt()
            self.assertIsInstance(opt.linearize(values=values), cc_sym.Linearization)
        with self.subTest(msg='The methods of Linearization have been wrapped'):
            cc_sym.Linearization()
            values = cc_sym.Values()
            values.set(pi_key, 3.0)
            opt = make_opt()
            lin = opt.linearize(values=values)
            lin.residual = lin.residual
            lin.hessian_lower = lin.hessian_lower
            lin.jacobian = lin.jacobian
            lin.rhs = lin.rhs
            lin.set_initialized()
            self.assertTrue(lin.is_initialized())
            lin.reset()
            self.assertFalse(lin.is_initialized())
            lin.set_initialized(initialized=True)
            self.assertTrue(lin.is_initialized())
            lin.set_initialized(initialized=False)
            self.assertFalse(lin.is_initialized())
            lin.set_initialized()
            self.assertIsInstance(lin.error(), T.Scalar)
            self.assertIsInstance(lin.linear_error(x_update=np.array([0.01])), T.Scalar)
            lin.linear_error(np.array([0.01]))
        with self.subTest(msg='cc_sym.Linearization is pickleable'):
            linearization = cc_sym.Linearization()
            linearization.residual = np.array([1, 2, 3])
            linearization.jacobian = sparse.csc_matrix([[1, 2], [3, 4], [5, 6]])
            linearization.hessian_lower = sparse.csc_matrix([[35, 0], [44, 56]])
            linearization.rhs = np.array([22, 28])
            self.assertTrue(self.compare_linearizations(linearization, pickle.loads(pickle.dumps(linearization))))
            linearization.set_initialized(True)
            self.assertTrue(self.compare_linearizations(linearization, pickle.loads(pickle.dumps(linearization))))
        with self.subTest(msg='Optimizer.compute_all_covariances has been wrapped'):
            values = cc_sym.Values()
            values.set(pi_key, 2.0)
            opt = make_opt()
            opt.optimize(values=values)
            self.assertIsInstance(opt.compute_all_covariances(linearization=opt.linearize(values)), dict)
        with self.subTest(msg='Optimizer.compute_covariances has been wrapped'):
            values = cc_sym.Values()
            values.set(pi_key, 1.0)
            opt = make_opt()
            self.assertIsInstance(opt.compute_covariances(linearization=opt.linearize(values), keys=[pi_key]), dict)
        with self.subTest(msg='Optimzer.keys is wrapped'):
            opt = make_opt()
            self.assertEqual(opt.keys(), [pi_key])
        with self.subTest(msg='Optimizer.update_params is wrapped'):
            opt = make_opt()
            opt.update_params(params=cc_sym.default_optimizer_params())
        with self.subTest(msg='cc_sym.optimize is wrapped'):
            values = cc_sym.Values()
            values.set(pi_key, 3.0)
            cc_sym.optimize(params=cc_sym.default_optimizer_params(), factors=[pi_factor], values=values, epsilon=1e-09)
            self.assertAlmostEqual(values.at(pi_key), math.pi)
            cc_sym.optimize(params=cc_sym.default_optimizer_params(), factors=[pi_factor], values=values)

    def test_default_params_match(self) -> None:
        """The C++ default optimizer params equal the Python defaults."""
        self.assertEqual(cc_sym.default_optimizer_params(), optimizer_params_t(**dataclasses.asdict(optimizer.Optimizer.Params(verbose=False))))
class TFXLNetMainLayer(metaclass=DummyObject):
    """Placeholder for TFXLNetMainLayer when TensorFlow is not installed.

    Instantiating it calls requires_backends, which raises a helpful error
    instead of an opaque ImportError at import time.
    """
    # Backends that must be available for the real implementation.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
# NOTE(review): this bare call discards its result — it looks like a decorator
# that lost its '@' (i.e. "@_with_task('Doing naive query')" on naive_query);
# confirm against the original source before relying on it.
_with_task('Doing naive query')
def naive_query(features, deep_feats, color_feats, labels, retrieval_top_n=5):
    """Return the top-n retrieval results ranked by deep and color features."""
    results = get_deep_color_top_n(features, deep_feats, color_feats, labels, retrieval_top_n)
    return results
def graph_preparation_runner(in_model: Any, representative_data_gen: Callable, quantization_config: QuantizationConfig, fw_info: FrameworkInfo, fw_impl: FrameworkImplementation, tpc: TargetPlatformCapabilities, tb_w: TensorboardWriter=None, mixed_precision_enable: bool=False) -> Graph:
    """Read a model into a graph, optionally log it, and return the finalized graph.

    in_model -- framework model to convert.
    representative_data_gen -- generator yielding calibration inputs.
    quantization_config -- quantization settings applied during finalization.
    fw_info / fw_impl -- framework metadata and implementation hooks.
    tpc -- target platform capabilities.
    tb_w -- optional TensorBoard writer; the initial graph is logged when set.
    mixed_precision_enable -- whether mixed-precision transformations apply.
    """
    graph = read_model_to_graph(in_model, representative_data_gen, tpc, fw_info, fw_impl)
    if tb_w is not None:
        # Snapshot the graph before any transformation for debugging.
        tb_w.add_graph(graph, 'initial_graph')
    return get_finalized_graph(graph, tpc, quantization_config, fw_info, tb_w, fw_impl, mixed_precision_enable=mixed_precision_enable)
def train_epoch(model_gen, model_dis2, model_dis4, model_dis1=None, optim_gen=None, optim_dis2=None, optim_dis4=None, optim_dis1=None, trainA_iterator=None, trainB_iterator=None):
    """Run one adversarial domain-adaptation training epoch.

    Trains the generator on labelled source batches (domain A) and aligns it
    to the unlabelled target domain (B) against up to three discriminators
    (d1: output maps, d2: entropy/uncertainty maps, d4: point-cloud vertices),
    each gated by the corresponding ``args.dX`` flag.

    Returns a dict of averaged losses/accuracies for the epoch.
    """
    # Labels used for the adversarial BCE targets.
    source_domain_label = 1
    target_domain_label = 0
    smooth = 1e-07  # numerical floor inside log() for the entropy maps
    model_gen.train()
    if args.d1:
        model_dis1.train()
    if args.d2:
        model_dis2.train()
    if args.d4:
        model_dis4.train()
    train_result = {}
    running_seg_loss = []
    vertex_source_loss = []
    vertex_target_loss = []
    seg_dice = []
    running_adv_diff_loss = []
    running_dis_diff_loss = []
    (d1_acc1, d1_acc2, d2_acc1, d2_acc2, d4_acc1, d4_acc2) = ([], [], [], [], [], [])
    for ((imgA, maskA, vertexA), (imgB, _, vertexB)) in zip(trainA_iterator, trainB_iterator):
        # ---- Phase 1: update generator; freeze all discriminators. ----
        if args.d1:
            optim_dis1.zero_grad()
            for param in model_dis1.parameters():
                param.requires_grad = False
        if args.d2:
            optim_dis2.zero_grad()
            for param in model_dis2.parameters():
                param.requires_grad = False
        if args.d4:
            optim_dis4.zero_grad()
            for param in model_dis4.parameters():
                param.requires_grad = False
        optim_gen.zero_grad()
        for param in model_gen.parameters():
            param.requires_grad = True
        # Supervised segmentation on the source batch.
        (oS, oS2, vertS) = model_gen(torch.tensor(imgA).cuda())
        loss_seg = BCELoss()(torch.sigmoid(oS), torch.tensor(maskA, dtype=torch.float32).cuda())
        loss_seg2 = jaccard_loss(logits=torch.sigmoid(oS), true=torch.tensor(maskA, dtype=torch.float32).cuda(), activation=False)
        loss_seg3 = 0
        if args.d4:
            # Vertex (point-set) loss, only when the point discriminator is on.
            loss_seg3 = batch_NN_loss(x=vertS, y=torch.tensor(vertexA).cuda())
            vertex_source_loss.append(loss_seg3.item())
        loss_seg1 = ((loss_seg + loss_seg2) + (args.wp * loss_seg3))
        running_seg_loss.append((loss_seg + loss_seg2).item())
        loss_seg1.backward()
        y_pred = soft_to_hard_pred(oS.cpu().detach().numpy(), 1)
        seg_dice.append(dice_coef_multilabel(y_true=maskA, y_pred=y_pred, channel='channel_first'))
        # Adversarial pass on the target batch: fool discriminators into
        # labelling target outputs as "source".
        (oT, oT2, vertT) = model_gen(torch.tensor(imgB).cuda())
        loss_adv_diff2 = 0
        if args.d2:
            # Per-pixel entropy map of the target prediction.
            uncertainty_mapT = (((- 1.0) * torch.sigmoid(oT)) * torch.log((torch.sigmoid(oT) + smooth)))
            D_out2 = model_dis2(uncertainty_mapT)
            loss_adv_diff2 = (args.dr * F.binary_cross_entropy_with_logits(D_out2, torch.FloatTensor(D_out2.data.size()).fill_(source_domain_label).cuda()))
        loss_adv_diff_point = 0
        if args.d4:
            loss_vert_target = batch_NN_loss(x=vertT, y=torch.tensor(vertexB).cuda())
            vertex_target_loss.append(loss_vert_target.item())
            D_out4 = model_dis4(vertT.transpose(2, 1))[0]
            loss_adv_diff_point = (args.dr * F.binary_cross_entropy_with_logits(D_out4, torch.FloatTensor(D_out4.data.size()).fill_(source_domain_label).cuda()))
        loss_adv_diff1 = 0
        if args.d1:
            D_out1 = model_dis1(oT)
            loss_adv_diff1 = (args.dr * F.binary_cross_entropy_with_logits(D_out1, torch.FloatTensor(D_out1.data.size()).fill_(source_domain_label).cuda()))
        loss_adv_diff = ((loss_adv_diff2 + loss_adv_diff_point) + loss_adv_diff1)
        # NOTE(review): if none of args.d1/d2/d4 is set, loss_adv_diff is the
        # int 0 and .item()/.backward() below would raise — presumably at
        # least one discriminator is always enabled; confirm with callers.
        running_adv_diff_loss.append(loss_adv_diff.item())
        loss_adv_diff.backward()
        optim_gen.step()
        # ---- Phase 2: update discriminators; freeze the generator. ----
        if args.d1:
            for param in model_dis1.parameters():
                param.requires_grad = True
        if args.d2:
            for param in model_dis2.parameters():
                param.requires_grad = True
        if args.d4:
            for param in model_dis4.parameters():
                param.requires_grad = True
        for param in model_gen.parameters():
            param.requires_grad = False
        # Detach generator outputs so discriminator gradients stop here.
        oS = oS.detach()
        oT = oT.detach()
        # Discriminators on SOURCE outputs (target label: "source").
        if args.d2:
            uncertainty_mapS = (((- 1.0) * torch.sigmoid(oS)) * torch.log((torch.sigmoid(oS) + smooth)))
            D_out2 = model_dis2(uncertainty_mapS)
            loss_D_same2 = F.binary_cross_entropy_with_logits(D_out2, torch.FloatTensor(D_out2.data.size()).fill_(source_domain_label).cuda())
            loss_D_same2.backward()
            D_out2 = torch.sigmoid(D_out2.detach()).cpu().numpy()
            D_out2 = np.where((D_out2 >= 0.5), 1, 0)
            d2_acc1.append(np.mean(D_out2))
        if args.d1:
            D_out1 = model_dis1(oS)
            loss_D_same1 = F.binary_cross_entropy_with_logits(D_out1, torch.FloatTensor(D_out1.data.size()).fill_(source_domain_label).cuda())
            loss_D_same1.backward()
            D_out1 = torch.sigmoid(D_out1.detach()).cpu().numpy()
            D_out1 = np.where((D_out1 >= 0.5), 1, 0)
            d1_acc1.append(np.mean(D_out1))
        if args.d4:
            vertS = vertS.detach()
            D_out4 = model_dis4(vertS.transpose(2, 1))[0]
            loss_D_same4 = F.binary_cross_entropy_with_logits(D_out4, torch.FloatTensor(D_out4.data.size()).fill_(source_domain_label).cuda())
            loss_D_same4.backward()
            D_out4 = torch.sigmoid(D_out4.detach()).cpu().numpy()
            D_out4 = np.where((D_out4 >= 0.5), 1, 0)
            d4_acc1.append(np.mean(D_out4))
        # Discriminators on TARGET outputs (target label: "target").
        if args.d2:
            D_out2 = model_dis2(uncertainty_mapT.detach())
            loss_D_diff2 = F.binary_cross_entropy_with_logits(D_out2, torch.FloatTensor(D_out2.data.size()).fill_(target_domain_label).cuda())
            loss_D_diff2.backward()
            running_dis_diff_loss.append(loss_D_diff2.item())
            D_out2 = torch.sigmoid(D_out2.detach()).cpu().numpy()
            D_out2 = np.where((D_out2 >= 0.5), 1, 0)
            d2_acc2.append((1 - np.mean(D_out2)))
        if args.d1:
            D_out1 = model_dis1(oT)
            loss_D_diff1 = F.binary_cross_entropy_with_logits(D_out1, torch.FloatTensor(D_out1.data.size()).fill_(target_domain_label).cuda())
            loss_D_diff1.backward()
            D_out1 = torch.sigmoid(D_out1.detach()).cpu().numpy()
            D_out1 = np.where((D_out1 >= 0.5), 1, 0)
            d1_acc2.append((1 - np.mean(D_out1)))
        if args.d4:
            vertT = vertT.detach()
            D_out4 = model_dis4(vertT.transpose(2, 1))[0]
            loss_D_diff_4 = F.binary_cross_entropy_with_logits(D_out4, torch.FloatTensor(D_out4.data.size()).fill_(target_domain_label).cuda())
            loss_D_diff_4.backward()
            D_out4 = torch.sigmoid(D_out4.detach()).cpu().numpy()
            D_out4 = np.where((D_out4 >= 0.5), 1, 0)
            d4_acc2.append((1 - np.mean(D_out4)))
        if args.d1:
            optim_dis1.step()
        if args.d2:
            optim_dis2.step()
        if args.d4:
            optim_dis4.step()
    # Aggregate per-batch statistics into epoch means.
    train_result['seg_loss'] = np.mean(np.array(running_seg_loss))
    train_result['seg_dice'] = np.mean(np.array(seg_dice))
    if args.d2:
        train_result['dis2_acc1'] = np.mean(np.array(d2_acc1))
        train_result['dis2_acc2'] = np.mean(np.array(d2_acc2))
    if args.d1:
        train_result['dis1_acc1'] = np.mean(np.array(d1_acc1))
        train_result['dis1_acc2'] = np.mean(np.array(d1_acc2))
    if args.d4:
        train_result['dis4_acc1'] = np.mean(np.array(d4_acc1))
        train_result['dis4_acc2'] = np.mean(np.array(d4_acc2))
    # NOTE(review): the two vertex losses are only appended when args.d4 is
    # set, so these means are NaN (mean of empty array) when d4 is off.
    train_result['ver_s_loss'] = np.mean(np.array(vertex_source_loss))
    train_result['ver_t_loss'] = np.mean(np.array(vertex_target_loss))
    return train_result
class BartForSequenceClassification():
    """Import-time placeholder used when PyTorch is not installed.

    Any attempt to construct it or load weights fails with an informative
    error via ``requires_pytorch``.
    """

    def __init__(self, *args, **kwargs):
        # Raises: the real model needs the PyTorch backend.
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # Raises: loading pretrained weights also needs PyTorch.
        requires_pytorch(self)
class Partition13(nn.Module):
    """Auto-generated pipeline-parallel partition holding T5 decoder blocks 15-17.

    The partition is pinned to one device and forwards its inputs through the
    three wrapped decoder blocks, passing the shared masks/biases through
    unchanged.
    """
    # Scopes of the layers this partition owns (from the traced model).
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]']
    TENSORS = []  # no standalone parameters/buffers assigned to this partition

    def __init__(self, layers, tensors, device='cuda:13'):
        super().__init__()
        # Register each owned layer as a submodule l_0, l_1, ...
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register owned tensors as parameters (p_i) or buffers (b_i).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Flat-argument layout expected by forward() (six scalar slots).
        self.input_structure = [1, 1, 1, 1, 1, 1]
        # Maps local module names back to original model scopes.
        self.lookup = {'l_0': 'decoder.15', 'l_1': 'decoder.16', 'l_2': 'decoder.17'}
        self.to(self.device)

    def forward(self, *args):
        # Unpack the flattened inputs: masks/biases plus the hidden state x3.
        (decoder_attention_mask, inverted_encoder_attention_mask, x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Chain the three decoder blocks; masks and position biases are reused.
        t_0 = self.l_0(x3, attention_mask=decoder_attention_mask, position_bias=x1, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=x2)
        t_0 = self.l_1(t_0, attention_mask=decoder_attention_mask, position_bias=x1, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=x2)
        t_0 = self.l_2(t_0, attention_mask=decoder_attention_mask, position_bias=x1, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=x2)
        # Re-flatten: pass-through tensors first, new hidden state last.
        return list(flatten((x0, x1, x2, t_0)))

    # The following delegate to module-level helpers that translate names via
    # self.lookup so checkpoints keep the original model's key names.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class ReplicationPad1d(_ReplicationPadNd):
    """1-D padding module that replicates the input's boundary values.

    ``padding`` may be a single int (same amount on both sides) or a
    (left, right) pair; it is normalized to a 2-tuple via ``_pair``.
    """
    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)
def start_training():
    """Train and evaluate the QFVS model on each leave-one-out video split.

    Runs one training job per test video (4-fold split over videos 1-4, or a
    single fold if ``opt.qfvs_split`` selects one), then aggregates per-video
    F-score/precision/recall, saves the metrics JSON and logs to Tensorboard.
    """
    logger.info('Setup config, data and model...')
    opt = BaseOptions().parse()
    set_seed(opt.seed)
    config = {}
    config = update_config(opt, config)
    tb_writer = SummaryWriter(opt.tensorboard_log_dir)
    # test video id -> the three videos used for training in that fold
    qfvs_split = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2, 4], 4: [1, 2, 3]}
    scores_videos = {}
    for (test_id, splits) in qfvs_split.items():
        # A non-negative opt.qfvs_split restricts training to that one fold.
        if (opt.qfvs_split != (- 1)):
            if (test_id != opt.qfvs_split):
                continue
        logger.info(f'Start Training {opt.dset_name}: {test_id}')
        config['train_videos'] = qfvs_split[test_id]
        config['test_videos'] = [test_id]
        train_dataset = DatasetQFVS(config)
        train_loader = DataLoader(train_dataset, batch_size=opt.bsz, collate_fn=start_end_collate_qfvs, shuffle=True, num_workers=opt.num_workers)
        (model, criterion, optimizer, lr_scheduler) = setup_model(opt)
        count_parameters(model)
        best_score = train(model, criterion, optimizer, lr_scheduler, train_loader, opt, config)
        scores_videos[('V' + str(test_id))] = best_score
    # Average the per-video metrics over however many folds actually ran.
    avg_fscore = (sum([v['Fscore'] for (k, v) in scores_videos.items()]) / len(scores_videos))
    avg_precision = (sum([v['Precision'] for (k, v) in scores_videos.items()]) / len(scores_videos))
    avg_recall = (sum([v['Recall'] for (k, v) in scores_videos.items()]) / len(scores_videos))
    scores_videos['avg'] = {'Fscore': avg_fscore, 'Precision': avg_precision, 'Recall': avg_recall}
    save_metrics_path = os.path.join(opt.results_dir, f'best_{opt.dset_name}_{opt.eval_split_name}_preds_metrics.json')
    save_json(scores_videos, save_metrics_path, save_pretty=True, sort_keys=False)
    tb_writer.add_scalar(f'Eval/QFVS-avg-fscore', round(avg_fscore, 2), 1)
    tb_writer.add_text(f'Eval/QFVS-{opt.dset_name}', dict_to_markdown(scores_videos, max_str_len=None))
    tb_writer.close()
    print(scores_videos)
    return
def is_valid_outcome_range(dx, code_range):
    """Return True if diagnosis code *dx* starts with any prefix in *code_range*."""
    return any(dx.startswith(prefix) for prefix in code_range)
def create_relation_type(type_dict, path):
    """Build a predicate -> {entity types} mapping from KB JSON files.

    Args:
        type_dict: mapping from an entity id (obtained via ``get_id``) to the
            entity's type; entities missing from it are skipped.
        path: iterable of JSON file paths, each mapping
            subject -> predicate -> list of objects.

    Side effects:
        Pickles the resulting dict to ``data/BFS/type_predicate_link.pkl``.
    """
    print('Creating relation_type dictionary...')
    relation_types = {}
    for file_path in path:
        # Use a context manager so the file handle is not leaked.
        with open(file_path, 'r') as fh:
            dic_kb = json.load(fh)
        for idx in tqdm(dic_kb, total=len(dic_kb)):
            try:
                idx_type = type_dict[get_id(idx)]
            except Exception:
                # Was a bare `except:`; Exception keeps the skip-on-unknown
                # behaviour without swallowing KeyboardInterrupt/SystemExit.
                continue
            for p in dic_kb[idx]:
                types_for_p = relation_types.setdefault(p, set())
                types_for_p.add(idx_type)
                for y in dic_kb[idx][p]:
                    try:
                        y_type = type_dict[get_id(y)]
                    except Exception:
                        continue
                    types_for_p.add(y_type)
    with open('data/BFS/type_predicate_link.pkl', 'wb') as fh:
        pickle.dump(relation_types, fh)
class FixedParam(RandomHyperparameter):
    """A 'random' hyperparameter that always yields one constant value."""

    def __init__(self, name, value):
        """Remember *value*; it is returned by every sampling call."""
        super().__init__(name)
        self._value = value

    def generate_next_value(self):
        """Return the fixed value (never varies between calls)."""
        return self._value
class TrainableSupportsPredictJoint(TrainableProbabilisticModel, SupportsPredictJoint, Protocol):
    """Protocol for trainable probabilistic models that also support joint prediction.

    Purely a structural marker combining the two parent protocols; adds no
    members of its own.
    """
    pass
def bch_bound(n, D, arithmetic=False):
    """Return the BCH bound (or its arithmetic variant) for a length-``n``
    cyclic code with defining set ``D``.

    Args:
        n: code length (positive integer).
        D: iterable of integers in ``[0, n)`` (the defining set).
        arithmetic: if True, consider runs along every arithmetic progression
            with step coprime to ``n`` (Sage ``Integer.coprime_integers``),
            not only consecutive runs.

    Returns:
        ``(bound, (step, offset))`` where ``bound`` is one plus the longest
        run of elements of ``D`` (mod n) with the given step/offset, or
        ``(n + 1, (1, 0))`` when ``D`` covers all of ``Z/nZ``.

    Raises:
        ValueError: if some element of ``D`` is not in ``[0, n)``.
    """
    def longest_streak(step):
        # Longest run j, j+step, j+2*step, ... (mod n) fully inside D,
        # plus the (mod n) offset at which that run starts.
        max_len = 1
        max_offset = 0
        j = 0
        while j < n:
            h = j
            while isD[(h * step) % n]:
                h += 1
            if (h - j) > max_len:
                max_offset = (j * step) % n
                max_len = h - j
            j = h + 1
        return (max_len, max_offset)
    isD = [0] * n
    for d in D:
        # Validate explicitly: a negative d used to be accepted silently via
        # Python's negative indexing, corrupting the wrong slot of isD.
        if not (0 <= d < n):
            raise ValueError('%s must contain integers between 0 and %s' % (D, n - 1))
        isD[d] = 1
    if 0 not in isD:
        # Every residue is in D: the defining set is all of Z/nZ.
        return ((n + 1), (1, 0))
    if not arithmetic:
        (one_len, offset) = longest_streak(1)
        return ((one_len + 1), (1, offset))
    n = Integer(n)
    longest_streak_list = [(longest_streak(step), step) for step in n.coprime_integers((n // 2) + 1) if step >= 1]
    ((max_len, offset), step) = max(longest_streak_list)
    return ((max_len + 1), (step, offset))
def prepare_inception_moments(dataloader, eval_mode, generator, inception_model, splits, run_name, logger, device):
    """Load (or compute and cache) Inception activation moments for a dataset.

    Looks for a cached ``<dataset>_<eval_mode>_inception_moments.npz`` under
    ``./data``; if absent, computes the activation mean/covariance, caches
    them, and additionally reports the inception score of the dataset.

    Args:
        device: rank id; only rank 0 logs.

    Returns:
        ``(mu, sigma)`` activation statistics.
    """
    dataset_name = dataloader.dataset.dataset_name
    inception_model.eval()
    save_path = os.path.abspath(os.path.join('./data', ((((dataset_name + '_') + eval_mode) + '_') + 'inception_moments.npz')))
    if os.path.isfile(save_path):
        # Load the cached npz once (it was previously opened twice).
        moments = np.load(save_path)
        (mu, sigma) = (moments['mu'], moments['sigma'])
    else:
        if (device == 0):
            logger.info('Calculate moments of {} dataset'.format(eval_mode))
        (mu, sigma) = calculate_activation_statistics(data_loader=dataloader, generator=generator, discriminator=None, inception_model=inception_model, n_generate=None, truncated_factor=None, prior=None, is_generate=False, latent_op=False, latent_op_step=None, latent_op_alpha=None, latent_op_beta=None, device=device, tqdm_disable=False, run_name=run_name)
        if (device == 0):
            logger.info('Save calculated means and covariances to disk...')
        np.savez(save_path, **{'mu': mu, 'sigma': sigma})
        # Moments were freshly computed: also evaluate the inception score
        # (runs on every rank, as before; only rank 0 logs).
        if (device == 0):
            logger.info('calculate inception score of {} dataset.'.format(eval_mode))
        evaluator_instance = evaluator(inception_model, device=device)
        (is_score, is_std) = evaluator_instance.eval_dataset(dataloader, splits=splits)
        if (device == 0):
            logger.info('Inception score={is_score}-Inception_std={is_std}'.format(is_score=is_score, is_std=is_std))
    return (mu, sigma)
def assert_and_infer_cfg(cache_urls=True):
    """Derive dependent flags on the module-level config ``__C`` in place.

    - RPN-only or Faster R-CNN models imply the RPN must be enabled.
    - With RPN or RetinaNet on, precomputed proposals are disabled for test.

    Args:
        cache_urls: when True, also rewrite config URLs via ``cache_cfg_urls()``.
    """
    if (__C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN):
        __C.RPN.RPN_ON = True
    if (__C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON):
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if cache_urls:
        cache_cfg_urls()
def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-05):
    """Apply layer normalization over the trailing ``normalized_shape`` dims.

    Thin functional wrapper around ``torch.layer_norm`` that forwards the
    current cuDNN-enabled flag.
    """
    cudnn_enabled = torch.backends.cudnn.enabled
    return torch.layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enabled)
class WarmRestartPlateau(torch.optim.lr_scheduler.ReduceLROnPlateau):
    """ReduceLROnPlateau with periodic warm restarts.

    Every ``T_restart`` epochs the learning rate of every parameter group is
    restored to its initial value and the plateau bookkeeping is reset.
    """

    def __init__(self, T_restart, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.T_restart = T_restart
        # Remember each group's starting lr so restarts can restore it.
        self.base_lrs = [group['lr'] for group in self.optimizer.param_groups]

    def step(self, *args, **kwargs):
        super().step(*args, **kwargs)
        restart_due = (self.last_epoch > 0) and ((self.last_epoch % self.T_restart) == 0)
        if restart_due:
            for (group, initial_lr) in zip(self.optimizer.param_groups, self.base_lrs):
                group['lr'] = initial_lr
            # Clear plateau counters so monitoring starts fresh after a restart.
            self._reset()
class NodeAttrEq(LogicalValue):
    """Predicate: true when a node's named attribute equals a fixed value."""

    def __init__(self, attr: str, value):
        # Attribute name to read on the node, and the value to compare with.
        self.attr = attr
        self.value = value

    def evaluate(self, node: GraphNode, **kwargs):
        """Compare ``node.<attr>`` against the stored value."""
        actual = getattr(node, self.attr)
        return (self.value == actual)
class UnlabeledDataset(Dataset):
    """Torch dataset of BERT-tokenized impressions loaded from a CSV file."""

    def __init__(self, csv_path):
        """Tokenize every impression in *csv_path* with bert-base-uncased."""
        bert = BertTokenizer.from_pretrained('bert-base-uncased')
        raw_impressions = bert_tokenizer.get_impressions_from_csv(csv_path)
        self.encoded_imp = bert_tokenizer.tokenize(raw_impressions, bert)

    def __len__(self):
        return len(self.encoded_imp)

    def __getitem__(self, idx):
        # Accept tensor indices (as handed out by torch samplers).
        if torch.is_tensor(idx):
            idx = idx.tolist()
        imp_tensor = torch.LongTensor(self.encoded_imp[idx])
        return {'imp': imp_tensor, 'len': imp_tensor.shape[0]}
class CBSubSwinTransformer(SwinTransformerOriginal):
    """Swin Transformer variant for composite backbones.

    Supports deleting leading stages (their outputs are then taken from a
    sibling backbone via ``pre_tmps``) and injecting per-stage composite
    features (``cb_feats``) into the remaining stages.
    """

    def _freeze_stages(self):
        # Freeze patch embedding, absolute position embedding and the first
        # (frozen_stages - 1) transformer stages, according to frozen_stages.
        if ((self.frozen_stages >= 0) and hasattr(self, 'patch_embed')):
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False
        if ((self.frozen_stages >= 1) and self.ape):
            self.absolute_pos_embed.requires_grad = False
        if (self.frozen_stages >= 2):
            self.pos_drop.eval()
            for i in range(0, (self.frozen_stages - 1)):
                m = self.layers[i]
                # Stages removed via del_layers() are None — nothing to freeze.
                if (m is None):
                    continue
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def del_layers(self, del_stages):
        # Drop the patch/position embeddings and null out the first
        # (del_stages - 1) stages; forward() then reads them from pre_tmps.
        self.del_stages = del_stages
        if (self.del_stages >= 0):
            del self.patch_embed
        if ((self.del_stages >= 1) and self.ape):
            del self.absolute_pos_embed
        for i in range(0, (self.del_stages - 1)):
            self.layers[i] = None

    def forward(self, x, cb_feats=None, pre_tmps=None):
        """Run the backbone.

        Returns ``(outs, tmps)``: the per-stage feature maps selected by
        ``out_indices`` and the intermediate tuples needed by a following
        composite backbone (same layout as ``pre_tmps``).
        """
        outs = []
        tmps = []
        if hasattr(self, 'patch_embed'):
            x = self.patch_embed(x)
            (Wh, Ww) = (x.size(2), x.size(3))
            if self.ape:
                # Interpolate the learned position embedding to the input size.
                absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
                x = (x + absolute_pos_embed)
            x = x.flatten(2).transpose(1, 2)
            x = self.pos_drop(x)
            tmps.append((x, Wh, Ww))
        else:
            # Patch embedding was deleted: reuse the sibling backbone's result.
            (x, Wh, Ww) = pre_tmps[0]
        for i in range(self.num_layers):
            layer = self.layers[i]
            if (layer is None):
                # Deleted stage: take its cached outputs from pre_tmps.
                (x_out, H, W, x, Wh, Ww) = pre_tmps[(i + 1)]
            else:
                if (cb_feats is not None):
                    # Inject the composite feature for this stage.
                    x = (x + cb_feats[i])
                (x_out, H, W, x, Wh, Ww) = layer(x, Wh, Ww)
            tmps.append((x_out, H, W, x, Wh, Ww))
            if (i in self.out_indices):
                norm_layer = getattr(self, f'norm{i}')
                x_out = norm_layer(x_out)
                # (B, H*W, C) -> (B, C, H, W) feature map.
                out = x_out.view((- 1), H, W, self.num_features[i])
                out = out.permute(0, 3, 1, 2).contiguous()
                outs.append(out)
        return (tuple(outs), tmps)

    def train(self, mode=True):
        # Keep frozen stages in eval mode even after .train() calls.
        super(CBSubSwinTransformer, self).train(mode)
        self._freeze_stages()
# NOTE(review): stray token — looks like a decorator that lost its '@' prefix
# (presumably '@require_vision'); confirm against the original source.
_vision
class FlavaProcessorTest(unittest.TestCase):
    """Tests for FlavaProcessor: tokenizer + image-processor composition."""

    def setUp(self):
        # Write a minimal WordPiece vocab and an image-processor config into a
        # temp dir so processors can be loaded with from_pretrained().
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(''.join([(x + '\n') for x in vocab_tokens]))
        image_processor_map = {'image_mean': FLAVA_IMAGE_MEAN, 'image_std': FLAVA_IMAGE_STD, 'do_normalize': True, 'do_resize': True, 'size': 224, 'do_center_crop': True, 'crop_size': 224, 'input_size_patches': 14, 'total_mask_patches': 75, 'mask_group_max_patches': None, 'mask_group_min_patches': 16, 'mask_group_min_aspect_ratio': 0.3, 'mask_group_max_aspect_ratio': None, 'codebook_do_resize': True, 'codebook_size': 112, 'codebook_do_center_crop': True, 'codebook_crop_size': 112, 'codebook_do_map_pixels': True, 'codebook_do_normalize': True, 'codebook_image_mean': FLAVA_CODEBOOK_MEAN, 'codebook_image_std': FLAVA_CODEBOOK_STD}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        # Slow (pure-Python) tokenizer backed by the temp vocab.
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        # Fast (Rust) tokenizer backed by the same temp vocab.
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return FlavaImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # One random CHW uint8 image, converted to PIL (HWC) format.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        # Round-trip save/load must preserve vocab, processor classes and
        # image-processor configuration, for both slow and fast tokenizers.
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = FlavaProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = FlavaProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = FlavaProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = FlavaProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, FlavaImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, FlavaImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        # Extra kwargs at from_pretrained() time must override saved settings.
        processor = FlavaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = FlavaProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, FlavaImageProcessor)

    def test_image_processor(self):
        # Image-only inputs through the processor must match calling the
        # image processor directly (including masking/codebook outputs,
        # which are randomized — hence the seeding).
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)
        random.seed(1234)
        input_feat_extract = image_processor(image_input, return_image_mask=True, return_codebook_pixels=True, return_tensors='np')
        random.seed(1234)
        input_processor = processor(images=image_input, return_image_mask=True, return_codebook_pixels=True, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)

    def test_tokenizer(self):
        # Text-only inputs must match calling the tokenizer directly.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        # Combined text+image call yields the union of both feature sets;
        # calling with neither raises.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        inputs = processor(text=input_str, images=image_input, return_codebook_pixels=True, return_image_mask=True)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values', 'codebook_pixel_values', 'bool_masked_pos'])
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        # batch_decode must be forwarded verbatim to the tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        # Produced keys must match the processor's declared model_input_names.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
def get_lexicon():
    """Return the module-level lexicon, building it lazily on first use."""
    global lexicon
    if lexicon:
        return lexicon
    lexicon = make_lexicon()
    return lexicon
def generate_many_k_regular_graphs(k, n, N, seed=0):
    """Build a disjoint union of ceil(N/n) k-regular graphs of n nodes each.

    Component ``i`` is generated with seed ``seed + i`` and its node ids are
    shifted by ``i * n`` so components do not overlap. The resulting graph
    gets a ``'attributes'`` entry: log(degree + 1) per node, shape (|V|, 1),
    float32.
    """
    num_graphs = int(ceil((N / n)))
    G = nx.Graph()
    offset = 0
    for s in range(seed, (seed + num_graphs)):
        component = generate_k_regular(k, n, s)
        # Shift this component's node ids into its own index range.
        G.add_edges_from((((src + offset), (dst + offset)) for (src, dst) in component.edges))
        offset += n
    G.graph['attributes'] = np.expand_dims(np.log((get_degrees(G) + 1)), 1).astype(np.float32)
    return G
def remote_copy(remote_machine, local_path, remote_path, port=22):
    """Copy *local_path* to *remote_path* on *remote_machine* over SSH.

    First creates the remote directory with ``ssh mkdir -p``, then copies the
    file with ``scp``. Each command is logged (in red) before execution.
    """
    mkdir_cmd = ('ssh -p %d %s "mkdir -p %s"' % (port, remote_machine, remote_path))
    copy_cmd = ('scp -P %d %s %s:%s' % (port, local_path, remote_machine, remote_path))
    for cmd in (mkdir_cmd, copy_cmd):
        parallax_log.warning(colored(('\n$ %s' % cmd), 'red'))
        os.system(cmd)
def main():
    """Capture screen frames (with timestamps) into training_data.

    Counts down 4 seconds, then grabs the 960x520 region at (0, 40) in a loop,
    appending (frame, timestamp) pairs to the module-level ``training_data``
    and saving to ``file_name`` every 100 samples.
    """
    # Countdown so the user can switch focus to the target window.
    for i in list(range(4))[::(- 1)]:
        print((i + 1))
        time.sleep(1)
    paused = False
    while True:
        if (not paused):
            screen = np.array(ImageGrab.grab(bbox=(0, 40, 960, 560)))
            timing = datetime.datetime.now()
            training_data.append([screen, timing])
            if ((len(training_data) % 100) == 0):
                print(len(training_data))
                np.save(file_name, training_data)
                # NOTE(review): the original indentation was lost; this break
                # (stop after the first save) is the reconstruction that keeps
                # the loop meaningful — confirm against the original source.
                break
class PlyData(object):
    """In-memory representation of a PLY file: format/byte-order flags,
    comments, and a tuple of elements (accessible by name via indexing).

    NOTE(review): the ``@staticmethod``/``@property`` decorators below were
    missing in the extracted source — without ``@property`` on ``header``,
    ``write()`` and ``__str__`` would fail on a bound method. They are
    restored here.
    """

    def __init__(self, elements=None, text=False, byte_order='=',
                 comments=None, obj_info=None):
        # Mutable default arguments ([]) replaced with None sentinels.
        if ((byte_order == '=') and (not text)):
            byte_order = _native_byte_order
        self.byte_order = byte_order
        self.text = text
        self.comments = list(comments) if (comments is not None) else []
        self.obj_info = list(obj_info) if (obj_info is not None) else []
        # Assigned through the `elements` property, which tuples and indexes.
        self.elements = elements if (elements is not None) else []

    def _get_elements(self):
        return self._elements

    def _set_elements(self, elements):
        self._elements = tuple(elements)
        self._index()
    elements = property(_get_elements, _set_elements)

    def _get_byte_order(self):
        return self._byte_order

    def _set_byte_order(self, byte_order):
        if (byte_order not in ['<', '>', '=']):
            raise ValueError("byte order must be '<', '>', or '='")
        self._byte_order = byte_order
    byte_order = property(_get_byte_order, _set_byte_order)

    def _index(self):
        # Name -> element lookup; names must be unique.
        self._element_lookup = dict(((elt.name, elt) for elt in self._elements))
        if (len(self._element_lookup) != len(self._elements)):
            raise ValueError('two elements with same name')

    @staticmethod
    def _parse_header(stream):
        """Parse the ASCII header of *stream* and return a PlyData shell."""
        lines = []
        comments = {'comment': [], 'obj_info': []}
        while True:
            line = stream.readline().decode('ascii').strip()
            fields = _split_line(line, 1)
            if (fields[0] == 'end_header'):
                break
            elif (fields[0] in comments.keys()):
                lines.append(fields)
            else:
                lines.append(line.split())
        a = 0
        if (lines[a] != ['ply']):
            raise PlyParseError("expected 'ply'")
        a += 1
        while (lines[a][0] in comments.keys()):
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        if (lines[a][0] != 'format'):
            raise PlyParseError("expected 'format'")
        # Check the field count before indexing field [2].
        if (len(lines[a]) != 3):
            raise PlyParseError("too many fields after 'format'")
        # NOTE(review): 'fv_noise.0' looks like a mangled '1.0' version tag;
        # kept as-is because write() emits the same string.
        if (lines[a][2] != 'fv_noise.0'):
            raise PlyParseError("expected version 'fv_noise.0'")
        fmt = lines[a][1]
        if (fmt not in _byte_order_map):
            raise PlyParseError(("don't understand format %r" % fmt))
        byte_order = _byte_order_map[fmt]
        text = (fmt == 'ascii')
        a += 1
        while ((a < len(lines)) and (lines[a][0] in comments.keys())):
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        return PlyData(PlyElement._parse_multi(lines[a:]), text, byte_order, comments['comment'], comments['obj_info'])

    @staticmethod
    def read(stream):
        """Read a PLY file (path or open binary stream) into a PlyData."""
        (must_close, stream) = _open_stream(stream, 'read')
        try:
            data = PlyData._parse_header(stream)
            for elt in data:
                elt._read(stream, data.text, data.byte_order)
        finally:
            if must_close:
                stream.close()
        return data

    def write(self, stream):
        """Write this PLY data to *stream* (path or open binary stream)."""
        (must_close, stream) = _open_stream(stream, 'write')
        try:
            stream.write(self.header.encode('ascii'))
            stream.write(b'\r\n')
            for elt in self:
                elt._write(stream, self.text, self.byte_order)
        finally:
            if must_close:
                stream.close()

    @property
    def header(self):
        """The full PLY header as a single '\\r\\n'-joined string."""
        lines = ['ply']
        if self.text:
            lines.append('format ascii fv_noise.0')
        else:
            lines.append((('format ' + _byte_order_reverse[self.byte_order]) + ' fv_noise.0'))
        for c in self.comments:
            lines.append(('comment ' + c))
        for c in self.obj_info:
            lines.append(('obj_info ' + c))
        lines.extend((elt.header for elt in self.elements))
        lines.append('end_header')
        return '\r\n'.join(lines)

    def __iter__(self):
        return iter(self.elements)

    def __len__(self):
        return len(self.elements)

    def __contains__(self, name):
        return (name in self._element_lookup)

    def __getitem__(self, name):
        return self._element_lookup[name]

    def __str__(self):
        return self.header

    def __repr__(self):
        return ('PlyData(%r, text=%r, byte_order=%r, comments=%r, obj_info=%r)' % (self.elements, self.text, self.byte_order, self.comments, self.obj_info))
def _to_op(tensor_or_op):
if hasattr(tensor_or_op, 'op'):
return tensor_or_op.op
return tensor_or_op |
# NOTE(review): stray token — looks like a decorator that lost its '@' prefix
# (and possibly part of its name, e.g. a coerce-to-StringIO wrapper); confirm
# against the original source.
_to_string_io
def load_annotation(fhandle: TextIO) -> annotations.MultiAnnotator:
    """Parse a multi-annotator event CSV into a MultiAnnotator.

    The CSV must have columns: annotator, onset, offset, event_label.
    Each annotator id is formatted as a zero-padded two-digit string and all
    events get confidence 1.0.
    """
    df = pd.read_csv(fhandle)
    annotators = []
    annotations_ = []
    for (id, dfa) in df.groupby('annotator'):
        intervals = dfa[['onset', 'offset']].values
        label = dfa['event_label'].tolist()
        events = annotations.Events(intervals=intervals, intervals_unit='seconds', labels=label, labels_unit='open', confidence=np.ones((len(label),)))
        annotators.append(f'{id:02d}')
        annotations_.append(events)
    return annotations.MultiAnnotator(annotators=annotators, annotations=annotations_)
def register_functions(root_module):
    """Register free functions for each ns3 sub-namespace on *root_module*."""
    module = root_module
    namespace_registrars = (
        ('FatalImpl', register_functions_ns3_FatalImpl),
        ('Hash', register_functions_ns3_Hash),
        ('TracedValueCallback', register_functions_ns3_TracedValueCallback),
        ('internal', register_functions_ns3_internal),
    )
    for (ns_name, registrar) in namespace_registrars:
        registrar(module.add_cpp_namespace(ns_name), root_module)
    return
class SearchJob(GenericJob):
    """A 'searchfragment' job: compares a model value against fragments.

    ``model`` and ``fragments`` are expected to be set by the caller before
    ``run()`` is invoked (they are initialized to None here).
    """

    def __init__(self, problem):
        self.type = 'searchfragment'
        GenericJob.__init__(self, problem)
        self.model = None      # numeric value to compare against fragments
        self.fragments = None  # iterable of numeric fragment values

    def run(self):
        print(('Process [%s]: %s running %s with model %d' % (os.getpid(), self.type, self.problem_name, self.model)), file=sys.stderr)
        # NOTE(review): the loop recomputes the same result 1999 times —
        # presumably deliberate busy-work for benchmarking; the original
        # indentation of the return was lost, so confirm placement.
        for i in range(1, 2000):
            ret = [abs((self.model - fragment)) for fragment in self.fragments]
        return ret
@torch.no_grad()  # restored: the extracted source had a garbled '_grad()' decorator
def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original ChineseCLIP checkpoint to a HF ChineseCLIPModel.

    Args:
        checkpoint_path: path to the original (torch.save'd) checkpoint.
        pytorch_dump_folder_path: output directory for save_pretrained().
        config_path: path to the matching ChineseCLIP config (required).

    Raises:
        ValueError: if *config_path* is not given (was an `assert`, which
            disappears under ``python -O``).
    """
    if config_path is None:
        raise ValueError('Please specify the ChineseCLIP model config of the corresponding model size.')
    config = ChineseCLIPConfig.from_pretrained(config_path)
    hf_model = ChineseCLIPModel(config).eval()
    # NOTE: torch.load unpickles the file — only run on trusted checkpoints.
    pt_weights = torch.load(checkpoint_path, map_location='cpu')['state_dict']
    # Strip a leading 'module.' (DataParallel) prefix from every weight name.
    pt_weights = {(name[7:] if name.startswith('module.') else name): value for (name, value) in pt_weights.items()}
    copy_text_model_and_projection(hf_model, pt_weights)
    copy_vision_model_and_projection(hf_model, pt_weights)
    hf_model.logit_scale.data = pt_weights['logit_scale'].data
    hf_model.save_pretrained(pytorch_dump_folder_path)
class IndexedFreeAbelianMonoidElement(IndexedMonoidElement):
    """Element of an indexed free abelian monoid.

    Internally stored as ``self._monomial``: a dict mapping generator index
    to a positive integer exponent.
    """

    def __init__(self, F, x):
        # Normalize x to a dict of generator -> exponent.
        IndexedMonoidElement.__init__(self, F, dict(x))

    def _sorted_items(self):
        # Items ordered by the parent's configured sorting options; falls
        # back to insertion order if the keys are not comparable.
        print_options = self.parent().print_options()
        v = list(self._monomial.items())
        try:
            v.sort(key=print_options['sorting_key'], reverse=print_options['sorting_reverse'])
        except Exception:
            pass
        return v

    def __hash__(self):
        # Order-independent hash of the exponent dict.
        return hash(frozenset(self._monomial.items()))

    def _mul_(self, other):
        # Product of abelian monomials = sum of exponent dicts.
        return self.__class__(self.parent(), blas.add(self._monomial, other._monomial))

    def __pow__(self, n):
        """Raise to the n-th power by scaling every exponent by n (n >= 0)."""
        if (not isinstance(n, (int, Integer))):
            raise TypeError('Argument n (= {}) must be an integer'.format(n))
        if (n < 0):
            raise ValueError('Argument n (= {}) must be positive'.format(n))
        if (n == 1):
            return self
        if (n == 0):
            return self.parent().one()
        return self.__class__(self.parent(), {k: (v * n) for (k, v) in self._monomial.items()})

    def __floordiv__(self, elt):
        """Exact cancellation: divide by *elt*, raising if not a divisor."""
        d = copy(self._monomial)
        for (k, v) in elt._monomial.items():
            if (k not in d):
                raise ValueError('invalid cancellation')
            diff = (d[k] - v)
            if (diff < 0):
                raise ValueError('invalid cancellation')
            elif (diff == 0):
                # Drop generators whose exponent cancels to zero.
                del d[k]
            else:
                d[k] = diff
        return self.__class__(self.parent(), d)

    def __len__(self):
        # Total degree: sum of all exponents.
        m = self._monomial
        return sum((m[gen] for gen in m))
    length = __len__

    def dict(self):
        # Defensive copy of the exponent dict.
        return copy(self._monomial)
def load_params_LLM(config, model, fold_data):
    """Attach optimizer, LR scheduler, and score manager to ``config``.

    Parameters matching ``no_decay`` patterns (biases, LayerNorm weights) are
    placed in a separate group with ``weight_decay=0.0``; all others use
    ``config.weight_decay``.

    Args:
        config: run configuration; gains ``.optimizer``, ``.scheduler`` and
            ``.score_manager`` attributes.
        model: the model whose named parameters are grouped.
        fold_data: training data; ``len(fold_data)`` is steps per epoch.

    Returns:
        The same ``config`` object, mutated in place.
    """
    # NOTE: the original assigned `no_decay` twice with the same value;
    # the duplicate has been removed.
    no_decay = ['bias', 'LayerNorm.weight']
    named = list(model.named_parameters())
    optimizer_grouped_parameters = [
        {
            'params': [p for (n, p) in named if not any((nd in n) for nd in no_decay)],
            'lr': float(config.bert_lr),
            'weight_decay': float(config.weight_decay),
        },
        {
            'params': [p for (n, p) in named if any((nd in n) for nd in no_decay)],
            'lr': float(config.bert_lr),
            'weight_decay': 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, eps=float(config.adam_epsilon))
    # Total training steps = epochs * batches per epoch; use len() rather
    # than calling __len__() directly.
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=config.warmup_steps,
        num_training_steps=(config.epoch_size * len(fold_data)),
    )
    config.score_manager = ScoreManager()
    config.optimizer = optimizer
    config.scheduler = scheduler
    return config
def p2_2partitions_all_models():
    """Render the 2-partition plot for every configured model, one figure each."""
    model_names = ('wrn_16x4_c100_p2', 'wrn_28x10_c100_dr03_p2')
    for model_name in model_names:
        # Open a fresh figure so successive plots do not draw over each other.
        plt.figure()
        p2_2partitions(model_name)
class TestGIL(object):
    """Check that scipy.interpolate releases the GIL during heavy native work."""

    def setup_method(self):
        self.messages = []

    def log(self, message):
        self.messages.append(message)

    def make_worker_thread(self, target, args):
        log = self.log

        class WorkerThread(threading.Thread):
            def run(self):
                log('interpolation started')
                target(*args)
                log('interpolation complete')
        return WorkerThread()

    # BUGFIX: the decorators were mangled to bare ".slow" / ".xfail(...)"
    # (a SyntaxError); restore the pytest markers.
    @pytest.mark.slow
    @pytest.mark.xfail(reason='race conditions, may depend on system load')
    def test_rectbivariatespline(self):
        def generate_params(n_points):
            x = y = np.linspace(0, 1000, n_points)
            (x_grid, y_grid) = np.meshgrid(x, y)
            z = (x_grid * y_grid)
            return (x, y, z)

        def calibrate_delay(requested_time):
            # Grow the problem until one interpolation takes long enough for
            # the main thread to observably log while the worker runs.
            for n_points in itertools.count(5000, 1000):
                args = generate_params(n_points)
                time_started = time.time()
                interpolate(*args)
                if ((time.time() - time_started) > requested_time):
                    return args

        def interpolate(x, y, z):
            scipy.interpolate.RectBivariateSpline(x, y, z)

        args = calibrate_delay(requested_time=3)
        worker_thread = self.make_worker_thread(interpolate, args)
        worker_thread.start()
        for i in range(3):
            time.sleep(0.5)
            self.log('working')
        worker_thread.join()
        # If the GIL were held for the whole interpolation, the 'working'
        # messages could not interleave between start and completion.
        assert_equal(self.messages, ['interpolation started', 'working', 'working', 'working', 'interpolation complete'])
class ParentFinder():
    """Maps every node of a program tree to its parent, computed once up front."""
    _parent_map: Dict[Node, Node]

    def __init__(self, prog: Node):
        # Single DFS pass: each child records the node that owns it.
        self._parent_map = {
            child: node
            for node in dfs(prog)
            for child in node.children
        }

    def get_parent(self, node: Node) -> Optional[Node]:
        """Return the parent of `node`, or None for the root / unseen nodes."""
        return self._parent_map.get(node, None)

    def get_parent_or_raise(self, node: Node) -> Node:
        """Return the parent of `node`; raises KeyError if it has none."""
        return self._parent_map[node]
class Graph(object):
    """Directed graph with labeled edges.

    Internally a dict mapping source -> {target: set(labels)}; only nodes
    that appear as an edge source are stored as keys.
    """

    def __init__(self):
        self._nodes = {}

    def add_edge(self, s, t, label):
        """Record an edge s -> t carrying `label` (duplicates are merged)."""
        targets = self._nodes.setdefault(s, {})
        labels = targets.get(t)
        if labels is None:
            labels = targets[t] = set()
        labels.add(label)

    def nodes(self):
        """Return the underlying source -> {target: labels} mapping."""
        return self._nodes

    def __iter__(self):
        # Iterating the graph yields its source nodes.
        return iter(self._nodes)
class AbsCnxp(FunCnxp):
    """Constraint expression for `abs` applied to a single constant argument."""
    sig = (Constant,)
    code = 'abs'

    def type_constraints(self, tcs):
        """Constrain this expression to be numeric and type-equal to its argument."""
        operand = self._args[0]
        tcs.number(self)
        tcs.eq_types(self, operand)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.