def test_conc_dock() -> None:
a = Mult(Charclass('A'), ONE)
b = Mult(Charclass('B'), ONE)
x = Mult(Charclass('X'), ONE)
x_twice = Mult(Charclass('X'), Multiplier(Bound(2), Bound(2)))
yplus = Mult(Charclass('y'), PLUS)
z = Mult(Charclass('Z'), ONE)
assert (Conc(a, z).dock(Conc(z)) == Conc(a))
assert (Conc(a, b, x, yplus, z).dock(Conc(x, yplus, z)) == Conc(a, b))
assert (Conc(a, b, x, yplus, z).behead(Conc(a, b, x, yplus)) == Conc(z))
assert (Conc(a).dock(Conc()) == Conc(a))
with pytest.raises(ArithmeticError, match="Can't subtract"):
        Conc(x_twice, yplus, z).behead(Conc(x, yplus))
class LogTestCase(unittest.TestCase):
def setUp(self):
self.out = StringIO()
self.tracker = ClassTracker(stream=self.out)
    @property
    def output(self):
return self.out.getvalue()
def tearDown(self):
self.tracker.stop_periodic_snapshots()
self.tracker.clear()
def test_dump(self):
foo = Foo()
foo.data = range(1000)
bar = Bar()
self.tracker.track_object(foo, resolution_level=4)
self.tracker.track_object(bar)
self.tracker.create_snapshot('Footest')
f1 = StringIO()
f2 = StringIO()
ConsoleStats(tracker=self.tracker, stream=f1).print_stats()
tmp = BytesIO()
Stats(tracker=self.tracker).dump_stats(tmp, close=False)
self.tracker.clear()
stats = ConsoleStats(stream=f2)
self.assertEqual(stats.index, {})
self.assertEqual(stats.snapshots, [])
tmp.seek(0)
stats.load_stats(tmp)
tmp.close()
self.assertTrue(('Foo' in stats.index))
stats.print_stats()
self.assertEqual(f1.getvalue(), f2.getvalue())
stats.stream = f3 = StringIO()
stats.sort_stats()
tolen = len(stats.sorted)
stats.print_stats(clsname='Bar')
self.assertEqual(len(stats.sorted), tolen)
stats.print_summary()
clsname = f3.getvalue().split('\n')[0]
self.assertNotEqual(re.search('Bar', clsname), None, clsname)
f1.close()
f2.close()
f3.close()
def test_sort_stats(self):
self.tracker.track_class(Bar, name='Bar')
foo = Foo()
foo.data = list(range(1000))
bar1 = Bar()
bar2 = Bar()
self.tracker.track_object(foo, resolution_level=4)
self.tracker.create_snapshot()
stats = self.tracker.stats
self.assertEqual(stats.sort_stats('size'), stats)
self.assertEqual(stats.sorted[0].classname, 'Foo')
stats.reverse_order()
self.assertEqual(stats.sorted[0].classname, 'Bar')
stats.sort_stats('classname', 'birth')
self.assertEqual(stats.sorted[0].classname, 'Bar')
self.assertRaises(ValueError, stats.sort_stats, 'name', 42, 'classn')
stats.sort_stats('classname')
def test_dump_load_with_filename(self):
foo = Foo()
self.tracker.track_object(foo, resolution_level=2)
self.tracker.create_snapshot()
(fhandle, fname) = mkstemp(prefix='pympler_test_dump')
os.close(fhandle)
try:
self.tracker.stats.dump_stats(fname)
output = StringIO()
stats = ConsoleStats(filename=fname, stream=output)
stats.print_stats()
self.assertTrue(('<Foo>' in output.getvalue()), output.getvalue())
stats.dump_stats(fname)
finally:
os.unlink(fname)
def test_tracked_classes(self):
self.tracker.track_class(Foo, name='Foo')
self.tracker.track_class(Bar, name='Bar')
foo = Foo()
self.tracker.create_snapshot()
bar = Bar()
self.tracker.create_snapshot()
foo = FooNew()
self.tracker.track_object(foo)
self.tracker.create_snapshot()
stats = self.tracker.stats
self.assertEqual(stats.tracked_classes, ['Bar', 'Foo', 'FooNew'])
stats.print_summary()
def test_print_stats(self):
self.tracker.track_class(Foo, name='Foo', trace=True)
self.tracker.track_class(Bar, name='Bar')
foo = Foo()
bar = Bar()
self.tracker.create_snapshot()
stats = self.tracker.stats
stats.print_stats(clsname='Foo')
self.assertTrue(('Foo' in self.output), self.output)
self.assertFalse(('Bar' in self.output), self.output)
self.assertTrue(('foo = Foo()' in self.output), self.output)
def test_print_stats_limit(self):
self.tracker.track_class(Foo, name='Foo')
foo = [Foo() for _ in range(10)]
self.tracker.create_snapshot()
stats = self.tracker.stats
stats.print_stats(limit=3)
self.assertEqual(self.output.count('<Foo>'), 3)
self.out.seek(0)
self.out.truncate()
stats.print_stats(limit=0.5)
self.assertEqual(self.output.count('<Foo>'), 5)
def test_snapshots(self):
self.tracker.track_class(Foo, name='Foo')
self.tracker.track_class(Bar, name='Bar')
self.tracker.track_class(FooNew, name='FooNew')
self.tracker.create_snapshot()
f1 = Foo()
self.tracker.create_snapshot()
f2 = Foo()
f3 = FooNew()
self.tracker.create_snapshot()
b = Bar()
del b
self.tracker.create_snapshot()
stats = self.tracker.stats
stats.print_stats()
stats.print_summary()
def test_merge(self):
self.tracker.track_class(FooNew, name='Foo', resolution_level=2)
f1 = FooNew()
f1.a = list(range(1000))
f2 = FooNew()
f2.a = list(range(100))
f2.b = 'This is some stupid spam.'
self.tracker.create_snapshot('Merge test')
sizer = Asizer()
sz1 = sizer.asized(f1)
sz2 = sizer.asized(f2)
stats = self.tracker.stats
for fp in stats.snapshots:
if (fp.desc == 'Merge test'):
stats.annotate_snapshot(fp)
self.assertTrue(hasattr(fp, 'classes'))
classes = fp.classes
stats.annotate_snapshot(fp)
self.assertEqual(fp.classes, classes)
self.assertTrue(('Foo' in fp.classes), fp.classes)
self.assertTrue(('merged' in fp.classes['Foo']))
fm = fp.classes['Foo']['merged']
self.assertEqual(fm.size, (sz1.size + sz2.size), (fm.size, str(sz1), str(sz2)))
refs = {}
for ref in fm.refs:
refs[ref.name] = ref
self.assertTrue(('__dict__' in refs.keys()), refs.keys())
refs2 = {}
for ref in refs['__dict__'].refs:
refs2[ref.name] = ref
self.assertTrue(('[V] a' in refs2.keys()), refs2.keys())
self.assertTrue(('[V] b' in refs2.keys()), refs2.keys())
self.assertEqual(refs2['[V] a'].size, asizeof(f1.a, f2.a))
def test_html(self):
self.tracker.track_class(Foo, name='Foo', resolution_level=2)
self.tracker.track_class(Bar, name='Bar', trace=True)
f1 = Foo()
f1.a = list(range(100000))
f2 = Foo()
f2.a = list(range(1000))
f2.b = 'This is some stupid spam.'
f1 = Bar()
self.tracker.create_snapshot('Merge test')
stats = HtmlStats(tracker=self.tracker)
try:
target = mkdtemp(prefix='pympler_test')
output = os.path.join(target, 'footest.html')
stats.create_html(output)
source = open(output).read()
fname = os.path.join('footest_files', 'Foo.html')
self.assertTrue((('<a href="%s">' % fname) in source), (fname, source))
finally:
rmtree(target)
def test_charts(self):
self.tracker.track_class(Foo, name='Foo', resolution_level=2)
f1 = Foo()
f1.a = list(range(1000))
f2 = Foo()
f2.a = list(range(100))
f2.b = 'This is some stupid spam.'
self.tracker.create_snapshot('Merge test')
from pympler import charts
try:
target = mkdtemp(prefix='pympler_test')
output = os.path.join(target, 'timespace.png')
charts.tracker_timespace(output, self.tracker.stats)
finally:
            rmtree(target)
def _maybe_compute_stride_kjt(keys: List[str], stride: Optional[int], lengths: Optional[torch.Tensor], offsets: Optional[torch.Tensor]) -> int:
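    # Infer the batch stride (number of lengths per key) when not given:
    # `offsets` carries one extra boundary entry, hence (numel - 1) // len(keys);
    # `lengths` has exactly one entry per (key, batch) pair, hence numel // len(keys).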
if (stride is None):
if (len(keys) == 0):
stride = 0
elif ((offsets is not None) and (offsets.numel() > 0)):
stride = ((offsets.numel() - 1) // len(keys))
elif (lengths is not None):
stride = (lengths.numel() // len(keys))
else:
stride = 0
    return stride
def make_xml(filename, path, box_list, labels, w, h, d):
doc = xml.dom.minidom.Document()
root = doc.createElement('annotation')
doc.appendChild(root)
foldername = doc.createElement('folder')
foldername.appendChild(doc.createTextNode('JPEGImages'))
root.appendChild(foldername)
nodeFilename = doc.createElement('filename')
nodeFilename.appendChild(doc.createTextNode(filename))
root.appendChild(nodeFilename)
pathname = doc.createElement('path')
pathname.appendChild(doc.createTextNode('xxxx'))
root.appendChild(pathname)
sourcename = doc.createElement('source')
databasename = doc.createElement('database')
databasename.appendChild(doc.createTextNode('Unknown'))
sourcename.appendChild(databasename)
annotationname = doc.createElement('annotation')
annotationname.appendChild(doc.createTextNode('xxx'))
sourcename.appendChild(annotationname)
imagename = doc.createElement('image')
imagename.appendChild(doc.createTextNode('xxx'))
sourcename.appendChild(imagename)
flickridname = doc.createElement('flickrid')
flickridname.appendChild(doc.createTextNode('0'))
sourcename.appendChild(flickridname)
root.appendChild(sourcename)
nodesize = doc.createElement('size')
nodewidth = doc.createElement('width')
nodewidth.appendChild(doc.createTextNode(str(w)))
nodesize.appendChild(nodewidth)
nodeheight = doc.createElement('height')
nodeheight.appendChild(doc.createTextNode(str(h)))
nodesize.appendChild(nodeheight)
nodedepth = doc.createElement('depth')
nodedepth.appendChild(doc.createTextNode(str(d)))
nodesize.appendChild(nodedepth)
root.appendChild(nodesize)
segname = doc.createElement('segmented')
segname.appendChild(doc.createTextNode('0'))
root.appendChild(segname)
for (box, label) in zip(box_list, labels):
nodeobject = doc.createElement('object')
nodename = doc.createElement('name')
nodename.appendChild(doc.createTextNode(label))
nodeobject.appendChild(nodename)
nodebndbox = doc.createElement('bndbox')
for ii in range((box_list.shape[1] // 2)):
nodex = doc.createElement('x{}'.format((ii + 1)))
nodex.appendChild(doc.createTextNode(str(box[(2 * ii)])))
nodebndbox.appendChild(nodex)
nodey = doc.createElement('y{}'.format((ii + 1)))
nodey.appendChild(doc.createTextNode(str(box[((2 * ii) + 1)])))
nodebndbox.appendChild(nodey)
nodeobject.appendChild(nodebndbox)
root.appendChild(nodeobject)
fp = open(os.path.join(path, filename), 'w')
doc.writexml(fp, indent='\n')
    fp.close()
def collect_stats(model, data_loader, num_batches):
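    # Pass 1: put every TensorQuantizer into calibration mode (statistics are
    # collected, fake-quantization is disabled), run `num_batches` batches,
    # then pass 2 below restores quantization and turns calibration off.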
for (name, module) in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if (module._calibrator is not None):
module.disable_quant()
module.enable_calib()
else:
module.disable()
for (i, (image, _, _, _)) in tqdm(enumerate(data_loader), total=num_batches):
image = (image.float() / 255.0)
model(image.cuda())
if (i >= num_batches):
break
for (name, module) in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if (module._calibrator is not None):
module.enable_quant()
module.disable_calib()
else:
                module.enable()
def validate_report(arg):
file_choices = ['annotate', 'html', 'xml', 'json', 'lcov']
term_choices = ['term', 'term-missing']
term_modifier_choices = ['skip-covered']
all_choices = (term_choices + file_choices)
values = arg.split(':', 1)
report_type = values[0]
if (report_type not in (all_choices + [''])):
msg = f'invalid choice: "{arg}" (choose from "{all_choices}")'
raise argparse.ArgumentTypeError(msg)
if ((report_type == 'lcov') and (coverage.version_info <= (6, 3))):
raise argparse.ArgumentTypeError('LCOV output is only supported with coverage.py >= 6.3')
if (len(values) == 1):
return (report_type, None)
report_modifier = values[1]
if ((report_type in term_choices) and (report_modifier in term_modifier_choices)):
return (report_type, report_modifier)
if (report_type not in file_choices):
msg = 'output specifier not supported for: "{}" (choose from "{}")'.format(arg, file_choices)
raise argparse.ArgumentTypeError(msg)
    return values
class Base64BinaryField(TextField):
def db_value(self, value):
if (value is None):
return None
return base64.b64encode(value).decode('ascii')
def python_value(self, value):
if (value is None):
return None
        return base64.b64decode(value.encode('ascii'))
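# A minimal usage sketch for the field above (hypothetical `Secret` model,
# assuming peewee): bytes are stored as base64 text and decoded back on read.
#   class Secret(Model):
#       payload = Base64BinaryField()
#   Secret.create(payload=b'\x00\x01\xff')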
class Cleanup(SquirrelCommand):
def make_subparser(self, subparsers):
headline = 'Remove leftover volatile data entries.'
return subparsers.add_parser('cleanup', help=headline, description=headline)
def run(self, parser, args):
s = sq.Squirrel()
db = s.get_database()
n_removed = db._remove_volatile()
        logger.info(('Number of entries removed: %i' % n_removed))
class Effect6062(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Light Drone Operation')), 'shieldCapacity', ship.getModifiedItemAttr('shipBonusGC2'), skill='Gallente Cruiser', **kwargs)
class FakeFile():
def __init__(self, name, mode):
self.mode = mode
self.name = name
self.data = BytesIO()
self.size = 0
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def read(self, length=(- 1)):
if (length == (- 1)):
if ('b' in self.mode):
return self.data.read()
return self.data.read().decode('utf-8')
if ('b' in self.mode):
return self.data.read(length)
return self.data.read(length).decode('utf-8')
def write(self, data):
if ('b' not in self.mode):
data = data.encode()
self.data.write(data)
def close(self):
self.size = self.data.tell()
self.data.seek(0)
def contents(self):
cont = self.data.read()
self.data.seek(0)
return cont
def json(self):
d = self.contents().decode()
        return json.loads(d)
class sepCEMA():
def __init__(self, num_params, mu_init=None, sigma_init=0.001, pop_size=256, parents=None, elitism=False, antithetic=False):
self.num_params = num_params
if (mu_init is None):
self.mu = np.zeros(self.num_params)
else:
self.mu = np.array(mu_init)
self.sigma = sigma_init
self.cov = (self.sigma * np.ones(self.num_params))
self.elitism = elitism
self.elite = (np.sqrt(self.sigma) * np.random.rand(self.num_params))
self.elite_score = (- np.inf)
self.pop_size = pop_size
self.antithetic = antithetic
if self.antithetic:
assert ((self.pop_size % 2) == 0), 'Population size must be even'
if ((parents is None) or (parents <= 0)):
self.parents = (pop_size // 2)
else:
self.parents = parents
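        # Log-rank recombination weights: the parent ranked i gets weight
        # log((parents + 1) / i), normalized to sum to 1, so better-ranked
        # solutions dominate the mean and covariance updates in tell().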
self.weights = np.array([np.log(((self.parents + 1) / i)) for i in range(1, (self.parents + 1))])
self.weights /= self.weights.sum()
def ask(self, pop_size):
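        # Antithetic sampling draws half the perturbations and mirrors them,
        # which centers the empirical noise mean at exactly zero.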
if (self.antithetic and (not (pop_size % 2))):
epsilon_half = np.random.randn((pop_size // 2), self.num_params)
epsilon = np.concatenate([epsilon_half, (- epsilon_half)])
else:
epsilon = np.random.randn(pop_size, self.num_params)
inds = (self.mu + (epsilon * np.sqrt(self.cov)))
if self.elitism:
inds[(- 1)] = self.elite
return inds
def tell(self, solutions, scores):
scores = np.array(scores)
scores *= (- 1)
idx_sorted = np.argsort(scores)
old_mu = self.mu
        self.mu = (self.weights @ solutions[idx_sorted[:self.parents]])
if (scores[idx_sorted[0]] > (0.95 * self.elite_score)):
self.sigma *= 0.95
else:
self.sigma *= 1.05
self.elite = solutions[idx_sorted[0]]
self.elite_score = scores[idx_sorted[0]]
z = (solutions[idx_sorted[:self.parents]] - old_mu)
        self.cov = (self.weights @ (z * z))
self.cov = ((self.sigma * self.cov) / np.linalg.norm(self.cov))
print(self.cov)
print(self.sigma)
def get_distrib_params(self):
        return (np.copy(self.mu), np.copy(self.cov))
def validate_config_section(config, section):
notifications = (config.get('notifications') or {})
if (section == 'email'):
email_config = (notifications.get('email') or {})
validate(email_config, EMAIL_CONFIG_SCHEMA)
elif (section == 'slack'):
slack_config = (notifications.get('slack') or {})
        validate(slack_config, SLACK_CONFIG_SCHEMA)
def load_NarrativeQA(cache_dir):
f = pd.read_csv('datasets/NarrativeQA_LLMs.csv')
q = f['Question'].tolist()
a_human = f['answers'].tolist()
a_human = [_.split(';')[0] for _ in a_human]
mgt_text_list = []
for detectLLM in ['ChatGPT', 'ChatGLM', 'Dolly', 'ChatGPT-turbo', 'GPT4', 'StableLM']:
mgt_text_list.append(f[f'{detectLLM}_answer'].fillna('').tolist())
res = []
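    # Keep only rows where the human answer and all six model answers are
    # between 2 and 149 words; each kept row is [question, human, 6 model answers].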
for i in range(len(q)):
if ((len(a_human[i].split()) <= 1) or (len(a_human[i].split()) >= 150)):
continue
flag = 1
for mgt_text in mgt_text_list:
if ((len(mgt_text[i].split()) <= 1) or (len(mgt_text[i].split()) >= 150)):
flag = 0
break
if flag:
res.append([q[i], a_human[i], mgt_text_list[0][i], mgt_text_list[1][i], mgt_text_list[2][i], mgt_text_list[3][i], mgt_text_list[4][i], mgt_text_list[5][i]])
data_new = {'train': {'text': [], 'label': []}, 'test': {'text': [], 'label': []}}
index_list = list(range(len(res)))
random.seed(0)
random.shuffle(index_list)
total_num = len(res)
for i in tqdm.tqdm(range(total_num), desc='parsing data'):
if (i < (total_num * 0.8)):
data_partition = 'train'
else:
data_partition = 'test'
for j in range(1, 8):
data_new[data_partition]['text'].append(process_spaces(res[index_list[i]][j]))
data_new[data_partition]['label'].append((j - 1))
    return data_new
class GdbContinue(sublime_plugin.WindowCommand):
def run(self):
global gdb_cursor_position
gdb_cursor_position = 0
update_view_markers()
resume()
def is_enabled(self):
return (is_running() and (gdb_run_status != 'running'))
def is_visible(self):
        return is_running()
def test_special_generics():
assert_normalize(tuple, tuple, [nt_zero(Any), ...])
assert_normalize(Tuple, tuple, [nt_zero(Any), ...])
if HAS_STD_CLASSES_GENERICS:
assert_normalize(tuple[int], tuple, [nt_zero(int)])
assert_normalize(Tuple[int], tuple, [nt_zero(int)])
if HAS_STD_CLASSES_GENERICS:
assert_normalize(tuple[(int, ...)], tuple, [nt_zero(int), ...])
assert_normalize(Tuple[(int, ...)], tuple, [nt_zero(int), ...])
if HAS_STD_CLASSES_GENERICS:
assert_normalize(tuple[()], tuple, [])
assert_normalize(Tuple[()], tuple, [])
any_str_placeholder = make_norm_type(Union, (nt_zero(bytes), nt_zero(str)), source=Union[(bytes, str)])
assert_normalize(Pattern, re.Pattern, [any_str_placeholder])
assert_normalize(Match, re.Match, [any_str_placeholder])
assert_normalize(Pattern[bytes], re.Pattern, [nt_zero(bytes)])
    assert_normalize(Match[bytes], re.Match, [nt_zero(bytes)])
class DudenWord():
wordcloud_parts_of_speech = ['substantive', 'verben', 'adjektive']
def __init__(self, soup):
self.soup = soup
def __repr__(self):
return '{} ({})'.format(self.title, self.part_of_speech)
    @property
    def title(self):
return self.soup.h1.get_text().replace('\xad', '').strip()
    @property
    def name(self):
title_element = self.soup.find('span', {'class': 'lemma__main'})
if (title_element is not None):
return clear_text(title_element.get_text())
if ((self.part_of_speech is not None) and ('Substantiv' not in self.part_of_speech)):
return self.title
if (', ' not in self.title):
return self.title
(name, _) = self.title.split(', ', 1)
return name
    @property
    def urlname(self):
return self.soup.head.find('link', rel='canonical').attrs['href'].split('/')[(- 1)]
    @property
    def revision_url(self):
return self.soup.find('input', id='cite-field').attrs['value']
    @property
    def node_no(self):
return self.revision_url.split('/')[(- 3)]
    @property
    def revision_no(self):
return self.revision_url.split('/')[(- 1)]
    @property
    def article(self):
article_element = self.soup.find('span', {'class': 'lemma__determiner'})
if (article_element is not None):
return clear_text(article_element.get_text())
if ((self.part_of_speech is not None) and ('Substantiv' not in self.part_of_speech)):
return None
if (', ' not in self.title):
return None
(_, article) = self.title.split(', ', 1)
return article
def _find_tuple_dl(self, key, element=None):
if (element is None):
element = self.soup.article
dls = element.find_all('dl', class_='tuple', recursive=False)
for dl_node in dls:
label = dl_node.find('dt', class_='tuple__key')
if (key in label.text):
return dl_node.find('dd', class_='tuple__val')
return None
    @property
    def part_of_speech(self):
try:
pos_element = self._find_tuple_dl('Wortart')
return pos_element.text
except AttributeError:
return None
    @property
    def frequency(self):
try:
freq_bar = self.soup.find('span', class_='shaft__full')
return len(freq_bar.text)
except AttributeError:
return None
    @property
    def usage(self):
try:
element = self._find_tuple_dl('Gebrauch')
return element.text
except AttributeError:
return None
    @property
    def word_separation(self):
containing_div = self.soup.find('div', id='rechtschreibung')
sep_element = self._find_tuple_dl('Worttrennung', containing_div)
if (not sep_element):
return None
return sep_element.text.split('|')
    @property
    def pronunciation_audio_url(self):
audio_link = self.soup.select('a.pronunciation-guide__sound')
if (not audio_link):
return None
audio_link_href = str(audio_link[0].get('href'))
return audio_link_href
    @property
    def meaning_overview(self):
section = (self.soup.find('div', id='bedeutung') or self.soup.find('div', id='bedeutungen'))
if (section is None):
return None
section = copy.copy(section)
section.header.extract()
        for dl_node in section.find_all('dl', class_='note'):
            dl_node.extract()
for dl_node in section.find_all('dl', class_='tuple'):
if (dl_node.dt.text in ['Grammatik', 'Gebrauch']):
dl_node.extract()
for node in section.find_all('figure'):
node.extract()
return recursively_extract(section, maxdepth=2, exfun=(lambda x: x.text.strip()))
    @property
    def synonyms(self):
try:
section = self.soup.find('div', id='synonyme')
section = copy.copy(section)
if section.header:
section.header.extract()
return recursively_extract(section, maxdepth=2, exfun=(lambda x: x.text.strip()))
except AttributeError:
return None
    @property
    def origin(self):
section = self.soup.find('div', id='herkunft')
if (section is None):
return None
section = copy.copy(section)
if section.header:
section.header.extract()
return section.text.strip()
    @property
    def grammar_overview(self):
section = self.soup.find('div', id='grammatik')
if (section is None):
return None
section = copy.copy(section)
if section.header:
section.header.extract()
if section.nav:
section.nav.extract()
return (section.text.strip() or None)
    @property
    def compounds(self):
section = self.soup.find('div', id='kontext')
if (not section):
return None
pos_trans = {'noun': 'substantive', 'verb': 'verben', 'adj': 'adjektive'}
compounds = {}
cluster_element = section.find('figure', class_='tag-cluster__cluster')
for a_node in cluster_element.find_all('a'):
compound_word = a_node.text
compound_type = pos_trans[a_node.attrs['data-group']]
if (compound_type not in compounds):
compounds[compound_type] = []
compounds[compound_type].append(compound_word)
compounds_sorted = {}
for pos in sorted(compounds.keys()):
compounds_sorted[pos] = sorted(compounds[pos])
return compounds_sorted
def grammar(self, *target_tags):
tagged_strings = self.grammar_raw
target_tags = set(target_tags)
return [string for (tags, string) in tagged_strings if target_tags.issubset(tags)]
    @property
    def grammar_raw(self):
section = self.soup.find('div', id='grammatik')
if (not section):
return []
table_nodes = (self.soup.find_all('div', class_='wrap-table') + self.soup.find_all('table', class_='mere-table'))
tagged_strings = []
for table_node in table_nodes:
tagged_strings.extend(table_node_to_tagged_cells(table_node))
return tagged_strings
def export(self):
worddict = {}
for attribute in EXPORT_ATTRIBUTES:
worddict[attribute] = getattr(self, attribute, None)
if (worddict['grammar_raw'] is not None):
listed_grammar = []
for (keylist, form) in worddict['grammar_raw']:
listed_grammar.append([sorted(keylist), form])
worddict['grammar_raw'] = listed_grammar
return worddict
    @property
    def before_after_structure(self):
result = {}
section = self.soup.find('div', id='block-beforeafterblock-2')
for group in section.find_all('nav', class_='hookup__group'):
h3title = group.h3.text
result[h3title] = []
for item in group.find_all('li'):
link = item.a.attrs['href'].split('/')[(- 1)]
result[h3title].append((clear_text(item.text), link))
return result
    @property
    def words_before(self):
return [name for (name, _) in self.before_after_structure['Im Alphabet davor']]
    @property
    def words_after(self):
return [name for (name, _) in self.before_after_structure['Im Alphabet danach']]
    @property
    def phonetic(self):
ipa = self.soup.find('span', {'class': 'ipa'})
if (ipa is not None):
return ipa.get_text()
return None
    @property
    def alternative_spellings(self):
alternative_spellings = self.soup.find_all('span', {'class': 'lemma__alt-spelling'})
if (alternative_spellings is None):
return None
        return [spelling.get_text() for spelling in alternative_spellings]
class RRDBNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', upsample_mode='upconv'):
super(RRDBNet, self).__init__()
n_upscale = int(math.log(upscale, 2))
if (upscale == 3):
n_upscale = 1
fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
rb_blocks = [B.RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', norm_type=norm_type, act_type=act_type, mode='CNA') for _ in range(nb)]
LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
if (upsample_mode == 'upconv'):
upsample_block = B.upconv_blcok
elif (upsample_mode == 'pixelshuffle'):
upsample_block = B.pixelshuffle_block
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
if (upscale == 3):
upsampler = upsample_block(nf, nf, 3, act_type=act_type)
else:
upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)), *upsampler, HR_conv0, HR_conv1)
def forward(self, x, hr_fea=False, lr_fea=False):
        return self.model(x)
def evaluate_command(cmd, solver):
if (cmd.name == smtcmd.SET_INFO):
return solver.set_info(cmd.args[0], cmd.args[1])
if (cmd.name == smtcmd.SET_OPTION):
opt = cmd.args[0]
if (opt[0] == ':'):
opt = opt[1:]
return solver.set_option(opt, cmd.args[1])
elif (cmd.name == smtcmd.ASSERT):
return solver.assert_(cmd.args[0])
elif (cmd.name == smtcmd.CHECK_SAT):
return solver.check_sat()
elif (cmd.name == smtcmd.RESET_ASSERTIONS):
return solver.reset_assertions()
elif (cmd.name == smtcmd.GET_VALUE):
return solver.get_values(cmd.args)
elif (cmd.name == smtcmd.PUSH):
return solver.push(cmd.args[0])
elif (cmd.name == smtcmd.POP):
return solver.pop(cmd.args[0])
elif (cmd.name == smtcmd.EXIT):
return solver.exit()
elif (cmd.name == smtcmd.SET_LOGIC):
name = cmd.args[0]
return solver.set_logic(name)
elif (cmd.name == smtcmd.DECLARE_FUN):
return solver.declare_fun(cmd.args[0])
elif (cmd.name == smtcmd.DECLARE_CONST):
return solver.declare_const(cmd.args[0])
elif (cmd.name == smtcmd.DEFINE_FUN):
(var, formals, typename, body) = cmd.args
return solver.define_fun(var, formals, typename, body)
elif (cmd.name == smtcmd.ECHO):
print(cmd.args[0])
return None
elif (cmd.name == smtcmd.CHECK_SAT_ASSUMING):
return solver.check_sat(cmd.args)
elif (cmd.name == smtcmd.GET_UNSAT_CORE):
return solver.get_unsat_core()
elif (cmd.name == smtcmd.GET_MODEL):
return solver.get_model()
elif (cmd.name == smtcmd.DECLARE_SORT):
name = cmd.args[0].name
arity = cmd.args[0].arity
return solver.declare_sort(name, arity)
elif (cmd.name in smtcmd.ALL_COMMANDS):
raise NotImplementedError(("'%s' is a valid SMT-LIB command but it is currently not supported. Please open a bug-report." % cmd.name))
else:
        raise UnknownSmtLibCommandError(cmd.name)
class PoolFromConfigTests(unittest.TestCase):
def test_empty_config(self):
with self.assertRaises(ConfigurationError):
pool_from_config({})
def test_basic_url(self):
pool = pool_from_config({'memcache.endpoint': 'localhost:1234'})
self.assertEqual(pool.server[0], 'localhost')
self.assertEqual(pool.server[1], 1234)
def test_timeouts(self):
pool = pool_from_config({'memcache.endpoint': 'localhost:1234', 'memcache.timeout': '1.23', 'memcache.connect_timeout': '4.56'})
self.assertEqual(pool.timeout, 1.23)
self.assertEqual(pool.connect_timeout, 4.56)
def test_max_connections(self):
pool = pool_from_config({'memcache.endpoint': 'localhost:1234', 'memcache.max_pool_size': '300'})
self.assertEqual(pool.client_pool.max_size, 300)
def test_alternate_prefix(self):
pool_from_config({'noodle.endpoint': 'localhost:1234'}, prefix='noodle.')
def test_nodelay(self):
pool = pool_from_config({'memcache.endpoint': 'localhost:1234', 'memcache.no_delay': 'False'})
        self.assertEqual(pool.no_delay, False)
def test_multitask_gather():
ann_info = dict(image_size=np.array([256, 256]), heatmap_size=np.array([64, 64]), num_joints=17, joint_weights=np.ones((17, 1), dtype=np.float32), use_different_joint_weights=False)
results = dict(joints_3d=np.zeros([17, 3]), joints_3d_visible=np.ones([17, 3]), ann_info=ann_info)
pipeline_list = [[dict(type='TopDownGenerateTarget', sigma=2)], [dict(type='TopDownGenerateTargetRegression')]]
pipeline = dict(type='MultitaskGatherTarget', pipeline_list=pipeline_list, pipeline_indices=[0, 1, 0])
pipeline = build_from_cfg(pipeline, PIPELINES)
results = pipeline(results)
target = results['target']
target_weight = results['target_weight']
assert isinstance(target, list)
assert isinstance(target_weight, list)
assert (target[0].shape == (17, 64, 64))
assert (target_weight[0].shape == (17, 1))
assert (target[1].shape == (17, 2))
assert (target_weight[1].shape == (17, 2))
assert (target[2].shape == (17, 64, 64))
    assert (target_weight[2].shape == (17, 1))
@register_dataframe_method
@deprecated_alias(column='column_name')
def convert_unix_date(df: pd.DataFrame, column_name: Hashable) -> pd.DataFrame:
try:
df[column_name] = pd.to_datetime(df[column_name], unit='s')
except OutOfBoundsDatetime:
df[column_name] = pd.to_datetime(df[column_name], unit='ms')
    return df
class StringMixin(object):
__schema_type__ = 'string'
def __init__(self, *args, **kwargs):
self.min_length = kwargs.pop('min_length', None)
self.max_length = kwargs.pop('max_length', None)
self.pattern = kwargs.pop('pattern', None)
super(StringMixin, self).__init__(*args, **kwargs)
def schema(self):
schema = super(StringMixin, self).schema()
schema.update(minLength=self._v('min_length'), maxLength=self._v('max_length'), pattern=self._v('pattern'))
        return schema
def build_norm_layer(cfg, num_features, postfix=''):
if (not isinstance(cfg, dict)):
raise TypeError('cfg must be a dict')
if ('type' not in cfg):
raise KeyError('the cfg dict must contain the key "type"')
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if (layer_type not in NORM_LAYERS):
raise KeyError(f'Unrecognized norm type {layer_type}')
norm_layer = NORM_LAYERS.get(layer_type)
abbr = infer_abbr(norm_layer)
assert isinstance(postfix, (int, str))
name = (abbr + str(postfix))
requires_grad = cfg_.pop('requires_grad', True)
cfg_.setdefault('eps', 1e-05)
if (layer_type != 'GN'):
layer = norm_layer(num_features, **cfg_)
if (layer_type == 'SyncBN'):
layer._specify_ddp_gpu_num(1)
else:
assert ('num_groups' in cfg_)
layer = norm_layer(num_channels=num_features, **cfg_)
for param in layer.parameters():
param.requires_grad = requires_grad
    return (name, layer)
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
def resolver(self):
tmp_dir = tempfile.mkdtemp()
return TatoebaConverter(save_dir=tmp_dir)
def test_resolver(self):
self.resolver.convert_models(['heb-eng'])
def test_model_card(self):
(content, mmeta) = self.resolver.write_model_card('opus-mt-he-en', dry_run=True)
        assert (mmeta['long_pair'] == 'heb-eng')
class FontConfig():
def __init__(self):
self._fontconfig = self._load_fontconfig_library()
self._search_cache = OrderedDict()
self._cache_size = 20
def dispose(self):
while (len(self._search_cache) > 0):
            self._search_cache.popitem()[1].dispose()
self._fontconfig.FcFini()
self._fontconfig = None
def create_search_pattern(self):
return FontConfigSearchPattern(self._fontconfig)
def find_font(self, name, size=12, bold=False, italic=False):
result = self._get_from_search_cache(name, size, bold, italic)
if result:
return result
search_pattern = self.create_search_pattern()
search_pattern.name = name
search_pattern.size = size
search_pattern.bold = bold
search_pattern.italic = italic
result = search_pattern.match()
self._add_to_search_cache(search_pattern, result)
search_pattern.dispose()
return result
def have_font(self, name):
result = self.find_font(name)
if result:
if (name and result.name and (result.name.lower() != name.lower())):
return False
return True
else:
return False
def char_index(self, ft_face, character):
return self._fontconfig.FcFreeTypeCharIndex(ft_face, ord(character))
def _add_to_search_cache(self, search_pattern, result_pattern):
self._search_cache[(search_pattern.name, search_pattern.size, search_pattern.bold, search_pattern.italic)] = result_pattern
if (len(self._search_cache) > self._cache_size):
self._search_cache.popitem(last=False)[1].dispose()
def _get_from_search_cache(self, name, size, bold, italic):
result = self._search_cache.get((name, size, bold, italic), None)
if (result and result.is_valid):
return result
else:
return None
    @staticmethod
    def _load_fontconfig_library():
fontconfig = pyglet.lib.load_library('fontconfig')
fontconfig.FcInit()
fontconfig.FcPatternBuild.restype = c_void_p
fontconfig.FcPatternCreate.restype = c_void_p
fontconfig.FcFontMatch.restype = c_void_p
fontconfig.FcFreeTypeCharIndex.restype = c_uint
fontconfig.FcPatternAddDouble.argtypes = [c_void_p, c_char_p, c_double]
fontconfig.FcPatternAddInteger.argtypes = [c_void_p, c_char_p, c_int]
fontconfig.FcPatternAddString.argtypes = [c_void_p, c_char_p, c_char_p]
fontconfig.FcConfigSubstitute.argtypes = [c_void_p, c_void_p, c_int]
fontconfig.FcDefaultSubstitute.argtypes = [c_void_p]
fontconfig.FcFontMatch.argtypes = [c_void_p, c_void_p, c_void_p]
fontconfig.FcPatternDestroy.argtypes = [c_void_p]
fontconfig.FcPatternGetFTFace.argtypes = [c_void_p, c_char_p, c_int, c_void_p]
fontconfig.FcPatternGet.argtypes = [c_void_p, c_char_p, c_int, c_void_p]
        return fontconfig
class LayerOutput():
def __init__(self, session: tf.compat.v1.Session, starting_op_names: List[str], output_op_names: List[str], dir_path: str):
self.session = session
(self.activation_tensor_names, self.activation_tensors) = LayerOutput.get_activation_tensor_info(session, starting_op_names, output_op_names)
save_layer_output_names(self.activation_tensor_names, dir_path)
def get_outputs(self, feed_dict: Dict) -> Dict[(str, np.ndarray)]:
act_outputs = self.session.run(self.activation_tensors, feed_dict=feed_dict)
return dict(zip(self.activation_tensor_names, act_outputs))
    @staticmethod
    def get_activation_tensor_info(session: tf.compat.v1.Session, starting_op_names: List[str], output_op_names: List[str]) -> Tuple[(List, List)]:
connected_graph = ConnectedGraph(session.graph, starting_op_names, output_op_names)
activation_op_names = QuantizationSimModel._get_ops_to_quantize_activations_for(session.graph, connected_graph)
activation_quant_op_names = [op_name for op_name in activation_op_names if op_name.endswith('_quantized')]
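        # Prefer the quantized counterparts of the activation ops when the
        # graph contains them; otherwise fall back to the original op names.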
if activation_quant_op_names:
activation_op_names = activation_quant_op_names
activation_tensor_names = []
activation_tensors = []
for activation_op_name in activation_op_names:
activation_op = session.graph.get_operation_by_name(activation_op_name)
for output in activation_op.outputs:
activation_tensor_names.append(output.name)
activation_tensors.append(output)
activation_tensor_names = [re.sub('\\W+', '_', name.replace('quantized:', '')) for name in activation_tensor_names]
        return (activation_tensor_names, activation_tensors)
def benchmark(mapping, start_pt, run_num):
topo = TopoGraphGen(mapping, max_raycast_dist=1.5)
topo.test_detect_collisions(start_pt)
topo.node_expansion(start_pt, False)
s = time.time()
topo.node_expansion_benchmark(start_pt, False, run_num=run_num)
dt = (time.time() - s)
    print(f'avg node expansion time: {((dt * 1000) / run_num):.2f}ms')
def KUGW(mf, freq_int='ac', frozen=None):
if (freq_int.lower() == 'ac'):
return kugw_ac.KUGWAC(mf, frozen)
elif (freq_int.lower() == 'cd'):
raise RuntimeError('GWCD does not support UHF or UKS methods.')
else:
raise RuntimeError(("GW frequency integration method %s not recognized. With PBC, options are 'ac' and 'cd'." % freq_int)) |
class VPG(BatchPolopt, Serializable):
def __init__(self, env, policy, baseline, optimizer=None, optimizer_args=None, **kwargs):
Serializable.quick_init(self, locals())
if (optimizer is None):
default_args = dict(batch_size=None, max_epochs=1)
if (optimizer_args is None):
optimizer_args = default_args
else:
optimizer_args = dict(default_args, **optimizer_args)
optimizer = FirstOrderOptimizer(**optimizer_args)
self.optimizer = optimizer
self.opt_info = None
super(VPG, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)
def init_opt(self):
is_recurrent = int(self.policy.recurrent)
obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=(1 + is_recurrent))
action_var = self.env.action_space.new_tensor_variable('action', extra_dims=(1 + is_recurrent))
advantage_var = tensor_utils.new_tensor(name='advantage', ndim=(1 + is_recurrent), dtype=tf.float32)
dist = self.policy.distribution
old_dist_info_vars = {k: tf.placeholder(tf.float32, shape=(([None] * (1 + is_recurrent)) + list(shape)), name=('old_%s' % k)) for (k, shape) in dist.dist_info_specs}
old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
state_info_vars = {k: tf.placeholder(tf.float32, shape=(([None] * (1 + is_recurrent)) + list(shape)), name=k) for (k, shape) in self.policy.state_info_specs}
state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
if is_recurrent:
valid_var = tf.placeholder(tf.float32, shape=[None, None], name='valid')
else:
valid_var = None
dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
logli = dist.log_likelihood_sym(action_var, dist_info_vars)
kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
if is_recurrent:
surr_obj = ((- tf.reduce_sum(((logli * advantage_var) * valid_var))) / tf.reduce_sum(valid_var))
mean_kl = (tf.reduce_sum((kl * valid_var)) / tf.reduce_sum(valid_var))
max_kl = tf.reduce_max((kl * valid_var))
else:
surr_obj = (- tf.reduce_mean((logli * advantage_var)))
mean_kl = tf.reduce_mean(kl)
max_kl = tf.reduce_max(kl)
input_list = ([obs_var, action_var, advantage_var] + state_info_vars_list)
if is_recurrent:
input_list.append(valid_var)
self.optimizer.update_opt(loss=surr_obj, target=self.policy, inputs=input_list)
f_kl = tensor_utils.compile_function(inputs=(input_list + old_dist_info_vars_list), outputs=[mean_kl, max_kl])
self.opt_info = dict(f_kl=f_kl)
def optimize_policy(self, itr, samples_data):
logger.log('optimizing policy')
inputs = ext.extract(samples_data, 'observations', 'actions', 'advantages')
agent_infos = samples_data['agent_infos']
state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
inputs += tuple(state_info_list)
if self.policy.recurrent:
inputs += (samples_data['valids'],)
dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
loss_before = self.optimizer.loss(inputs)
self.optimizer.optimize(inputs)
loss_after = self.optimizer.loss(inputs)
logger.record_tabular('LossBefore', loss_before)
logger.record_tabular('LossAfter', loss_after)
(mean_kl, max_kl) = self.opt_info['f_kl'](*(list(inputs) + dist_info_list))
logger.record_tabular('MeanKL', mean_kl)
logger.record_tabular('MaxKL', max_kl)
def get_itr_snapshot(self, itr, samples_data):
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)
def daily_analyze_urls(days=30, min_visits=100):
dt_cutoff = (timezone.now() - datetime.timedelta(days=days))
analyzed_urls = AnalyzedUrl.objects.filter(last_analyzed_date__lt=dt_cutoff, visits_since_last_analyzed__gte=min_visits).select_related()
log.debug('URLs to analyze: %s', analyzed_urls.count())
for analyzed_url in analyzed_urls:
        analyze_url.apply_async(args=[analyzed_url.url, analyzed_url.publisher.slug], queue='analyzer')
class Effect5331(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
for layer in ('shieldCapacity', 'armorHP', 'hp'):
            fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Drones')), layer, ship.getModifiedItemAttr('shipBonusABC2'), skill='Amarr Battlecruiser', **kwargs)
def lr0_goto(I, x):
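    # Compute the LR(0) goto set goto(I, x). Results are memoized two ways:
    # directly by (id(I), x), and through a per-symbol trie keyed on item
    # successors, so structurally identical gotos reuse one closure computation.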
g = _lr_goto_cache.get((id(I), x), None)
if g:
return g
s = _lr_goto_cache.get(x, None)
if (not s):
s = {}
_lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if (n and (n.lrbefore == x)):
s1 = s.get(id(n), None)
if (not s1):
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end', None)
if (not g):
if gs:
g = lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
_lr_goto_cache[(id(I), x)] = g
    return g
def send_validation(strategy, backend, code, partial_token):
url = '{}?verification_code={}&partial_token={}'.format(reverse('social:complete', args=(backend.name,)), code.code, partial_token)
url = strategy.request.build_absolute_uri(url)
    send_mail('Validate your account', f'Validate your account {url}', settings.EMAIL_FROM, [code.email], fail_silently=False)
class StyleGAN2Model(BaseModel):
def __init__(self, opt):
super(StyleGAN2Model, self).__init__(opt)
self.net_g = define_network(deepcopy(opt['network_g']))
self.net_g = self.model_to_device(self.net_g)
self.print_network(self.net_g)
load_path = self.opt['path'].get('pretrain_network_g', None)
if (load_path is not None):
param_key = self.opt['path'].get('param_key_g', 'params')
self.load_network(self.net_g, load_path, self.opt['path'].get('strict_load_g', True), param_key)
self.num_style_feat = opt['network_g']['num_style_feat']
num_val_samples = self.opt['val'].get('num_val_samples', 16)
self.fixed_sample = torch.randn(num_val_samples, self.num_style_feat, device=self.device)
if self.is_train:
self.init_training_settings()
def init_training_settings(self):
train_opt = self.opt['train']
self.net_d = define_network(deepcopy(self.opt['network_d']))
self.net_d = self.model_to_device(self.net_d)
self.print_network(self.net_d)
load_path = self.opt['path'].get('pretrain_network_d', None)
if (load_path is not None):
self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True))
self.net_g_ema = define_network(deepcopy(self.opt['network_g'])).to(self.device)
load_path = self.opt['path'].get('pretrain_network_g', None)
if (load_path is not None):
self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
else:
self.model_ema(0)
self.net_g.train()
self.net_d.train()
self.net_g_ema.eval()
cri_gan_cls = getattr(loss_module, train_opt['gan_opt'].pop('type'))
self.cri_gan = cri_gan_cls(**train_opt['gan_opt']).to(self.device)
self.r1_reg_weight = train_opt['r1_reg_weight']
self.path_reg_weight = train_opt['path_reg_weight']
self.net_g_reg_every = train_opt['net_g_reg_every']
self.net_d_reg_every = train_opt['net_d_reg_every']
self.mixing_prob = train_opt['mixing_prob']
self.mean_path_length = 0
self.setup_optimizers()
self.setup_schedulers()
def setup_optimizers(self):
train_opt = self.opt['train']
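        # Lazy-regularization correction (StyleGAN2): G is regularized only
        # every `net_g_reg_every` steps, so the learning rate and Adam betas
        # are rescaled by reg_every / (reg_every + 1) to compensate.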
net_g_reg_ratio = (self.net_g_reg_every / (self.net_g_reg_every + 1))
if (self.opt['network_g']['type'] == 'StyleGAN2GeneratorC'):
normal_params = []
style_mlp_params = []
modulation_conv_params = []
for (name, param) in self.net_g.named_parameters():
if ('modulation' in name):
normal_params.append(param)
elif ('style_mlp' in name):
style_mlp_params.append(param)
elif ('modulated_conv' in name):
modulation_conv_params.append(param)
else:
normal_params.append(param)
optim_params_g = [{'params': normal_params, 'lr': train_opt['optim_g']['lr']}, {'params': style_mlp_params, 'lr': (train_opt['optim_g']['lr'] * 0.01)}, {'params': modulation_conv_params, 'lr': (train_opt['optim_g']['lr'] / 3)}]
else:
normal_params = []
for (name, param) in self.net_g.named_parameters():
normal_params.append(param)
optim_params_g = [{'params': normal_params, 'lr': train_opt['optim_g']['lr']}]
optim_type = train_opt['optim_g'].pop('type')
if (optim_type == 'Adam'):
self.optimizer_g = torch.optim.Adam(optim_params_g, lr=(train_opt['optim_g']['lr'] * net_g_reg_ratio), betas=((0 ** net_g_reg_ratio), (0.99 ** net_g_reg_ratio)))
else:
            raise NotImplementedError(f'optimizer {optim_type} is not supported yet.')
self.optimizers.append(self.optimizer_g)
net_d_reg_ratio = (self.net_d_reg_every / (self.net_d_reg_every + 1))
if (self.opt['network_d']['type'] == 'StyleGAN2DiscriminatorC'):
normal_params = []
linear_params = []
for (name, param) in self.net_d.named_parameters():
if ('final_linear' in name):
linear_params.append(param)
else:
normal_params.append(param)
optim_params_d = [{'params': normal_params, 'lr': train_opt['optim_d']['lr']}, {'params': linear_params, 'lr': (train_opt['optim_d']['lr'] * (1 / math.sqrt(512)))}]
else:
normal_params = []
for (name, param) in self.net_d.named_parameters():
normal_params.append(param)
optim_params_d = [{'params': normal_params, 'lr': train_opt['optim_d']['lr']}]
optim_type = train_opt['optim_d'].pop('type')
if (optim_type == 'Adam'):
self.optimizer_d = torch.optim.Adam(optim_params_d, lr=(train_opt['optim_d']['lr'] * net_d_reg_ratio), betas=((0 ** net_d_reg_ratio), (0.99 ** net_d_reg_ratio)))
else:
            raise NotImplementedError(f'optimizer {optim_type} is not supported yet.')
self.optimizers.append(self.optimizer_d)
def model_ema(self, decay=0.999):
net_g = self.get_bare_model(self.net_g)
net_g_params = dict(net_g.named_parameters())
net_g_ema_params = dict(self.net_g_ema.named_parameters())
for k in net_g_ema_params.keys():
net_g_ema_params[k].data.mul_(decay).add_(net_g_params[k].data, alpha=(1 - decay))
def feed_data(self, data):
self.real_img = data['gt'].to(self.device)
def make_noise(self, batch, num_noise):
if (num_noise == 1):
noises = torch.randn(batch, self.num_style_feat, device=self.device)
else:
noises = torch.randn(num_noise, batch, self.num_style_feat, device=self.device).unbind(0)
return noises
def mixing_noise(self, batch, prob):
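        # Style mixing regularization: with probability `prob`, return two
        # latent codes so the generator can cross over styles mid-network.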
if (random.random() < prob):
return self.make_noise(batch, 2)
else:
return [self.make_noise(batch, 1)]
def optimize_parameters(self, current_iter):
loss_dict = OrderedDict()
for p in self.net_d.parameters():
p.requires_grad = True
self.optimizer_d.zero_grad()
batch = self.real_img.size(0)
noise = self.mixing_noise(batch, self.mixing_prob)
(fake_img, _) = self.net_g(noise)
fake_pred = self.net_d(fake_img.detach())
real_pred = self.net_d(self.real_img)
l_d = (self.cri_gan(real_pred, True, is_disc=True) + self.cri_gan(fake_pred, False, is_disc=True))
loss_dict['l_d'] = l_d
loss_dict['real_score'] = real_pred.detach().mean()
loss_dict['fake_score'] = fake_pred.detach().mean()
l_d.backward()
if ((current_iter % self.net_d_reg_every) == 0):
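            # Lazy R1 regularization on real images, scaled by the interval;
            # the `0 * real_pred[0]` term keeps the prediction attached to the
            # graph so distributed training sees no unused parameters.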
self.real_img.requires_grad = True
real_pred = self.net_d(self.real_img)
l_d_r1 = r1_penalty(real_pred, self.real_img)
l_d_r1 = ((((self.r1_reg_weight / 2) * l_d_r1) * self.net_d_reg_every) + (0 * real_pred[0]))
loss_dict['l_d_r1'] = l_d_r1.detach().mean()
l_d_r1.backward()
self.optimizer_d.step()
for p in self.net_d.parameters():
p.requires_grad = False
self.optimizer_g.zero_grad()
noise = self.mixing_noise(batch, self.mixing_prob)
(fake_img, _) = self.net_g(noise)
fake_pred = self.net_d(fake_img)
l_g = self.cri_gan(fake_pred, True, is_disc=False)
loss_dict['l_g'] = l_g
l_g.backward()
if ((current_iter % self.net_g_reg_every) == 0):
path_batch_size = max(1, (batch // self.opt['train']['path_batch_shrink']))
noise = self.mixing_noise(path_batch_size, self.mixing_prob)
(fake_img, latents) = self.net_g(noise, return_latents=True)
(l_g_path, path_lengths, self.mean_path_length) = g_path_regularize(fake_img, latents, self.mean_path_length)
l_g_path = (((self.path_reg_weight * self.net_g_reg_every) * l_g_path) + (0 * fake_img[(0, 0, 0, 0)]))
l_g_path.backward()
loss_dict['l_g_path'] = l_g_path.detach().mean()
loss_dict['path_length'] = path_lengths
self.optimizer_g.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
self.model_ema(decay=(0.5 ** (32 / (10 * 1000))))
def test(self):
with torch.no_grad():
self.net_g_ema.eval()
(self.output, _) = self.net_g_ema([self.fixed_sample])
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
if (self.opt['rank'] == 0):
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
assert (dataloader is None), 'Validation dataloader should be None.'
self.test()
result = tensor2img(self.output, min_max=((- 1), 1))
if self.opt['is_train']:
save_img_path = osp.join(self.opt['path']['visualization'], 'train', f'train_{current_iter}.png')
else:
save_img_path = osp.join(self.opt['path']['visualization'], 'test', f"test_{self.opt['name']}.png")
imwrite(result, save_img_path)
result = (result / 255.0).astype(np.float32)
result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
if (tb_logger is not None):
tb_logger.add_image('samples', result, global_step=current_iter, dataformats='HWC')
def save(self, epoch, current_iter):
self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema'])
self.save_network(self.net_d, 'net_d', current_iter)
        self.save_training_state(epoch, current_iter)
def test__getting_started__example_multinode_constraints():
from bioptim.examples.getting_started import example_multinode_constraints as ocp_module
bioptim_folder = os.path.dirname(ocp_module.__file__)
    ocp_module.prepare_ocp(biorbd_model_path=(bioptim_folder + '/models/cube.bioMod'), phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE, n_shootings=(8, 8, 8), expand_dynamics=False)
class MetadataTest(unittest.TestCase):
def make_temp(self):
global tempdir
if tempdir.lstat():
tempdir.delete()
tempdir.mkdir()
def testQuote(self):
filenames = [b'foo', b'.', b'hello\nthere', b'\\', b'\\\\\\', b'h\no\t\x87\n', b' ']
for filename in filenames:
quoted = quoting.quote_path(filename)
self.assertNotIn(b'\n', quoted)
result = quoting.unquote_path(quoted)
self.assertEqual(result, filename)
def get_rpaths(self):
vft = rpath.RPath(Globals.local_connection, os.path.join(old_test_dir, b'various_file_types'))
rpaths = [vft.append(x) for x in vft.listdir()]
extra_rpaths = [rpath.RPath(Globals.local_connection, x) for x in [b'/bin/ls', b'/dev/ttyS0', b'/dev/hda', b'aoeuaou']]
return (([vft] + rpaths) + extra_rpaths)
def testRORP2Record(self):
for rp in self.get_rpaths():
record = stdattr.AttrFile._object_to_record(rp)
new_rorp = stdattr.AttrExtractor._record_to_object(record)
self.assertEqual(new_rorp, rp)
def testIterator(self):
def write_rorp_iter_to_file(rorp_iter, file):
for rorp in rorp_iter:
file.write(stdattr.AttrFile._object_to_record(rorp))
rplist = self.get_rpaths()
fp = io.BytesIO()
write_rorp_iter_to_file(iter(rplist), fp)
fp.seek(0)
fp.read()
fp.seek(0)
outlist = list(stdattr.AttrExtractor(fp).iterate())
self.assertEqual(len(rplist), len(outlist))
for i in range(len(rplist)):
self.assertTrue(rplist[i]._equal_verbose(outlist[i]))
fp.close()
def write_metadata_to_temp(self, compress):
global tempdir
meta_file_name = 'mirror_metadata.2005-11-03T14:51:06-06:00.snapshot'
if compress:
meta_file_name += '.gz'
temprp = tempdir.append(meta_file_name)
if temprp.lstat():
return temprp
bigdir_path = os.path.join(comtst.abs_test_dir, b'meta_bigdir')
bigdir_struct = {'subdir{}': {'range': 4, 'contents': {'subdir{}': {'range': 50, 'contents': {'file{}': {'range': 50, 'size': 1024}}}}}}
fileset.create_fileset(bigdir_path, bigdir_struct)
self.make_temp()
rootrp = rpath.RPath(Globals.local_connection, bigdir_path)
rpath_iter = selection.Select(rootrp).get_select_iter()
start_time = time.time()
mf = stdattr.AttrFile(temprp, 'w', compress=compress)
for rp in rpath_iter:
mf.write_object(rp)
mf.close()
print(('Writing metadata took %s seconds' % (time.time() - start_time)))
print(bigdir_path)
fileset.remove_fileset(bigdir_path, bigdir_struct)
return temprp
def helper_speed(self, compress):
temprp = self.write_metadata_to_temp(compress=compress)
mf = stdattr.AttrFile(temprp, 'r')
start_time = time.time()
i = 0
for rorp in mf.get_objects():
i += 1
print(('Reading %s metadata entries took %s seconds (compressed=%s).' % (i, (time.time() - start_time), compress)))
start_time = time.time()
blocksize = (32 * 1024)
with temprp.open('rb', compress=compress) as tempfp:
while 1:
buf = tempfp.read(blocksize)
if (not buf):
break
print(('Simply decompressing metadata file took %s seconds (compressed=%s)' % ((time.time() - start_time), compress)))
def test_speed_compressed(self):
return self.helper_speed(compress=True)
def test_speed_uncompressed(self):
return self.helper_speed(compress=False)
def helper_iterate_restricted(self, compress):
temprp = self.write_metadata_to_temp(compress=compress)
mf = stdattr.AttrFile(temprp, 'rb')
start_time = time.time()
i = 0
for rorp in mf.get_objects((b'subdir3', b'subdir10')):
i += 1
print(('Reading %s metadata entries took %s seconds (compressed=%s).' % (i, (time.time() - start_time), compress)))
self.assertEqual(i, 51)
def test_iterate_restricted_compressed(self):
return self.helper_iterate_restricted(compress=True)
def test_iterate_restricted_uncompressed(self):
return self.helper_iterate_restricted(compress=False)
def helper_write(self, compress):
global tempdir
meta_file_name = 'mirror_metadata.2005-11-03T12:51:06-06:00.snapshot'
if compress:
meta_file_name += '.gz'
temprp = tempdir.append(meta_file_name)
if temprp.lstat():
temprp.delete()
self.make_temp()
rootrp = rpath.RPath(Globals.local_connection, os.path.join(old_test_dir, b'various_file_types'))
sel = selection.Select(rootrp)
sel.parse_selection_args((), ())
rps = list(sel.get_select_iter())
self.assertFalse(temprp.lstat())
write_mf = stdattr.AttrFile(temprp, 'w', compress=compress)
for rp in rps:
write_mf.write_object(rp)
write_mf.close()
self.assertTrue(temprp.lstat())
reread_rps = list(stdattr.AttrFile(temprp, 'r').get_objects())
self.assertEqual(len(reread_rps), len(rps))
for i in range(len(reread_rps)):
self.assertEqual(reread_rps[i], rps[i])
def test_write_compressed(self):
return self.helper_write(compress=True)
def test_write_uncompressed(self):
return self.helper_write(compress=False)
def test_patch(self):
self.make_temp()
xcopytree(os.path.join(old_test_dir, b'various_file_types'), tempdir.path, content=True)
rp1 = tempdir.append('regular_file')
rp2 = tempdir.append('subdir')
rp3 = rp2.append('subdir_file')
rp4 = tempdir.append('test')
rp1new = tempdir.append('regular_file')
rp1new.chmod(0)
zero = rpath.RORPath(('test',))
current = [rp1, rp2, rp3]
diff1 = [rp1, rp4]
diff2 = [rp1new, rp2, zero]
Globals.rbdir = tempdir
output = meta_mgr.PatchDiffMan()._iterate_patched_attr([iter(current), iter(diff1), iter(diff2)])
out1 = next(output)
self.assertIs(out1, rp1new)
out2 = next(output)
self.assertIs(out2, rp2)
out3 = next(output)
self.assertIs(out3, rp3)
self.assertRaises(StopIteration, output.__next__)
def test_meta_patch_cycle(self):
def write_dir_to_meta(manager, rp, time):
metawriter = man._writer_helper(b'snapshot', time, stdattr.get_plugin_class())
sel = selection.Select(rp)
sel.parse_selection_args((), ())
for rorp in sel.get_select_iter():
metawriter.write_object(rorp)
metawriter.close()
def compare(man, rootrp, time):
sel = selection.Select(rootrp)
sel.parse_selection_args((), ())
self.assertTrue(iter_equal(sel.get_select_iter(), man._get_meta_main_at_time(time, None)))
self.make_temp()
Globals.rbdir = tempdir
man = meta_mgr.PatchDiffMan()
inc1 = rpath.RPath(Globals.local_connection, os.path.join(old_test_dir, b'increment1'))
inc2 = rpath.RPath(Globals.local_connection, os.path.join(old_test_dir, b'increment2'))
inc3 = rpath.RPath(Globals.local_connection, os.path.join(old_test_dir, b'increment3'))
inc4 = rpath.RPath(Globals.local_connection, os.path.join(old_test_dir, b'increment4'))
write_dir_to_meta(man, inc1, 10000)
compare(man, inc1, 10000)
write_dir_to_meta(man, inc2, 20000)
compare(man, inc2, 20000)
man.convert_meta_main_to_diff()
man = meta_mgr.PatchDiffMan()
write_dir_to_meta(man, inc3, 30000)
compare(man, inc3, 30000)
man.convert_meta_main_to_diff()
man = meta_mgr.PatchDiffMan()
man.max_diff_chain = 3
write_dir_to_meta(man, inc4, 40000)
compare(man, inc4, 40000)
man.convert_meta_main_to_diff()
man = meta_mgr.PatchDiffMan()
rplist = man.sorted_prefix_inclist(b'mirror_metadata')
self.assertEqual(rplist[0].getinctype(), b'snapshot')
self.assertEqual(rplist[0].getinctime(), 40000)
self.assertEqual(rplist[1].getinctype(), b'snapshot')
self.assertEqual(rplist[1].getinctime(), 30000)
self.assertEqual(rplist[2].getinctype(), b'diff')
self.assertEqual(rplist[2].getinctime(), 20000)
self.assertEqual(rplist[3].getinctype(), b'diff')
self.assertEqual(rplist[3].getinctime(), 10000)
compare(man, inc1, 10000)
compare(man, inc2, 20000)
compare(man, inc3, 30000)
        compare(man, inc4, 40000)
class JSONFormatter(logging.Formatter):
def __init__(self):
pass
def format(self, record):
event = {'timestamp': self.getTimestamp(record.created), 'message': record.getMessage(), 'level': record.levelname, 'logger': record.name}
event_data = getattr(record, 'event_data', None)
if event_data:
event.update(event_data)
if record.exc_info:
event['exc_info'] = self.formatException(record.exc_info)
if record.stack_info:
event['stack_info'] = self.formatStack(record.stack_info)
return json.dumps(event)
def getTimestamp(self, created):
        return datetime.datetime.utcfromtimestamp(created).isoformat()
def require_gdal_version(version, param=None, values=None, is_max_version=False, reason=''):
if (values is not None):
if (param is None):
raise ValueError('require_gdal_version: param must be provided with values')
if (not isinstance(values, (tuple, list, set))):
raise ValueError('require_gdal_version: values must be a tuple, list, or set')
version = GDALVersion.parse(version)
runtime = GDALVersion.runtime()
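    # Direction of the failing comparison, used only in error messages: an
    # older runtime means the requirement reads ">= version"; otherwise the
    # decorator acts as a max-version guard and reports "<= version".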
inequality = ('>=' if (runtime < version) else '<=')
reason = ('\n{0}'.format(reason) if reason else reason)
def decorator(f):
@wraps(f)
def wrapper(*args, **kwds):
if (((runtime < version) and (not is_max_version)) or (is_max_version and (runtime > version))):
if (param is None):
raise GDALVersionError('GDAL version must be {0} {1}{2}'.format(inequality, str(version), reason))
argspec = getargspec(f)
full_kwds = kwds.copy()
if argspec.args:
full_kwds.update(dict(zip(argspec.args[:len(args)], args)))
if argspec.defaults:
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = {}
if (param in full_kwds):
if (values is None):
if ((param not in defaults) or (full_kwds[param] != defaults[param])):
raise GDALVersionError('usage of parameter "{0}" requires GDAL {1} {2}{3}'.format(param, inequality, version, reason))
elif (full_kwds[param] in values):
raise GDALVersionError('parameter "{0}={1}" requires GDAL {2} {3}{4}'.format(param, full_kwds[param], inequality, version, reason))
return f(*args, **kwds)
return wrapper
return decorator |
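# Hedged usage sketch for require_gdal_version; the function names and the
# '2.2'/'3.1'/'rms' values below are illustrative stand-ins, not from the source.
@require_gdal_version('2.2')
def open_raster(path):
    ...

@require_gdal_version('3.1', param='resampling', values=['rms'])
def resample(data, resampling=None):
    # resample(data, resampling='rms') raises GDALVersionError on GDAL < 3.1;
    # any other value for `resampling` passes through unchecked.
    ... |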
def _find_nodes_to(state: EnvironmentState, node: Node, relations: List[Relation]):
nodes = []
for src_node in AnyNode().enumerate(state):
for r in relations:
nl = state.get_nodes_from(src_node, r)
if (node in nl):
nodes.append(src_node)
return nodes |
def contains(shape, other):
if (not hasattr(shape, GEO_INTERFACE_ATTR)):
raise TypeError((SHAPE_TYPE_ERR % shape))
if (not hasattr(other, GEO_INTERFACE_ATTR)):
raise TypeError((SHAPE_TYPE_ERR % other))
o = geom.shape(shape)
o2 = geom.shape(other)
return o.contains(o2) |
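# Hedged usage sketch: contains() accepts any objects exposing
# __geo_interface__, e.g. GeoJSON-like mappings behind a tiny adapter.
class _Geo:
    def __init__(self, mapping):
        self.__geo_interface__ = mapping

square = _Geo({'type': 'Polygon', 'coordinates': [[(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]]})
point = _Geo({'type': 'Point', 'coordinates': (2, 2)})
assert contains(square, point)  # the 4x4 square contains the point (2, 2) |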
def task_02(self):
if 0:
self._subscriber_base.subscribe()
while (self._subscriber_base_points is None):
pass
self._subscriber_base.unsubscribe()
obj = safepicking.pybullet.create_bin(X=0.3, Y=0.3, Z=0.11, color=(0.7, 0.7, 0.7, 1))
safepicking.pybullet.set_pose(obj, ((0.0, -0.0, 0.0), (0.0, 0.0, -0.0, 0.0)))
if 0:
safepicking.pybullet.annotate_pose(obj)
self._env.bg_objects.append(obj)
parent = obj
obj = safepicking.pybullet.create_mesh_body(safepicking.datasets.ycb.get_visual_file(class_id=2))
safepicking.pybullet.set_pose(obj, ((-0.0, -0.0, -0.0), (0.0, 0.0, 0.0, 0.0)), parent=parent)
obj = safepicking.pybullet.create_mesh_body(safepicking.datasets.ycb.get_visual_file(class_id=3))
pp.set_pose(obj, ((0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0)))
safepicking.pybullet.set_pose(obj, ((-0.0, 0.0, -0.0), (0.0, 0.0, 0.0, 0.0)), parent=parent)
obj = safepicking.pybullet.create_mesh_body(safepicking.datasets.ycb.get_visual_file(class_id=5))
pp.set_pose(obj, ((0.0, -0.0, 0.0), (0.0, 0.0, 0.0, -0.0)))
safepicking.pybullet.set_pose(obj, ((0.0, -0.0, -0.0), (0.0, 0.0, 0.0, -0.0)), parent=parent)
obj = safepicking.pybullet.create_mesh_body(safepicking.datasets.ycb.get_visual_file(class_id=4), rgba_color=(1, 1, 1, 0.5), mesh_scale=(0.99, 0.99, 0.99))
pp.set_pose(obj, ((0.0, 0.0, 0.0), (-0.0, 0.0, 0.0, 0.0)))
safepicking.pybullet.set_pose(obj, ((0.0, 0.0, -0.0), (-0.0, 0.0, 0.0, 0.0)), parent=parent)
set_obj_goal(self, obj) |
@patch('pyinaturalist.v1.observations.get_observation')
@patch('pyinaturalist.v1.observations.put')
def test_update_observation__with_photo_ids(mock_put, mock_get_observation):
mock_get_observation.return_value = {'photos': [{'id': 1234}]}
update_observation(1234, access_token='token', photo_ids=5678)
payload = mock_put.call_args[1]['json']
assert ('photo_ids' not in payload)
assert (payload['local_photos'] == {'1234': [1234, 5678]}) |
def read_kaldi_datadir(dir):
if os.path.isfile(os.path.join(dir, 'segments')):
logger.info("The data directory '{}' seems to use a 'segments' file. This script does not yet support a 'segments' file. You'll need to use utils/data/extract_wav_segments_data_dir.sh to convert the data dir so it does not use a 'segments' file. Exiting...".format(dir))
sys.exit(1)
logger.info('Loading the data from {}...'.format(dir))
utterances = []
wav_scp = read_kaldi_mapfile(os.path.join(dir, 'wav.scp'))
text = read_kaldi_mapfile(os.path.join(dir, 'text'))
utt2dur = read_kaldi_mapfile(os.path.join(dir, 'utt2dur'))
utt2spk = read_kaldi_mapfile(os.path.join(dir, 'utt2spk'))
num_fail = 0
for utt in wav_scp:
if ((utt in text) and (utt in utt2dur) and (utt in utt2spk)):
utterances.append(Utterance(utt, wav_scp[utt], utt2spk[utt], text[utt], utt2dur[utt]))
else:
num_fail += 1
if ((float(len(utterances)) / len(wav_scp)) < 0.5):
logger.info('More than half your data is problematic. Try fixing using fix_data_dir.sh.')
sys.exit(1)
logger.info('Successfully read {} utterances. Failed for {} utterances.'.format(len(utterances), num_fail))
return utterances |
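# Hedged sketch of the read_kaldi_mapfile helper assumed above (not shown in
# the source): Kaldi map files hold one "<utt-id> <rest of line>" per line.
def read_kaldi_mapfile(path):
    mapping = {}
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split(None, 1)
            if not parts:
                continue  # skip blank lines
            mapping[parts[0]] = parts[1] if len(parts) > 1 else ''
    return mapping |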
import math
import numpy as np

class TimeSignal(VBObject):
def VBOBJECT_TYPE(self):
return 'TimeSignal'
def __init__(self, path=None, sampling_rate=None):
VBObject.__init__(self, path=path)
if sampling_rate:
self.sampling_rate = sampling_rate
def initializeBlank(self):
VBObject.initializeBlank(self)
self.sampling_rate = None
def frame_rate(self):
return self._getFrameRate()
def _getFrameRate(self):
raise NotImplementedError
def getSampleAtTime(self, f):
prev_sample = self.getSampleAtIndex(math.floor(f))
next_sample = self.getSampleAtIndex(math.ceil(f))
sample_progress = (f - np.floor(f))
return ((next_sample * sample_progress) + (prev_sample * (1.0 - sample_progress)))
def getSampleAtIndex(self, i):
return self.getTimeForIndex(i)
def getDuration(self):
assert False, 'getDuration must be implemented for subclass of TimeSignal'
def getSampleDuration(self):
return (1.0 / self.sampling_rate)
def getTimeForIndex(self, i):
return (i * self.getSampleDuration()) |
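# Hedged usage sketch: a minimal concrete subclass backed by a Python list,
# illustrating the linear interpolation in getSampleAtTime (argument is in
# sample-index units). Assumes VBObject.__init__ tolerates path=None.
class ListSignal(TimeSignal):
    def __init__(self, samples, sampling_rate):
        TimeSignal.__init__(self, sampling_rate=sampling_rate)
        self.samples = samples
    def getSampleAtIndex(self, i):
        return self.samples[int(i)]

sig = ListSignal([0.0, 10.0, 20.0], sampling_rate=2)
assert sig.getSampleAtTime(0.5) == 5.0   # halfway between samples 0 and 1
assert sig.getSampleDuration() == 0.5    # 1 / sampling_rate |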
def _validate_netloc(value: str, skip_ipv6_addr: bool, skip_ipv4_addr: bool, may_have_port: bool, simple_host: bool, rfc_1034: bool, rfc_2782: bool):
if ((not value) or (value.count('@') > 1)):
return False
if (value.count('@') < 1):
return hostname((value if (_confirm_ipv6_skip(value, skip_ipv6_addr) or (']:' in value)) else value.lstrip('[').replace(']', '', 1)), skip_ipv6_addr=_confirm_ipv6_skip(value, skip_ipv6_addr), skip_ipv4_addr=skip_ipv4_addr, may_have_port=may_have_port, maybe_simple=simple_host, rfc_1034=rfc_1034, rfc_2782=rfc_2782)
(basic_auth, host) = value.rsplit('@', 1)
return (hostname((host if (_confirm_ipv6_skip(host, skip_ipv6_addr) or (']:' in value)) else host.lstrip('[').replace(']', '', 1)), skip_ipv6_addr=_confirm_ipv6_skip(host, skip_ipv6_addr), skip_ipv4_addr=skip_ipv4_addr, may_have_port=may_have_port, maybe_simple=simple_host, rfc_1034=rfc_1034, rfc_2782=rfc_2782) and _validate_auth_segment(basic_auth)) |
@with_dimension
def test_triangulation_of_standard_simplex(dim):
t = Triangulation(_make_standard_simplex(dim))
expected_simplex = tuple(range((dim + 1)))
assert (t.simplices == {expected_simplex})
_check_triangulation_is_valid(t)
assert np.isclose(t.volume(expected_simplex), _standard_simplex_volume(dim)) |
def create_data(client: Client, args, name='balanced-df') -> Tuple[(int, dask.dataframe.DataFrame)]:
chunksize = (args.partition_size // np.float64().nbytes)
workers = list(client.scheduler_info()['workers'].keys())
assert (len(workers) > 0)
dist = args.partition_distribution
if (dist is None):
dist = ([(args.in_parts // len(workers))] * len(workers))
for i in range((args.in_parts % len(workers))):
dist[i] += 1
if (len(dist) != len(workers)):
raise ValueError(f"The length of `--devs`({len(dist)}) and `--partition-distribution`({len(workers)}) doesn't match")
if (sum(dist) != args.in_parts):
raise ValueError(f'The sum of `--partition-distribution`({sum(dist)}) must match the number of input partitions `--in-parts={args.in_parts}`')
dsk = {}
for (i, part_size) in enumerate(dist):
for _ in range(part_size):
dsk[(name, len(dsk))] = client.submit(create_df, chunksize, args.type, workers=[workers[i]], pure=False)
wait(dsk.values())
df_meta = create_df(0, args.type)
divs = ([None] * (len(dsk) + 1))
ret = new_dd_object(dsk, name, df_meta, divs).persist()
wait(ret)
data_processed = (args.in_parts * args.partition_size)
if (not args.ignore_index):
data_processed += ((args.in_parts * chunksize) * df_meta.index.dtype.itemsize)
return (data_processed, ret) |
def main():
parser = argparse.ArgumentParser(description='PyTorch Lightning DDP')
parser.add_argument('--epochs', default=1, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('-b', '--batch_size', default=128, type=int, metavar='N')
parser.add_argument('--learning_rate', default=1e-06, type=float, help='initial learning rate')
parser.add_argument('--num_classes', default=1000, type=int, help='Number of classes for the network.')
parser.add_argument('--model_path', help="path to the quantized model's saved checkpoint for QAT", default='na')
parser.add_argument('--imagenet_dir', help='path to imagenet_dir', required=True)
args = parser.parse_args()
print('TRAINING QUANTIZED MODEL ON DDP ...')
print(args.batch_size)
print(torch.cuda.device_count())
pl.seed_everything(0, workers=True)
model = LitImageNet(imagenet_dir=args.imagenet_dir, batch_size=args.batch_size, model_path=args.model_path, learning_rate=args.learning_rate, num_classes=args.num_classes)
trainer = pl.Trainer(deterministic=True, strategy='ddp', accelerator='gpu', devices=-1, max_epochs=args.epochs, limit_train_batches=10)
trainer.test(model)
trainer.fit(model) |
class RNet(nn.Module):
def __init__(self):
super(RNet, self).__init__()
self.features = nn.Sequential(OrderedDict([('conv1', nn.Conv2d(3, 28, 3, 1)), ('prelu1', nn.PReLU(28)), ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)), ('conv2', nn.Conv2d(28, 48, 3, 1)), ('prelu2', nn.PReLU(48)), ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)), ('conv3', nn.Conv2d(48, 64, 2, 1)), ('prelu3', nn.PReLU(64)), ('flatten', Flatten()), ('conv4', nn.Linear(576, 128)), ('prelu4', nn.PReLU(128))]))
self.conv5_1 = nn.Linear(128, 2)
self.conv5_2 = nn.Linear(128, 4)
weights = np.load('./rnet.npy', allow_pickle=True)[()]
for (n, p) in self.named_parameters():
p.data = torch.FloatTensor(weights[n])
def forward(self, x):
x = self.features(x)
a = self.conv5_1(x)
b = self.conv5_2(x)
a = F.softmax(a, dim=1)
return (b, a) |
def differentiable_graph_to_smiles_purely_randomwalk(origin_smiles, differentiable_graph, leaf_extend_idx_pair, leaf_nonleaf_lst, topk=3, epsilon=0.7):
leaf2nonleaf = {leaf: nonleaf for (leaf, nonleaf) in leaf_nonleaf_lst}
leaf2extend = {leaf: extend for (leaf, extend) in leaf_extend_idx_pair}
new_smiles_set = set()
origin_mol = Chem.rdchem.RWMol(Chem.MolFromSmiles(origin_smiles))
(origin_idx_lst, origin_node_mat, origin_substructure_lst, origin_atomidx_2substridx, origin_adjacency_matrix, leaf_extend_idx_pair) = smiles2graph(origin_smiles)
(node_indicator, adjacency_weight) = differentiable_graph
N = len(origin_idx_lst)
M = len(leaf_extend_idx_pair)
d = len(vocabulary)
for (leaf_idx, extend_idx) in leaf_extend_idx_pair:
u_shrink = random.random()
(shrink, unchange, expand) = (False, False, False)
if ((u_shrink < epsilon) and (substr_num(origin_smiles) > 1)):
shrink = True
else:
u_expand = random.random()
if (u_expand < 0.3):
expand = True
else:
unchange = True
if (shrink or unchange):
leaf_atom_idx_lst = origin_substructure_lst[leaf_idx]
if (type(leaf_atom_idx_lst) == int):
leaf_atom_idx_lst = [leaf_atom_idx_lst]
remaining_atoms_idx_lst = []
for (i, v) in enumerate(origin_substructure_lst):
if (i == leaf_idx):
continue
if (type(v) == int):
remaining_atoms_idx_lst.append(v)
else:
remaining_atoms_idx_lst.extend(v)
new_leaf_atom_idx_lst = [leaf_atom_idx for leaf_atom_idx in leaf_atom_idx_lst if (leaf_atom_idx not in remaining_atoms_idx_lst)]
result = delete_substructure_at_idx(editmol=origin_mol, atom_idx_lst=new_leaf_atom_idx_lst)
if (result is None):
continue
(delete_mol, old_idx2new_idx) = result
delete_smiles = Chem.MolToSmiles(delete_mol)
if ((delete_smiles is None) or ('.' in delete_smiles)):
continue
delete_smiles = canonical(delete_smiles)
nonleaf_idx = leaf2nonleaf[leaf_idx]
if shrink:
new_smiles_set.add(delete_smiles)
continue
neighbor_substructures_idx = [idx for (idx, value) in enumerate(origin_adjacency_matrix[leaf_idx]) if (value == 1)]
assert (len(neighbor_substructures_idx) == 1)
neighbor_substructures_idx = neighbor_substructures_idx[0]
neighbor_atom_idx_lst = origin_substructure_lst[neighbor_substructures_idx]
if (type(neighbor_atom_idx_lst) == int):
neighbor_atom_idx_lst = [neighbor_atom_idx_lst]
added_substructure_lst = [random.choice(list(range(len(vocabulary)))) for i in range(topk)]
for substructure_idx in added_substructure_lst:
new_substructure = vocabulary[substructure_idx]
for new_bond in bondtype_list:
for leaf_atom_idx in neighbor_atom_idx_lst:
new_leaf_atom_idx = old_idx2new_idx[leaf_atom_idx]
if ith_substructure_is_atom(substructure_idx):
new_smiles = add_atom_at_position(editmol=delete_mol, position_idx=new_leaf_atom_idx, new_atom=new_substructure, new_bond=new_bond)
new_smiles_set.add(new_smiles)
else:
new_smiles_batch = add_fragment_at_position(editmol=delete_mol, position_idx=new_leaf_atom_idx, fragment=new_substructure, new_bond=new_bond)
new_smiles_set = new_smiles_set.union(new_smiles_batch)
continue
for (leaf_idx, extend_idx) in leaf_extend_idx_pair:
leaf_atom_idx_lst = origin_substructure_lst[leaf_idx]
if (type(leaf_atom_idx_lst) == int):
leaf_atom_idx_lst = [leaf_atom_idx_lst]
for leaf_atom_idx in leaf_atom_idx_lst:
added_substructure_lst = [random.choice(list(range(len(vocabulary)))) for i in range(topk)]
for substructure_idx in added_substructure_lst:
new_substructure = vocabulary[substructure_idx]
for new_bond in bondtype_list:
if ith_substructure_is_atom(substructure_idx):
new_smiles = add_atom_at_position(editmol=origin_mol, position_idx=leaf_atom_idx, new_atom=new_substructure, new_bond=new_bond)
new_smiles_set.add(new_smiles)
else:
new_smiles_batch = add_fragment_at_position(editmol=origin_mol, position_idx=leaf_atom_idx, fragment=new_substructure, new_bond=new_bond)
new_smiles_set = new_smiles_set.union(new_smiles_batch)
return new_smiles_set.difference(set([None])) |
class QPE(QuantumAlgorithm, MinimumEigensolver):
def __init__(self, operator: Optional[Union[(OperatorBase, LegacyBaseOperator)]]=None, state_in: Optional[Union[(InitialState, QuantumCircuit)]]=None, iqft: Optional[QuantumCircuit]=None, num_time_slices: int=1, num_ancillae: int=1, expansion_mode: str='trotter', expansion_order: int=1, shallow_circuit_concat: bool=False, quantum_instance: Optional[Union[(QuantumInstance, BaseBackend, Backend)]]=None) -> None:
validate_min('num_time_slices', num_time_slices, 1)
validate_min('num_ancillae', num_ancillae, 1)
validate_in_set('expansion_mode', expansion_mode, {'trotter', 'suzuki'})
validate_min('expansion_order', expansion_order, 1)
super().__init__(quantum_instance)
self._state_in = state_in
self._iqft = iqft
self._num_time_slices = num_time_slices
self._num_ancillae = num_ancillae
self._expansion_mode = expansion_mode
self._expansion_order = expansion_order
self._shallow_circuit_concat = shallow_circuit_concat
self._binary_fractions = [(1 / (2 ** p)) for p in range(1, (self._num_ancillae + 1))]
self._in_operator = operator
self._operator = None
self._ret = {}
self._pauli_list = None
self._phase_estimation_circuit = None
self._setup(operator)
def _setup(self, operator: Optional[Union[(OperatorBase, LegacyBaseOperator)]]) -> None:
self._operator = None
self._ret = {}
self._pauli_list = None
self._phase_estimation_circuit = None
if operator:
if isinstance(operator, OperatorBase):
operator = operator.to_legacy_op()
self._operator = op_converter.to_weighted_pauli_operator(operator.copy())
self._ret['translation'] = sum([abs(p[0]) for p in self._operator.reorder_paulis()])
self._ret['stretch'] = (0.5 / self._ret['translation'])
self._operator.simplify()
translation_op = WeightedPauliOperator([[self._ret['translation'], Pauli((np.zeros(self._operator.num_qubits), np.zeros(self._operator.num_qubits)))]])
translation_op.simplify()
self._operator += translation_op
self._pauli_list = self._operator.reorder_paulis()
for p in self._pauli_list:
p[0] = (p[0] * self._ret['stretch'])
self._phase_estimation_circuit = PhaseEstimationCircuit(operator=self._operator, state_in=self._state_in, iqft=self._iqft, num_time_slices=self._num_time_slices, num_ancillae=self._num_ancillae, expansion_mode=self._expansion_mode, expansion_order=self._expansion_order, shallow_circuit_concat=self._shallow_circuit_concat, pauli_list=self._pauli_list)
@property
def operator(self) -> Optional[LegacyBaseOperator]:
return self._in_operator
@operator.setter
def operator(self, operator: Union[(OperatorBase, LegacyBaseOperator)]) -> None:
self._in_operator = operator
self._setup(operator)
@property
def aux_operators(self) -> Optional[List[Union[(OperatorBase, LegacyBaseOperator)]]]:
raise TypeError('aux_operators not supported.')
@aux_operators.setter
def aux_operators(self, aux_operators: Optional[List[Union[(OperatorBase, LegacyBaseOperator)]]]) -> None:
raise TypeError('aux_operators not supported.')
def construct_circuit(self, measurement: bool=False) -> QuantumCircuit:
if self._phase_estimation_circuit:
return self._phase_estimation_circuit.construct_circuit(measurement=measurement)
return None
def compute_minimum_eigenvalue(self, operator: Optional[Union[(OperatorBase, LegacyBaseOperator)]]=None, aux_operators: Optional[List[Union[(OperatorBase, LegacyBaseOperator)]]]=None) -> MinimumEigensolverResult:
super().compute_minimum_eigenvalue(operator, aux_operators)
return self._run()
def _compute_energy(self):
if self._quantum_instance.is_statevector:
qc = self.construct_circuit(measurement=False)
result = self._quantum_instance.execute(qc)
complete_state_vec = result.get_statevector(qc)
ancilla_density_mat = get_subsystem_density_matrix(complete_state_vec, range(self._num_ancillae, (self._num_ancillae + self._operator.num_qubits)))
ancilla_density_mat_diag = np.diag(ancilla_density_mat)
max_amplitude = max(ancilla_density_mat_diag.min(), ancilla_density_mat_diag.max(), key=abs)
max_amplitude_idx = np.where((ancilla_density_mat_diag == max_amplitude))[0][0]
top_measurement_label = np.binary_repr(max_amplitude_idx, self._num_ancillae)[::(- 1)]
else:
qc = self.construct_circuit(measurement=True)
result = self._quantum_instance.execute(qc)
ancilla_counts = result.get_counts(qc)
top_measurement_label = sorted([(ancilla_counts[k], k) for k in ancilla_counts])[::(- 1)][0][(- 1)][::(- 1)]
top_measurement_decimal = sum([(t[0] * t[1]) for t in zip(self._binary_fractions, [int(n) for n in top_measurement_label])])
self._ret['top_measurement_label'] = top_measurement_label
self._ret['top_measurement_decimal'] = top_measurement_decimal
self._ret['eigvals'] = [((top_measurement_decimal / self._ret['stretch']) - self._ret['translation'])]
self._ret['energy'] = self._ret['eigvals'][0]
def _run(self) -> 'QPEResult':
self._compute_energy()
result = QPEResult()
if ('translation' in self._ret):
result.translation = self._ret['translation']
if ('stretch' in self._ret):
result.stretch = self._ret['stretch']
if ('top_measurement_label' in self._ret):
result.top_measurement_label = self._ret['top_measurement_label']
if ('top_measurement_decimal' in self._ret):
result.top_measurement_decimal = self._ret['top_measurement_decimal']
if ('eigvals' in self._ret):
result.eigenvalue = self._ret['eigvals'][0]
return result |
@mock.patch('satpy.multiscene._multiscene.get_enhanced_image')
def test_save_mp4(smg, tmp_path):
from satpy import MultiScene
area = _create_test_area()
scenes = _create_test_scenes(area=area)
smg.side_effect = _fake_get_enhanced_image
scenes[1]['ds3'] = _create_test_dataset('ds3')
for ds_id in ['ds1', 'ds2', 'ds3']:
scenes[1][ds_id].attrs['start_time'] = datetime(2018, 1, 2)
scenes[1][ds_id].attrs['end_time'] = datetime(2018, 1, 2, 12)
if (ds_id == 'ds3'):
continue
scenes[0][ds_id].attrs['start_time'] = datetime(2018, 1, 1)
scenes[0][ds_id].attrs['end_time'] = datetime(2018, 1, 1, 12)
mscn = MultiScene(scenes)
fn = str((tmp_path / 'test_save_mp4_{name}_{start_time:%Y%m%d_%H}_{end_time:%Y%m%d_%H}.mp4'))
writer_mock = mock.MagicMock()
with mock.patch('satpy.multiscene._multiscene.imageio.get_writer') as get_writer:
get_writer.return_value = writer_mock
mscn.save_animation(fn, datasets=['ds1', 'ds2', 'ds3'], client=False)
assert (writer_mock.append_data.call_count == (3 + 3))
filenames = [os.path.basename(args[0][0]) for args in get_writer.call_args_list]
assert (filenames[0] == 'test_save_mp4_ds1_20180101_00_20180102_12.mp4')
assert (filenames[1] == 'test_save_mp4_ds2_20180101_00_20180102_12.mp4')
assert (filenames[2] == 'test_save_mp4_ds3_20180102_00_20180102_12.mp4')
fn = str((tmp_path / 'test_save_mp4_{name}_{start_time:%Y%m%d_%H}_{end_time:%Y%m%d_%H}.mp4'))
writer_mock = mock.MagicMock()
with mock.patch('satpy.multiscene._multiscene.imageio.get_writer') as get_writer:
get_writer.return_value = writer_mock
mscn.save_animation(fn, client=False)
assert (writer_mock.append_data.call_count == (2 + 2))
assert ('test_save_mp4_ds1_20180101_00_20180102_12.mp4' in filenames)
assert ('test_save_mp4_ds2_20180101_00_20180102_12.mp4' in filenames)
assert ('test_save_mp4_ds3_20180102_00_20180102_12.mp4' in filenames)
fn = str((tmp_path / 'test-{name}_{start_time:%Y%m%d_%H}_{end_time:%Y%m%d_%H}-rich.mp4'))
writer_mock = mock.MagicMock()
with mock.patch('satpy.multiscene._multiscene.imageio.get_writer') as get_writer:
get_writer.return_value = writer_mock
mscn.save_animation(fn, client=False, enh_args={'decorate': {'decorate': [{'text': {'txt': 'Test {start_time:%Y-%m-%d %H:%M} - {end_time:%Y-%m-%d %H:%M}'}}]}})
assert (writer_mock.append_data.call_count == (2 + 2))
assert ('2018-01-02' in smg.call_args_list[(- 1)][1]['decorate']['decorate'][0]['text']['txt']) |
class IndexV2TestSpec(object):
def __init__(self, index_name, method_name, repo_name, scope=None, **kwargs):
self.index_name = index_name
self.repo_name = repo_name
self.method_name = method_name
default_scope = ('push,pull' if ((method_name != 'GET') and (method_name != 'HEAD')) else 'pull')
self.scope = (scope or default_scope)
self.kwargs = kwargs
self.anon_code = 401
self.no_access_code = 403
self.read_code = 200
self.admin_code = 200
self.creator_code = 200
def request_status(self, anon_code=401, no_access_code=403, read_code=200, creator_code=200, admin_code=200):
self.anon_code = anon_code
self.no_access_code = no_access_code
self.read_code = read_code
self.creator_code = creator_code
self.admin_code = admin_code
return self
def get_url(self):
return url_for(self.index_name, repository=self.repo_name, **self.kwargs)
def gen_basic_auth(self, username, password):
encoded = b64encode((b'%s:%s' % (username.encode('ascii'), password.encode('ascii'))))
return ('basic %s' % encoded.decode('ascii'))
def get_scope_string(self):
return ('repository:%s:%s' % (self.repo_name, self.scope)) |
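# Hedged usage sketch; 'v2.list_tags' and 'devtable/simple' are illustrative
# stand-ins (get_url needs a Flask app context, so it is not exercised here).
spec = IndexV2TestSpec('v2.list_tags', 'GET', 'devtable/simple')
assert spec.scope == 'pull'  # GET/HEAD requests default to pull-only scope
assert spec.get_scope_string() == 'repository:devtable/simple:pull'
# standard HTTP Basic credentials: base64("user:pass")
assert spec.gen_basic_auth('user', 'pass') == 'basic dXNlcjpwYXNz' |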
@pytest.mark.parametrize('lhs,rhs,result', [(1, 1, True), (1, 1.1, True), (1.1, 1, False), (1.0, 1.0, True), ('abc', 'def', True), ('abc', '', False), ([], [1], True), ((1, 2), (2, 3), True), ((1, 0), (1,), False), (True, True, True), (True, False, False), (False, 1, True), ((1 + 0j), (2 + 0j), util.Uninferable), ((+ 0.0), (- 0.0), True), (0, '1', util.Uninferable), (b'\x00', b'\x01', True)])
def test_compare_lesseq_types(lhs, rhs, result) -> None:
code = f'''
{lhs!r} <= {rhs!r}
'''
node = extract_node(code)
inferred = next(node.infer())
assert (inferred.value == result) |
class _BasePrompt(QWidget):
KEY_MODE = usertypes.KeyMode.prompt
def __init__(self, question, parent=None):
super().__init__(parent)
self.question = question
self._vbox = QVBoxLayout(self)
self._vbox.setSpacing(15)
self._key_grid = None
def __repr__(self):
return utils.get_repr(self, question=self.question, constructor=True)
def _init_texts(self, question):
assert (question.title is not None), question
title = '<font size="4"><b>{}</b></font>'.format(html.escape(question.title))
title_label = QLabel(title, self)
self._vbox.addWidget(title_label)
if (question.text is not None):
text_label = QLabel(question.text)
text_label.setWordWrap(True)
text_label.setTextInteractionFlags(Qt.TextInteractionFlag.TextSelectableByMouse)
self._vbox.addWidget(text_label)
def _init_key_label(self):
assert (self._key_grid is None), self._key_grid
self._key_grid = QGridLayout()
self._key_grid.setVerticalSpacing(0)
all_bindings = config.key_instance.get_reverse_bindings_for(self.KEY_MODE.name)
labels = []
has_bindings = False
for (cmd, text) in self._allowed_commands():
bindings = all_bindings.get(cmd, [])
if bindings:
has_bindings = True
binding = None
preferred = ['<enter>', '<escape>']
for pref in preferred:
if (pref in bindings):
binding = pref
if (binding is None):
binding = bindings[0]
key_label = QLabel('<b>{}</b>'.format(html.escape(binding)))
else:
key_label = QLabel(f'<b>unbound</b> (<tt>{html.escape(cmd)}</tt>)')
text_label = QLabel(text)
labels.append((key_label, text_label))
for (i, (key_label, text_label)) in enumerate(labels):
self._key_grid.addWidget(key_label, i, 0)
self._key_grid.addWidget(text_label, i, 1)
spacer = QSpacerItem(0, 0, QSizePolicy.Policy.Expanding)
self._key_grid.addItem(spacer, 0, 2)
self._vbox.addLayout(self._key_grid)
if (not has_bindings):
label = QLabel(f'<b>Note:</b> You seem to have unbound all keys for this prompt (<tt>{self.KEY_MODE.name}</tt> key mode).<br/>Run <tt>qutebrowser :CMD</tt> with a command from above to close this prompt, then fix this in your config.')
self._vbox.addWidget(label)
def _check_save_support(self, save):
if save:
raise UnsupportedOperationError('Saving answers is only possible with yes/no prompts.')
def accept(self, value=None, save=False):
raise NotImplementedError
def download_open(self, cmdline, pdfjs):
utils.unused(cmdline)
utils.unused(pdfjs)
raise UnsupportedOperationError
def item_focus(self, _which):
raise UnsupportedOperationError
def _allowed_commands(self):
raise NotImplementedError |
class AttrVI_ATTR_SEND_END_EN(BooleanAttribute):
resources = [(constants.InterfaceType.asrl, 'INSTR'), (constants.InterfaceType.gpib, 'INSTR'), (constants.InterfaceType.gpib, 'INTFC'), (constants.InterfaceType.tcpip, 'INSTR'), (constants.InterfaceType.tcpip, 'SOCKET'), (constants.InterfaceType.usb, 'INSTR'), (constants.InterfaceType.usb, 'RAW'), (constants.InterfaceType.vxi, 'INSTR'), (constants.InterfaceType.vxi, 'SERVANT')]
py_name = 'send_end'
visa_name = 'VI_ATTR_SEND_END_EN'
visa_type = 'ViBoolean'
default = True
(read, write, local) = (True, True, True) |
def test_creating_new_catalog():
cf = OSC.CatalogFile()
cf.create_catalog('my_catalog.xml', 'VehicleCatalog', 'My first vehicle catalog', 'Mandolin')
bb = OSC.BoundingBox(2, 5, 1.8, 2.0, 0, 0.9)
fa = OSC.Axle(0., 0.8, 1.68, 2.98, 0.4)
ba = OSC.Axle(0., 0.8, 1.68, 0, 0.4)
white_veh = OSC.Vehicle('car_white', OSC.VehicleCategory.car, bb, fa, ba, 69, 10, 10)
white_veh.add_property_file('../models/car_white.osgb')
white_veh.add_property('control', 'internal')
white_veh.add_property('model_id', '0')
cf.add_to_catalog(white_veh)
prettyprint(cf.catalog_element) |
class Evaluator(object):
def __init__(self, args, num_gpus):
self.args = args
self.num_gpus = num_gpus
self.device = torch.device(args.device)
val_dataset = CSValSet(args.data, os.path.join(os.getcwd(), '../dataset/list/cityscapes/val.lst'), crop_size=(1024, 2048))
val_sampler = make_data_sampler(val_dataset, False, args.distributed)
val_batch_sampler = make_batch_data_sampler(val_sampler, images_per_batch=1)
self.val_loader = data.DataLoader(dataset=val_dataset, batch_sampler=val_batch_sampler, num_workers=args.workers, pin_memory=True)
BatchNorm2d = (nn.SyncBatchNorm if args.distributed else nn.BatchNorm2d)
self.model = get_segmentation_model(model=args.model, backbone=args.backbone, local_rank=args.local_rank, pretrained=args.pretrained, pretrained_base='None', aux=args.aux, norm_layer=BatchNorm2d, num_class=val_dataset.num_class).to(self.device)
if args.distributed:
self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[args.local_rank], output_device=args.local_rank)
self.model.to(self.device)
self.metric = SegmentationMetric(val_dataset.num_class)
def reduce_tensor(self, tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt
def eval(self):
self.metric.reset()
self.model.eval()
if self.args.distributed:
model = self.model.module
else:
model = self.model
logger.info('Start validation, Total sample: {:d}'.format(len(self.val_loader)))
overall_embeddings = []
overall_labels = []
for (i, (image, target, filename)) in enumerate(self.val_loader):
image = image.to(self.device)
target = target.float().to(self.device)
print('progress: {}/{}'.format(i, len(self.val_loader)))
if (i == 50):
break
with torch.no_grad():
outputs = model(image)
embeddings = outputs[(- 1)]
(B, C, H, W) = embeddings.size()
embeddings = embeddings.permute(0, 2, 3, 1)
embeddings = embeddings.contiguous().view((- 1), embeddings.shape[(- 1)])
labels = target
labels = F.interpolate(labels.unsqueeze(1), (H, W), mode='nearest')
labels = labels.permute(0, 2, 3, 1)
labels = labels.contiguous().view((- 1), 1)
index_1 = (~ (labels == (- 1))).squeeze((- 1))
embeddings = embeddings[index_1]
labels = labels[index_1]
overall_embeddings.append(embeddings)
overall_labels.append(labels)
if (self.args.local_rank == 0):
overall_embeddings = torch.cat(overall_embeddings, dim=0)
overall_labels = torch.cat(overall_labels, dim=0)
print('overall_embeddings', overall_embeddings.size())
print('overall_labels', overall_labels.size())
overall_embeddings = overall_embeddings.cpu().numpy()
overall_labels = overall_labels.cpu().numpy()
import numpy as np
np.save('seg_embeddings.npy', overall_embeddings)
np.save('seg_labels.npy', overall_labels)
synchronize() |
def call_main(cfg: FairseqConfig, main, **kwargs):
if (cfg.distributed_training.distributed_init_method is None):
infer_init_method(cfg.distributed_training)
if (cfg.distributed_training.distributed_init_method is not None):
if (not cfg.distributed_training.distributed_no_spawn):
start_rank = cfg.distributed_training.distributed_rank
cfg.distributed_training.distributed_rank = None
kwargs['start_rank'] = start_rank
torch.multiprocessing.spawn(fn=distributed_main, args=(main, cfg, kwargs), nprocs=min(torch.cuda.device_count(), cfg.distributed_training.distributed_world_size))
else:
distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs)
elif (cfg.common.tpu and (cfg.distributed_training.distributed_world_size > 1)):
import torch_xla.distributed.xla_multiprocessing as xmp
torch.multiprocessing.set_sharing_strategy('file_system')
xmp.spawn(fn=distributed_main, args=(main, cfg, kwargs), nprocs=8)
else:
main(cfg, **kwargs) |
class GlBuffer():
def __init__(self, target=gl.GL_ARRAY_BUFFER, usage=gl.GL_STATIC_DRAW):
self.id_ = gl.glGenBuffers(1)
self.target_ = target
self.usage_ = usage
def assign(self, array):
gl.glBindBuffer(self.target_, self.id_)
gl.glBufferData(self.target_, array, self.usage_)
gl.glBindBuffer(self.target_, 0)
def bind(self):
gl.glBindBuffer(self.target_, self.id_)
def release(self):
gl.glBindBuffer(self.target_, 0)
def id(self):
return self.id_
def usage(self):
return self.usage_
def target(self):
return self.target_ |
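# Hedged usage sketch for GlBuffer (assumes PyOpenGL imported as `gl` and a
# current OpenGL context, e.g. created via GLFW or Qt): upload vertex data
# once, then bind the buffer around attribute setup and draw calls.
import numpy as np

vertices = np.array([0.0, 0.0, 1.0, 0.0, 0.0, 1.0], dtype=np.float32)
vbo = GlBuffer()       # defaults: GL_ARRAY_BUFFER, GL_STATIC_DRAW
vbo.assign(vertices)   # glBufferData copies the array into GPU memory
vbo.bind()             # bind before glVertexAttribPointer / glDrawArrays
# ... configure vertex attributes and issue draw calls here ...
vbo.release() |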
import torch.nn as nn
import torch.nn.functional as F

class Encoder(nn.Module):
def __init__(self, normalize=False):
super(Encoder, self).__init__()
self.f = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=128, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True), nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear((9 * 128), 1024))
self.normalize = normalize
def forward(self, _input):
feature = self.f(_input)
if self.normalize:
return F.normalize(feature, dim=(- 1))
else:
return feature |
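# Hedged shape check for the Encoder above: the Linear(9 * 128, 1024) head
# implies a 3x3 feature map after three 2x2 max-pools, i.e. 24x24
# single-channel inputs.
import torch

enc = Encoder(normalize=True)
x = torch.randn(4, 1, 24, 24)
z = enc(x)
assert z.shape == (4, 1024)
# with normalize=True every embedding is L2-normalized to unit length
assert torch.allclose(z.norm(dim=-1), torch.ones(4), atol=1e-5) |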
@pytest.mark.parametrize('membership_role', membership_roles)
def test_project_join_user_error(db, client, membership_role):
client.login(username='user', password='user')
project = Project.objects.get(id=1)
user = get_user_model().objects.get(username='user')
invite = Invite(project=project, user=get_user_model().objects.get(username='guest'), role=membership_role)
invite.make_token()
invite.save()
url = reverse('project_join', args=[invite.token])
response = client.get(url)
assert (response.status_code == 200)
assert (b'guest' in response.content)
assert (not Membership.objects.filter(project=project, user=user, role=membership_role).exists()) |
class SnliProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(pd.read_csv(os.path.join(data_dir, 'train.csv'), sep='\t', header=None, keep_default_na=False).values.tolist(), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(pd.read_csv(os.path.join(data_dir, 'dev.csv'), sep='\t', header=None, keep_default_na=False).values.tolist(), 'dev')
def get_test_examples(self, data_dir):
return self._create_examples(pd.read_csv(os.path.join(data_dir, 'test.csv'), sep='\t', header=None, keep_default_na=False).values.tolist(), 'test')
def get_unlabeled_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'unlabeled.jsonl'), 'unlabeled')
def get_dev32_examples(self, data_dir):
return self._create_examples(pd.read_csv(os.path.join(data_dir, 'dev.csv'), sep='\t', header=None, keep_default_na=False).values.tolist(), 'dev')
def get_labels(self):
return ['contradiction', 'entailment', 'neutral']
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = ('%s-%s' % (set_type, i))
examples.append(InputExample(guid=guid, text_a=line[0], text_b=line[1], task=line[2], label=line[3]))
return examples |
def verify_opt(opt):
assert (opt.nz >= opt.infogan_nz)
assert ((opt.infogan_nz == 0) or (opt.infogan_lambda > 0))
assert ((len(opt.layers_to_reg) == 0) or (min(opt.layers_to_reg) >= 1))
assert ((opt.epsilon > 0) or (opt.hp_lambda == 0))
assert ((opt.num_rademacher_samples >= 2) or (opt.hp_lambda == 0))
assert (opt.num_gpus in [1, 2, 4, 8]) |
def main():
args = parser.parse_args()
with open(args.config_file) as f:
config_template = Template(f.read())
args.out_dir.mkdir(parents=True, exist_ok=True)
print('Writing to directory', args.out_dir)
for (scene, split) in scenes:
config = config_template.substitute(scene=scene, split=split, rank_type=args.rank_type, explore_type=args.explore_type)
config_file_name = f'{scene.lower()}.yaml'
with open((args.out_dir / config_file_name), 'w') as f:
f.write(config)
print('Generated file', config_file_name)
print('Done!') |
class TestShowFixtures():
def test_funcarg_compat(self, pytester: Pytester) -> None:
config = pytester.parseconfigure('--funcargs')
assert config.option.showfixtures
def test_show_help(self, pytester: Pytester) -> None:
result = pytester.runpytest('--fixtures', '--help')
assert (not result.ret)
def test_show_fixtures(self, pytester: Pytester) -> None:
result = pytester.runpytest('--fixtures')
result.stdout.fnmatch_lines(['tmp_path_factory [[]session scope[]] -- .../_pytest/tmpdir.py:*', '*for the test session*', 'tmp_path -- .../_pytest/tmpdir.py:*', '*temporary directory*'])
def test_show_fixtures_verbose(self, pytester: Pytester) -> None:
result = pytester.runpytest('--fixtures', '-v')
result.stdout.fnmatch_lines(['tmp_path_factory [[]session scope[]] -- .../_pytest/tmpdir.py:*', '*for the test session*', 'tmp_path -- .../_pytest/tmpdir.py:*', '*temporary directory*'])
def test_show_fixtures_testmodule(self, pytester: Pytester) -> None:
p = pytester.makepyfile('\n import pytest\n @pytest.fixture\n def _arg0():\n """ hidden """\n @pytest.fixture\n def arg1():\n """ hello world """\n ')
result = pytester.runpytest('--fixtures', p)
result.stdout.fnmatch_lines('\n *tmp_path -- *\n *fixtures defined from*\n *arg1 -- test_show_fixtures_testmodule.py:6*\n *hello world*\n ')
result.stdout.no_fnmatch_line('*arg0*')
@pytest.mark.parametrize('testmod', [True, False])
def test_show_fixtures_conftest(self, pytester: Pytester, testmod) -> None:
pytester.makeconftest('\n import pytest\n @pytest.fixture\n def arg1():\n """ hello world """\n ')
if testmod:
pytester.makepyfile('\n def test_hello():\n pass\n ')
result = pytester.runpytest('--fixtures')
result.stdout.fnmatch_lines('\n *tmp_path*\n *fixtures defined from*conftest*\n *arg1*\n *hello world*\n ')
def test_show_fixtures_trimmed_doc(self, pytester: Pytester) -> None:
p = pytester.makepyfile(textwrap.dedent(' import pytest\n @pytest.fixture\n def arg1():\n """\n line1\n line2\n\n """\n @pytest.fixture\n def arg2():\n """\n line1\n line2\n\n """\n '))
result = pytester.runpytest('--fixtures', p)
result.stdout.fnmatch_lines(textwrap.dedent(' * fixtures defined from test_show_fixtures_trimmed_doc *\n arg2 -- test_show_fixtures_trimmed_doc.py:10\n line1\n line2\n arg1 -- test_show_fixtures_trimmed_doc.py:3\n line1\n line2\n '))
def test_show_fixtures_indented_doc(self, pytester: Pytester) -> None:
p = pytester.makepyfile(textwrap.dedent(' import pytest\n @pytest.fixture\n def fixture1():\n """\n line1\n indented line\n """\n '))
result = pytester.runpytest('--fixtures', p)
result.stdout.fnmatch_lines(textwrap.dedent(' * fixtures defined from test_show_fixtures_indented_doc *\n fixture1 -- test_show_fixtures_indented_doc.py:3\n line1\n indented line\n '))
def test_show_fixtures_indented_doc_first_line_unindented(self, pytester: Pytester) -> None:
p = pytester.makepyfile(textwrap.dedent(' import pytest\n @pytest.fixture\n def fixture1():\n """line1\n line2\n indented line\n """\n '))
result = pytester.runpytest('--fixtures', p)
result.stdout.fnmatch_lines(textwrap.dedent(' * fixtures defined from test_show_fixtures_indented_doc_first_line_unindented *\n fixture1 -- test_show_fixtures_indented_doc_first_line_unindented.py:3\n line1\n line2\n indented line\n '))
def test_show_fixtures_indented_in_class(self, pytester: Pytester) -> None:
p = pytester.makepyfile(textwrap.dedent(' import pytest\n class TestClass(object):\n @pytest.fixture\n def fixture1(self):\n """line1\n line2\n indented line\n """\n '))
result = pytester.runpytest('--fixtures', p)
result.stdout.fnmatch_lines(textwrap.dedent(' * fixtures defined from test_show_fixtures_indented_in_class *\n fixture1 -- test_show_fixtures_indented_in_class.py:4\n line1\n line2\n indented line\n '))
def test_show_fixtures_different_files(self, pytester: Pytester) -> None:
pytester.makepyfile(test_a='\n import pytest\n\n @pytest.fixture\n def fix_a():\n """Fixture A"""\n pass\n\n def test_a(fix_a):\n pass\n ')
pytester.makepyfile(test_b='\n import pytest\n\n @pytest.fixture\n def fix_b():\n """Fixture B"""\n pass\n\n def test_b(fix_b):\n pass\n ')
result = pytester.runpytest('--fixtures')
result.stdout.fnmatch_lines('\n * fixtures defined from test_a *\n fix_a -- test_a.py:4\n Fixture A\n\n * fixtures defined from test_b *\n fix_b -- test_b.py:4\n Fixture B\n ')
def test_show_fixtures_with_same_name(self, pytester: Pytester) -> None:
pytester.makeconftest('\n import pytest\n @pytest.fixture\n def arg1():\n """Hello World in conftest.py"""\n return "Hello World"\n ')
pytester.makepyfile('\n def test_foo(arg1):\n assert arg1 == "Hello World"\n ')
pytester.makepyfile('\n import pytest\n @pytest.fixture\n def arg1():\n """Hi from test module"""\n return "Hi"\n def test_bar(arg1):\n assert arg1 == "Hi"\n ')
result = pytester.runpytest('--fixtures')
result.stdout.fnmatch_lines('\n * fixtures defined from conftest *\n arg1 -- conftest.py:3\n Hello World in conftest.py\n\n * fixtures defined from test_show_fixtures_with_same_name *\n arg1 -- test_show_fixtures_with_same_name.py:3\n Hi from test module\n ')
def test_fixture_disallow_twice(self):
with pytest.raises(ValueError):
@pytest.fixture
@pytest.fixture
def foo():
raise NotImplementedError()
class AsyncQdrantClient(AsyncQdrantFastembedMixin):
def __init__(self, location: Optional[str]=None, url: Optional[str]=None, port: Optional[int]=6333, grpc_port: int=6334, prefer_grpc: bool=False, https: Optional[bool]=None, api_key: Optional[str]=None, prefix: Optional[str]=None, timeout: Optional[float]=None, host: Optional[str]=None, path: Optional[str]=None, force_disable_check_same_thread: bool=False, **kwargs: Any):
super().__init__(**kwargs)
self._client: AsyncQdrantBase
if (sum([(param is not None) for param in (location, url, host, path)]) > 1):
raise ValueError('Only one of <location>, <url>, <host> or <path> should be specified.')
if (location == ':memory:'):
self._client = AsyncQdrantLocal(location=location, force_disable_check_same_thread=force_disable_check_same_thread)
elif (path is not None):
self._client = AsyncQdrantLocal(location=path, force_disable_check_same_thread=force_disable_check_same_thread)
else:
if ((location is not None) and (url is None)):
url = location
self._client = AsyncQdrantRemote(url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, **kwargs)
self._is_fastembed_installed: Optional[bool] = None
if (self._is_fastembed_installed is None):
try:
from fastembed.embedding import DefaultEmbedding
self._is_fastembed_installed = True
except ImportError:
self._is_fastembed_installed = False
async def close(self, **kwargs: Any) -> None:
if hasattr(self, '_client'):
(await self._client.close(**kwargs))
@property
def grpc_collections(self) -> grpc.CollectionsStub:
if isinstance(self._client, AsyncQdrantRemote):
return self._client.grpc_collections
raise NotImplementedError(f'gRPC client is not supported for {type(self._client)}')
@property
def grpc_points(self) -> grpc.PointsStub:
if isinstance(self._client, AsyncQdrantRemote):
return self._client.grpc_points
raise NotImplementedError(f'gRPC client is not supported for {type(self._client)}')
@property
def rest(self) -> AsyncApis[AsyncApiClient]:
if isinstance(self._client, AsyncQdrantRemote):
return self._client.rest
raise NotImplementedError(f'REST client is not supported for {type(self._client)}')
@property
def http(self) -> AsyncApis[AsyncApiClient]:
if isinstance(self._client, AsyncQdrantRemote):
return self._client.http
raise NotImplementedError(f'REST client is not supported for {type(self._client)}')
async def search_batch(self, collection_name: str, requests: Sequence[types.SearchRequest], timeout: Optional[int]=None, consistency: Optional[types.ReadConsistency]=None, **kwargs: Any) -> List[List[types.ScoredPoint]]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.search_batch(collection_name=collection_name, requests=requests, consistency=consistency, timeout=timeout, **kwargs))
async def search(self, collection_name: str, query_vector: Union[(types.NumpyArray, Sequence[float], Tuple[(str, List[float])], types.NamedVector, types.NamedSparseVector)], query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, offset: Optional[int]=None, with_payload: Union[(bool, Sequence[str], types.PayloadSelector)]=True, with_vectors: Union[(bool, Sequence[str])]=False, score_threshold: Optional[float]=None, append_payload: bool=True, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[types.ScoredPoint]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.search(collection_name=collection_name, query_vector=query_vector, query_filter=query_filter, search_params=search_params, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, score_threshold=score_threshold, append_payload=append_payload, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs))
async def search_groups(self, collection_name: str, query_vector: Union[(types.NumpyArray, Sequence[float], Tuple[(str, List[float])], types.NamedVector, types.NamedSparseVector)], group_by: str, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, group_size: int=1, with_payload: Union[(bool, Sequence[str], types.PayloadSelector)]=True, with_vectors: Union[(bool, Sequence[str])]=False, score_threshold: Optional[float]=None, with_lookup: Optional[types.WithLookupInterface]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> types.GroupsResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.search_groups(collection_name=collection_name, query_vector=query_vector, group_by=group_by, query_filter=query_filter, search_params=search_params, limit=limit, group_size=group_size, with_payload=with_payload, with_vectors=with_vectors, score_threshold=score_threshold, with_lookup=with_lookup, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs))
async def recommend_batch(self, collection_name: str, requests: Sequence[types.RecommendRequest], consistency: Optional[types.ReadConsistency]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[List[types.ScoredPoint]]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.recommend_batch(collection_name=collection_name, requests=requests, consistency=consistency, timeout=timeout, **kwargs))
async def recommend(self, collection_name: str, positive: Optional[Sequence[types.RecommendExample]]=None, negative: Optional[Sequence[types.RecommendExample]]=None, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, offset: int=0, with_payload: Union[(bool, List[str], types.PayloadSelector)]=True, with_vectors: Union[(bool, List[str])]=False, score_threshold: Optional[float]=None, using: Optional[str]=None, lookup_from: Optional[types.LookupLocation]=None, strategy: Optional[types.RecommendStrategy]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[types.ScoredPoint]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.recommend(collection_name=collection_name, positive=positive, negative=negative, query_filter=query_filter, search_params=search_params, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, score_threshold=score_threshold, using=using, lookup_from=lookup_from, consistency=consistency, shard_key_selector=shard_key_selector, strategy=strategy, timeout=timeout, **kwargs))
async def recommend_groups(self, collection_name: str, group_by: str, positive: Optional[Sequence[types.RecommendExample]]=None, negative: Optional[Sequence[types.RecommendExample]]=None, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, group_size: int=1, score_threshold: Optional[float]=None, with_payload: Union[(bool, Sequence[str], types.PayloadSelector)]=True, with_vectors: Union[(bool, Sequence[str])]=False, using: Optional[str]=None, lookup_from: Optional[types.LookupLocation]=None, with_lookup: Optional[types.WithLookupInterface]=None, strategy: Optional[types.RecommendStrategy]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> types.GroupsResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.recommend_groups(collection_name=collection_name, group_by=group_by, positive=positive, negative=negative, query_filter=query_filter, search_params=search_params, limit=limit, group_size=group_size, score_threshold=score_threshold, with_payload=with_payload, with_vectors=with_vectors, using=using, lookup_from=lookup_from, with_lookup=with_lookup, strategy=strategy, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs))
async def discover(self, collection_name: str, target: Optional[types.TargetVector]=None, context: Optional[Sequence[types.ContextExamplePair]]=None, query_filter: Optional[types.Filter]=None, search_params: Optional[types.SearchParams]=None, limit: int=10, offset: int=0, with_payload: Union[(bool, List[str], types.PayloadSelector)]=True, with_vectors: Union[(bool, List[str])]=False, using: Optional[str]=None, lookup_from: Optional[types.LookupLocation]=None, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[types.ScoredPoint]:
return (await self._client.discover(collection_name=collection_name, target=target, context=context, query_filter=query_filter, search_params=search_params, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, using=using, lookup_from=lookup_from, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs))
async def discover_batch(self, collection_name: str, requests: Sequence[types.DiscoverRequest], consistency: Optional[types.ReadConsistency]=None, timeout: Optional[int]=None, **kwargs: Any) -> List[List[types.ScoredPoint]]:
return (await self._client.discover_batch(collection_name=collection_name, requests=requests, consistency=consistency, timeout=timeout, **kwargs))
async def scroll(self, collection_name: str, scroll_filter: Optional[types.Filter]=None, limit: int=10, offset: Optional[types.PointId]=None, with_payload: Union[(bool, Sequence[str], types.PayloadSelector)]=True, with_vectors: Union[(bool, Sequence[str])]=False, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> Tuple[(List[types.Record], Optional[types.PointId])]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.scroll(collection_name=collection_name, scroll_filter=scroll_filter, limit=limit, offset=offset, with_payload=with_payload, with_vectors=with_vectors, consistency=consistency, shard_key_selector=shard_key_selector, **kwargs))
async def count(self, collection_name: str, count_filter: Optional[types.Filter]=None, exact: bool=True, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.CountResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.count(collection_name=collection_name, count_filter=count_filter, exact=exact, shard_key_selector=shard_key_selector, **kwargs))
async def upsert(self, collection_name: str, points: types.Points, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.upsert(collection_name=collection_name, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs))
async def update_vectors(self, collection_name: str, points: Sequence[types.PointVectors], wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.update_vectors(collection_name=collection_name, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector))
async def delete_vectors(self, collection_name: str, vectors: Sequence[str], points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete_vectors(collection_name=collection_name, vectors=vectors, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector))
async def retrieve(self, collection_name: str, ids: Sequence[types.PointId], with_payload: Union[(bool, Sequence[str], types.PayloadSelector)]=True, with_vectors: Union[(bool, Sequence[str])]=False, consistency: Optional[types.ReadConsistency]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> List[types.Record]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.retrieve(collection_name=collection_name, ids=ids, with_payload=with_payload, with_vectors=with_vectors, consistency=consistency, shard_key_selector=shard_key_selector, **kwargs))
async def delete(self, collection_name: str, points_selector: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete(collection_name=collection_name, points_selector=points_selector, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs))
async def set_payload(self, collection_name: str, payload: types.Payload, points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.set_payload(collection_name=collection_name, payload=payload, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs))
async def overwrite_payload(self, collection_name: str, payload: types.Payload, points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.overwrite_payload(collection_name=collection_name, payload=payload, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs))
async def delete_payload(self, collection_name: str, keys: Sequence[str], points: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete_payload(collection_name=collection_name, keys=keys, points=points, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs))
async def clear_payload(self, collection_name: str, points_selector: types.PointsSelector, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, shard_key_selector: Optional[types.ShardKeySelector]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.clear_payload(collection_name=collection_name, points_selector=points_selector, wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, **kwargs))
async def batch_update_points(self, collection_name: str, update_operations: Sequence[types.UpdateOperation], wait: bool=True, ordering: Optional[types.WriteOrdering]=None, **kwargs: Any) -> List[types.UpdateResult]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.batch_update_points(collection_name=collection_name, update_operations=update_operations, wait=wait, ordering=ordering, **kwargs))
async def update_collection_aliases(self, change_aliases_operations: Sequence[types.AliasOperations], timeout: Optional[int]=None, **kwargs: Any) -> bool:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.update_collection_aliases(change_aliases_operations=change_aliases_operations, timeout=timeout, **kwargs))
async def get_collection_aliases(self, collection_name: str, **kwargs: Any) -> types.CollectionsAliasesResponse:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.get_collection_aliases(collection_name=collection_name, **kwargs))
async def get_aliases(self, **kwargs: Any) -> types.CollectionsAliasesResponse:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.get_aliases(**kwargs))
async def get_collections(self, **kwargs: Any) -> types.CollectionsResponse:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.get_collections(**kwargs))
async def get_collection(self, collection_name: str, **kwargs: Any) -> types.CollectionInfo:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.get_collection(collection_name=collection_name, **kwargs))
async def update_collection(self, collection_name: str, optimizers_config: Optional[types.OptimizersConfigDiff]=None, collection_params: Optional[types.CollectionParamsDiff]=None, vectors_config: Optional[types.VectorsConfigDiff]=None, hnsw_config: Optional[types.HnswConfigDiff]=None, quantization_config: Optional[types.QuantizationConfigDiff]=None, timeout: Optional[int]=None, sparse_vectors_config: Optional[Mapping[(str, types.SparseVectorParams)]]=None, **kwargs: Any) -> bool:
if (('optimizer_config' in kwargs) and (optimizers_config is not None)):
raise ValueError('Only one of optimizer_config and optimizers_config should be specified')
if ('optimizer_config' in kwargs):
optimizers_config = kwargs.pop('optimizer_config')
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.update_collection(collection_name=collection_name, optimizers_config=optimizers_config, collection_params=collection_params, vectors_config=vectors_config, hnsw_config=hnsw_config, quantization_config=quantization_config, timeout=timeout, sparse_vectors_config=sparse_vectors_config, **kwargs))
async def delete_collection(self, collection_name: str, timeout: Optional[int]=None, **kwargs: Any) -> bool:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete_collection(collection_name=collection_name, timeout=timeout, **kwargs))
async def create_collection(self, collection_name: str, vectors_config: Union[(types.VectorParams, Mapping[(str, types.VectorParams)])], sparse_vectors_config: Optional[Mapping[(str, types.SparseVectorParams)]]=None, shard_number: Optional[int]=None, sharding_method: Optional[types.ShardingMethod]=None, replication_factor: Optional[int]=None, write_consistency_factor: Optional[int]=None, on_disk_payload: Optional[bool]=None, hnsw_config: Optional[types.HnswConfigDiff]=None, optimizers_config: Optional[types.OptimizersConfigDiff]=None, wal_config: Optional[types.WalConfigDiff]=None, quantization_config: Optional[types.QuantizationConfig]=None, init_from: Optional[types.InitFrom]=None, timeout: Optional[int]=None, **kwargs: Any) -> bool:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.create_collection(collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, sharding_method=sharding_method, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, **kwargs))
async def recreate_collection(self, collection_name: str, vectors_config: Union[(types.VectorParams, Mapping[(str, types.VectorParams)])], sparse_vectors_config: Optional[Mapping[(str, types.SparseVectorParams)]]=None, shard_number: Optional[int]=None, sharding_method: Optional[types.ShardingMethod]=None, replication_factor: Optional[int]=None, write_consistency_factor: Optional[int]=None, on_disk_payload: Optional[bool]=None, hnsw_config: Optional[types.HnswConfigDiff]=None, optimizers_config: Optional[types.OptimizersConfigDiff]=None, wal_config: Optional[types.WalConfigDiff]=None, quantization_config: Optional[types.QuantizationConfig]=None, init_from: Optional[types.InitFrom]=None, timeout: Optional[int]=None, **kwargs: Any) -> bool:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.recreate_collection(collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, sharding_method=sharding_method, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, **kwargs))
def upload_records(self, collection_name: str, records: Iterable[types.Record], batch_size: int=64, parallel: int=1, method: Optional[str]=None, max_retries: int=3, wait: bool=False, **kwargs: Any) -> None:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return self._client.upload_records(collection_name=collection_name, records=records, batch_size=batch_size, parallel=parallel, method=method, max_retries=max_retries, wait=wait, **kwargs)
def upload_collection(self, collection_name: str, vectors: Union[(Dict[(str, types.NumpyArray)], types.NumpyArray, Iterable[types.VectorStruct])], payload: Optional[Iterable[Dict[(Any, Any)]]]=None, ids: Optional[Iterable[types.PointId]]=None, batch_size: int=64, parallel: int=1, method: Optional[str]=None, max_retries: int=3, wait: bool=False, **kwargs: Any) -> None:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return self._client.upload_collection(collection_name=collection_name, vectors=vectors, payload=payload, ids=ids, batch_size=batch_size, parallel=parallel, method=method, max_retries=max_retries, wait=wait, **kwargs)
async def create_payload_index(self, collection_name: str, field_name: str, field_schema: Optional[types.PayloadSchemaType]=None, field_type: Optional[types.PayloadSchemaType]=None, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.create_payload_index(collection_name=collection_name, field_name=field_name, field_schema=field_schema, field_type=field_type, wait=wait, ordering=ordering, **kwargs))
async def delete_payload_index(self, collection_name: str, field_name: str, wait: bool=True, ordering: Optional[types.WriteOrdering]=None, **kwargs: Any) -> types.UpdateResult:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete_payload_index(collection_name=collection_name, field_name=field_name, wait=wait, ordering=ordering, **kwargs))
async def list_snapshots(self, collection_name: str, **kwargs: Any) -> List[types.SnapshotDescription]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.list_snapshots(collection_name=collection_name, **kwargs))
async def create_snapshot(self, collection_name: str, wait: bool=True, **kwargs: Any) -> Optional[types.SnapshotDescription]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.create_snapshot(collection_name=collection_name, wait=wait, **kwargs))
async def delete_snapshot(self, collection_name: str, snapshot_name: str, wait: bool=True, **kwargs: Any) -> Optional[bool]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete_snapshot(collection_name=collection_name, snapshot_name=snapshot_name, wait=wait, **kwargs))
async def list_full_snapshots(self, **kwargs: Any) -> List[types.SnapshotDescription]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.list_full_snapshots(**kwargs))
async def create_full_snapshot(self, wait: bool=True, **kwargs: Any) -> Optional[types.SnapshotDescription]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.create_full_snapshot(wait=wait, **kwargs))
async def delete_full_snapshot(self, snapshot_name: str, wait: bool=True, **kwargs: Any) -> Optional[bool]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete_full_snapshot(snapshot_name=snapshot_name, wait=wait, **kwargs))
async def recover_snapshot(self, collection_name: str, location: str, priority: Optional[types.SnapshotPriority]=None, wait: bool=True, **kwargs: Any) -> Optional[bool]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.recover_snapshot(collection_name=collection_name, location=location, priority=priority, wait=wait, **kwargs))
async def list_shard_snapshots(self, collection_name: str, shard_id: int, **kwargs: Any) -> List[types.SnapshotDescription]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.list_shard_snapshots(collection_name=collection_name, shard_id=shard_id, **kwargs))
async def create_shard_snapshot(self, collection_name: str, shard_id: int, wait: bool=True, **kwargs: Any) -> Optional[types.SnapshotDescription]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.create_shard_snapshot(collection_name=collection_name, shard_id=shard_id, wait=wait, **kwargs))
async def delete_shard_snapshot(self, collection_name: str, shard_id: int, snapshot_name: str, wait: bool=True, **kwargs: Any) -> Optional[bool]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.delete_shard_snapshot(collection_name=collection_name, shard_id=shard_id, snapshot_name=snapshot_name, wait=wait, **kwargs))
async def recover_shard_snapshot(self, collection_name: str, shard_id: int, location: str, priority: Optional[types.SnapshotPriority]=None, wait: bool=True, **kwargs: Any) -> Optional[bool]:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.recover_shard_snapshot(collection_name=collection_name, shard_id=shard_id, location=location, priority=priority, wait=wait, **kwargs))
async def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.lock_storage(reason=reason, **kwargs))
async def unlock_storage(self, **kwargs: Any) -> types.LocksOption:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.unlock_storage(**kwargs))
async def get_locks(self, **kwargs: Any) -> types.LocksOption:
assert (len(kwargs) == 0), f'Unknown arguments: {list(kwargs.keys())}'
return (await self._client.get_locks(**kwargs))
async def create_shard_key(self, collection_name: str, shard_key: types.ShardKey, shards_number: Optional[int]=None, replication_factor: Optional[int]=None, placement: Optional[List[int]]=None, **kwargs: Any) -> bool:
return (await self._client.create_shard_key(collection_name=collection_name, shard_key=shard_key, shards_number=shards_number, replication_factor=replication_factor, placement=placement, **kwargs))
async def delete_shard_key(self, collection_name: str, shard_key: types.ShardKey, **kwargs: Any) -> bool:
return (await self._client.delete_shard_key(collection_name=collection_name, shard_key=shard_key, **kwargs)) |
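# Hedged usage sketch (not from the source): every wrapper above simply rejects
# unknown kwargs and forwards to the inner async client, so typical use looks like:
#   client = AsyncQdrantClient(':memory:')
#   await client.create_collection('demo', vectors_config=types.VectorParams(size=4, distance=types.Distance.COSINE))
#   await client.create_snapshot('demo') |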
class KTI1(DataElementGroup):
iban = DataElementField(type='an', max_length=34, required=False, _d='IBAN')
bic = DataElementField(type='an', max_length=11, required=False, _d='BIC')
account_number = DataElementField(type='id', required=False, _d='Konto-/Depotnummer')
subaccount_number = DataElementField(type='id', required=False, _d='Unterkontomerkmal')
bank_identifier = DataElementGroupField(type=BankIdentifier, required=False, _d='Kreditinstitutskennung')
@classmethod
def from_sepa_account(cls, acc):
return cls(iban=acc.iban, bic=acc.bic) |
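# Hedged usage sketch (not from the source; assumes an account object exposing
# .iban/.bic, e.g. a fints SEPAAccount): KTI1.from_sepa_account(acc) fills only
# the IBAN/BIC pair and leaves the legacy account-number fields unset. |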
class Mixed_7a(nn.Module):
def __init__(self):
super(Mixed_7a, self).__init__()
self.branch0 = nn.Sequential(BasicConv2d(1088, 256, kernel_size=1, stride=1), BasicConv2d(256, 384, kernel_size=3, stride=2))
self.branch1 = nn.Sequential(BasicConv2d(1088, 256, kernel_size=1, stride=1), BasicConv2d(256, 288, kernel_size=3, stride=2))
self.branch2 = nn.Sequential(BasicConv2d(1088, 256, kernel_size=1, stride=1), BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), BasicConv2d(288, 320, kernel_size=3, stride=2))
self.branch3 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out |
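# Hedged shape-check sketch (not from the source; assumes torch and BasicConv2d
# from the surrounding file): the conv branches emit 384 + 288 + 320 channels and
# the max-pool branch keeps 1088, so concatenation yields 2080 channels at stride 2.
_out = Mixed_7a()(torch.randn(1, 1088, 17, 17))
assert _out.shape == (1, 2080, 8, 8)  # floor((17 - 3) / 2) + 1 == 8 |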
@pytest.mark.parametrize('example, error_msg', [('\n [project]\n name = "myproj"\n version = "1.2"\n requires = [\'pywin32; platform_system=="Windows"\' ]\n ', 'configuration error: .project. must not contain ..requires.. properties')])
def test_invalid_example(tmp_path, example, error_msg):
pyproject = (tmp_path / 'pyproject.toml')
pyproject.write_text(cleandoc(example))
pattern = re.compile(f'invalid pyproject.toml.*{error_msg}.*', (re.M | re.S))
with pytest.raises(ValueError, match=pattern):
read_configuration(pyproject) |
class AoA_Refiner_Layer(nn.Module):
def __init__(self, size, self_attn, feed_forward, dropout):
super(AoA_Refiner_Layer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.use_ff = 0
self.batch_info = []
self.att_layer_idx = 0
if (self.feed_forward is not None):
self.use_ff = 1
self.sublayer = clones(SublayerConnection(size, dropout), (1 + self.use_ff))
self.size = size
def forward(self, x, flag, mask):
self.self_attn.batch_info = self.batch_info
self.self_attn.att_layer_idx = self.att_layer_idx
x = self.sublayer[0](x, (lambda x: self.self_attn(x, x, x, flag, mask)))
return (self.sublayer[(- 1)](x, self.feed_forward) if self.use_ff else x) |
@keras_test
def test_optimizer_get_updates_legacy_interface():
for optimizer_cls in [keras.optimizers.RMSprop, keras.optimizers.SGD, keras.optimizers.Adadelta, keras.optimizers.Adam, keras.optimizers.Adagrad, keras.optimizers.Nadam, keras.optimizers.Adamax]:
optimizer = optimizer_cls()
param = keras.backend.variable(0.0)
loss = keras.backend.mean(param)
constraints = {param: (lambda x: x)}
params = [param]
optimizer.get_updates(params, constraints, loss)
optimizer.get_updates(params, constraints, loss=loss)
optimizer.get_updates(loss, params)
optimizer.get_updates(loss, params=params)
optimizer.get_updates(loss=loss, params=params) |
class SuffixImporter():
scheme = 'suffix'
suffix = None
path_entry = None
@classmethod
def trigger_url(cls):
if (cls.suffix is None):
raise ValueError(('%s.suffix is not set' % cls.__name__))
return ('suffix:%s' % cls.suffix)
@classmethod
def register(cls):
sys.path_hooks.append(cls)
sys.path.append(cls.trigger_url())
def __init__(self, path_entry):
pr = urlparse(str(path_entry))
if ((pr.scheme != self.scheme) or (pr.path != self.suffix)):
raise ImportError()
self.path_entry = path_entry
self._found = {}
def checkpath_iter(self, fullname):
for dirpath in sys.path:
finder = sys.path_importer_cache.get(dirpath)
if isinstance(finder, (type(None), importlib.machinery.FileFinder)):
checkpath = os.path.join(dirpath, '{}.{}'.format(fullname, self.suffix))
(yield checkpath)
def find_module(self, fullname, path=None):
for checkpath in self.checkpath_iter(fullname):
if os.path.isfile(checkpath):
self._found[fullname] = checkpath
return self
return None
def load_module(self, fullname):
assert (fullname in self._found)
if (fullname in sys.modules):
module = sys.modules[fullname]
else:
sys.modules[fullname] = module = types.ModuleType(fullname)
data = None
with open(self._found[fullname]) as f:
data = f.read()
module.__dict__.clear()
module.__file__ = self._found[fullname]
module.__name__ = fullname
module.__loader__ = self
self.process_filedata(module, data)
return module
def process_filedata(self, module, data):
pass |
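# Hedged usage sketch (not from the source): a subclass sets `suffix` and overrides
# process_filedata to execute the file's contents in the module namespace, e.g.
#   class VpyImporter(SuffixImporter):
#       suffix = 'vpy'
#       def process_filedata(self, module, data):
#           exec(compile(data, module.__file__, 'exec'), module.__dict__)
# VpyImporter.register() then appends the hook to sys.path_hooks and the trigger
# URL 'suffix:vpy' to sys.path, after which `import foo` can resolve a foo.vpy file. |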
def test_interactive_with_git_dependencies_with_reference(tester: CommandTester, repo: TestRepository) -> None:
repo.add_package(get_package('pendulum', '2.0.0'))
repo.add_package(get_package('pytest', '3.6.0'))
inputs = ['my-package', '1.2.3', 'This is a description', 'n', 'MIT', '~2.7 || ^3.6', '', 'git+https://github.com/demo/demo.git@develop', '', '', 'pytest', '0', '', '', '\n']
tester.execute(inputs='\n'.join(inputs))
expected = '[tool.poetry]\nname = "my-package"\nversion = "1.2.3"\ndescription = "This is a description"\nauthors = ["Your Name <you@example.com>"]\nlicense = "MIT"\nreadme = "README.md"\n\n[tool.poetry.dependencies]\npython = "~2.7 || ^3.6"\ndemo = {git = "https://github.com/demo/demo.git", rev = "develop"}\n\n[tool.poetry.group.dev.dependencies]\npytest = "^3.6.0"\n'
assert (expected in tester.io.fetch_output()) |
def creatMultiItemUserAdj(dataset, cv):
(trainMat, _, _, trainMat_time, _) = loadData(dataset, cv)
ratingClass = np.unique(trainMat.data).size
(userNum, itemNum) = trainMat.shape
multi_adj = sp.lil_matrix(((ratingClass * itemNum), userNum), dtype=int)
uidList = trainMat.tocoo().row
iidList = trainMat.tocoo().col
rList = trainMat.tocoo().data
for i in range(uidList.size):
uid = uidList[i]
iid = iidList[i]
r = rList[i]
multi_adj[((((iid * ratingClass) + r) - 1), uid)] = trainMat_time[(uid, iid)]
assert (trainMat_time[(uid, iid)] != 0)
a = sp.csr_matrix((multi_adj.shape[1], multi_adj.shape[1]))
b = sp.csr_matrix((multi_adj.shape[0], multi_adj.shape[0]))
multi_adj2 = sp.vstack([sp.hstack([a, multi_adj.T]), sp.hstack([multi_adj, b])])
DIR = os.path.join(os.getcwd(), 'dataset', dataset, 'implicit', 'cv{0}'.format(cv))
path = (DIR + '/multi_item_adj.pkl')
with open(path, 'wb') as fs:
pickle.dump(multi_adj2.tocsr(), fs)
print('create multi_item_feat') |
def main():
opt = parser.parse_args()
opt.cuda = (opt.gpu > (- 1))
if opt.cuda:
torch.cuda.set_device(opt.gpu)
opt.n_best = opt.beam_size
if (opt.output == 'stdout'):
outF = sys.stdout
else:
outF = open(opt.output, 'w')
(pred_score_total, pred_words_total, gold_score_total, gold_words_total) = (0, 0, 0, 0)
src_batches = []
(src_batch, tgt_batch) = ([], [])
count = 0
tgtF = (open(opt.tgt) if opt.tgt else None)
in_file = None
if (opt.src == 'stdin'):
in_file = sys.stdin
opt.batch_size = 1
elif ((opt.encoder_type == 'audio') and (opt.asr_format == 'h5')):
in_file = h5.File(opt.src, 'r')
elif ((opt.encoder_type == 'audio') and (opt.asr_format == 'scp')):
from onmt.data.audio_utils import ArkLoader
audio_data = open(opt.src)
scp_reader = ArkLoader()
else:
in_file = open(opt.src)
predictor = Predictor(opt)
if (opt.encoder_type == 'audio'):
past_audio_data = (open(opt.past_src) if opt.past_src else None)
past_src_batches = list()
s_prev_context = []
t_prev_context = []
i = 0
concats = opt.concat.split('|')
n_models = len(opt.model.split('|'))
if (len(concats) == 1):
concats = (concats * n_models)
assert (len(concats) == n_models), 'The number of models must match the number of concat configs'
for (j, _) in enumerate(concats):
src_batches.append(list())
if past_audio_data:
past_src_batches.append(list())
sub_src = (open(opt.sub_src) if opt.sub_src else None)
sub_src_batch = list()
while True:
try:
scp_path = next(audio_data).strip().split()[1]
line = scp_reader.load_mat(scp_path)
# reconstructed read for the past-context source, which was elided in this copy
# (assumes past_src is an scp list read the same way as src)
if past_audio_data:
past_line = scp_reader.load_mat(next(past_audio_data).strip().split()[1])
else:
past_line = None
except StopIteration:
break
if (opt.stride != 1):
line = line[0::opt.stride]
if (past_line is not None):
past_line = past_line[0::opt.stride]
line = torch.from_numpy(line)
past_line = (torch.from_numpy(past_line) if past_audio_data else None)
original_line = line
src_length = line.size(0)
if _is_oversized(src_batches[0], src_length, opt.batch_size):
print('Batch sizes :', len(src_batches[0]), len(tgt_batch))
pred_score = predictor.predict(src_batches)
count = get_result(pred_score, predictor, count, outF)
(src_batch, tgt_batch, sub_src_batch) = ([], [], [])
for (j, _) in enumerate(src_batches):
src_batches[j] = []
if past_audio_data:
past_src_batches[j] = []
for (j, concat_) in enumerate(concats):
concat = int(concat_)
line = original_line
if (concat != 1):
add = ((concat - (line.size()[0] % concat)) % concat)
z = torch.FloatTensor(add, line.size()[1]).zero_()
line = torch.cat((line, z), 0)
line = line.reshape(((line.size()[0] // concat), (line.size()[1] * concat)))
if past_audio_data:
add = ((concat - (past_line.size()[0] % concat)) % concat)
z = torch.FloatTensor(add, past_line.size()[1]).zero_()
past_line = torch.cat((past_line, z), 0)
past_line = past_line.reshape(((past_line.size()[0] // concat), (past_line.size()[1] * concat)))
src_batches[j].append(line)
if past_audio_data:
past_src_batches[j].append(past_line)
if opt.sub_src:
sline = sub_src.readline().strip()
if (opt.input_type == 'word'):
src_tokens = sline.split()
elif (opt.input_type == 'char'):
src_tokens = list(sline.strip())
sub_src_batch += [src_tokens]
if (len(src_batches[0]) != 0):
print('Batch size:', len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
pred_score = predictor.predict(src_batches)
count = get_result(pred_score, predictor, count, outF)
(src_batch, tgt_batch) = ([], [])
for (j, _) in enumerate(src_batches):
src_batches[j] = []
if past_audio_data:
past_src_batches[j] = []
else:
raise NotImplementedError
if tgtF:
tgtF.close() |
@click.command()  # original group/command decorator name elided in the source
@click.argument('name', default=None, nargs=-1)
@common_options  # reconstructed from handle_common_options(...) in the body
@connection_options  # reconstructed from handle_connection_options(...) in the body
@click.option('--submit/--no-submit')
def apply_stage(name, metadir, accept_metadir, controller, ctrlopt, modelsetup, modelopt, backend, local, verbosity, submit):
handle_common_options(verbosity)
ys = handle_connection_options(metadir, accept_metadir, controller, ctrlopt, modelsetup, modelopt, backend, local)
controller = ys.controller
if (not name):
click_print_applicable_stages(controller)
return
for n in name:
(offset, scopedname) = n.rsplit('/', 1)
rule = controller.adageobj.view(offset).getRule(scopedname)
if (not rule):
click.secho('No such stage {}, pick one of the applicable below:'.format(n), fg='red')
click_print_applicable_stages(controller)
return
if (rule.identifier in [r.identifier for r in controller.adageobj.applied_rules]):
click.secho('Stage {} was already applied.'.format(n), fg='yellow')
continue
if (rule.identifier not in controller.applicable_rules()):
click.secho('Rule {} not yet applicable'.format(n), fg='red')
continue
controller.apply_rules([rule.identifier])
if submit:
(_, s2r, _) = utils.rule_steps_indices(controller.adageobj)
nodes_to_submit = [x for x in controller.submittable_nodes() if (s2r[x] == rule.identifier)]
controller.submit_nodes(nodes_to_submit)
click.secho('Stage {} applied'.format(n), fg='green') |
def test_used_with_session_scope(testdir: Any) -> None:
testdir.makeini('\n [pytest]\n asyncio_mode=auto\n ')
testdir.makepyfile('\n import pytest\n import random\n\n def get_random_number():\n return random.randint(0, 1)\n\n @pytest.fixture(autouse=True, scope="session")\n def randint_mock(session_mocker):\n return session_mocker.patch("random.randint", lambda x, y: 5)\n\n def test_get_random_number():\n assert get_random_number() == 5\n ')
result = testdir.runpytest_subprocess()
assert ('AssertionError' not in result.stderr.str())
result.stdout.fnmatch_lines('* 1 passed in *') |
class EEGSupervisedPretrainLoader(torch.utils.data.Dataset):
def __init__(self, tuev_data, chb_mit_data, iiic_data, tuab_data):
(tuev_root, tuev_files) = tuev_data
self.tuev_root = tuev_root
self.tuev_files = tuev_files
self.tuev_size = len(self.tuev_files)
(chb_mit_root, chb_mit_files) = chb_mit_data
self.chb_mit_root = chb_mit_root
self.chb_mit_files = chb_mit_files
self.chb_mit_size = len(self.chb_mit_files)
(iiic_x, iiic_y) = iiic_data
self.iiic_x = iiic_x
self.iiic_y = iiic_y
self.iiic_size = len(self.iiic_x)
(tuab_root, tuab_files) = tuab_data
self.tuab_root = tuab_root
self.tuab_files = tuab_files
self.tuab_size = len(self.tuab_files)
def __len__(self):
return (((self.tuev_size + self.chb_mit_size) + self.iiic_size) + self.tuab_size)
def tuev_load(self, index):
sample = pickle.load(open(os.path.join(self.tuev_root, self.tuev_files[index]), 'rb'))
X = sample['signal']
X = resample(X, 1000, axis=(- 1))
X = (X / (np.quantile(np.abs(X), q=0.95, method='linear', axis=(- 1), keepdims=True) + 1e-08))
Y = int((sample['label'][0] - 1))
X = torch.FloatTensor(X)
return (X, Y, 0)
def chb_mit_load(self, index):
sample = pickle.load(open(os.path.join(self.chb_mit_root, self.chb_mit_files[index]), 'rb'))
X = sample['X']
X = resample(X, 2000, axis=(- 1))
X = (X / (np.quantile(np.abs(X), q=0.95, method='linear', axis=(- 1), keepdims=True) + 1e-08))
Y = sample['y']
X = torch.FloatTensor(X)
return (X, Y, 1)
def iiic_load(self, index):
data = self.iiic_x[index]
samples = torch.FloatTensor(data)
samples = (samples / (torch.quantile(torch.abs(samples), q=0.95, dim=(- 1), keepdim=True) + 1e-08))
y = np.argmax(self.iiic_y[index])
return (samples, y, 2)
def tuab_load(self, index):
sample = pickle.load(open(os.path.join(self.tuab_root, self.tuab_files[index]), 'rb'))
X = sample['X']
X = (X / (np.quantile(np.abs(X), q=0.95, method='linear', axis=(- 1), keepdims=True) + 1e-08))
Y = sample['y']
X = torch.FloatTensor(X)
return (X, Y, 3)
def __getitem__(self, index):
if (index < self.tuev_size):
return self.tuev_load(index)
elif (index < (self.tuev_size + self.chb_mit_size)):
index = (index - self.tuev_size)
return self.chb_mit_load(index)
elif (index < ((self.tuev_size + self.chb_mit_size) + self.iiic_size)):
index = ((index - self.tuev_size) - self.chb_mit_size)
return self.iiic_load(index)
elif (index < (((self.tuev_size + self.chb_mit_size) + self.iiic_size) + self.tuab_size)):
index = (((index - self.tuev_size) - self.chb_mit_size) - self.iiic_size)
return self.tuab_load(index)
else:
raise ValueError('index out of range') |
def train(model, data, target, optimizer, coreset_theta):
model.train()
optimizer.zero_grad()
output = model(data)
acc1 = mean_accuracy(output, target)
loss = torch.sum(((F.binary_cross_entropy_with_logits(output, target, reduction='none') * coreset_theta) / coreset_theta.sum()))
loss.backward()
optimizer.step()
return (acc1, loss) |
def test_cf_rotated_latlon__grid():
crs = CRS.from_cf(dict(grid_mapping_name='rotated_latitude_longitude', grid_north_pole_latitude=32.5, grid_north_pole_longitude=1.0, north_pole_grid_longitude=170.0))
with pytest.warns(UserWarning):
proj_dict = crs.to_dict()
assert (proj_dict == {'proj': 'ob_tran', 'o_proj': 'longlat', 'o_lat_p': 32.5, 'o_lon_p': 170.0, 'lon_0': 181.0, 'datum': 'WGS84', 'no_defs': None, 'type': 'crs'}) |
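# Hedged companion sketch (not from the source): the equivalent CRS can be rebuilt
# from the proj parameters and exported back to CF with pyproj's public API; the
# round-trip is expected to report grid_mapping_name='rotated_latitude_longitude'.
_crs = CRS.from_dict({'proj': 'ob_tran', 'o_proj': 'longlat', 'o_lat_p': 32.5, 'o_lon_p': 170.0, 'lon_0': 181.0, 'datum': 'WGS84'})
_cf = _crs.to_cf() |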
class _Cached():
def __init__(self, func, count):
self.func = func
self.cache = []
self.count = count
def __call__(self, *args, **kwds):
key = (args, kwds)
for (cached_key, cached_result) in self.cache:
if (cached_key == key):
return cached_result
result = self.func(*args, **kwds)
self.cache.append((key, result))
if (len(self.cache) > self.count):
del self.cache[0]
return result |
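# Hedged usage sketch (not from the source): _Cached keeps the last `count`
# distinct call signatures and evicts the oldest entry FIFO-style once the bound
# is exceeded, so a re-used argument may be recomputed after eviction.
_calls = []
_cached = _Cached(lambda x: (_calls.append(x), x * x)[1], count=2)
assert _cached(2) == 4 and _cached(2) == 4 and _calls == [2]  # second call served from cache
_cached(3); _cached(4)                                        # evicts the entry for 2
assert _cached(2) == 4 and _calls == [2, 3, 4, 2]             # recomputed after eviction |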
class BroadcastUDPClient(Client):
def __init__(self, bcastaddr, prog, vers):
self.pmap = BroadcastUDPPortMapperClient(bcastaddr)
self.pmap.set_reply_handler(self.my_reply_handler)
self.prog = prog
self.vers = vers
self.user_reply_handler = None
self.addpackers()
def close(self):
self.pmap.close()
def set_reply_handler(self, reply_handler):
self.user_reply_handler = reply_handler
def set_timeout(self, timeout):
self.pmap.set_timeout(timeout)
def my_reply_handler(self, reply, fromaddr):
(port, res) = reply
self.unpacker.reset(res)
result = self.unpack_func()
self.unpacker.done()
self.replies.append((result, fromaddr))
if (self.user_reply_handler is not None):
self.user_reply_handler(result, fromaddr)
def make_call(self, proc, args, pack_func, unpack_func):
self.packer.reset()
if pack_func:
pack_func(args)
if (unpack_func is None):
def dummy():
pass
self.unpack_func = dummy
else:
self.unpack_func = unpack_func
self.replies = []
packed_args = self.packer.get_buf()
_ = self.pmap.Callit((self.prog, self.vers, proc, packed_args))
return self.replies |
class VNet(nn.Module):
def __init__(self, classes_num=2):
classes = classes_num
super(VNet, self).__init__()
self.in_block = VNetInBlock(1, 32, 1)
self.down_block1 = VNetDownBlock(32, 32, 2)
self.down_block2 = VNetDownBlock(32, 64, 3)
self.down_block3 = VNetDownBlock(64, 128, 3)
self.down_block4 = VNetDownBlock(128, 256, 3)
self.down_block5 = VNetDownBlock(256, 512, 3)
self.down_block6 = VNetDownBlock(512, 1024, 3)
self.up_block1 = VNetUpBlock(1024, 512, 1024, 3)
self.up_block2 = VNetUpBlock(1024, 256, 512, 3)
self.up_block3 = VNetUpBlock(512, 128, 256, 2)
self.up_block4 = VNetUpBlock(256, 64, 128, 2)
self.up_block5 = VNetUpBlock(128, 32, 64, 2)
self.up_block6 = VNetUpBlock(64, 32, 32, 2)
self.out_block = VNetOutSingleBlock(32, classes)
def forward(self, x, return_features=False):
br1 = self.in_block(x)
br2 = self.down_block1(br1)
br3 = self.down_block2(br2)
br4 = self.down_block3(br3)
br5 = self.down_block4(br4)
br6 = self.down_block5(br5)
out = self.down_block6(br6)
out = self.up_block1(out, br6)
out = self.up_block2(out, br5)
out = self.up_block3(out, br4)
out = self.up_block4(out, br3)
out = self.up_block5(out, br2)
out = self.up_block6(out, br1)
if return_features:
outputs = out
else:
outputs = self.out_block(out)
return outputs |
@jit((types.Array(types.float64, 1, 'C', readonly=True), types.int32, types.float64), nopython=True)
def _numba_sampen(sequence, order, r):
size = sequence.size
numerator = 0
denominator = 0
for offset in range(1, (size - order)):
n_numerator = int((abs((sequence[order] - sequence[(order + offset)])) >= r))
n_denominator = 0
for idx in range(order):
n_numerator += (abs((sequence[idx] - sequence[(idx + offset)])) >= r)
n_denominator += (abs((sequence[idx] - sequence[(idx + offset)])) >= r)
if (n_numerator == 0):
numerator += 1
if (n_denominator == 0):
denominator += 1
prev_in_diff = int((abs((sequence[order] - sequence[(offset + order)])) >= r))
for idx in range(1, ((size - offset) - order)):
out_diff = int((abs((sequence[(idx - 1)] - sequence[((idx + offset) - 1)])) >= r))
in_diff = int((abs((sequence[(idx + order)] - sequence[((idx + offset) + order)])) >= r))
n_numerator += (in_diff - out_diff)
n_denominator += (prev_in_diff - out_diff)
prev_in_diff = in_diff
if (n_numerator == 0):
numerator += 1
if (n_denominator == 0):
denominator += 1
if (denominator == 0):
return 0
elif (numerator == 0):
return np.inf
else:
return (- log((numerator / denominator))) |
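# Hedged usage sketch (not from the source; assumes numpy as np and `log` are
# imported in the original module): the jitted signature expects a C-contiguous
# read-only float64 vector, an int32 embedding order and a float64 tolerance.
_x = np.ascontiguousarray(np.random.RandomState(0).standard_normal(1000))
_x.setflags(write=False)  # match the readonly=True array signature
_sampen = _numba_sampen(_x, np.int32(2), 0.2 * _x.std())  # order m=2, r=0.2*std |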
class Solution():
def fizzBuzz(self, n: int) -> List[str]:
res = []
for i in range(1, (n + 1)):
if (((i % 3) == 0) and ((i % 5) == 0)):
res.append('FizzBuzz')
elif ((i % 3) == 0):
res.append('Fizz')
elif ((i % 5) == 0):
res.append('Buzz')
else:
res.append(f'{i}')
return res |
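# Hedged usage sketch (not from the source): multiples of 3 map to 'Fizz',
# multiples of 5 to 'Buzz', multiples of both to 'FizzBuzz'.
assert Solution().fizzBuzz(5) == ['1', '2', 'Fizz', '4', 'Buzz'] |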
class PyCoreScopesTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
def tearDown(self):
testutils.remove_project(self.project)
super().tearDown()
def test_simple_scope(self):
code = dedent(' def sample_func():\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
sample_func = scope['sample_func'].get_object()
self.assertEqual(get_base_type('Function'), sample_func.get_type())
def test_simple_function_scope(self):
code = dedent(' def sample_func():\n a = 10\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(1, len(scope.get_scopes()))
sample_func_scope = scope.get_scopes()[0]
self.assertEqual(1, len(sample_func_scope.get_names()))
self.assertEqual(0, len(sample_func_scope.get_scopes()))
def test_classes_inside_function_scopes(self):
code = dedent(' def sample_func():\n class SampleClass(object):\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(1, len(scope.get_scopes()))
sample_func_scope = scope.get_scopes()[0]
self.assertEqual(get_base_type('Type'), sample_func_scope['SampleClass'].get_object().get_type())
def test_list_comprehension_scope_inside_assignment(self):
code = 'a_var = [b_var + d_var for b_var, c_var in e_var]\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_defined_names())), ['a_var'])
self.assertEqual(list(sorted(scope.get_scopes()[0].get_defined_names())), ['b_var', 'c_var'])
def test_list_comprehension_scope(self):
code = '[b_var + d_var for b_var, c_var in e_var]\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_scopes()[0].get_defined_names())), ['b_var', 'c_var'])
def test_set_comprehension_scope(self):
code = '{b_var + d_var for b_var, c_var in e_var}\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_scopes()[0].get_defined_names())), ['b_var', 'c_var'])
def test_generator_comprehension_scope(self):
code = '(b_var + d_var for b_var, c_var in e_var)\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_scopes()[0].get_defined_names())), ['b_var', 'c_var'])
def test_dict_comprehension_scope(self):
code = '{b_var: d_var for b_var, c_var in e_var}\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_scopes()[0].get_defined_names())), ['b_var', 'c_var'])
@testutils.only_for_versions_higher('3.8')
def test_inline_assignment(self):
code = 'values = (a_var := 2,)'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_defined_names())), ['a_var', 'values'])
@testutils.only_for_versions_higher('3.8')
def test_inline_assignment_in_comprehensions(self):
code = dedent(' [\n (a_var := b_var + (f_var := g_var))\n for b_var in [(j_var := i_var)\n for i_var in c_var] if a_var + (h_var := d_var)\n ]\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_scopes()[0].get_defined_names())), ['a_var', 'b_var', 'f_var'])
self.assertEqual(list(sorted(scope.get_scopes()[0].get_scopes()[0].get_defined_names())), ['i_var', 'j_var'])
def test_nested_comprehension(self):
code = dedent(' [\n b_var + d_var for b_var, c_var in [\n e_var for e_var in f_var\n ]\n ]\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(list(sorted(scope.get_scopes()[0].get_defined_names())), ['b_var', 'c_var'])
self.assertEqual(list(sorted(scope.get_scopes()[0].get_scopes()[0].get_defined_names())), ['e_var'])
def test_simple_class_scope(self):
code = dedent(' class SampleClass(object):\n def f(self):\n var = 10\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(1, len(scope.get_scopes()))
sample_class_scope = scope.get_scopes()[0]
self.assertTrue(('f' in sample_class_scope))
self.assertEqual(1, len(sample_class_scope.get_scopes()))
f_in_class = sample_class_scope.get_scopes()[0]
self.assertTrue(('var' in f_in_class))
def test_get_lineno(self):
code = dedent('\n def sample_func():\n a = 10\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(1, len(scope.get_scopes()))
sample_func_scope = scope.get_scopes()[0]
self.assertEqual(1, scope.get_start())
self.assertEqual(2, sample_func_scope.get_start())
def test_scope_kind(self):
code = dedent(' class SampleClass(object):\n pass\n def sample_func():\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
sample_class_scope = scope.get_scopes()[0]
sample_func_scope = scope.get_scopes()[1]
self.assertEqual('Module', scope.get_kind())
self.assertEqual('Class', sample_class_scope.get_kind())
self.assertEqual('Function', sample_func_scope.get_kind())
def test_function_parameters_in_scope_names(self):
code = dedent(' def sample_func(param):\n a = 10\n ')
scope = libutils.get_string_scope(self.project, code)
sample_func_scope = scope.get_scopes()[0]
self.assertTrue(('param' in sample_func_scope))
def test_get_names_contains_only_names_defined_in_a_scope(self):
code = dedent(' var1 = 10\n def sample_func(param):\n var2 = 20\n ')
scope = libutils.get_string_scope(self.project, code)
sample_func_scope = scope.get_scopes()[0]
self.assertTrue(('var1' not in sample_func_scope))
def test_scope_lookup(self):
code = dedent(' var1 = 10\n def sample_func(param):\n var2 = 20\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertTrue((scope.lookup('var2') is None))
self.assertEqual(get_base_type('Function'), scope.lookup('sample_func').get_object().get_type())
sample_func_scope = scope.get_scopes()[0]
self.assertTrue((sample_func_scope.lookup('var1') is not None))
def test_function_scopes(self):
code = dedent(' def func():\n var = 10\n ')
scope = libutils.get_string_scope(self.project, code)
func_scope = scope.get_scopes()[0]
self.assertTrue(('var' in func_scope))
def test_function_scopes_classes(self):
code = dedent(' def func():\n class Sample(object):\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
func_scope = scope.get_scopes()[0]
self.assertTrue(('Sample' in func_scope))
def test_function_getting_scope(self):
code = dedent(' def func(): var = 10\n ')
mod = libutils.get_string_module(self.project, code)
func_scope = mod['func'].get_object().get_scope()
self.assertTrue(('var' in func_scope))
def test_scopes_in_function_scopes(self):
code = dedent(' def func():\n def inner():\n var = 10\n ')
scope = libutils.get_string_scope(self.project, code)
func_scope = scope.get_scopes()[0]
inner_scope = func_scope.get_scopes()[0]
self.assertTrue(('var' in inner_scope))
def test_for_variables_in_scopes(self):
code = dedent(' for a_var in range(10):\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertTrue(('a_var' in scope))
def test_assists_inside_fors(self):
code = dedent(' for i in range(10):\n a_var = i\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertTrue(('a_var' in scope))
def test_first_parameter_of_a_method(self):
code = dedent(' class AClass(object):\n def a_func(self, param):\n pass\n ')
a_class = libutils.get_string_module(self.project, code)['AClass'].get_object()
function_scope = a_class['a_func'].get_object().get_scope()
self.assertEqual(a_class, function_scope['self'].get_object().get_type())
self.assertNotEqual(a_class, function_scope['param'].get_object().get_type())
def test_first_parameter_of_static_methods(self):
code = dedent(' class AClass(object):\n @staticmethod\n def a_func(param):\n pass\n ')
a_class = libutils.get_string_module(self.project, code)['AClass'].get_object()
function_scope = a_class['a_func'].get_object().get_scope()
self.assertNotEqual(a_class, function_scope['param'].get_object().get_type())
def test_first_parameter_of_class_methods(self):
code = dedent(' class AClass(object):\n @classmethod\n def a_func(cls):\n pass\n ')
a_class = libutils.get_string_module(self.project, code)['AClass'].get_object()
function_scope = a_class['a_func'].get_object().get_scope()
self.assertEqual(a_class, function_scope['cls'].get_object())
def test_first_parameter_with_self_as_name_and_unknown_decorator(self):
code = dedent(' def my_decorator(func):\n return func\n class AClass(object):\n @my_decorator\n def a_func(self):\n pass\n ')
a_class = libutils.get_string_module(self.project, code)['AClass'].get_object()
function_scope = a_class['a_func'].get_object().get_scope()
self.assertEqual(a_class, function_scope['self'].get_object().get_type())
def test_inside_class_scope_attribute_lookup(self):
code = dedent(' class C(object):\n an_attr = 1\n def a_func(self):\n pass')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(1, len(scope.get_scopes()))
c_scope = scope.get_scopes()[0]
self.assertTrue(('an_attr' in c_scope.get_names()))
self.assertTrue((c_scope.lookup('an_attr') is not None))
f_in_c = c_scope.get_scopes()[0]
self.assertTrue((f_in_c.lookup('an_attr') is None))
def test_inside_class_scope_attribute_lookup2(self):
code = dedent(' class C(object):\n def __init__(self):\n self.an_attr = 1\n def a_func(self):\n pass')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(1, len(scope.get_scopes()))
c_scope = scope.get_scopes()[0]
f_in_c = c_scope.get_scopes()[0]
self.assertTrue((f_in_c.lookup('an_attr') is None))
def test_get_inner_scope_for_staticmethods(self):
code = dedent(' class C(object):\n @staticmethod\n def a_func(self):\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
c_scope = scope.get_scopes()[0]
f_in_c = c_scope.get_scopes()[0]
self.assertEqual(f_in_c, scope.get_inner_scope_for_line(4))
def test_get_scope_for_offset_for_comprehension(self):
code = 'a = [i for i in range(10)]\n'
scope = libutils.get_string_scope(self.project, code)
c_scope = scope.get_scopes()[0]
self.assertEqual(c_scope, scope.get_inner_scope_for_offset(10))
self.assertEqual(scope, scope.get_inner_scope_for_offset(1))
def test_get_scope_for_offset_for_in_nested_comprehension(self):
code = '[i for i in [j for j in k]]\n'
scope = libutils.get_string_scope(self.project, code)
c_scope = scope.get_scopes()[0]
self.assertEqual(c_scope, scope.get_inner_scope_for_offset(5))
inner_scope = c_scope.get_scopes()[0]
self.assertEqual(inner_scope, scope.get_inner_scope_for_offset(15))
def test_get_scope_for_offset_for_scope_with_indent(self):
code = dedent(' def f(a):\n print(a)\n ')
scope = libutils.get_string_scope(self.project, code)
inner_scope = scope.get_scopes()[0]
self.assertEqual(inner_scope, scope.get_inner_scope_for_offset(10))
def test_get_scope_for_offset_for_function_scope_and_async_with_statement(self):
scope = libutils.get_string_scope(self.project, dedent(' async def func():\n async with a_func() as var:\n print(var)\n '))
inner_scope = scope.get_scopes()[0]
self.assertEqual(inner_scope, scope.get_inner_scope_for_offset(27))
def test_getting_overwritten_scopes(self):
code = dedent(' def f():\n pass\n def f():\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(2, len(scope.get_scopes()))
f1_scope = scope.get_scopes()[0]
f2_scope = scope.get_scopes()[1]
self.assertNotEqual(f1_scope, f2_scope)
def test_assigning_builtin_names(self):
code = 'range = 1\n'
mod = libutils.get_string_module(self.project, code)
range = mod.get_scope().lookup('range')
self.assertEqual((mod, 1), range.get_definition_location())
def test_get_inner_scope_and_logical_lines(self):
code = dedent(' class C(object):\n def f():\n s = """\n 1\n 2\n """\n a = 1\n ')
scope = libutils.get_string_scope(self.project, code)
c_scope = scope.get_scopes()[0]
f_in_c = c_scope.get_scopes()[0]
self.assertEqual(f_in_c, scope.get_inner_scope_for_line(7))
def test_getting_defined_names_for_classes(self):
code = dedent(' class A(object):\n def a(self):\n pass\n class B(A):\n def b(self):\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
a_scope = scope['A'].get_object().get_scope()
b_scope = scope['B'].get_object().get_scope()
self.assertTrue(('a' in b_scope.get_names()))
self.assertTrue(('b' in b_scope.get_names()))
self.assertTrue(('a' not in b_scope.get_defined_names()))
self.assertTrue(('b' in b_scope.get_defined_names()))
def test_getting_defined_names_for_modules(self):
code = dedent(' class A(object):\n pass\n ')
scope = libutils.get_string_scope(self.project, code)
self.assertTrue(('open' in scope.get_names()))
self.assertTrue(('A' in scope.get_names()))
self.assertTrue(('open' not in scope.get_defined_names()))
self.assertTrue(('A' in scope.get_defined_names()))
def test_get_inner_scope_for_list_comprhension_with_many_targets(self):
code = 'a = [(i, j) for i,j in enumerate(range(10))]\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(len(scope.get_scopes()), 1)
self.assertNotIn('i', scope)
self.assertNotIn('j', scope)
self.assertIn('i', scope.get_scopes()[0])
self.assertIn('j', scope.get_scopes()[0])
def test_get_inner_scope_for_generator(self):
code = 'a = (i for i in range(10))\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(len(scope.get_scopes()), 1)
self.assertNotIn('i', scope)
self.assertIn('i', scope.get_scopes()[0])
def test_get_inner_scope_for_set_comprehension(self):
code = 'a = {i for i in range(10)}\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(len(scope.get_scopes()), 1)
self.assertNotIn('i', scope)
self.assertIn('i', scope.get_scopes()[0])
def test_get_inner_scope_for_dict_comprehension(self):
code = 'a = {i:i for i in range(10)}\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(len(scope.get_scopes()), 1)
self.assertNotIn('i', scope)
self.assertIn('i', scope.get_scopes()[0])
def test_get_inner_scope_for_nested_list_comprhension(self):
code = 'a = [[i + j for j in range(10)] for i in range(10)]\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEqual(len(scope.get_scopes()), 1)
self.assertNotIn('i', scope)
self.assertNotIn('j', scope)
self.assertIn('i', scope.get_scopes()[0])
self.assertEqual(len(scope.get_scopes()[0].get_scopes()), 1)
self.assertIn('j', scope.get_scopes()[0].get_scopes()[0])
self.assertIn('i', scope.get_scopes()[0].get_scopes()[0])
def test_get_scope_region(self):
scope = libutils.get_string_scope(self.project, dedent('\n def func1(ala):\n pass\n\n def func2(o):\n pass\n '))
self.assertEqual(scope.get_region(), (0, 48))
self.assertEqual(scope.get_scopes()[0].get_region(), (1, 24))
self.assertEqual(scope.get_scopes()[1].get_region(), (26, 47))
def test_only_get_inner_scope_region(self):
scope = libutils.get_string_scope(self.project, dedent('\n def func1(ala):\n pass\n\n def func2(o):\n pass\n '))
self.assertEqual(scope.get_scopes()[1].get_region(), (26, 47)) |
def true_or_false(t: Type) -> ProperType:
t = get_proper_type(t)
if isinstance(t, UnionType):
new_items = [true_or_false(item) for item in t.items]
return make_simplified_union(new_items, line=t.line, column=t.column)
new_t = copy_type(t)
new_t.can_be_true = new_t.can_be_true_default()
new_t.can_be_false = new_t.can_be_false_default()
return new_t |
def fit_svm(features, y, MAX_SAMPLES=10000):
nb_classes = np.unique(y, return_counts=True)[1].shape[0]
train_size = features.shape[0]
svm = SVC(C=np.inf, gamma='scale')
if (((train_size // nb_classes) < 5) or (train_size < 50)):
return svm.fit(features, y)
else:
grid_search = GridSearchCV(svm, {'C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, np.inf], 'kernel': ['rbf'], 'degree': [3], 'gamma': ['scale'], 'coef0': [0], 'shrinking': [True], 'probability': [False], 'tol': [0.001], 'cache_size': [200], 'class_weight': [None], 'verbose': [False], 'max_iter': [10000000], 'decision_function_shape': ['ovr'], 'random_state': [None]}, cv=5, n_jobs=5, refit=True)
if (train_size > MAX_SAMPLES):
split = train_test_split(features, y, train_size=MAX_SAMPLES, random_state=0, stratify=y)
features = split[0]
y = split[2]
grid_search.fit(features, y)
print('SVM test: ', grid_search.best_estimator_)
return grid_search.best_estimator_ |
def get_rtlir_dtype(obj):
try:
assert (not isinstance(obj, list)), 'array datatype object should be a field of some struct!'
if isinstance(obj, (dsl.Signal, dsl.Const)):
Type = obj._dsl.Type
assert isinstance(Type, type)
if issubclass(Type, Bits):
return Vector(Type.nbits)
elif (Type is int):
return Vector(_get_nbits_from_value(obj), False)
elif is_bitstruct_class(Type):
try:
return _get_rtlir_dtype_struct(Type())
except TypeError:
assert False, f'__init__() of supposed struct {Type.__name__} should take 0 argument ( you can achieve this by adding default values to your arguments )!'
else:
assert False, f'cannot convert object of type {Type} into RTLIR!'
elif isinstance(obj, int):
return Vector(_get_nbits_from_value(obj), False)
elif isinstance(obj, Bits):
return Vector(obj.nbits)
elif is_bitstruct_inst(obj):
return _get_rtlir_dtype_struct(obj)
else:
assert False, 'cannot infer the data type of the given object!'
except AssertionError as e:
msg = ('' if (e.args[0] is None) else e.args[0])
raise RTLIRConversionError(obj, msg) |
class DiscourseTransformer(Transformer):
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False, mirror=False, streaming_state=None, nce=False, factorize=True, **kwargs):
if ((self.switchout > 0) and self.training):
batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)
src = batch.get('source')
tgt = batch.get('target_input')
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
past_src = batch.get('past_source')
org_src = src
org_tgt = tgt
src = src.transpose(0, 1)
tgt = tgt.transpose(0, 1)
past_src = past_src.transpose(0, 1)
encoder_output = self.encoder(src, past_input=past_src, input_lang=src_lang, factorize=factorize)
encoder_output = defaultdict((lambda : None), encoder_output)
context = encoder_output['context']
streaming_state = encoder_output['streaming_state']
if zero_encoder:
context.zero_()
decoder_output = self.decoder(tgt, context, src, src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming, src_lengths=src_lengths, tgt_lengths=tgt_lengths, streaming_state=streaming_state, factorize=factorize)
decoder_output = defaultdict((lambda : None), decoder_output)
streaming_state = decoder_output['streaming_state']
output = decoder_output['hidden']
output_dict = defaultdict((lambda : None), decoder_output)
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src_mask']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['streaming_state'] = streaming_state
output_dict['target'] = batch.get('target_output')
if (self.training and nce):
output_dict = self.generator[0](output_dict)
else:
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
if mirror:
tgt_pos = torch.flip(batch.get('target_pos'), (0,))
tgt_reverse = torch.flip(batch.get('target'), (0,))
tgt_reverse_input = tgt_reverse[:(- 1)]
tgt_reverse_output = tgt_reverse[1:]
tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos)
reverse_decoder_output['src'] = src
reverse_decoder_output['context'] = context
reverse_decoder_output['target_mask'] = target_mask
reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
output_dict['reverse_target'] = tgt_reverse_output
output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
output_dict['reverse_logprobs'] = reverse_logprobs
output_dict['target_input'] = batch.get('target_input')
output_dict['target_lengths'] = batch.tgt_lengths
output_dict['hidden'] = self.mirror_g(output_dict['hidden'])
if self.ctc:
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
del encoder_output
return output_dict
def load_encoder_weights(self, pretrained_model):
encoder_ = self.encoder.encoder
pretrained_model.encoder.language_embedding = None
enc_language_embedding = encoder_.language_embedding
encoder_.language_embedding = None
encoder_state_dict = pretrained_model.encoder.state_dict()
encoder_.load_state_dict(encoder_state_dict)
encoder_.language_embedding = enc_language_embedding
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, factorize=True, **kwargs):
src = batch.get('source')
tgt_atb = batch.get('target_atb')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
past_src = batch.get('past_source')
src_transposed = src.transpose(0, 1)
encoder_output = self.encoder(src_transposed, past_input=past_src.transpose(0, 1), input_lang=src_lang, factorize=factorize)
print('[INFO] create Transformer decoding state with buffering', buffering)
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang, beam_size=beam_size, model_size=self.model_size, type=type, buffering=buffering)
return decoder_state |
def reset_version_parts(version: Version, **kwargs: Any) -> None:
internal_version = version._version
parts: dict[(str, Any)] = {}
ordered_part_names = ('epoch', 'release', 'pre', 'post', 'dev', 'local')
reset = False
for part_name in ordered_part_names:
if reset:
parts[part_name] = kwargs.get(part_name)
elif (part_name in kwargs):
parts[part_name] = kwargs[part_name]
reset = True
else:
parts[part_name] = getattr(internal_version, part_name)
version._version = type(internal_version)(**parts) |
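# Hedged usage sketch (not from the source; relies on packaging's private
# `Version._version` tuple exactly as the helper does, and assumes
# `from packaging.version import Version`): once an earlier part is passed,
# every later part is cleared rather than carried over.
_v = Version('1.2.3.post1+local')
reset_version_parts(_v, release=(1, 3, 0))
assert str(_v) == '1.3.0'  # pre/post/dev/local all reset to None |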
@register_specialize
@register_canonicalize
@node_rewriter([Subtensor])
def local_subtensor_inc_subtensor(fgraph, node):
if isinstance(node.op, Subtensor):
x = node.inputs[0]
if ((not x.owner) or (not isinstance(x.owner.op, IncSubtensor))):
return
if (not x.owner.op.set_instead_of_inc):
return
if ((x.owner.inputs[2:] == node.inputs[1:]) and (tuple(x.owner.op.idx_list) == tuple(node.op.idx_list))):
out = node.outputs[0]
y = x.owner.inputs[1]
if (x.dtype != y.dtype):
y = y.astype(x.dtype)
if ((out.type.dtype == y.type.dtype) and (out.type.broadcastable == y.type.broadcastable)):
return [y]
else:
assert (out.broadcastable != y.broadcastable)
x_subtensor = node.op(x.owner.inputs[0], *x.owner.inputs[2:])
return [alloc(y, *x_subtensor.shape)]
else:
return |
@total_ordering
class Scope(Enum):
Function: _ScopeName = 'function'
Class: _ScopeName = 'class'
Module: _ScopeName = 'module'
Package: _ScopeName = 'package'
Session: _ScopeName = 'session'
def next_lower(self) -> 'Scope':
index = _SCOPE_INDICES[self]
if (index == 0):
raise ValueError(f'{self} is the lower-most scope')
return _ALL_SCOPES[(index - 1)]
def next_higher(self) -> 'Scope':
index = _SCOPE_INDICES[self]
if (index == (len(_SCOPE_INDICES) - 1)):
raise ValueError(f'{self} is the upper-most scope')
return _ALL_SCOPES[(index + 1)]
def __lt__(self, other: 'Scope') -> bool:
self_index = _SCOPE_INDICES[self]
other_index = _SCOPE_INDICES[other]
return (self_index < other_index)
@classmethod
def from_user(cls, scope_name: _ScopeName, descr: str, where: Optional[str]=None) -> 'Scope':
from _pytest.outcomes import fail
try:
scope = Scope(scope_name)
except ValueError:
fail("{} {}got an unexpected scope value '{}'".format(descr, (f'from {where} ' if where else ''), scope_name), pytrace=False)
return scope |
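# Hedged usage sketch (not from the source; assumes _ALL_SCOPES/_SCOPE_INDICES
# order Function < Class < Module < Package < Session as in pytest):
#   Scope.Function < Scope.Session          -> True
#   Scope.Class.next_higher() is Scope.Module
#   Scope.Function.next_lower()             -> raises ValueError (lower-most scope) |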
class Command(BaseCommand):
columns = ('id', 'username', 'first_name', 'last_name', 'email', 'date_joined', 'last_login')
def add_arguments(self, parser):
parser.add_argument('since', type=(lambda s: pytz.utc.localize(datetime.strptime(s, '%Y-%m-%d'))), help='Date since the users have been inactive (format: "2022-12-31").')
parser.add_argument('-o', '--output-file', dest='output_file', default=None, help='Store the output in a csv file.')
def handle(self, *args, **options):
rows = User.objects.filter(last_login__lt=options['since']).order_by('-last_login').values_list(*self.columns)
if rows:
fp = (open(options['output_file'], 'w') if options['output_file'] else sys.stdout)
csv_writer = csv.writer(fp)
csv_writer.writerow(self.columns)
csv_writer.writerows(rows)
fp.close() |
class TestSetSelectionOwner(EndianTest):
def setUp(self):
self.req_args_0 = {'selection': , 'time': , 'window': }
self.req_bin_0 = b'\x16\x00\x04\x00\xaf4\xfa\x88a\x9a\x10\xdf\x16'
def testPackRequest0(self):
bin = request.SetSelectionOwner._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.SetSelectionOwner._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0) |
def test_slice_inference_in_for_loops_not_working() -> None:
ast_nodes = extract_node('\n from unknown import Unknown\n for a, *b in something:\n b #\n for a, *b in Unknown:\n b #\n for a, *b in (1):\n b #\n ')
for node in ast_nodes:
inferred = next(node.infer())
assert (inferred == util.Uninferable) |
def parse():
parser = argparse.ArgumentParser(description='variational autoencoder')
parser.add_argument('-model_dir', default='train_model', help='output model weight dir')
parser.add_argument('-model_path', help='latest model path')
parser.add_argument('-batch_size', default=96, type=int, help='batch size')
parser.add_argument('-latent_dim', default=300, type=int, help='laten size')
parser.add_argument('-data_dir', default='chinese_data', help='data dir')
parser.add_argument('-saving_step', default=1000, type=int, help='saving step')
parser.add_argument('-num_steps', default=20000, type=int, help='number of steps')
parser.add_argument('-source_length', default=50, type=int, help='source sentence length')
parser.add_argument('-code_length', default=13, type=int, help='code sentence length')
parser.add_argument('-load', action='store_true', help='load pretrained model')
parser.add_argument('-train', action='store_true', help='whether train')
parser.add_argument('-pretrain', action='store_true', help='whether pretrain')
parser.add_argument('-test', action='store_true', help='whether test')
parser.add_argument('-test_input', default='giga_word/test/input.txt', help='path of testing input')
parser.add_argument('-test_output', default='giga_word/test/result.txt', help='path of result file')
parser.add_argument('-pretrain_input', default='giga_word/pretrain_article.txt', help='input path for pretraining generator')
parser.add_argument('-pretrain_target', default='giga_word/pretrain_target.txt', help='target path for pretraining generator')
parser.add_argument('-summary', default='giga_word/train.title.txt', help='summary path as real data for discriminator')
parser.add_argument('-article', default='giga_word/train.article.txt', help='article path for unparalleled training')
args = parser.parse_args()
return args |
def check_valid(config):
data_fn_splits = os.path.basename(config['data_path']).split('.')[0].split('_')
if (len(data_fn_splits) > 1):
clip_arch = data_fn_splits[(- 1)].upper()
if (clip_arch[:3] == 'VIT'):
assert ('@' not in clip_arch), f'TODO: {clip_arch}'
(_, model_type, patch_size) = clip_arch.split('-')
clip_arch = f'ViT-{model_type}/{patch_size}'
else:
clip_arch = clip_arch.replace('X', 'x')
print(f"### clip_arch in config: {config['clip_arch']}, clip_arch in data_path: {clip_arch}")
config['clip_arch'] = clip_arch |
def rename_key(key):
if key.startswith('module.'):
key = key[7:]
if ('.downsample.' in key):
return key.replace('downsample', 'skip')
if key.startswith('entropy_bottleneck.'):
if key.startswith('entropy_bottleneck._biases.'):
return f'entropy_bottleneck._bias{key[(- 1)]}'
if key.startswith('entropy_bottleneck._matrices.'):
return f'entropy_bottleneck._matrix{key[(- 1)]}'
if key.startswith('entropy_bottleneck._factors.'):
return f'entropy_bottleneck._factor{key[(- 1)]}'
return key |
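# Hedged usage sketch (not from the source; sample keys are illustrative):
# strips a DataParallel 'module.' prefix, maps 'downsample' to 'skip', and
# de-pluralizes the entropy-bottleneck parameter names before load_state_dict.
_renamed = {rename_key(k): 0 for k in ['module.g_a.0.weight', 'g_s.1.downsample.conv.weight', 'entropy_bottleneck._matrices.2']}
assert set(_renamed) == {'g_a.0.weight', 'g_s.1.skip.conv.weight', 'entropy_bottleneck._matrix2'} |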