code stringlengths 281 23.7M |
|---|
def all_smt(formula, keys):
    """Enumerate and print every model of ``formula`` over the given ``keys``.

    Repeatedly solves, prints the satisfying assignment restricted to ``keys``,
    and blocks that assignment so the next solve yields a different model.
    Terminates when the formula becomes unsatisfiable.
    """
    target_logic = get_logic(formula)
    print('Target Logic: %s' % target_logic)
    with Solver(logic=target_logic) as solver:
        solver.add_assertion(formula)
        while solver.solve():
            # Current model restricted to the tracked keys.
            assignment = [EqualsOrIff(k, solver.get_value(k)) for k in keys]
            print(assignment)
            # Block this assignment so the next iteration finds a new model.
            solver.add_assertion(Not(And(assignment)))
def _pseudonymise_DS(value):
original_DS_value = str(value)
my_decimal = Decimal(original_DS_value)
as_tuple = my_decimal.as_tuple()
digits = as_tuple.digits
count_digits = len(digits)
my_hash_func = hashlib.new('sha3_256')
encoded_value = original_DS_value.encode('ASCII')
my_hash_func.update(encoded_value)
my_hex_digest = my_hash_func.hexdigest()
sliced_digest = my_hex_digest[0:count_digits]
my_integer = int(sliced_digest, 16)
my_integer_string = str(my_integer)
new_digits = list()
for i in range(0, count_digits):
new_digits.append(int(my_integer_string[i:(i + 1)]))
new_decimal_tuple = tuple((as_tuple.sign, tuple(new_digits), as_tuple.exponent))
new_decimal = Decimal(new_decimal_tuple)
return str(new_decimal) |
class ObserverMeta(type):
    """Metaclass that auto-registers observer classes with the Dispatcher.

    Every concrete subclass (anything except the base ``Observer`` class)
    must define an ``observes`` attribute; one instance is created at class
    definition time and its methods are wired up as event handlers.
    """

    def __new__(cls, clsname, superclasses, attributedict):
        klass = super().__new__(cls, clsname, superclasses, attributedict)
        # The abstract base class is exempt from registration.
        if clsname == 'Observer':
            return klass
        observes = attributedict.get('observes')
        if observes is None:
            raise TypeError('{}.observes must be defined!'.format(clsname))
        # NOTE(review): 'instance' is stored on the metaclass (cls), so each
        # newly defined observer class overwrites it — presumably only the
        # per-class registration below matters; confirm if that is intended.
        cls.instance = klass()
        for event_type, event_path in RegisterSignalsFor(observes).items():
            # Each event type maps to a same-named method on the instance.
            Dispatcher.RegisterHandler(event_path, getattr(cls.instance, event_type))
        return klass
# BUG FIX: the decorators had lost their '@pytest.mark.' / '@check_' prefixes
# ('.slow', '.parametrize(...)', '_figures_equal()'), leaving invalid syntax.
# Reconstructed to the conventional pytest / figure-comparison form.
@pytest.mark.slow
@pytest.mark.parametrize('untied', [True, False])
@check_figures_equal()
def test_RanksComparatorPlotter_r2_score(fig_test, fig_ref, untied):
    """The r2_score heatmap of two identical rankings must match a manual seaborn heatmap of all-1.0 R^2 values."""
    test_ax = fig_test.subplots()
    # Two identical (fully tied) rankings -> pairwise R^2 is 1.0 everywhere.
    rank0 = agg.RankResult('test', ['a', 'b'], [1, 1], {})
    rank1 = agg.RankResult('test', ['a', 'b'], [1, 1], {})
    rcmp = ranks_cmp.mkrank_cmp(rank0, rank1)
    rcmp.plot.r2_score(ax=test_ax, untied=untied)
    # Reference figure: the heatmap drawn directly from the expected matrix.
    exp_ax = fig_ref.subplots()
    expected = pd.DataFrame.from_dict({'test_1': {'test_1': 1.0, 'test_2': 1.0}, 'test_2': {'test_1': 1.0, 'test_2': 1.0}})
    expected.columns.name = 'Method'
    expected.index.name = 'Method'
    sns.heatmap(expected, annot=True, cbar_kws={'label': '$R^2$'}, ax=exp_ax)
def load_feature(ft_path, ft_format, shape=None):
    """Load a precomputed video feature array from disk.

    Supports numpy ``.npy`` files and torch-serialized tensors. For the
    'npy' format, ``shape='CT'`` transposes the array (channels-x-time to
    time-x-channels). Raises ValueError for any other format string.
    """
    if ft_format == 'npy':
        feats = np.load(ft_path)
        if shape == 'CT':
            feats = feats.T
        return feats
    if ft_format == 'torch':
        return torch.load(ft_path).numpy()
    raise ValueError('unsupported feature format: {}'.format(ft_format))
def test_shape_tuple():
    """shape_tuple returns () for shapeless variables and a tuple of per-dim entries otherwise, with ScalarConstant only for statically known dims."""
    var = Variable(MyType2(), None, None)
    assert shape_tuple(var) == ()
    var = tensor(dtype=np.float64, shape=(1, 2, None))
    shapes = shape_tuple(var)
    assert isinstance(shapes, tuple)
    # Known dimensions come back as ScalarConstant with the right value.
    for pos, expected in ((0, 1), (1, 2)):
        assert isinstance(shapes[pos], ScalarConstant)
        assert shapes[pos].data == expected
    # The unknown (None) dimension must not be a constant.
    assert not isinstance(shapes[2], ScalarConstant)
class TestUnittestMethods():
    """Integration tests for pytest-django's handling of unittest-style setUpClass/tearDownClass.

    Each test writes a small test module via the django_pytester fixture and
    runs it in a pytest subprocess, asserting on the captured output. Note the
    generated modules deliberately define setUpClass/tearDownClass WITHOUT
    @classmethod in several cases — that is the behavior under test.
    """

    def test_django(self, django_pytester: DjangoPytester) -> None:
        """Lifecycle hooks of a django TestCase fire in the expected order."""
        django_pytester.create_test_module("\n    from django.test import TestCase\n\n    class TestFoo(TestCase):\n        \n        def setUpClass(self):\n            print('\\nCALLED: setUpClass')\n\n        def setUp(self):\n            print('\\nCALLED: setUp')\n\n        def tearDown(self):\n            print('\\nCALLED: tearDown')\n\n        \n        def tearDownClass(self):\n            print('\\nCALLED: tearDownClass')\n\n        def test_pass(self):\n            pass\n    ")
        result = django_pytester.runpytest_subprocess('-v', '-s')
        result.stdout.fnmatch_lines(['CALLED: setUpClass', 'CALLED: setUp', 'CALLED: tearDown', 'PASSED*', 'CALLED: tearDownClass'])
        assert (result.ret == 0)

    def test_setUpClass_not_being_a_classmethod(self, django_pytester: DjangoPytester) -> None:
        """A non-classmethod setUpClass must produce a setup-time TypeError."""
        django_pytester.create_test_module('\n    from django.test import TestCase\n\n    class TestFoo(TestCase):\n        def setUpClass(self):\n            pass\n\n        def test_pass(self):\n            pass\n    ')
        result = django_pytester.runpytest_subprocess('-v', '-s')
        expected_lines = ['* ERROR at setup of TestFoo.test_pass *', 'E * TypeError: *']
        result.stdout.fnmatch_lines(expected_lines)
        assert (result.ret == 1)

    def test_setUpClass_multiple_subclasses(self, django_pytester: DjangoPytester) -> None:
        """setUpClass chaining through super() works across multiple subclasses."""
        django_pytester.create_test_module('\n    from django.test import TestCase\n\n\n    class TestFoo(TestCase):\n        \n        def setUpClass(cls):\n            super(TestFoo, cls).setUpClass()\n\n        def test_shared(self):\n            pass\n\n\n    class TestBar(TestFoo):\n        def test_bar1(self):\n            pass\n\n\n    class TestBar2(TestFoo):\n        def test_bar21(self):\n            pass\n    ')
        result = django_pytester.runpytest_subprocess('-v')
        result.stdout.fnmatch_lines(['*TestFoo::test_shared PASSED*', '*TestBar::test_bar1 PASSED*', '*TestBar::test_shared PASSED*', '*TestBar2::test_bar21 PASSED*', '*TestBar2::test_shared PASSED*'])
        assert (result.ret == 0)

    def test_setUpClass_mixin(self, django_pytester: DjangoPytester) -> None:
        """setUpClass defined on a plain mixin class is honored."""
        django_pytester.create_test_module('\n    from django.test import TestCase\n\n    class TheMixin:\n        \n        def setUpClass(cls):\n            super(TheMixin, cls).setUpClass()\n\n\n    class TestFoo(TheMixin, TestCase):\n        def test_foo(self):\n            pass\n\n\n    class TestBar(TheMixin, TestCase):\n        def test_bar(self):\n            pass\n    ')
        result = django_pytester.runpytest_subprocess('-v')
        result.stdout.fnmatch_lines(['*TestFoo::test_foo PASSED*', '*TestBar::test_bar PASSED*'])
        assert (result.ret == 0)

    def test_setUpClass_skip(self, django_pytester: DjangoPytester) -> None:
        """pytest.skip inside setUpClass skips only the base class, not subclasses."""
        django_pytester.create_test_module('\n    from django.test import TestCase\n    import pytest\n\n\n    class TestFoo(TestCase):\n        \n        def setUpClass(cls):\n            if cls is TestFoo:\n                raise pytest.skip("Skip base class")\n            super(TestFoo, cls).setUpClass()\n\n        def test_shared(self):\n            pass\n\n\n    class TestBar(TestFoo):\n        def test_bar1(self):\n            pass\n\n\n    class TestBar2(TestFoo):\n        def test_bar21(self):\n            pass\n    ')
        result = django_pytester.runpytest_subprocess('-v')
        result.stdout.fnmatch_lines(['*TestFoo::test_shared SKIPPED*', '*TestBar::test_bar1 PASSED*', '*TestBar::test_shared PASSED*', '*TestBar2::test_bar21 PASSED*', '*TestBar2::test_shared PASSED*'])
        assert (result.ret == 0)

    def test_multi_inheritance_setUpClass(self, django_pytester: DjangoPytester) -> None:
        """Regression test (#280): class-level state stacks/unstacks correctly through a three-level hierarchy with a mixin."""
        django_pytester.create_test_module("\n    from django.test import TestCase\n\n    # Using a mixin is a regression test, see #280 for more details:\n    # class SomeMixin:\n        pass\n\n    class TestA(SomeMixin, TestCase):\n        expected_state = ['A']\n        state = []\n\n        \n        def setUpClass(cls):\n            super(TestA, cls).setUpClass()\n            cls.state.append('A')\n\n        \n        def tearDownClass(cls):\n            assert cls.state.pop() == 'A'\n            super(TestA, cls).tearDownClass()\n\n        def test_a(self):\n            assert self.state == self.expected_state\n\n    class TestB(TestA):\n        expected_state = ['A', 'B']\n\n        \n        def setUpClass(cls):\n            super(TestB, cls).setUpClass()\n            cls.state.append('B')\n\n        \n        def tearDownClass(cls):\n            assert cls.state.pop() == 'B'\n            super(TestB, cls).tearDownClass()\n\n        def test_b(self):\n            assert self.state == self.expected_state\n\n    class TestC(TestB):\n        expected_state = ['A', 'B', 'C']\n\n        \n        def setUpClass(cls):\n            super(TestC, cls).setUpClass()\n            cls.state.append('C')\n\n        \n        def tearDownClass(cls):\n            assert cls.state.pop() == 'C'\n            super(TestC, cls).tearDownClass()\n\n        def test_c(self):\n            assert self.state == self.expected_state\n    ")
        result = django_pytester.runpytest_subprocess('-vvvv', '-s')
        assert (result.parseoutcomes()['passed'] == 6)
        assert (result.ret == 0)

    def test_unittest(self, django_pytester: DjangoPytester) -> None:
        """Same lifecycle ordering check as test_django but with plain unittest.TestCase."""
        django_pytester.create_test_module("\n    from unittest import TestCase\n\n    class TestFoo(TestCase):\n        \n        def setUpClass(self):\n            print('\\nCALLED: setUpClass')\n\n        def setUp(self):\n            print('\\nCALLED: setUp')\n\n        def tearDown(self):\n            print('\\nCALLED: tearDown')\n\n        \n        def tearDownClass(self):\n            print('\\nCALLED: tearDownClass')\n\n        def test_pass(self):\n            pass\n    ")
        result = django_pytester.runpytest_subprocess('-v', '-s')
        result.stdout.fnmatch_lines(['CALLED: setUpClass', 'CALLED: setUp', 'CALLED: tearDown', 'PASSED*', 'CALLED: tearDownClass'])
        assert (result.ret == 0)

    def test_setUpClass_leaf_but_not_in_dunder_dict(self, django_pytester: DjangoPytester) -> None:
        """An inherited (non-leaf-defined) setUpClass is still called on multi-inheritance leaves."""
        django_pytester.create_test_module("\n    from django.test import testcases\n\n    class CMSTestCase(testcases.TestCase):\n        pass\n\n    class FooBarTestCase(testcases.TestCase):\n\n        \n        def setUpClass(cls):\n            print('FooBarTestCase.setUpClass')\n            super(FooBarTestCase, cls).setUpClass()\n\n    class TestContact(CMSTestCase, FooBarTestCase):\n\n        def test_noop(self):\n            print('test_noop')\n    ")
        result = django_pytester.runpytest_subprocess('-q', '-s')
        result.stdout.fnmatch_lines(['*FooBarTestCase.setUpClass*', '*test_noop*', '1 passed*'])
        assert (result.ret == 0)
def main(data_dir, client, bc, config):
    """Run a TPCx-BB-style query: find customers whose web-sales growth ratio exceeds their store-sales growth ratio between 2001 and 2002.

    Loads the base tables, materializes per-customer store/web sales totals
    for both years into two temp tables, joins them with the customer table,
    and returns the top 100 customers ordered by web-sales increase ratio.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config['dask_profile'])
    # Per-customer store-sales totals for 2001 (first year) and 2002 (second
    # year); HAVING drops customers with no 2001 store sales (ratio divisor).
    query_1 = '\n\t\tSELECT\n\t\t\tss.ss_customer_sk AS customer_sk,\n\t\t\tsum( case when (d_year = 2001) THEN ss_net_paid ELSE 0.0 END) first_year_total,\n\t\t\tsum( case when (d_year = 2002) THEN ss_net_paid ELSE 0.0 END) second_year_total\n\t\tFROM store_sales ss\n\t\tJOIN \n\t\t(\n\t\t\tSELECT d_date_sk, d_year\n\t\t\tFROM date_dim d\n\t\t\tWHERE d.d_year in (2001, 2002)\n\t\t) dd on ( ss.ss_sold_date_sk = dd.d_date_sk )\n\t\tGROUP BY ss.ss_customer_sk \n\t\tHAVING sum( case when (d_year = 2001) THEN ss_net_paid ELSE 0.0 END) > 0.0\n\t'
    temp_table1 = bc.sql(query_1)
    # Persist and wait so the temp table is fully computed before reuse.
    temp_table1 = temp_table1.persist()
    wait(temp_table1)
    bc.create_table('temp_table1', temp_table1)
    # Same aggregation for web sales, keyed by the billing customer.
    query_2 = '\n\t\tSELECT\n\t\t\tws.ws_bill_customer_sk AS customer_sk,\n\t\t\tsum( case when (d_year = 2001) THEN ws_net_paid ELSE 0.0 END) first_year_total,\n\t\t\tsum( case when (d_year = 2002) THEN ws_net_paid ELSE 0.0 END) second_year_total\n\t\tFROM web_sales ws\n\t\tJOIN \n\t\t(\n\t\t\tSELECT d_date_sk, d_year\n\t\t\tFROM date_dim d\n\t\t\tWHERE d.d_year in (2001, 2002)\n\t\t) dd ON ( ws.ws_sold_date_sk = dd.d_date_sk )\n\t\tGROUP BY ws.ws_bill_customer_sk \n\t\tHAVING sum( case when (d_year = 2001) THEN ws_net_paid ELSE 0.0 END) > 0.0\n\t'
    temp_table2 = bc.sql(query_2)
    temp_table2 = temp_table2.persist()
    wait(temp_table2)
    bc.create_table('temp_table2', temp_table2)
    # Final join: keep customers whose web growth ratio beats store growth.
    query = '\n\t\tSELECT\n\t\t\tCAST(c_customer_sk AS BIGINT) as c_customer_sk,\n\t\t\tc_first_name,\n\t\t\tc_last_name,\n\t\t\t(store.second_year_total / store.first_year_total) AS storeSalesIncreaseRatio,\n\t\t\t(web.second_year_total / web.first_year_total) AS webSalesIncreaseRatio \n\t\tFROM temp_table1 store,\n\t\t\ttemp_table2 web,\n\t\t\tcustomer c\n\t\tWHERE store.customer_sk = web.customer_sk\n\t\tAND web.customer_sk = c_customer_sk\n\t\tAND (web.second_year_total / web.first_year_total) > (store.second_year_total / store.first_year_total) \n\t\tORDER BY webSalesIncreaseRatio DESC,\n\t\t\tc_customer_sk,\n\t\t\tc_first_name,\n\t\t\tc_last_name\n\t\tLIMIT 100\n    '
    result = bc.sql(query)
    # Clean up the intermediate tables before returning.
    bc.drop_table('temp_table1')
    bc.drop_table('temp_table2')
    return result
def simplified_power(left, right):
    """Build ``left ** right`` with algebraic simplifications applied.

    Handles the trivial exponent/base cases (x**0, 0**x, x**1) and pushes
    the exponent inside a Multiplication or Division when one operand is
    constant and doing so produces a constant factor. Falls back to a plain
    Power node (constant-folded when possible).
    """
    left, right = _simplify_elementwise_binary_broadcasts(left, right)
    # Distribute over concatenations first, recursing with this function.
    concat = _simplified_binary_broadcast_concatenation(left, right, simplified_power)
    if concat is not None:
        return concat
    # Trivial cases: x**0 == 1, 0**x == 0, x**1 == x.
    if pybamm.is_scalar_zero(right):
        return pybamm.ones_like(left)
    if pybamm.is_scalar_zero(left):
        return pybamm.Scalar(0)
    if pybamm.is_scalar_one(right):
        return left
    # (a*b)**c -> a**c * b**c (and (a/b)**c -> a**c / b**c) when one side is
    # constant, but only if the rewrite actually yields a constant factor.
    if isinstance(left, (Multiplication, Division)):
        if left.left.is_constant() or left.right.is_constant():
            base_a, base_b = left.orphans
            pow_a = base_a ** right
            pow_b = base_b ** right
            if pow_a.is_constant() or pow_b.is_constant():
                if isinstance(left, Multiplication):
                    return pow_a * pow_b
                return pow_a / pow_b
    return pybamm.simplify_if_constant(pybamm.Power(left, right))
class FileSlice(AbstractContextManager):
    """A readable, length-aware file-like view over a byte range of a file.

    Opens the file on ``__enter__``, seeks to ``seek_from``, and serves at
    most ``read_limit`` bytes in total across read() calls.
    """

    def __init__(self, filepath: str, seek_from: int, read_limit: int):
        self.filepath = filepath
        self.seek_from = seek_from
        self.read_limit = read_limit
        self.n_seen = 0  # bytes handed out so far

    def __enter__(self):
        self.f = open(self.filepath, 'rb')
        self.f.seek(self.seek_from)
        return self

    def __exit__(self, *args):
        self.f.close()

    def __len__(self):
        # Effective slice length, clamped by what the file actually holds
        # past the seek offset.
        file_size = os.fstat(self.f.fileno()).st_size
        return min(self.read_limit, file_size - self.seek_from)

    def read(self, n=-1):
        """Read up to ``n`` bytes (all remaining if negative), bounded by read_limit."""
        remaining = self.read_limit - self.n_seen
        if remaining <= 0:
            return b''
        data = self.f.read(remaining) if n < 0 else self.f.read(min(n, remaining))
        self.n_seen += len(data)
        return data

    def __iter__(self):
        # Single 4 MiB chunk, matching upload-in-chunks consumers.
        yield self.read(n=4 * 1024 * 1024)
class AsyncHypothesisTest(PytestAsyncioFunction):
    """Pytest item for Hypothesis tests wrapping an async coroutine inner test."""

    # BUG FIX: this predicate takes the candidate item, not self — it was
    # missing its @staticmethod decorator (calling it on an instance would
    # have bound the instance to `item`).
    @staticmethod
    def _can_substitute(item: Function) -> bool:
        """Return True if *item* is a Hypothesis test whose inner test is a coroutine function."""
        func = item.obj
        return (getattr(func, 'is_hypothesis_test', False) and asyncio.iscoroutinefunction(func.hypothesis.inner_test))

    def runtest(self) -> None:
        # Replace the async inner test with a synchronous wrapper so
        # Hypothesis can drive it, then delegate to the normal run.
        self.obj.hypothesis.inner_test = wrap_in_sync(self.obj.hypothesis.inner_test)
        super().runtest()
def hook_init(fm):
    """Register ranger keybindings for listing, mounting and unmounting /dev/sd* devices via pmount/pumount."""
    # Binding to list block devices.
    fm.execute_console('map {key} shell -p lsblk'.format(key=LIST_MOUNTS_KEY))
    # Disk letters: drop partition rows (names containing digits) and strip
    # the 'sd' prefix, leaving e.g. 'a', 'b', ...
    diskcmd = 'lsblk -lno NAME | awk \'!/[1-9]/ {sub(/sd/, ""); print}\''
    # NOTE(review): after stripping newlines, `disks` is a single string of
    # one-character suffixes and the loop below iterates characters — this
    # assumes every disk name is exactly one character after 'sd'; confirm.
    disks = subprocess.check_output(diskcmd, shell=True).decode('utf-8').replace('\r', '').replace('\n', '')
    for disk in disks:
        # Highest partition number for this disk (last lsblk row).
        partcmd = "lsblk -lno NAME /dev/sd{0} | sed 's/sd{0}//' | tail -n 1".format(disk)
        try:
            numparts = int(subprocess.check_output(partcmd, shell=True).decode('utf-8').replace('\r', '').replace('\n', ''))
        except ValueError:
            # Non-numeric tail output means no partitions.
            numparts = 0
        if (numparts == 0):
            # Unpartitioned disk: mount/unmount the whole device.
            fm.execute_console('map {key}{0} chain shell pmount sd{0}; cd /media/sd{0}'.format(disk, key=MOUNT_KEY))
            fm.execute_console('map {key}{0} chain cd; chain shell pumount sd{0}'.format(disk, key=UMOUNT_KEY))
        elif (numparts == 1):
            # Single partition: bind directly to sdX1.
            fm.execute_console('map {key}{0} chain shell pmount sd{0}1; cd /media/sd{0}1'.format(disk, key=MOUNT_KEY))
            fm.execute_console('map {key}{0} chain cd; shell pumount sd{0}1'.format(disk, key=UMOUNT_KEY))
        else:
            # Multiple partitions: one binding per partition number.
            for part in range(1, (numparts + 1)):
                fm.execute_console('map {key}{0}{1} chain shell pmount sd{0}{1}; cd /media/sd{0}{1}'.format(disk, part, key=MOUNT_KEY))
                fm.execute_console('map {key}{0}{1} chain cd; shell pumount sd{0}{1}'.format(disk, part, key=UMOUNT_KEY))
    # Chain to the previously installed init hook.
    return HOOK_INIT_OLD(fm)
class SplitTags(SongsMenuPlugin):
    """Songs-menu plugin that splits composite title and album tags.

    Moves any version suffix out of 'title' into 'version', and any disc
    suffix out of 'album' into 'discnumber', in a single action.
    """
    PLUGIN_ID = 'Split Tags'
    PLUGIN_NAME = _('Split Tags')
    PLUGIN_DESC = _('Splits the disc number from the album and the version from the title at the same time.')
    PLUGIN_ICON = Icons.EDIT_FIND_REPLACE
    # Only enabled when at least one selected song has a splittable title.
    plugin_handles = any_song(has_title_splittable)

    def plugin_song(self, song):
        """Split the song's title/version and album/disc tags in place."""
        if has_title_splittable(song):
            new_title, found_versions = split_title(song['title'])
            if new_title:
                song['title'] = new_title
            if found_versions:
                song['version'] = '\n'.join(found_versions)
        if has_album_splittable(song):
            new_album, disc_no = split_album(song['album'])
            if new_album:
                song['album'] = new_album
            if disc_no:
                song['discnumber'] = disc_no
def start_portal_interactive():
    """Start the Server detached and run the Portal in interactive foreground mode.

    First queries the Portal's status; if Evennia is already running the
    operation is refused, otherwise the Server is launched in the background
    and the Portal runs with --nodaemon until stopped with Ctrl-C.
    """

    def _iportal(fail):
        # Status query failed -> nothing is running; safe to start.
        (portal_twistd_cmd, server_twistd_cmd) = _get_twistd_cmdline(False, False)
        portal_twistd_cmd.append('--nodaemon')
        if _is_windows():
            # BUG FIX: the flag value was missing here. 0x08000000 is the
            # Windows CREATE_NO_WINDOW process-creation flag, which stops a
            # console window popping up for the detached Server process.
            create_no_window = 0x08000000
            Popen(server_twistd_cmd, env=getenv(), bufsize=(- 1), creationflags=create_no_window)
        else:
            Popen(server_twistd_cmd, env=getenv(), bufsize=(- 1))
        print('Starting Portal in interactive mode (stop with Ctrl-C)...')
        try:
            # Run the Portal in the foreground until it exits or Ctrl-C.
            Popen(portal_twistd_cmd, env=getenv(), stderr=STDOUT).wait()
        except KeyboardInterrupt:
            print('... Stopped Portal with Ctrl-C.')
        else:
            print('... Portal stopped (leaving interactive mode).')

    def _portal_running(response):
        # Status query succeeded -> something is already up; refuse.
        print('Evennia must be shut down completely before running Portal in interactive mode.')
        _reactor_stop()

    # Probe the Portal: success callback means "already running".
    send_instruction(PSTATUS, None, _portal_running, _iportal)
def make_batch_data_sampler(sampler, images_per_batch, num_iters=None, start_iter=0):
    """Wrap *sampler* in a BatchSampler (no last-batch drop).

    When ``num_iters`` is given, the batch sampler is further wrapped so it
    yields exactly that many iterations starting from ``start_iter``.
    """
    batched = data.sampler.BatchSampler(sampler, images_per_batch, drop_last=False)
    if num_iters is None:
        return batched
    return IterationBasedBatchSampler(batched, num_iters, start_iter)
class NewTaskTrainer(Inc_Learning_Appr):
    """Plain fine-tuning trainer: delegates all behavior to the base incremental-learning approach unchanged."""

    def __init__(self, model, device, nepochs=160, lr=0.1, lr_min=0.0001, lr_factor=10, lr_patience=8, clipgrad=10000, momentum=0.9, wd=0.0005, multi_softmax=False, wu_nepochs=0, wu_lr_factor=1, fix_bn=False, eval_on_train=False, logger=None):
        # Forward every hyperparameter to the base trainer as-is.
        super().__init__(model, device, nepochs, lr, lr_min, lr_factor, lr_patience, clipgrad, momentum, wd, multi_softmax, wu_nepochs, wu_lr_factor, fix_bn, eval_on_train, logger)
def get_args(method='MLE'):
    """Parse command-line arguments for NDTT training with the given method label."""
    arg_parser = argparse.ArgumentParser(description=f'training neural Datalog through time (NDTT) using {method}')
    # Required data selection.
    arg_parser.add_argument('-d', '--Domain', required=True, type=str, help='which domain to work on?')
    arg_parser.add_argument('-db', '--Database', required=True, type=str, help='which database to use?')
    arg_parser.add_argument('-ps', '--PathStorage', type=str, default='../..', help='Path of storage which stores domains (with data), logs, results, etc. Must be local (e.g. no HDFS allowed)')
    # Optimization / batching controls.
    arg_parser.add_argument('-cp', '--CheckPoint', default=(- 1), type=int, help='every # tokens (>1) in a seq to accumulate gradients (and cut compute graph), -1 meaning entire seq')
    arg_parser.add_argument('-bs', '--BatchSize', default=1, type=int, help='# checkpoints / seqs to update parameters')
    arg_parser.add_argument('-tp', '--TrackPeriod', default=1000, type=int, help='# seqs to train for each dev')
    arg_parser.add_argument('-m', '--Multiplier', default=1, type=float, help='constant of N=O(I), where N is # of sampled time points for integral')
    arg_parser.add_argument('-dm', '--DevMultiplier', default=1, type=int, help='constant of N=O(I), where N is # of sampled time points for integral')
    arg_parser.add_argument('-tr', '--TrainRatio', default=1.0, type=float, help='fraction of training data to use')
    arg_parser.add_argument('-dr', '--DevRatio', default=1.0, type=float, help='fraction of dev data to use')
    arg_parser.add_argument('-me', '--MaxEpoch', default=20, type=int, help='max # training epochs')
    arg_parser.add_argument('-lr', '--LearnRate', default=0.001, type=float, help='learning rate')
    arg_parser.add_argument('-wd', '--WeightDecay', default=0, type=float, help='weight decay')
    # Parallelism.
    arg_parser.add_argument('-np', '--NumProcess', default=1, type=int, help='# of processes used, default is 1')
    arg_parser.add_argument('-nt', '--NumThread', default=1, type=int, help='OMP NUM THREADS')
    # Down-sampling configuration.
    arg_parser.add_argument('-tdsm', '--TrainDownSampleMode', default='none', type=str, choices=['none', 'uniform'], help='for training, how do you want to down sample it? none? uniform?')
    arg_parser.add_argument('-tdss', '--TrainDownSampleSize', default=1, type=int, help='for training, down sample size, 1 <= dss <= K')
    arg_parser.add_argument('-ddsm', '--DevDownSampleMode', default='none', type=str, choices=['none', 'uniform'], help='for dev, how do you want to down sample it? none? uniform?')
    arg_parser.add_argument('-ddss', '--DevDownSampleSize', default=1, type=int, help='for dev, down sample size, 1 <= dss <= K')
    # Model architecture.
    arg_parser.add_argument('-teDim', '--TimeEmbeddingDim', default=100, type=int, help='the dimensionality of time embedding')
    arg_parser.add_argument('-layer', '--Layer', default=3, type=int, help='the number of layers of Transformer')
    arg_parser.add_argument('-attemp', '--AttentionTemperature', default=1.0, type=float, help='temperature of softmax used in attention')
    arg_parser.add_argument('-tem', '--TimeEmbeddingMode', default='Sine', type=str, choices=['Sine', 'Linear'], help='how do you want to get time embedding?')
    arg_parser.add_argument('-intenmode', '--IntensityComputationMode', default='extra_layer', type=str, choices=['extra_dim', 'extra_layer'], help='how do you want to compute the intensities? via extra_layer or extra_dim?')
    arg_parser.add_argument('-mem', '--MemorySize', default=50, type=int, help='the number of past events that should be attended to in TransformerCell')
    arg_parser.add_argument('-lp', '--LSTMPool', default='full', type=str, choices=['full', 'simp'], help='for LSTM pooling, full(default):full-verison-in-paper;simp:a-simplification')
    arg_parser.add_argument('-um', '--UpdateMode', default='sync', type=str, choices=['sync', 'async'], help='way of updating lstm after computed new cells')
    # Misc.
    arg_parser.add_argument('-gpu', '--UseGPU', action='store_true', help='use GPU?')
    arg_parser.add_argument('-sd', '--Seed', default=12345, type=int, help='random seed')
    parsed = arg_parser.parse_args()
    return parsed
def get_prev_current_ss(df, store_sales_col='ss_sum'):
    """Add 'current_ss_quant' / 'prev_ss_quant' columns for sales inside vs. before the comparison-price window.

    The current window is [imp_start_date, imp_start_date + no_days_comp_price);
    the previous window is the same-length period immediately before
    imp_start_date. Rows outside a window get 0 in the corresponding column.
    Mutates and returns *df*.
    """
    window_end = df['imp_start_date'] + df['no_days_comp_price']
    curr_ss_f = (df['ss_sold_date_sk'] >= df['imp_start_date']) & (df['ss_sold_date_sk'] < window_end)
    prev_ss_f = (df['ss_sold_date_sk'] >= (df['imp_start_date'] - df['no_days_comp_price'])) & (df['ss_sold_date_sk'] < df['imp_start_date'])
    # BUG FIX: the original used chained assignment (df[col][mask] = ...),
    # which triggers SettingWithCopyWarning and silently fails to write under
    # pandas copy-on-write. Use .loc for a single, guaranteed in-place write.
    df['current_ss_quant'] = 0
    df.loc[curr_ss_f, 'current_ss_quant'] = df.loc[curr_ss_f, store_sales_col]
    df['prev_ss_quant'] = 0
    df.loc[prev_ss_f, 'prev_ss_quant'] = df.loc[prev_ss_f, store_sales_col]
    return df
class VGG(nn.Module):
    """VGG-style classifier: a feature extractor followed by a single 512-to-num_classes linear head."""

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        """Run features, flatten per-sample, and classify."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _initialize_weights(self):
        """He-style init for conv layers; unit/zero for BN; small normal for linear."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                # Fan-out of the kernel, per He et al. initialization.
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
def test_get_conference_found(client, db):
    """Fetching a conference by slug redirects to its proposals list and renders its name."""
    conf = f.ConferenceFactory()
    target = reverse('get-conference', kwargs={'conference_slug': conf.slug})
    response = client.get(target, follow=True)
    # Exactly one 302 hop to the proposals list for this conference.
    expected_hop = (reverse('proposals-list', kwargs={'conference_slug': conf.slug}), 302)
    assert response.redirect_chain == [expected_hop]
    assert str(conf.name) in str(response.content)
class V1Protocol(RegistryProtocol):
    """Test driver for the Docker Registry V1 HTTP protocol.

    Implements pull/push/delete/tag flows against the /v1/ endpoints, using
    the inherited ``conduct`` helper for each request. FAILURE_CODES maps
    each protocol step to the HTTP status expected for every simulated
    failure mode, so tests can assert on exact error behavior.
    """
    # Expected HTTP status per (protocol step, failure mode).
    FAILURE_CODES: Dict[(Enum, Dict[(Failures, int)])] = {V1ProtocolSteps.PUT_IMAGES: {Failures.INVALID_AUTHENTICATION: 403, Failures.UNAUTHENTICATED: 401, Failures.UNAUTHORIZED: 403, Failures.APP_REPOSITORY: 405, Failures.SLASH_REPOSITORY: 400, Failures.INVALID_REPOSITORY: 400, Failures.DISALLOWED_LIBRARY_NAMESPACE: 400, Failures.NAMESPACE_DISABLED: 400, Failures.READ_ONLY: 405, Failures.MIRROR_ONLY: 405, Failures.MIRROR_MISCONFIGURED: 500, Failures.MIRROR_ROBOT_MISSING: 400, Failures.READONLY_REGISTRY: 405}, V1ProtocolSteps.GET_IMAGES: {Failures.INVALID_AUTHENTICATION: 403, Failures.UNAUTHENTICATED: 403, Failures.UNAUTHORIZED: 403, Failures.APP_REPOSITORY: 404, Failures.ANONYMOUS_NOT_ALLOWED: 401, Failures.DISALLOWED_LIBRARY_NAMESPACE: 400, Failures.NAMESPACE_DISABLED: 400}, V1ProtocolSteps.PUT_IMAGE_JSON: {Failures.INVALID_IMAGES: 400, Failures.READ_ONLY: 405, Failures.MIRROR_ONLY: 405, Failures.MIRROR_MISCONFIGURED: 500, Failures.MIRROR_ROBOT_MISSING: 400, Failures.READONLY_REGISTRY: 405}, V1ProtocolSteps.PUT_TAG: {Failures.MISSING_TAG: 404, Failures.INVALID_TAG: 400, Failures.INVALID_IMAGES: 400, Failures.NAMESPACE_DISABLED: 400, Failures.READ_ONLY: 405, Failures.MIRROR_ONLY: 405, Failures.MIRROR_MISCONFIGURED: 500, Failures.MIRROR_ROBOT_MISSING: 400, Failures.READONLY_REGISTRY: 405}, V1ProtocolSteps.GET_LAYER: {Failures.GEO_BLOCKED: 403}, V1ProtocolSteps.GET_TAG: {Failures.UNKNOWN_TAG: 404}}

    def __init__(self, jwk):
        # The V1 protocol does not use JWT signing; the key is ignored.
        pass

    def _auth_for_credentials(self, credentials):
        """Pass credentials through unchanged (None means anonymous)."""
        if (credentials is None):
            return None
        return credentials

    def ping(self, session):
        """Verify the registry responds to the V1 ping endpoint."""
        assert (session.get('/v1/_ping').status_code == 200)

    def login(self, session, username, password, scopes, expect_success):
        """Exercise the V1 login endpoint; 'already exists' signals valid credentials."""
        data = {'username': username, 'password': password}
        response = self.conduct(session, 'POST', '/v1/users/', json_data=data, expected_status=400)
        assert ((response.text == '"Username or email already exists"') == expect_success)

    def pull(self, session, namespace, repo_name, tag_names, images, credentials=None, expected_failure=None, options=None):
        """Pull the given tags, validating tags, ancestry, per-image JSON and layer bytes.

        Returns a PullResult on success, or None/early return when the
        expected failure fires at the corresponding protocol step.
        """
        options = (options or ProtocolOptions())
        auth = self._auth_for_credentials(credentials)
        tag_names = ([tag_names] if isinstance(tag_names, str) else tag_names)
        prefix = ('/v1/repositories/%s/' % self.repo_name(namespace, repo_name))
        self.ping(session)
        # Request a session token so subsequent calls can use it.
        headers = {'X-Docker-Token': 'true'}
        result = self.conduct(session, 'GET', (prefix + 'images'), auth=auth, headers=headers, expected_status=(200, expected_failure, V1ProtocolSteps.GET_IMAGES))
        if (result.status_code != 200):
            return
        headers = {}
        if (credentials is not None):
            # Reuse the token issued above for the remaining calls.
            headers['Authorization'] = ('token ' + result.headers['www-authenticate'])
        else:
            # Anonymous pulls must not be handed a token.
            assert (not ('www-authenticate' in result.headers))
        image_ids = self.conduct(session, 'GET', (prefix + 'tags'), headers=headers).json()
        for tag_name in tag_names:
            image_id_data = self.conduct(session, 'GET', ((prefix + 'tags/') + tag_name), headers=headers, expected_status=(200, expected_failure, V1ProtocolSteps.GET_TAG))
            if (tag_name not in image_ids):
                assert (expected_failure == Failures.UNKNOWN_TAG)
                return None
            if (expected_failure == Failures.UNKNOWN_TAG):
                return None
            tag_image_id = image_ids[tag_name]
            assert (image_id_data.json() == tag_image_id)
            image_prefix = ('/v1/images/%s/' % tag_image_id)
            ancestors = self.conduct(session, 'GET', (image_prefix + 'ancestry'), headers=headers).json()
            assert (len(ancestors) == len(images))
            # Ancestry is returned leaf-first; iterate base-first to line up
            # with the expected images list.
            for (index, image_id) in enumerate(reversed(ancestors)):
                image_prefix = ('/v1/images/%s/' % image_id)
                self.conduct(session, 'GET', (image_prefix + 'ancestry'), headers=headers)
                result = self.conduct(session, 'GET', (image_prefix + 'json'), headers=headers)
                assert (result.json()['id'] == image_id)
                # HEAD then GET of the layer; bytes must match on success.
                self.conduct(session, 'HEAD', (image_prefix + 'layer'), headers=headers)
                result = self.conduct(session, 'GET', (image_prefix + 'layer'), headers=headers, expected_status=(200, expected_failure, V1ProtocolSteps.GET_LAYER), options=options)
                if (result.status_code == 200):
                    assert (result.content == images[index].bytes)
        return PullResult(manifests=None, image_ids=image_ids)

    def push(self, session, namespace, repo_name, tag_names, images, credentials=None, expected_failure=None, options=None):
        """Push images and tags via the V1 flow: repo PUT, per-image json/checksum/layer, tag PUTs, final images PUT."""
        auth = self._auth_for_credentials(credentials)
        tag_names = ([tag_names] if isinstance(tag_names, str) else tag_names)
        self.ping(session)
        result = self.conduct(session, 'PUT', ('/v1/repositories/%s/' % self.repo_name(namespace, repo_name)), expected_status=(201, expected_failure, V1ProtocolSteps.PUT_IMAGES), json_data={}, auth=auth)
        if (result.status_code != 201):
            return
        headers = {}
        headers['Authorization'] = ('token ' + result.headers['www-authenticate'])
        for image in images:
            # V1 does not support URL-backed (foreign) layers.
            assert (image.urls is None)
            image_json_data = {'id': image.id}
            if (image.size is not None):
                image_json_data['Size'] = image.size
            if (image.parent_id is not None):
                image_json_data['parent'] = image.parent_id
            if (image.config is not None):
                image_json_data['config'] = image.config
            if (image.created is not None):
                image_json_data['created'] = image.created
            image_json = json.dumps(image_json_data)
            response = self.conduct(session, 'PUT', ('/v1/images/%s/json' % image.id), data=image_json, headers=headers, expected_status=(200, expected_failure, V1ProtocolSteps.PUT_IMAGE_JSON))
            if (response.status_code != 200):
                return
            # Legacy tarsum checksum sent before the layer upload...
            old_checksum = compute_tarsum(BytesIO(image.bytes), image_json)
            checksum_headers = {'X-Docker-Checksum': old_checksum}
            checksum_headers.update(headers)
            self.conduct(session, 'PUT', ('/v1/images/%s/checksum' % image.id), headers=checksum_headers)
            self.conduct(session, 'PUT', ('/v1/images/%s/layer' % image.id), data=BytesIO(image.bytes), headers=headers)
            # ...then the simple payload checksum after it.
            checksum = compute_simple(BytesIO(image.bytes), image_json)
            checksum_headers = {'X-Docker-Checksum-Payload': checksum}
            checksum_headers.update(headers)
            self.conduct(session, 'PUT', ('/v1/images/%s/checksum' % image.id), headers=checksum_headers)
        # Point each tag at the final (leaf) image.
        for tag_name in tag_names:
            self.conduct(session, 'PUT', ('/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name)), data=('"%s"' % images[(- 1)].id), headers=headers, expected_status=(200, expected_failure, V1ProtocolSteps.PUT_TAG))
        # Finalize the push (204 No Content expected).
        self.conduct(session, 'PUT', ('/v1/repositories/%s/images' % self.repo_name(namespace, repo_name)), expected_status=204, headers=headers)
        return PushResult(manifests=None, headers=headers)

    def delete(self, session, namespace, repo_name, tag_names, credentials=None, expected_failure=None, options=None):
        """Delete each named tag from the repository."""
        auth = self._auth_for_credentials(credentials)
        tag_names = ([tag_names] if isinstance(tag_names, str) else tag_names)
        self.ping(session)
        for tag_name in tag_names:
            self.conduct(session, 'DELETE', ('/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name)), auth=auth, expected_status=(200, expected_failure, V1ProtocolSteps.DELETE_TAG))

    def tag(self, session, namespace, repo_name, tag_name, image_id, credentials=None, expected_failure=None, options=None):
        """Point *tag_name* at *image_id* in the repository."""
        auth = self._auth_for_credentials(credentials)
        self.conduct(session, 'PUT', ('/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name)), data=('"%s"' % image_id), auth=auth, expected_status=(200, expected_failure, V1ProtocolSteps.PUT_TAG))
class TestWeightSvdLayerSplitandSVDPrunner():
    """Tests for weight-SVD layer splitting and the WeightSvdPruner.

    BUG FIX: every '.parametrize(...)' line had lost its '@pytest.mark.'
    prefix, leaving the class syntactically invalid; the decorators are
    restored below (assumes the module imports pytest at top of file).
    """

    @pytest.mark.parametrize('model_type', ['Sequential', 'Functional'])
    @pytest.mark.parametrize('rank', [12, 20])
    @pytest.mark.parametrize('cost_metric', [CostMetric.mac, CostMetric.memory])
    def test_split_layer(self, model_type, rank, cost_metric):
        """Splitting a conv layer preserves output shape and bias, and the first split half has `rank` outputs."""
        model = get_model(model_type)
        orig_conv_op = _get_layers(model, model_type)[1]
        org_conv_op_shape = orig_conv_op.output_shape
        layer1 = Layer(orig_conv_op, orig_conv_op.name, output_shape=org_conv_op_shape)
        svd_lib_ref = pymo.GetSVDInstance()
        pymo_utils.PymoSvdUtils.configure_layers_in_pymo_svd([layer1], cost_metric, svd_lib_ref, pymo.TYPE_SINGLE)
        (split_conv_op1, split_conv_op2) = WeightSvdModuleSplitter.split_module(model, layer1.module, rank, svd_lib_ref)
        split_conv_output = split_conv_op2.output_shape
        assert (org_conv_op_shape == split_conv_output)
        assert (len(split_conv_op2.get_weights()) == len(orig_conv_op.get_weights()))
        if (len(orig_conv_op.get_weights()) > 1):
            # The second half carries the original bias unchanged.
            orig_bias_out = orig_conv_op.get_weights()[1]
            split_bias_out = split_conv_op2.get_weights()[1]
            assert np.allclose(orig_bias_out, split_bias_out, atol=0.0001)
        assert (len(split_conv_op1.get_weights()) == 2)
        assert (len(split_conv_op1.get_weights()[1]) == rank)

    @pytest.mark.parametrize('model_type', ['Sequential', 'Functional'])
    @pytest.mark.parametrize('cost_metric', [CostMetric.mac, CostMetric.memory])
    def test_split_layer_with_stride(self, model_type, cost_metric):
        """Same split invariants hold for a strided conv, with rank derived from a 0.5 compression ratio."""
        model = get_model(model_type)
        orig_conv_op = _get_layers(model, model_type)[0]
        org_conv_op_shape = orig_conv_op.output_shape
        layer1 = Layer(orig_conv_op, orig_conv_op.name, output_shape=org_conv_op_shape)
        rank = cost_calculator.WeightSvdCostCalculator.calculate_rank_given_comp_ratio(layer1, 0.5, cost_metric)
        svd_lib_ref = pymo.GetSVDInstance()
        pymo_utils.PymoSvdUtils.configure_layers_in_pymo_svd([layer1], cost_metric, svd_lib_ref, pymo.TYPE_SINGLE)
        (split_conv_op1, split_conv_op2) = WeightSvdModuleSplitter.split_module(model, layer1.module, rank, svd_lib_ref)
        split_conv_output = split_conv_op2.output_shape
        assert (org_conv_op_shape == split_conv_output)
        assert (len(split_conv_op2.get_weights()) == len(orig_conv_op.get_weights()))
        if (len(orig_conv_op.get_weights()) > 1):
            orig_bias_out = orig_conv_op.get_weights()[1]
            split_bias_out = split_conv_op2.get_weights()[1]
            assert np.allclose(orig_bias_out, split_bias_out, atol=0.0001)
        assert (len(split_conv_op1.get_weights()) == 2)
        assert (len(split_conv_op1.get_weights()[1]) == rank)

    @pytest.mark.parametrize('model_type', ['Sequential', 'Functional'])
    @pytest.mark.parametrize('cmp_ratio', [0.4, 0.75])
    @pytest.mark.parametrize('cost_metric', [CostMetric.mac, CostMetric.memory])
    @pytest.mark.parametrize('layer_index', [1, 3])
    def test_perform_svd_and_split_layer(self, model_type, cmp_ratio, cost_metric, layer_index):
        """Pruning a layer removes it from the compressible set and replaces it with two layers (net +1)."""
        model = get_model(model_type)
        layer_db = LayerDatabase(model)
        comp_layer_db = copy.deepcopy(layer_db)
        layer = comp_layer_db.find_layer_by_name(_get_layers(model, model_type)[layer_index].name)
        org_count = len(list(comp_layer_db._compressible_layers.values()))
        splitter = WeightSvdPruner()
        splitter._prune_layer(layer_db, comp_layer_db, layer, 0.5, cost_metric)
        assert (layer not in list(comp_layer_db._compressible_layers.values()))
        after_split_count = len(list(comp_layer_db._compressible_layers.values()))
        assert ((org_count + 1) == after_split_count)
class YAMLPrinter:
    """Print CLI output as YAML documents (PyYAML is an optional dependency)."""

    # Fix: both methods take no `self`, so they must be static methods
    # (the decorators appear to have been stripped by extraction).
    @staticmethod
    def display(d: Union[str, Dict[str, Any]], **_kwargs: Any) -> None:
        """Dump a single object to stdout as YAML, exiting if PyYAML is missing."""
        try:
            import yaml
            print(yaml.safe_dump(d, default_flow_style=False))
        except ImportError:
            sys.exit('PyYaml is not installed.\nInstall it with `pip install PyYaml` to use the yaml output feature')

    @staticmethod
    def display_list(data: List[Union[str, Dict[str, Any], gitlab.base.RESTObject]], fields: List[str], **_kwargs: Any) -> None:
        """Dump a list of objects (reduced to *fields*) to stdout as YAML."""
        try:
            import yaml
            print(yaml.safe_dump([get_dict(obj, fields) for obj in data], default_flow_style=False))
        except ImportError:
            sys.exit('PyYaml is not installed.\nInstall it with `pip install PyYaml` to use the yaml output feature')
class TestCFtime:
    """Test cases for CF-compliant time coordinate handling."""

    def test_add_time_bounds_dimension(self):
        """Test addition of a time bounds dimension."""
        from satpy.cf.coords import add_time_bounds_dimension

        timestamps = np.array(
            ['2018-05-30T10:05:00', '2018-05-30T10:05:01',
             '2018-05-30T10:05:02', '2018-05-30T10:05:03'],
            dtype=np.datetime64,
        )
        values = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
        arr = xr.DataArray(
            values,
            dims=['y', 'x'],
            coords={'time': ('y', timestamps)},
            attrs=dict(start_time=timestamps[0], end_time=timestamps[(- 1)]),
        )
        dataset = add_time_bounds_dimension(arr.to_dataset(name='test-array'))
        # A 2-wide bounds dimension and a bounds variable must be present.
        assert ('bnds_1d' in dataset.dims)
        assert (dataset.dims['bnds_1d'] == 2)
        assert ('time_bnds' in list(dataset.data_vars))
        # The time coordinate must be annotated per CF conventions.
        assert ('bounds' in dataset['time'].attrs)
        assert ('standard_name' in dataset['time'].attrs)
class Blackbox(namedtuple('Blackbox', ['partition', 'output_indices'])):
    """A partition of micro elements into boxes, of which only ``output_indices`` are visible.

    Fix: ``hidden_indices``, ``micro_indices`` and ``macro_indices`` are read as
    attributes elsewhere in this class (e.g. ``set(self.micro_indices)`` and
    ``a in self.hidden_indices``), so they must be properties — the decorators
    appear to have been stripped.
    """

    @property
    def hidden_indices(self):
        """All micro indices that are not visible as outputs."""
        return tuple(sorted(set(self.micro_indices) - set(self.output_indices)))

    @property
    def micro_indices(self):
        """All micro indices across the partition, sorted."""
        return tuple(sorted(idx for part in self.partition for idx in part))

    @property
    def macro_indices(self):
        """Output indices reindexed to a dense 0..n-1 range."""
        return reindex(self.output_indices)

    def __len__(self):
        return len(self.partition)

    def outputs_of(self, partition_index):
        """Return the sorted output indices belonging to one box of the partition."""
        partition = self.partition[partition_index]
        outputs = set(partition).intersection(self.output_indices)
        return tuple(sorted(outputs))

    def reindex(self):
        """Return an equivalent Blackbox with micro indices renumbered densely."""
        _map = dict(zip(self.micro_indices, reindex(self.micro_indices)))
        partition = tuple(tuple(_map[index] for index in group) for group in self.partition)
        output_indices = tuple(_map[i] for i in self.output_indices)
        return Blackbox(partition, output_indices)

    def macro_state(self, micro_state):
        """Project a full micro state down to the visible (output) elements."""
        assert len(micro_state) == len(self.micro_indices)
        reindexed = self.reindex()
        return utils.state_of(reindexed.output_indices, micro_state)

    def in_same_box(self, a, b):
        """True when micro elements *a* and *b* share a box."""
        assert a in self.micro_indices
        assert b in self.micro_indices
        for part in self.partition:
            if (a in part) and (b in part):
                return True
        return False

    def hidden_from(self, a, b):
        """True when *a* is hidden and not boxed together with *b*."""
        return (a in self.hidden_indices) and (not self.in_same_box(a, b))
class FontRow(Adw.ActionRow):
    """Action row presenting one imported font together with a remove button."""

    __gtype_name__ = 'FontRow'

    def __init__(self, font_data: dict):
        super().__init__()
        self.props.title = font_data['name']
        subtitle_parts = (font_data['family'], font_data['style'], font_data['weight'])
        self.props.subtitle = ' / '.join(subtitle_parts)
        # Small circular trash button at the end of the row.
        remove_button = Gtk.Button(valign=Gtk.Align.CENTER)
        remove_button.props.icon_name = 'webfontkitgenerator-remove-symbolic'
        for css_class in ('flat', 'circular'):
            remove_button.add_css_class(css_class)
        remove_button.connect('clicked', self.remove_font)
        self.add_suffix(remove_button)

    def remove_font(self, _widget):
        """Ask the window to remove this font, identified by the row's index."""
        self.activate_action('win.remove-font', GLib.Variant.new_uint32(self.get_index()))
def get_param_dict(args, model_without_ddp: nn.Module):
    """Build optimizer parameter groups for the model.

    Three grouping schemes are selected via ``args.param_dict_type`` (falling
    back to ``'default'`` when the attribute is absent):
      - ``'default'``:        backbone params at ``args.lr_backbone``, the rest at the optimizer default.
      - ``'ddetr_in_mmdet'``: Deformable-DETR style groups (backbone / linear-proj LR multipliers).
      - ``'large_wd'``:       per-group weight decay with norm/bias params decay-free.

    Args:
        args: Namespace carrying learning-rate settings (lr, lr_backbone, ...).
        model_without_ddp: The unwrapped model whose parameters are grouped.

    Returns:
        A list of parameter-group dicts suitable for a torch optimizer.
    """
    # Fix: a bare `except:` previously swallowed every error here; use getattr
    # so only a genuinely missing attribute falls back to 'default'.
    param_dict_type = getattr(args, 'param_dict_type', 'default')
    assert param_dict_type in ['default', 'ddetr_in_mmdet', 'large_wd']
    if param_dict_type == 'default':
        param_dicts = [
            {'params': [p for n, p in model_without_ddp.named_parameters() if 'backbone' not in n and p.requires_grad]},
            {'params': [p for n, p in model_without_ddp.named_parameters() if 'backbone' in n and p.requires_grad], 'lr': args.lr_backbone},
        ]
        return param_dicts
    if param_dict_type == 'ddetr_in_mmdet':
        param_dicts = [
            {'params': [p for n, p in model_without_ddp.named_parameters() if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad], 'lr': args.lr},
            {'params': [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad], 'lr': args.lr_backbone},
            {'params': [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad], 'lr': args.lr * args.lr_linear_proj_mult},
        ]
        return param_dicts
    if param_dict_type == 'large_wd':
        param_dicts = [
            {'params': [p for n, p in model_without_ddp.named_parameters() if not match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad]},
            {'params': [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad], 'lr': args.lr_backbone, 'weight_decay': 0.0},
            {'params': [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad], 'lr': args.lr_backbone, 'weight_decay': args.weight_decay},
            {'params': [p for n, p in model_without_ddp.named_parameters() if not match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad], 'lr': args.lr, 'weight_decay': 0.0},
        ]
        return param_dicts
class AddDestroyHandler(GraphRewriter):
    """Rewriter that attaches a DestroyHandler feature and warns when no
    Supervisor feature protects the graph."""

    def apply(self, fgraph):
        """Warn if the graph lacks a Supervisor feature."""
        has_supervisor = any(isinstance(feature, Supervisor) for feature in fgraph._features)
        if not has_supervisor:
            warnings.warn(f'A Supervisor feature is missing from {fgraph}.', stacklevel=3)

    def add_requirements(self, fgraph):
        """Attach the DestroyHandler needed to track in-place destruction."""
        super().add_requirements(fgraph)
        fgraph.attach_feature(DestroyHandler())
class NormalQueue1EntryRTL(Component):
    """Single-entry 'normal' (non-bypass) queue: one storage register plus a full flag."""

    def construct(s, EntryType):
        # Interface
        s.recv = RecvIfcRTL(EntryType)
        s.send = SendIfcRTL(EntryType)
        s.count = OutPort()
        # Internal state: `full` doubles as the element count (0 or 1).
        s.full = Wire()
        s.entry = Wire(EntryType)
        # Combinational connections.
        s.count //= s.full
        s.send.msg //= s.entry
        s.send.val //= s.full
        # Ready to receive only when the queue is empty.
        s.recv.rdy //= (lambda : (~ s.full))
        # NOTE(review): `_ff` looks like a stripped decorator (e.g. @update_ff) — confirm upstream.
        _ff
        def ff_normal1():
            if s.reset:
                s.full <<= 0
            else:
                # Become full on an accepted enqueue; stay full until the receiver is ready.
                s.full <<= ((s.recv.val & (~ s.full)) | (s.full & (~ s.send.rdy)))
            if (s.recv.val & (~ s.full)):
                s.entry <<= s.recv.msg

    def line_trace(s):
        # "<recv>(full)<send>" summary for simulation traces.
        return f'{s.recv}({s.full}){s.send}'
def diagonal_coulomb_potential_and_kinetic_terms_as_arrays(hamiltonian):
    """Split a Hamiltonian into diagonal (potential) and off-diagonal (kinetic) term arrays.

    Args:
        hamiltonian: A FermionOperator, or an operator convertible via
            get_fermion_operator (e.g. a DiagonalCoulombHamiltonian).

    Returns:
        Tuple (potential_terms, kinetic_terms) of numpy object arrays of
        single-term FermionOperators.

    Raises:
        TypeError: if the input cannot be converted to a FermionOperator.
    """
    if (not isinstance(hamiltonian, FermionOperator)):
        try:
            hamiltonian = normal_ordered(get_fermion_operator(hamiltonian))
        except TypeError:
            raise TypeError('hamiltonian must be either a FermionOperator or DiagonalCoulombHamiltonian.')
    potential = FermionOperator.zero()
    kinetic = FermionOperator.zero()
    for (term, coeff) in hamiltonian.terms.items():
        # A term is diagonal (potential) when each mode appears exactly twice,
        # i.e. the number of distinct modes equals half the operator count.
        acted = set((term[i][0] for i in range(len(term))))
        if (len(acted) == (len(term) / 2)):
            potential += FermionOperator(term, coeff)
        else:
            kinetic += FermionOperator(term, coeff)
    potential_terms = numpy.array([FermionOperator(term, coeff) for (term, coeff) in potential.terms.items()])
    kinetic_terms = numpy.array([FermionOperator(term, coeff) for (term, coeff) in kinetic.terms.items()])
    return (potential_terms, kinetic_terms)
def prepare_query_payload(backend, offset, payload_string, column_name=None):
    """Render one query payload from a template and return it as a single-item list.

    The template may reference ``{offset}`` and, when *column_name* is given,
    ``{column_name}``.
    """
    if column_name:
        payload = payload_string.format(offset=offset, column_name=column_name)
        # MSSQL id-like columns are wrapped so numeric values compare as strings —
        # presumably required by the injection technique; confirm against callers.
        if backend == 'Microsoft SQL Server' and 'id' in column_name:
            payload = replace_with(payload, column_name, f'LTRIM(STR({column_name}))', right=False)
    else:
        payload = payload_string.format(offset=offset)
    return [payload]
def parse_old_label(data_root, in_path, img_size=False):
    """Parse a legacy annotation list file into image-info and annotation dicts.

    Each line of *in_path* holds "<image path> <annotation path>", both relative
    to *data_root*.  In the annotation file, the first line is the full text and
    each following line holds whitespace-separated box coordinates for one
    character of that text.

    Args:
        data_root: Directory that image/annotation paths are relative to.
        in_path: Path of the list file mapping images to annotation files.
        img_size: When True, read each image to record its height/width.

    Returns:
        Tuple (imgid2imgname, imgid2anno) keyed by a dense integer image id.
    """
    imgid2imgname = {}
    imgid2anno = {}
    idx = 0
    for line in list_from_file(in_path):
        line = line.strip().split()
        img_full_path = osp.join(data_root, line[0])
        if (not osp.exists(img_full_path)):
            # Skip entries whose image file is missing on disk.
            continue
        ann_file = osp.join(data_root, line[1])
        if (not osp.exists(ann_file)):
            # Skip entries whose annotation file is missing on disk.
            continue
        img_info = {}
        img_info['file_name'] = line[0]
        if img_size:
            img = cv2.imread(img_full_path)
            (h, w) = img.shape[:2]
            img_info['height'] = h
            img_info['width'] = w
        imgid2imgname[idx] = img_info
        imgid2anno[idx] = []
        char_annos = []
        for (t, ann_line) in enumerate(list_from_file(ann_file)):
            ann_line = ann_line.strip()
            if (t == 0):
                # The first annotation line is the whole word/line text.
                img_info['text'] = ann_line
            else:
                # Line t annotates character t-1 of the text.
                char_box = [float(x) for x in ann_line.split()]
                char_text = img_info['text'][(t - 1)]
                char_ann = dict(char_box=char_box, char_text=char_text)
                char_annos.append(char_ann)
        imgid2anno[idx] = char_annos
        idx += 1
    return (imgid2imgname, imgid2anno)
# NOTE(review): the three lines below look like stripped rewrite-registration
# decorators (e.g. @register_canonicalize / @register_stabilize /
# @node_rewriter([Reshape])) — confirm upstream.
_canonicalize
_stabilize
_rewriter([Reshape])
def local_reshape_lift(fgraph, node):
    """Lift a Reshape above a unary Elemwise: reshape(f(x)) -> f(reshape(x)).

    Only applies when the Elemwise has exactly one input, so the rewrite
    cannot change broadcasting behavior.
    """
    if (isinstance(node.op, Reshape) and node.inputs[0].owner and isinstance(node.inputs[0].owner.op, Elemwise) and (len(node.inputs[0].owner.inputs) == 1)):
        # Reshape the Elemwise's input first...
        r = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])
        copy_stack_trace(node.outputs, r)
        # ...then re-apply the elementwise op on the reshaped tensor.
        e = node.inputs[0].owner.op(r)
        copy_stack_trace((node.outputs + node.inputs), e)
        return [e]
class NotAllowedMethodsTests(AuthenticatedAPITestCase):
    """Verify that disallowed HTTP methods on the detail endpoint return 405."""

    # Fix: Django requires setUpTestData to be a classmethod (it is invoked on
    # the class, once, before the per-test fixtures); the decorator appears to
    # have been stripped.
    @classmethod
    def setUpTestData(cls):
        cls.message = create_offensive_message()

    def test_returns_405_for_get(self):
        """GET is not allowed on the offensive-message detail endpoint."""
        url = reverse('api:bot:offensivemessage-detail', args=(self.message.id,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 405)
def reconstructions(data, model, session, batch_dim=10, sample=False):
    """Reconstruct a batch of molecules and interleave them with the originals.

    Args:
        data: Dataset providing `next_train_batch` and `matrices2mol`.
        model: Model exposing node/edge (Gumbel-)argmax output tensors.
        session: TensorFlow session used to evaluate the model.
        batch_dim: Number of molecules to reconstruct.
        sample: When True, use the stochastic Gumbel-argmax outputs.

    Returns:
        Flat numpy array alternating [original, reconstruction, ...].
    """
    (m0, _, _, a, x, _, f, _, _) = data.next_train_batch(batch_dim)
    (n, e) = session.run(([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [model.nodes_argmax, model.edges_argmax]), feed_dict={model.edges_labels: a, model.nodes_labels: x, model.node_features: f, model.training: False, model.variational: False})
    # Collapse one-hot node/edge predictions to label indices.
    (n, e) = (np.argmax(n, axis=(- 1)), np.argmax(e, axis=(- 1)))
    # Decode to molecules; failed decodes become empty RWMol placeholders.
    m1 = np.array([(e if (e is not None) else Chem.RWMol()) for e in [data.matrices2mol(n_, e_, strict=True) for (n_, e_) in zip(n, e)]])
    # Interleave originals (m0) with reconstructions (m1).
    mols = np.vstack((m0, m1)).T.flatten()
    return mols
class ServiceConfig():
    """Settings for auxiliary services (pathfinding and monitoring)."""

    # NOTE(review): annotated class attributes with defaults — presumably used
    # with @dataclass (the decorator may have been stripped); confirm upstream.
    # Address of the pathfinding service; None means not configured.
    pathfinding_service_address: Optional[Address] = None
    # Maximum number of path candidates requested from the pathfinding service.
    pathfinding_max_paths: int = DEFAULT_PATHFINDING_MAX_PATHS
    # Highest fee (in token units) accepted for a pathfinding request.
    pathfinding_max_fee: TokenAmount = DEFAULT_PATHFINDING_MAX_FEE
    # Validity window (in blocks) for pathfinding IOUs.
    pathfinding_iou_timeout: BlockTimeout = DEFAULT_PATHFINDING_IOU_TIMEOUT
    # Whether the monitoring service is enabled.
    monitoring_enabled: bool = False
class Quantile8BitQuantization(Quantization):
    """8-bit quantization using approximate quantile bucket boundaries."""

    compression_type = runtime_pb2.QUANTILE_8BIT

    def quantize(self, tensor: torch.Tensor, allow_inplace: bool=False) -> Tuple[(np.ndarray, np.ndarray)]:
        """Quantize *tensor* to uint8 bucket ids plus a per-bucket codebook.

        Returns:
            (quantized, codebook): uint8 bucket indices shaped like *tensor*,
            and the per-bucket average values used for dequantization.
        """
        # NOTE(review): `allow_inplace` is accepted but unused here — confirm intent.
        tensor = tensor.detach().float()
        # Interior quantile boundaries only (drop the two outermost edges).
        borders = torch.as_tensor(quantile_qq_approximation(tensor.numpy(), (self.n_bins + 1))[1:(- 1)])
        # bucketize can return n_bins for values beyond the last border; clamp into range.
        quantized = torch.clamp_(torch.bucketize(tensor, borders), 0, (self.n_bins - 1))
        # Codebook entry = mean of the original values assigned to each bucket.
        codebook = average_buckets(tensor, quantized, self.n_bins)
        return (quantized.numpy().astype(np.uint8), codebook.numpy())
class TestSilent(unittest.TestCase):
    """Tests for ``file_info.silent`` detection of silent/empty audio files."""

    def test_nonsilent(self):
        self.assertEqual(False, file_info.silent(INPUT_FILE))

    def test_nonsilent_pathlib(self):
        # Path-like inputs must behave the same as plain strings.
        self.assertEqual(False, file_info.silent(Path(INPUT_FILE)))

    def test_silent(self):
        self.assertEqual(True, file_info.silent(SILENT_FILE))

    def test_empty(self):
        # A file with no audio content counts as silent.
        self.assertEqual(True, file_info.silent(EMPTY_FILE))
class TestInitialSetup(TestCase):
    """Tests for the server's initial-setup helpers."""

    # NOTE(review): the line below looks like a stripped @mock.patch decorator — confirm upstream.
    ('evennia.server.initial_setup.AccountDB')
    def test_get_god_account(self, mocked_accountdb):
        """get_god_account should fetch the account with id=1 via AccountDB."""
        mocked_accountdb.objects.get = MagicMock(return_value=1)
        self.assertEqual(initial_setup.get_god_account(), 1)
        mocked_accountdb.objects.get.assert_called_with(id=1)
class TestInSubprocess(unittest.TestCase):
def runProcessAndGetAsciiStdoutOrStderr(self, cmdline):
if (sys.platform != 'win32'):
cmdline = shlex.split(cmdline)
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
retcode = p.poll()
if stdout:
output = stdout.decode('ascii', 'replace')
elif stderr:
output = stderr.decode('ascii', 'replace')
output = output.replace('\r', '')
return (retcode, output)
def versionChecker(self, cmdline_template):
cmdline = (cmdline_template % sys.executable)
(retcode, output) = self.runProcessAndGetAsciiStdoutOrStderr(cmdline)
self.assertEqual(version_str, output.rstrip())
self.assertEqual(0, retcode)
def test_setup_version(self):
self.versionChecker('%s setup.py --version')
def test_sripts_bin2hex_version(self):
self.versionChecker('%s intelhex/scripts/bin2hex.py --version')
def test_sripts_hex2bin_version(self):
self.versionChecker('%s intelhex/scripts/hex2bin.py --version')
def test_sripts_hex2dump_version(self):
self.versionChecker('%s intelhex/scripts/hex2dump.py --version')
def test_sripts_hexdiff_version(self):
self.versionChecker('%s intelhex/scripts/hexdiff.py --version')
def test_sripts_hexmerge_version(self):
self.versionChecker('%s intelhex/scripts/hexmerge.py --version') |
class Up(nn.Module):
    """U-Net decoder step: upsample x1, pad to match x2, concatenate, double-conv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # Parameter-free upsampling; channel reduction happens in DoubleConv.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Spatial sizes can differ by a pixel after pooling; pad x1 to x2's size.
        dh = torch.tensor([x2.size()[2] - x1.size()[2]])
        dw = torch.tensor([x2.size()[3] - x1.size()[3]])
        x1 = F.pad(x1, [dw // 2, dw - (dw // 2), dh // 2, dh - (dh // 2)])
        return self.conv(torch.cat([x2, x1], dim=1))
def get_pure_function(fcn) -> PureFunction:
    """Wrap *fcn* in the appropriate PureFunction subclass.

    Accepts already-wrapped PureFunctions, plain functions, torch.jit script
    functions, bound methods, and callable objects (via their __call__).

    Raises:
        RuntimeError: when *fcn* is not one of the supported callable kinds.
    """
    errmsg = 'The input function must be a function, a method of torch.nn.Module, a method of xitorch.EditableModule, or a sibling method'
    if isinstance(fcn, PureFunction):
        # Already wrapped: return as-is.
        return fcn
    elif (inspect.isfunction(fcn) or isinstance(fcn, torch.jit.ScriptFunction)):
        return FunctionPureFunction(fcn)
    elif (inspect.ismethod(fcn) or hasattr(fcn, '__call__')):
        if inspect.ismethod(fcn):
            obj = fcn.__self__
        else:
            # A callable object: treat its __call__ as the method.
            obj = fcn
            fcn = fcn.__call__
        # Dispatch order matters: EditableModule is checked before torch.nn.Module.
        if isinstance(obj, EditableModule):
            return EditableModulePureFunction(obj, fcn)
        elif isinstance(obj, torch.nn.Module):
            return TorchNNPureFunction(obj, fcn)
        else:
            raise RuntimeError(errmsg)
    else:
        raise RuntimeError(errmsg)
def get_artifact(owner: str, repo: str, sha: str, action_name: str, artifact_name: str) -> str:
    """Return a download URL for a named artifact of a workflow run on *sha*.

    Scans the repository's 100 most recent workflow runs for one whose name
    matches *action_name* and whose head SHA matches *sha*, then searches that
    run's artifacts for *artifact_name*.

    Raises:
        NotFoundError: if no matching run or artifact is found.
    """
    client = authorize(owner, repo)
    try:
        runs = client.get(f'/repos/{owner}/{repo}/actions/runs', params={'per_page': 100})
        runs.raise_for_status()
        runs = runs.json()
        for run in runs['workflow_runs']:
            run = WorkflowRun.from_raw(run)
            if ((run.name == action_name) and (sha == run.head_sha)):
                break
        else:
            # for/else: no run matched within the fetched page.
            raise NotFoundError('Could not find a run matching the provided settings in the previous hundred runs.')
        url = check_run_status(run)
        artifacts = client.get(url)
        artifacts.raise_for_status()
        for artifact in artifacts.json()['artifacts']:
            if (artifact['name'] == artifact_name):
                data = client.get(artifact['archive_download_url'])
                if (data.status_code == 302):
                    # The API answers with a redirect; the artifact lives at the target URL.
                    return str(data.next_request.url)
                data.raise_for_status()
        raise NotFoundError('Could not find an artifact matching the provided name.')
    finally:
        # Always release the HTTP client, even on an early raise.
        client.close()
def cal_false_alarm(gt, preds, threshold=0.5):
    """Compute the false-alarm rate FP / (FP + TN).

    Frame-level labels in *gt* are compared against segment scores in *preds*;
    each score is expanded to 16 frames — presumably the snippet length used
    by the upstream dataset (confirm against callers).
    """
    scores = np.repeat(list(preds.cpu().detach().numpy()), 16)
    labels = list(gt.cpu().detach().numpy())
    # Binarize the expanded scores at the threshold.
    scores[scores < threshold] = 0
    scores[scores >= threshold] = 1
    tn, fp, fn, tp = confusion_matrix(labels, scores, labels=[0, 1]).ravel()
    return fp / (fp + tn)
def perform_cle_bc(config: argparse.Namespace):
    """Run AIMET cross-layer equalization (CLE) and bias correction (BC) on MobileNet.

    Evaluates top-1 accuracy at each stage (original, BN-folded quantsim,
    CLE, CLE+BC) and exports the final quantized graph to config.logdir.
    """
    data_pipeline = ImageNetDataPipeline(config)
    input_shape = (image_net_config.dataset['image_width'], image_net_config.dataset['image_height'], image_net_config.dataset['image_channels'])
    # Fresh TF session with GPU memory growth enabled.
    tf.keras.backend.clear_session()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf.keras.backend.set_session(tf.Session(config=tf_config))
    model = MobileNet(weights='imagenet', input_shape=input_shape)
    # Freeze BN ops so the batch-norm folding below is valid.
    model = update_keras_bn_ops_trainable_flag(model, trainable=False, load_save_path=config.logdir)
    sess = tf.keras.backend.get_session()
    add_image_net_computational_nodes_in_graph(sess, model.output.name, image_net_config.dataset['images_classes'])
    accuracy = data_pipeline.evaluate(sess)
    logger.info('Original Model Top-1 accuracy = %.2f', accuracy)
    # Fold batch norms into preceding layers before quantization simulation.
    BN_folded_sess = save_and_load_graph(sess=sess, meta_path=config.logdir)
    (BN_folded_sess, _) = aimet_bnf.fold_all_batch_norms(BN_folded_sess, input_op_names=['input_1'], output_op_names=[model.output.name.split(':')[0]])
    quant_sim = create_quant_sim_model(BN_folded_sess, start_op_names=['input_1'], output_op_names=[model.output.name.split(':')[0]], use_cuda=config.use_cuda, evaluator=data_pipeline.evaluate, logdir=config.logdir)
    accuracy = data_pipeline.evaluate(quant_sim.session)
    logger.info('Original Model Top-1 accuracy on Quant Simulator = %.2f', accuracy)
    logger.info('Starting Aimet Cross Layer Equalization and Bias Correction...')
    # Stage 1: cross-layer equalization; re-simulate quantization afterwards.
    cle_applied_sess = aimet_cross_layer_equalization(sess, start_op_names=['input_1'], output_op_names=[model.output.name.split(':')[0]])
    quant_sim.session.close()
    quant_sim = create_quant_sim_model(cle_applied_sess, start_op_names=['input_1'], output_op_names=[model.output.name.split(':')[0]], use_cuda=config.use_cuda, evaluator=data_pipeline.evaluate, logdir=config.logdir)
    accuracy = data_pipeline.evaluate(quant_sim.session)
    logger.info('CLE applied Model Top-1 accuracy on Quant Simulator = %.2f', accuracy)
    # Stage 2: bias correction on top of the CLE-equalized graph.
    cle_bc_applied_sess = aimet_bias_correction(cle_applied_sess, start_op_names=['input_1', 'labels'], output_op_names=[model.output.name.split(':')[0]], data_loader=data_pipeline.data_loader(), use_cuda=config.use_cuda)
    quant_sim.session.close()
    quant_sim = create_quant_sim_model(cle_bc_applied_sess, start_op_names=['input_1'], output_op_names=[model.output.name.split(':')[0]], use_cuda=config.use_cuda, evaluator=data_pipeline.evaluate, logdir=config.logdir)
    accuracy = data_pipeline.evaluate(quant_sim.session)
    logger.info('CLE+BC applied Model Top-1 accuracy on Quant Simulator = %.2f', accuracy)
    logger.info('Saving Quantized model graph')
    quant_sim.export(path=config.logdir, filename_prefix='quantized_model')
    logger.info('Quantized model graph is saved!')
    logger.info('Aimet Cross Layer Equalization and Bias Correction Done')
class GlobalDecl(Statement):
    """AST node for a ``global name, ...`` declaration."""

    __slots__ = ('names',)
    __match_args__ = ('names',)
    # Names declared global in the enclosing scope.
    names: list[str]

    def __init__(self, names: list[str]) -> None:
        super().__init__()
        self.names = names

    def accept(self, visitor: StatementVisitor[T]) -> T:
        """Dispatch to the visitor's global-decl handler."""
        return visitor.visit_global_decl(self)
def build_data_info(args):
    """Augment *args* with dataset metadata loaded from the JSON info file.

    The dataset name is derived from the last component of args.data_root and
    used as the key into the JSON document at args.data_info.  Sets
    args.dataset, args.train_session_set, args.test_session_set,
    args.class_index and args.num_classes, then returns *args*.
    """
    args.dataset = osp.basename(osp.normpath(args.data_root))
    with open(args.data_info, 'r') as f:
        info = json.load(f)[args.dataset]
    for key in ('train_session_set', 'test_session_set', 'class_index'):
        setattr(args, key, info[key])
    args.num_classes = len(args.class_index)
    return args
def resize_dataset(i_root, o_root, mode='train'):
    """Randomly rescale every image/mask pair of the *mode* split into *o_root*.

    The shorter side gets a random target size (via random_size) and the longer
    side is scaled to preserve the aspect ratio.  Images use bilinear
    interpolation; masks use nearest-neighbour to keep labels discrete.
    """
    out_images_path = osp.join(o_root, mode, 'images')
    out_masks_path = osp.join(o_root, mode, 'masks_12')
    check_dir(o_root)
    check_dir(out_images_path)
    check_dir(out_masks_path)
    image_path = osp.join(i_root, mode, 'images')
    mask_path = osp.join(i_root, mode, 'masks_12')
    for i in os.listdir(image_path):
        img = Image.open(osp.join(image_path, i))
        (basename, _) = os.path.splitext(i)
        # Masks are stored alongside images as "<basename>_mask.png".
        mask = Image.open(osp.join(mask_path, (basename + '_mask.png')))
        assert (img.size == mask.size)
        (w, h) = img.size
        if (w > h):
            # Landscape: randomize the height, scale the width proportionally.
            oh = random_size(h)
            ow = int((((1.0 * w) * oh) / h))
        else:
            ow = random_size(w)
            oh = int((((1.0 * h) * ow) / w))
        new_img = img.resize((ow, oh), Image.BILINEAR)
        # NEAREST keeps mask labels discrete (no interpolated class values).
        new_mask = mask.resize((ow, oh), Image.NEAREST)
        new_img.save(osp.join(out_images_path, i))
        new_mask.save(osp.join(out_masks_path, (basename + '_mask.png')))
        print('process image', i)
class TFCvtIntermediate(tf.keras.layers.Layer):
    """Intermediate (expansion) dense layer of a CvT block with GELU activation."""

    def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, **kwargs):
        super().__init__(**kwargs)
        # Expand the embedding dimension by mlp_ratio, as in a standard transformer MLP.
        self.dense = tf.keras.layers.Dense(units=int((embed_dim * mlp_ratio)), kernel_initializer=get_initializer(config.initializer_range), activation='gelu', name='dense')

    def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
        """Project the hidden state through the expansion layer."""
        hidden_state = self.dense(hidden_state)
        return hidden_state
class TextureGrid(TextureRegion, UniformTextureSequence):
    """A texture subdivided into a regular grid of equally sized regions.

    Items are stored row-major, bottom row first; they can be addressed by a
    flat int index, a (row, column) tuple, or slices of either form.
    """

    items = ()
    rows = 1
    columns = 1
    item_width = 0
    item_height = 0

    def __init__(self, grid):
        image = grid.get_texture()
        if isinstance(image, TextureRegion):
            owner = image.owner
        else:
            owner = image
        super().__init__(image.x, image.y, image.z, image.width, image.height, owner)
        # Carve the texture into item regions, row by row from the bottom.
        items = []
        y = 0
        for row in range(grid.rows):
            x = 0
            for col in range(grid.columns):
                items.append(self.get_region(x, y, grid.item_width, grid.item_height))
                x += (grid.item_width + grid.column_padding)
            y += (grid.item_height + grid.row_padding)
        self.items = items
        self.rows = grid.rows
        self.columns = grid.columns
        self.item_width = grid.item_width
        self.item_height = grid.item_height

    def get(self, row, column):
        """Return the region at (row, column)."""
        return self[(row, column)]

    def __getitem__(self, index):
        """Index by int, (row, column) tuple, or slice of either form.

        A slice with a tuple endpoint selects the rectangular sub-grid between
        the two (row, column) corners; int slice endpoints are flat indexes
        converted to row/column positions.
        """
        if (type(index) is slice):
            if ((type(index.start) is not tuple) and (type(index.stop) is not tuple)):
                # Plain flat slice over the row-major item list.
                return self.items[index]
            else:
                # Rectangular slice: resolve the (row, col) corners, defaulting
                # to the full grid extent.
                row1 = 0
                col1 = 0
                row2 = self.rows
                col2 = self.columns
                if (type(index.start) is tuple):
                    (row1, col1) = index.start
                elif (type(index.start) is int):
                    row1 = (index.start // self.columns)
                    col1 = (index.start % self.columns)
                assert ((0 <= row1 < self.rows) and (0 <= col1 < self.columns))
                if (type(index.stop) is tuple):
                    (row2, col2) = index.stop
                elif (type(index.stop) is int):
                    row2 = (index.stop // self.columns)
                    col2 = (index.stop % self.columns)
                assert ((0 <= row2 <= self.rows) and (0 <= col2 <= self.columns))
                # Collect columns [col1, col2) of each row in [row1, row2).
                result = []
                i = (row1 * self.columns)
                for row in range(row1, row2):
                    result += self.items[(i + col1):(i + col2)]
                    i += self.columns
                return result
        elif (type(index) is tuple):
            (row, column) = index
            assert ((0 <= row < self.rows) and (0 <= column < self.columns))
            return self.items[((row * self.columns) + column)]
        elif (type(index) is int):
            return self.items[index]

    def __setitem__(self, index, value):
        """Blit image(s) into the selected region(s); sizes must match the grid cell."""
        if (type(index) is slice):
            for (region, image) in zip(self[index], value):
                if ((image.width != self.item_width) or (image.height != self.item_height)):
                    raise ImageException('Image has incorrect dimensions')
                image.blit_into(region, image.anchor_x, image.anchor_y, 0)
        else:
            image = value
            if ((image.width != self.item_width) or (image.height != self.item_height)):
                raise ImageException('Image has incorrect dimensions')
            image.blit_into(self[index], image.anchor_x, image.anchor_y, 0)

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)
class ClassificationTask(LightningModule):
    """Lightning task wrapping a pretrained sequence-classification model."""

    def __init__(self, model: PreTrainedModel, args: ClassificationTrainArguments):
        super().__init__()
        self.model = model
        self.args = args

    def configure_optimizers(self):
        """AdamW with exponential LR decay (gamma=0.9)."""
        optimizer = AdamW(self.parameters(), lr=self.args.learning_rate)
        scheduler = ExponentialLR(optimizer, gamma=0.9)
        return {'optimizer': optimizer, 'scheduler': scheduler}

    def _step(self, inputs):
        """Run the model on a batch and compute its accuracy; returns (outputs, acc)."""
        outputs = self.model(**inputs)
        preds = outputs.logits.argmax(dim=-1)
        acc = accuracy(preds, inputs['labels'])
        return outputs, acc

    def training_step(self, inputs, batch_idx):
        outputs, acc = self._step(inputs)
        # Per-step logging during training.
        self.log('loss', outputs.loss, prog_bar=False, logger=True, on_step=True, on_epoch=False)
        self.log('acc', acc, prog_bar=True, logger=True, on_step=True, on_epoch=False)
        return outputs.loss

    def validation_step(self, inputs, batch_idx):
        outputs, acc = self._step(inputs)
        # Epoch-level logging during validation.
        self.log('val_loss', outputs.loss, prog_bar=True, logger=True, on_step=False, on_epoch=True)
        self.log('val_acc', acc, prog_bar=True, logger=True, on_step=False, on_epoch=True)
        return outputs.loss
# NOTE(review): `_module()` looks like a stripped registry decorator — confirm upstream.
_module()
class MeshAdversarialDataset(Dataset):
    """Mix a mesh training dataset with randomly drawn adversarial samples.

    Each item is a training sample whose dict is updated with the fields of a
    randomly chosen adversarial sample (drawn with replacement).
    """

    def __init__(self, train_dataset, adversarial_dataset):
        super().__init__()
        self.train_dataset = build_dataset(train_dataset)
        self.adversarial_dataset = build_dataset(adversarial_dataset)
        # Dataset length follows the training split only.
        self.length = len(self.train_dataset)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        data = self.train_dataset[i]
        ind_adv = np.random.randint(low=0, high=len(self.adversarial_dataset), dtype=int)
        # The modulo is redundant (randint's high bound is exclusive) but harmless.
        data.update(self.adversarial_dataset[(ind_adv % len(self.adversarial_dataset))])
        return data
class Graph:
    """Directed acyclic graph of nodes with named references and chain building.

    Nodes live in ``self.nodes`` in insertion order; ``self.edges`` maps a node
    index (or the BEGIN sentinel) to the set of successor indexes.

    Fix: ``topologically_sorted_indexes`` and ``graphviz`` are read as
    attributes inside this very class (``for ix in
    self.topologically_sorted_indexes``, ``self.graphviz._repr_svg_()``), so
    they must be properties — the decorators appear to have been stripped.
    """

    name = ''

    def __init__(self, *chain):
        self.edges = {BEGIN: set()}
        self.named = {}
        self.nodes = []
        if len(chain):
            self.add_chain(*chain)

    def __iter__(self):
        yield from self.nodes

    def __len__(self):
        """Number of nodes (the BEGIN sentinel is not counted)."""
        return len(self.nodes)

    def __getitem__(self, key):
        return self.nodes[key]

    def __enter__(self):
        return self.get_cursor().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        return None

    def __rshift__(self, other):
        # `graph >> node` delegates to a fresh cursor positioned at BEGIN.
        return self.get_cursor().__rshift__(other)

    def get_cursor(self, ref=BEGIN):
        """Return a cursor positioned on *ref* (default: the BEGIN sentinel)."""
        return GraphCursor(self, last=self.index_of(ref))

    def orphan(self):
        """Return a cursor attached to no node, for building detached chains."""
        return self.get_cursor(None)

    def index_of(self, mixed):
        """Resolve *mixed* (index, name, node instance, or None) to a node index.

        Raises:
            ValueError: when no node matches.
        """
        if mixed is None:
            return None
        if type(mixed) is int or mixed in self.edges:
            return mixed
        if isinstance(mixed, str) and mixed in self.named:
            return self.named[mixed]
        if mixed in self.nodes:
            return self.nodes.index(mixed)
        raise ValueError('Cannot find node matching {!r}.'.format(mixed))

    def indexes_of(self, *things, _type=set):
        """Resolve several references at once into a ``_type`` collection."""
        return _type(map(self.index_of, things))

    def outputs_of(self, idx_or_node, create=False):
        """Return the successor set of a node, optionally creating an empty one."""
        idx_or_node = self.index_of(idx_or_node)
        if create and not (idx_or_node in self.edges):
            self.edges[idx_or_node] = set()
        return self.edges[idx_or_node]

    def add_node(self, new_node, *, _name=None):
        """Append *new_node*, optionally registering it under a unique name.

        Raises:
            KeyError: when *_name* is already taken.
        """
        idx = len(self.nodes)
        self.edges[idx] = set()
        self.nodes.append(new_node)
        if _name:
            if _name in self.named:
                raise KeyError('Duplicate name {!r} in graph.'.format(_name))
            self.named[_name] = idx
        return idx

    def get_or_add_node(self, new_node, *, _name=None):
        """Return the index of *new_node*, adding it only when not yet present."""
        if new_node in self.nodes:
            if _name is not None:
                raise RuntimeError('Cannot name a node that is already present in the graph.')
            return self.index_of(new_node)
        return self.add_node(new_node, _name=_name)

    def add_chain(self, *nodes, _input=BEGIN, _output=None, _name=None, use_existing_nodes=False):
        """Add *nodes* as a chain, wiring _input -> n0 -> ... -> nN -> _output.

        With no nodes, only the _input -> _output edge is created (both must
        then be provided and _name is not allowed).  When *use_existing_nodes*
        is true, nodes already in the graph are reused instead of duplicated.
        Returns a GraphRange covering the chain.
        """
        _input = self.index_of(_input)
        _output = self.index_of(_output)
        _first = None
        _last = None
        get_node = self.get_or_add_node if use_existing_nodes else self.add_node
        if not len(nodes):
            if _input is None or _output is None:
                raise ValueError('Using add_chain(...) without nodes is only possible if you provide both _input and _output values.')
            if _name is not None:
                raise RuntimeError('Using add_chain(...) without nodes does not allow to use the _name parameter.')
        for i, node in enumerate(nodes):
            # Only the first node of the chain receives the given name.
            _last = get_node(node, _name=_name if not i else None)
            if _first is None:
                _first = _last
            self.outputs_of(_input, create=True).add(_last)
            _input = _last
        if _output is not None:
            self.outputs_of(_input, create=True).add(_output)
        # Any structural change invalidates the cached topological order.
        if hasattr(self, '_topologcally_sorted_indexes_cache'):
            del self._topologcally_sorted_indexes_cache
        return GraphRange(self, _first, _last)

    def copy(self):
        """Shallow-copy the graph (edge sets, name map and node list are copied)."""
        g = Graph()
        g.edges = copy(self.edges)
        g.named = copy(self.named)
        g.nodes = copy(self.nodes)
        return g

    @property
    def topologically_sorted_indexes(self):
        """Node indexes in dependency order (cached; the cache attribute keeps
        its historical spelling and is invalidated by add_chain).

        Raises:
            RuntimeError: if the graph contains a cycle.
        """
        try:
            return self._topologcally_sorted_indexes_cache
        except AttributeError:
            # Iterative depth-first search producing a reverse post-order.
            seen = set()
            order = []
            explored = set()
            for i in self.edges:
                if i in explored:
                    continue
                fringe = [i]
                while fringe:
                    w = fringe[-1]
                    if w in explored:
                        fringe.pop()
                        continue
                    seen.add(w)
                    new_nodes = []
                    for n in self.outputs_of(w):
                        if n not in explored:
                            if n in seen:
                                # Reaching a node already on the current path means a cycle.
                                raise RuntimeError('Graph contains a cycle.')
                            new_nodes.append(n)
                    if new_nodes:
                        fringe.extend(new_nodes)
                    else:
                        explored.add(w)
                        order.append(w)
                        fringe.pop()
            # Keep only real node indexes (drop the BEGIN sentinel).
            self._topologcally_sorted_indexes_cache = tuple(filter(lambda i: type(i) is int, reversed(order)))
        return self._topologcally_sorted_indexes_cache

    @property
    def graphviz(self):
        """Cached left-to-right graphviz Digraph rendering of the graph."""
        try:
            return self._graphviz
        except AttributeError:
            g = Digraph()
            g.attr(rankdir='LR')
            g.node('BEGIN', shape='point')
            for i in self.outputs_of(BEGIN):
                g.edge('BEGIN', str(i))
            for ix in self.topologically_sorted_indexes:
                g.node(str(ix), label=get_name(self[ix]))
                for iy in self.outputs_of(ix):
                    g.edge(str(ix), str(iy))
            self._graphviz = g
        return self._graphviz

    def _repr_dot_(self):
        return str(self.graphviz)

    def _repr_html_(self):
        try:
            return '<div>{}</div><pre>{}</pre>'.format(self.graphviz._repr_svg_(), html.escape(repr(self)))
        except (ExecutableNotFound, FileNotFoundError) as exc:
            # graphviz binary missing: degrade to a readable error message.
            return '<strong>{}</strong>: {}'.format(type(exc).__name__, str(exc))
def __get_all_files(root: Path, folder: (Path | None)=None) -> list[str]:
    """Recursively list every entry under *root* as a '/'-separated relative
    path with any '.html' removed, descending into sub-directories."""
    folder = folder or root
    collected: list[str] = []
    for entry in folder.iterdir():
        rel = str(entry.relative_to(root)).replace('\\', '/').replace('.html', '')
        collected.append(rel)
        if entry.is_dir():
            collected += __get_all_files(root, entry)
    return collected
def get_method_config(key, config=None, yaml_path='configs/methods.yaml', yaml_data=None):
    """Resolve the configuration dict for *key*, following 'inherit_from' chains.

    Parent configs (listed in 'inherit_from') are applied first, so the key's
    own settings override inherited ones.

    Args:
        key: Config name to resolve; falsy keys yield None.
        config: Accumulator dict used by the recursion; a fresh dict is
            created per top-level call.
        yaml_path: File to load when *yaml_data* is not supplied.
        yaml_data: Pre-loaded mapping of config names to settings.

    Returns:
        The resolved settings dict, or None for a falsy key.
    """
    if not key:
        return None
    # Fix: a mutable default argument (config={}) leaked accumulated settings
    # between unrelated top-level calls.
    if config is None:
        config = {}
    assert yaml_path or yaml_data
    if yaml_data is None:
        # Fix: close the file handle (yaml.load(open(...)) leaked it).
        with open(yaml_path) as f:
            yaml_data = yaml.load(f, Loader=yaml.Loader)
    assert key in yaml_data.keys(), f'`{key}` can not be found in {yaml_path}'
    # Fix: copy before popping so the caller's yaml_data is never mutated.
    specific_data = dict(yaml_data[key])
    inherit_from = specific_data.pop('inherit_from', None)
    if inherit_from is not None:
        if not isinstance(inherit_from, list):
            inherit_from = [inherit_from]
        for parent_key in inherit_from:
            config = get_method_config(key=parent_key, config=config, yaml_path=yaml_path, yaml_data=yaml_data)
    for k, v in specific_data.items():
        config[k] = v
    return config
class TestEndecaDgraphCollector(CollectorTestCase):
    """Tests for the Endeca Dgraph diamond collector."""

    def setUp(self):
        config = get_collector_config('EndecaDgraphCollector', {})
        self.collector = EndecaDgraphCollector(config, None)

    def test_import(self):
        self.assertTrue(EndecaDgraphCollector)

    # NOTE(review): the two lines below look like stripped @patch decorators — confirm upstream.
    ('urllib2.urlopen')
    (Collector, 'publish')
    def test_real_data(self, publish_mock, urlopen_mock):
        """First collect only primes deltas; the second publishes real metrics."""
        urlopen_mock.return_value = self.getFixture('data1.xml')
        self.collector.collect()
        # The first sample only establishes a baseline, so nothing is published yet.
        self.assertPublishedMany(publish_mock, {})
        urlopen_mock.return_value = self.getFixture('data2.xml')
        self.collector.collect()
        metrics = {'statistics.cache_section.main_cache.aggregatedrecordcount.entry_count': 3957, 'statistics.cache_section.main_cache.dval_bincount.entry_count': 4922448, 'statistics.hot_spot_analysis.content_spotlighting_performance.min': 0.0209961, 'statistics.hot_spot_analysis.insertion_sort_time.avg': 0., 'statistics.hot_spot_analysis.ordinal_insertion_sort_time.n': 1484793, 'statistics.search_performance_analysis.qconj_lookupphr.min': 0., 'statistics.updates.update_latency.commit.audit_stat_calculation_time_resume_.n': 0}
        self.assertPublishedMany(publish_mock, metrics)
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
def load_model_config_from_hf(model_id: str):
    """Download and parse a model's config.json from the Hugging Face hub.

    Returns:
        Tuple (pretrained_cfg, model_name) where model_name comes from the
        config's 'architecture' entry (None when absent).
    """
    assert has_hf_hub(True)
    config_path = _download_from_hf(model_id, 'config.json')
    pretrained_cfg = load_cfg_from_json(config_path)
    # Record provenance so downstream loading resolves weights from the HF hub.
    pretrained_cfg.update({'hf_hub_id': model_id, 'source': 'hf-hub'})
    return (pretrained_cfg, pretrained_cfg.get('architecture'))
def repo_with_no_tags_tag_commits(git_repo_factory, file_in_repo):
    """Yield a fresh repository holding four commits and no tags."""
    repo = git_repo_factory()
    commit_messages = (
        'Initial commit',
        ':nut_and_bolt: add some more text',
        ':sparkles: add much more text',
        ':nut_and_bolt: more text',
    )
    # Each commit touches the same file so every commit has a change to record.
    for message in commit_messages:
        add_text_to_file(repo, file_in_repo)
        repo.git.commit(m=message)
    yield repo
    # Teardown: release the repo handle after the test finishes.
    repo.close()
class DataTrainingArguments():
    """Arguments controlling what data the model trains and evaluates on.

    NOTE(review): this looks like a HuggingFace ``@dataclass`` argument
    container whose decorator sits outside the visible chunk -- confirm.
    Either ``dataset_name`` or local ``train_file``/``validation_file``
    paths must be supplied (enforced in ``__post_init__``).
    """
    # Task identifier (e.g. 'ner', 'pos'); normalized to lowercase on init.
    task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
    dataset_name: Optional[str] = field(default='nielsr/funsd-layoutlmv3', metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a csv or JSON file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate on (a csv or JSON file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to predict on (a csv or JSON file).'})
    text_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of text to input in the file (a csv or JSON file).'})
    label_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of label to input in the file (a csv or JSON file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_seq_length: int = field(default=512, metadata={'help': 'The maximum total input sequence length after tokenization. If set, sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    label_all_tokens: bool = field(default=False, metadata={'help': 'Whether to put the label for one word on all tokens of generated by that word or just on the one (in which case the other tokens will have a padding index).'})
    return_entity_level_metrics: bool = field(default=False, metadata={'help': 'Whether to return all the entity levels during evaluation or just the overall ones.'})

    def __post_init__(self):
        """Validate the data-source arguments and normalize the task name.

        Raises:
            ValueError: when neither a dataset name nor local files are given.
            AssertionError: when a supplied file is not .csv or .json.
        """
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            # Local files: only csv/json extensions are supported.
            if (self.train_file is not None):
                extension = self.train_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
            if (self.validation_file is not None):
                extension = self.validation_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
        self.task_name = self.task_name.lower()
def compute_mIoU(gt_dir, pred_dir, devkit_dir=''):
    """Compute per-class IoU over a validation set and print the mean IoU.

    Args:
        gt_dir: directory containing ground-truth label images.
        pred_dir: directory containing predicted label images.
        devkit_dir: directory holding 'info.json', 'val.txt' and 'label.txt'.

    Returns:
        Array of per-class IoU values (NaN for classes never observed).
    """
    with open(join(devkit_dir, 'info.json'), 'r') as fp:
        info = json.load(fp)
    # FIX: np.int / np.str were deprecated aliases of the builtins and are
    # removed in NumPy >= 1.24 (AttributeError) -- use the builtins directly.
    num_classes = int(info['classes'])
    print('Num classes', num_classes)
    name_classes = np.array(info['label'], dtype=str)
    mapping = np.array(info['label2train'], dtype=int)
    hist = np.zeros((num_classes, num_classes))
    image_path_list = join(devkit_dir, 'val.txt')
    label_path_list = join(devkit_dir, 'label.txt')
    # FIX: close the list files deterministically instead of leaking handles.
    with open(label_path_list, 'r') as f:
        gt_imgs = f.read().splitlines()
    gt_imgs = [join(gt_dir, x) for x in gt_imgs]
    with open(image_path_list, 'r') as f:
        pred_imgs = f.read().splitlines()
    pred_imgs = [join(pred_dir, x.split('/')[-1]) for x in pred_imgs]
    for ind in range(len(gt_imgs)):
        pred = np.array(Image.open(pred_imgs[ind]))
        label = np.array(Image.open(gt_imgs[ind]))
        label = label_mapping(label, mapping)
        if len(label.flatten()) != len(pred.flatten()):
            # Skip pairs whose resolutions disagree rather than aborting.
            print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()), len(pred.flatten()), gt_imgs[ind], pred_imgs[ind]))
            continue
        hist += fast_hist(label.flatten(), pred.flatten(), num_classes)
        if (ind > 0) and ((ind % 10) == 0):
            # Periodic progress line with the running mean IoU.
            print('{:d} / {:d}: {:0.2f}'.format(ind, len(gt_imgs), (100 * np.mean(per_class_iu(hist)))))
    mIoUs = per_class_iu(hist)
    for ind_class in range(num_classes):
        print(((('===>' + name_classes[ind_class]) + ':\t') + str(round((mIoUs[ind_class] * 100), 2))))
    print(('===> mIoU: ' + str(round((np.nanmean(mIoUs) * 100), 2))))
    return mIoUs
def find_char_span_by_token_idx(id, doc):
    """Return [start, end) character offsets of token ``id`` within ``doc.text``.

    Works for whitespace-tokenized docs: the suffix of tokens starting at
    ``id`` is re-joined and located in the text, which disambiguates repeated
    tokens. Returns [] when the doc's tokenization does not line up with a
    plain whitespace split (e.g. multi-word or sub-word tokens).
    """
    # Sentinel suffix makes the searched-for tail unique enough that a short
    # final token cannot accidentally match an earlier occurrence.
    doc_text = doc.text + ' ABCDEF'
    token_list = doc_text.split()
    token_text = doc[id].text
    # FIX: the original asserted substring membership here, which crashed on
    # exactly the mismatch case the fallback below is meant to handle (and
    # asserts vanish under -O). Return the documented empty result instead.
    if token_text != token_list[id]:
        return []
    tail = ' '.join(token_list[id:])
    char_start = doc_text.find(tail)
    char_end = char_start + len(token_text)
    return [char_start, char_end]
class CT_TabStops(BaseOxmlElement):
    """``<w:tabs>`` element, a container of ``<w:tab>`` tab-stop children."""
    tab = OneOrMore('w:tab', successors=())

    def insert_tab_in_order(self, pos, align, leader):
        """Insert a new ``<w:tab>`` child, keeping children sorted by position."""
        tab = self._new_tab()
        tab.pos = pos
        tab.val = align
        tab.leader = leader
        # Place before the first existing stop with a larger position;
        # when none is larger, append at the end.
        for existing in self.tab_lst:
            if tab.pos < existing.pos:
                existing.addprevious(tab)
                break
        else:
            self.append(tab)
        return tab
def getWholeCount(path):
    """Print a per-category file-count table and return the counts for `path`.

    Each row also shows the matching category's count under the module-level
    `pathapks` tree. Returns the list of counts for `path`'s categories.
    """
    counts = []
    for category in os.listdir(path):
        n_files = len(os.listdir(os.path.join(path, category)))
        counts.append(n_files)
        # presumably `pathapks` is a module-level sibling tree with the same
        # category layout -- TODO confirm.
        n_apks = len(os.listdir(os.path.join(pathapks, category)))
        # Markdown-style table row: |category|count|apk-count|
        print('|{}|{}|{}|'.format(category, n_files, n_apks))
    return counts
class StatisticsDisplay(Observer, DisplayElement):
    """Tracks min/max/average temperature across readings and displays them."""
    # Running statistics, updated once per reading.
    __maxTemp: float = 0.0
    __minTemp: float = 200
    __tempSum: float = 0.0
    __numReadings: int = 0
    __weatherData: WeatherData

    def __init__(self, weatherData: WeatherData):
        self.__weatherData = weatherData
        # Subscribe so update() is invoked on each new measurement.
        weatherData.registerObserver(self)

    def update(self, temp: float, humidity: float, pressure: float) -> None:
        # Only temperature feeds the statistics; humidity and pressure are
        # part of the Observer interface but unused here.
        self.__tempSum += temp
        self.__numReadings += 1
        self.__maxTemp = max(self.__maxTemp, temp)
        self.__minTemp = min(self.__minTemp, temp)
        self.display()

    def display(self) -> None:
        average = self.__tempSum / self.__numReadings
        print(f'Avg/Max/Min temperature = {average}/{self.__maxTemp}/{self.__minTemp}')
def main_worker(ngpus_per_node, args):
    """Build the model and data pipeline, then run the train/validate loop
    for one worker process (optionally one GPU of a distributed job)."""
    global best_acc1
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True, args=args)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](args=args)
    # Replace the final classifier head to match this task's class count.
    num_ftrs = model.fc1.in_features
    model.fc1 = nn.Linear(num_ftrs, args.classes_num)
    nn.init.xavier_uniform_(model.fc1.weight, 0.1)
    nn.init.constant_(model.fc1.bias, 0.0)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # One process per GPU: divide batch size and workers accordingly.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG: data-parallelize only the feature extractor.
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # Per-sample (unreduced) loss used during training.
    # NOTE(review): `reduce=False` is deprecated in modern PyTorch in favor
    # of `reduction='none'` -- confirm the target torch version.
    criterion_train = nn.CrossEntropyLoss(reduce=False).cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        # Resume epoch counter, best accuracy, model and optimizer state.
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    testdir = os.path.join(args.data, 'test')
    # Training augmentation: random crop/flip/jitter/grayscale + ImageNet norm.
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224, scale=(args.min_scale, 1.0)), transforms.RandomHorizontalFlip(), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomGrayscale(args.gray_scale), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
    if args.distributed:
        print('initializing distributed sampler')
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    # Eval transforms are deterministic: fixed resize + normalize only.
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(datasets.ImageFolder(testdir, transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    log_dir = os.path.dirname(args.log_path)
    print('tensorboard dir {}'.format(log_dir))
    tensor_writer = SummaryWriter(log_dir)
    if args.evaluate:
        # Evaluation-only mode: one pass over the test set, then exit.
        validate(test_loader, model, criterion, 0, True, args, tensor_writer)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the sampler so each epoch shuffles differently.
            train_sampler.set_epoch(epoch)
        lr_setter(optimizer, epoch, args)
        train(train_loader, model, criterion_train, optimizer, epoch, args, tensor_writer)
        val_acc1 = validate(val_loader, model, criterion, epoch, False, args, tensor_writer)
        acc1 = validate(test_loader, model, criterion, epoch, True, args, tensor_writer)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Checkpoint saving is intended only on rank 0 of each node; the body
        # is an empty placeholder in this copy.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            pass
def add_weight_decay(adjust_per_optimizer=True):
    """Register an L2 weight-decay regularization loss over trainable
    variables, skipping batch-normalization parameters.

    When `adjust_per_optimizer` is set and LARS is in use, no loss is added
    (LARS applies its own decay handling).
    """
    if adjust_per_optimizer and 'lars' in FLAGS.optimizer:
        return
    l2_terms = [
        tf.nn.l2_loss(v)
        for v in tf.trainable_variables()
        if 'batch_normalization' not in v.name
    ]
    tf.losses.add_loss(FLAGS.weight_decay * tf.add_n(l2_terms),
                       tf.GraphKeys.REGULARIZATION_LOSSES)
_auth
def update_pwd(request):
    """Bulk-update the stored (encrypted) password for the selected assets.

    Expects a POST with 'pks' (asset ids) and 'pwd' (new plaintext password);
    responds with a JSON status payload.
    """
    if request.method == 'POST':
        asset_ids = request.POST.getlist('pks')
        new_pwd = request.POST.get('pwd')
        try:
            for asset_id in asset_ids:
                server = Assets.objects.get(id=asset_id).serverassets
                # Store only the encrypted form of the password.
                server.password = CryptPwd().encrypt_pwd(new_pwd)
                server.save()
            return JsonResponse({'code': 200, 'msg': '!'})
        except Exception as e:
            # Report any failure (missing asset, crypto error) to the caller.
            return JsonResponse({'code': 500, 'msg': ':{}'.format(e)})
class TransformerEncoderLayer(nn.Module):
    """One transformer encoder layer (self-attention + feed-forward),
    supporting both post-norm (default) and pre-norm residual layouts."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation='relu', normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feed-forward sublayer: linear -> activation -> dropout -> linear.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added (not concatenated) when provided.
        if pos is None:
            return tensor
        return tensor + pos

    def forward_post(self, src, src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        """Residual-then-norm variant."""
        query = key = self.with_pos_embed(src, pos)
        attn_out = self.self_attn(query, key, value=src, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ffn_out))

    def forward_pre(self, src, src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        """Norm-then-residual variant."""
        normed = self.norm1(src)
        query = key = self.with_pos_embed(normed, pos)
        attn_out = self.self_attn(query, key, value=normed, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(attn_out)
        normed = self.norm2(src)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ffn_out)

    def forward(self, src, src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class RandomChoiceShear(object):
    """Apply a Shear transform whose angle is sampled from a fixed set of
    candidate values, optionally with user-supplied probabilities."""

    def __init__(self, values, p=None, interp='bilinear', lazy=False):
        if isinstance(values, (list, tuple)):
            values = th.FloatTensor(values)
        self.values = values
        if p is None:
            # Default: sample uniformly over the candidate values.
            p = th.ones(len(values)) / len(values)
        elif abs(1.0 - sum(p)) > 0.001:
            raise ValueError('Probs must sum to 1')
        self.p = p
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        shear = th_random_choice(self.values, p=self.p)
        if self.lazy:
            # Lazy mode: return the transform applied lazily to the first input.
            return Shear(shear, lazy=True)(inputs[0])
        return Shear(shear, interp=self.interp)(*inputs)
class SnapshotSerializer(serializers.ModelSerializer):
    """Serializes a Snapshot together with its related attribute values."""
    values = serializers.SerializerMethodField()

    class Meta:
        model = Snapshot
        fields = ('title', 'description', 'values', 'created', 'updated')

    def get_values(self, obj):
        # select_related avoids one extra query per value for attribute/option.
        queryset = Value.objects.filter(snapshot=obj).select_related('attribute', 'option')
        return ValueSerializer(instance=queryset, many=True).data
_config
def test_kill(manager):
    """Killing a scratchpad dropdown window returns focus to the previously
    focused window and clears the dropdown's recorded window."""
    manager.c.group['SCRATCHPAD'].dropdown_reconfigure('dd-a')
    manager.test_window('one')
    assert_focused(manager, 'one')
    # Dropdown not yet spawned: no window recorded in its info.
    assert ('window' not in manager.c.group['SCRATCHPAD'].dropdown_info('dd-a'))
    # Toggling spawns the dropdown and gives it focus.
    manager.c.group['SCRATCHPAD'].dropdown_toggle('dd-a')
    is_spawned(manager, 'dd-a')
    assert_focused(manager, 'dd-a')
    assert (manager.c.group['SCRATCHPAD'].dropdown_info('dd-a')['window']['name'] == 'dd-a')
    # Kill the focused dropdown window and wait for it to disappear.
    manager.c.window.kill()
    manager.c.sync()
    is_killed(manager, 'dd-a')
    assert_focused(manager, 'one')
    assert ('window' not in manager.c.group['SCRATCHPAD'].dropdown_info('dd-a'))
def _populate_kernel_cache(np_type, blocks, dim_x, dim_z, dim_u, max_tpb):
    """Compile and cache the CUDA predict/update Kalman-filter kernels
    specialized for the given dtype and problem dimensions.

    Raises:
        ValueError: when `np_type` is not a supported datatype.
    """
    if np_type not in _SUPPORTED_TYPES:
        raise ValueError('Datatype {} not found for Kalman Filter'.format(np_type))
    # Map the numpy dtype name onto the C++ scalar type.
    c_type = 'float' if np_type == 'float32' else 'double'
    predict_name = '_cupy_predict<{}, {}, {}, {}, {}>'.format(c_type, blocks, dim_x, dim_u, max_tpb)
    update_name = '_cupy_update<{}, {}, {}, {}, {}>'.format(c_type, blocks, dim_x, dim_z, max_tpb)
    module = cp.RawModule(code=cuda_code_kalman,
                          options=('-std=c++11', '-fmad=true'),
                          name_expressions=(predict_name, update_name))
    _cupy_kernel_cache[(str(np_type), 'predict')] = module.get_function(predict_name)
    _cupy_kernel_cache[(str(np_type), 'update')] = module.get_function(update_name)
def parse_args():
    """Build and parse the command line for the TPU launch helper.

    Returns the parsed namespace with `num_cores`, `training_script` and the
    remaining arguments to forward verbatim in `training_script_args`.
    """
    parser = ArgumentParser(description='PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes')
    parser.add_argument('--num_cores', type=int, default=1,
                        help='Number of TPU cores to use (1 or 8).')
    # Positional: the script to launch; everything after it is captured
    # unmodified and handed to that script.
    parser.add_argument('training_script', type=str,
                        help='The full path to the single TPU training program/script to be launched in parallel, followed by all the arguments for the training script')
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def _gen_rhf_response_gam(mf, mo_coeff=None, mo_occ=None, singlet=None, hermi=0, max_memory=None):
    """Return a function vind(dm1) that computes the RHF/RKS response
    potential for a density-matrix perturbation at a single k-point.

    `singlet` selects the spin channel of the response (None = spin-traced,
    True = singlet, False = triplet); `hermi` follows the pyscf convention
    (2 means anti-hermitian dm1, for which Coulomb/XC terms vanish).
    """
    from pyscf.pbc.dft import numint, multigrid
    assert isinstance(mf, hf.RHF)
    if (mo_coeff is None):
        mo_coeff = mf.mo_coeff
    if (mo_occ is None):
        mo_occ = mf.mo_occ
    cell = mf.cell
    kpt = mf.kpt
    # DFT branch: include the XC kernel (fxc) contribution.
    if isinstance(mf, khf.pbchf.KohnShamDFT):
        ni = mf._numint
        ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)
        (omega, alpha, hyb) = ni.rsh_and_hybrid_coeff(mf.xc, spin=cell.spin)
        hybrid = ni.libxc.is_hybrid_xc(mf.xc)
        # Range-separated hybrids are not supported here.
        if (omega != 0):
            raise NotImplementedError
        # Pure functionals with multigrid FFTDF have a dedicated fast path.
        if ((not hybrid) and isinstance(mf.with_df, multigrid.MultiGridFFTDF)):
            dm0 = mf.make_rdm1(mo_coeff, mo_occ)
            return multigrid._gen_rhf_response(mf, dm0, singlet, hermi)
        if (singlet is None):
            spin = 0
        else:
            spin = 1
        # Pre-compute density/XC kernel on the grid; reused by every vind call.
        (rho0, vxc, fxc) = ni.cache_xc_kernel(cell, mf.grids, mf.xc, mo_coeff, mo_occ, spin, kpt)
        dm0 = None
        if (max_memory is None):
            mem_now = lib.current_memory()[0]
            max_memory = max(2000, ((mf.max_memory * 0.8) - mem_now))
        if (singlet is None):
            # Spin-traced response: XC + J - 0.5*hyb*K (J only if hermi != 2).
            def vind(dm1):
                if (hermi == 2):
                    v1 = numpy.zeros_like(dm1)
                else:
                    v1 = ni.nr_rks_fxc(cell, mf.grids, mf.xc, dm0, dm1, 0, hermi, rho0, vxc, fxc, kpt, max_memory=max_memory)
                if hybrid:
                    if (hermi != 2):
                        (vj, vk) = mf.get_jk(cell, dm1, hermi=hermi, kpt=kpt)
                        v1 += (vj - ((0.5 * hyb) * vk))
                    else:
                        v1 -= ((0.5 * hyb) * mf.get_k(cell, dm1, hermi=hermi, kpt=kpt))
                elif (hermi != 2):
                    v1 += mf.get_j(cell, dm1, hermi=hermi, kpt=kpt)
                return v1
        elif singlet:
            # Singlet response: spin-adapted fxc (factor 0.5) + J - 0.5*hyb*K.
            def vind(dm1):
                if (hermi == 2):
                    v1 = numpy.zeros_like(dm1)
                else:
                    v1 = numint.nr_rks_fxc_st(ni, cell, mf.grids, mf.xc, dm0, dm1, 0, True, rho0, vxc, fxc, kpt, max_memory=max_memory)
                    v1 *= 0.5
                if hybrid:
                    if (hermi != 2):
                        (vj, vk) = mf.get_jk(cell, dm1, hermi=hermi, kpt=kpt)
                        v1 += (vj - ((0.5 * hyb) * vk))
                    else:
                        v1 -= ((0.5 * hyb) * mf.get_k(cell, dm1, hermi=hermi, kpt=kpt))
                elif (hermi != 2):
                    v1 += mf.get_j(cell, dm1, hermi=hermi, kpt=kpt)
                return v1
        else:
            # Triplet response: no Coulomb term; only fxc and exchange.
            def vind(dm1):
                if (hermi == 2):
                    v1 = numpy.zeros_like(dm1)
                else:
                    v1 = numint.nr_rks_fxc_st(ni, cell, mf.grids, mf.xc, dm0, dm1, 0, False, rho0, vxc, fxc, kpt, max_memory=max_memory)
                    v1 *= 0.5
                if hybrid:
                    v1 += (((- 0.5) * hyb) * mf.get_k(cell, dm1, hermi=hermi, kpt=kpt))
                return v1
    # Pure HF branch: response is J - 0.5*K (or -0.5*K for triplet/hermi==2).
    elif (((singlet is None) or singlet) and (hermi != 2)):
        def vind(dm1):
            (vj, vk) = mf.get_jk(cell, dm1, hermi=hermi, kpt=kpt)
            return (vj - (0.5 * vk))
    else:
        def vind(dm1):
            return ((- 0.5) * mf.get_k(cell, dm1, hermi=hermi, kpt=kpt))
    return vind
class DLRM_Transformer(DLRM):
    """DLRM variant whose feature-interaction module is a transformer encoder
    rather than the default dot-product interaction."""

    def __init__(self, embedding_bag_collection: EmbeddingBagCollection, dense_in_features: int, dense_arch_layer_sizes: List[int], over_arch_layer_sizes: List[int], nhead: int = 8, ntransformer_layers: int = 4, dense_device: Optional[torch.device] = None) -> None:
        super().__init__(embedding_bag_collection, dense_in_features, dense_arch_layer_sizes, over_arch_layer_sizes, dense_device)
        embedding_dim: int = embedding_bag_collection.embedding_bag_configs()[0].embedding_dim
        n_sparse: int = len(self.sparse_arch.sparse_feature_names)
        # Swap the base-class interaction module for the transformer version.
        self.inter_arch = InteractionTransformerArch(
            num_sparse_features=n_sparse,
            embedding_dim=embedding_dim,
            nhead=nhead,
            ntransformer_layers=ntransformer_layers,
        )
        # The transformer keeps per-feature embeddings (+1 for the dense
        # feature), so the over-arch input width must be rebuilt to match.
        over_in: int = (n_sparse + 1) * embedding_dim
        self.over_arch = OverArch(in_features=over_in, layer_sizes=over_arch_layer_sizes, device=dense_device)
class Embed():
    """Callable that embeds batches of text with a SentenceTransformer on GPU."""

    def __init__(self):
        self.transformer = SentenceTransformer(model_name, device='cuda')

    def __call__(self, text_batch: List[str]):
        # Pair each input text with its embedding vector.
        vectors = self.transformer.encode(text_batch, batch_size=100, device='cuda').tolist()
        return list(zip(text_batch, vectors))
class Book(models.Model):
    """A book record owned by a single user."""
    # Owning user; deleting the user cascades to their books.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    name = models.CharField(max_length=200)
    pages = models.IntegerField()

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Edit view for this book, namespaced under 'books_fbv_user'.
        return reverse('books_fbv_user:book_edit', kwargs={'pk': self.pk})
def add_GreeterServicer_to_server(servicer, server):
    """Register the Greeter service's RPC handlers on a gRPC server.

    Covers all four streaming arities: unary-unary (SayHello), unary-stream
    (SayHelloGoodbye), stream-stream (SayHelloToMany) and stream-unary
    (SayHelloToManyAtOnce). This is protobuf-generated registration code.
    """
    rpc_method_handlers = {'SayHello': grpc.unary_unary_rpc_method_handler(servicer.SayHello, request_deserializer=generated_dot_greeter__pb2.HelloRequest.FromString, response_serializer=generated_dot_greeter__pb2.HelloReply.SerializeToString), 'SayHelloGoodbye': grpc.unary_stream_rpc_method_handler(servicer.SayHelloGoodbye, request_deserializer=generated_dot_greeter__pb2.HelloRequest.FromString, response_serializer=generated_dot_greeter__pb2.HelloReply.SerializeToString), 'SayHelloToMany': grpc.stream_stream_rpc_method_handler(servicer.SayHelloToMany, request_deserializer=generated_dot_greeter__pb2.HelloRequest.FromString, response_serializer=generated_dot_greeter__pb2.HelloReply.SerializeToString), 'SayHelloToManyAtOnce': grpc.stream_unary_rpc_method_handler(servicer.SayHelloToManyAtOnce, request_deserializer=generated_dot_greeter__pb2.HelloRequest.FromString, response_serializer=generated_dot_greeter__pb2.HelloReply.SerializeToString)}
    generic_handler = grpc.method_handlers_generic_handler('Greeter', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
def get_logger(log_path, results_raw_metrics_path, results_avg_metrics_path):
    """Configure logging from the module-level YAML config file, pointing the
    three file handlers at the supplied paths.

    Returns the (logger_fh, logger_fh_raw, logger_fh_avg) loggers.
    """
    with open(logging_configs_path, 'r') as f:
        conf = yaml.safe_load(f)
    # Redirect the file handlers declared in the config to run-specific files.
    conf['handlers']['fh']['filename'] = log_path
    conf['handlers']['fh_avg']['filename'] = results_avg_metrics_path
    conf['handlers']['fh_raw']['filename'] = results_raw_metrics_path
    logging.config.dictConfig(conf)
    return (
        logging.getLogger('logger_fh'),
        logging.getLogger('logger_fh_raw'),
        logging.getLogger('logger_fh_avg'),
    )
def round_window_to_full_blocks(window, block_shapes, height=0, width=0):
    """Expand a window outward so its edges land on raster block boundaries.

    Raises:
        WindowError: when the bands do not share one block/stripe structure.
    """
    if len(set(block_shapes)) != 1:
        raise WindowError('All bands must have the same block/stripe structure')
    window = evaluate(window, height=height, width=width)
    block_h, block_w = block_shapes[0]
    (row_start, row_stop), (col_start, col_stop) = window.toranges()
    # Round starts down to a block boundary; round stops up to one.
    row_min = int(row_start // block_h) * block_h
    row_max = int(row_stop // block_h) * block_h
    if row_stop % block_h != 0:
        row_max += block_h
    col_min = int(col_start // block_w) * block_w
    col_max = int(col_stop // block_w) * block_w
    if col_stop % block_w != 0:
        col_max += block_w
    return Window(col_min, row_min, col_max - col_min, row_max - row_min)
class TBItemsTurnHandler(DefaultScript):
    """Evennia script that drives turn-based combat in the room it is
    attached to: builds the fighter list, rolls initiative, enforces a
    per-turn timer, and advances/ends combat."""

    def at_script_creation(self):
        """Collect fighters from the room, roll turn order and start turn 1."""
        self.key = 'Combat Turn Handler'
        self.interval = 5
        self.persistent = True
        self.db.fighters = []
        # Anything in the room with an 'hp' attribute joins the fight.
        # NOTE(review): this reads `thing.db.hp` while next_turn() reads
        # `fighter.db.HP` -- Evennia attributes are looked up by name, so the
        # differing case looks like a latent bug; confirm against the game's
        # character typeclass.
        for thing in self.obj.contents:
            if thing.db.hp:
                self.db.fighters.append(thing)
        for fighter in self.db.fighters:
            self.initialize_for_combat(fighter)
        # Make the handler reachable from the room object.
        self.obj.db.combat_turnhandler = self
        # Initiative: highest roll acts first.
        ordered_by_roll = sorted(self.db.fighters, key=roll_init, reverse=True)
        self.db.fighters = ordered_by_roll
        self.obj.msg_contents(('Turn order is: %s ' % ', '.join((obj.key for obj in self.db.fighters))))
        self.start_turn(self.db.fighters[0])
        self.db.turn = 0
        self.db.timer = TURN_TIMEOUT

    def at_stop(self):
        """Clean combat state off every fighter when the script ends."""
        for fighter in self.db.fighters:
            combat_cleanup(fighter)
        self.obj.db.combat_turnhandler = None

    def at_repeat(self):
        """Tick (every `interval` seconds): count down the turn timer,
        warn near timeout, and force a disengage when it expires."""
        currentchar = self.db.fighters[self.db.turn]
        self.db.timer -= self.interval
        if (self.db.timer <= 0):
            self.obj.msg_contents(("%s's turn timed out!" % currentchar))
            # Spending all actions as 'disengage' ends the turn.
            spend_action(currentchar, 'all', action_name='disengage')
            return
        elif ((self.db.timer <= 10) and (not self.db.timeout_warning_given)):
            currentchar.msg('WARNING: About to time out!')
            self.db.timeout_warning_given = True

    def initialize_for_combat(self, character):
        """Reset a character's combat attributes for a fresh fight."""
        combat_cleanup(character)
        character.db.combat_actionsleft = 0
        character.db.combat_turnhandler = self
        character.db.combat_lastaction = 'null'

    def start_turn(self, character):
        """Grant the character their actions and fire their turn-start hook."""
        character.db.combat_actionsleft = ACTIONS_PER_TURN
        character.at_turn_start()

    def next_turn(self):
        """Advance to the next fighter, ending combat when everyone has
        disengaged or only one fighter remains standing."""
        # End combat if every fighter's last action was 'disengage'.
        disengage_check = True
        for fighter in self.db.fighters:
            if (fighter.db.combat_lastaction != 'disengage'):
                disengage_check = False
        if disengage_check:
            self.obj.msg_contents('All fighters have disengaged! Combat is over!')
            self.stop()
            return
        # End combat if all but one fighter are at 0 HP.
        defeated_characters = 0
        for fighter in self.db.fighters:
            if (fighter.db.HP == 0):
                defeated_characters += 1
        if (defeated_characters == (len(self.db.fighters) - 1)):
            for fighter in self.db.fighters:
                if (fighter.db.HP != 0):
                    LastStanding = fighter
            self.obj.msg_contents(('Only %s remains! Combat is over!' % LastStanding))
            self.stop()
            return
        # Rotate to the next fighter (wrapping around the list).
        currentchar = self.db.fighters[self.db.turn]
        self.db.turn += 1
        if (self.db.turn > (len(self.db.fighters) - 1)):
            self.db.turn = 0
        newchar = self.db.fighters[self.db.turn]
        # Reset the timer, padded by the time left until the next tick.
        self.db.timer = (TURN_TIMEOUT + self.time_until_next_repeat())
        self.db.timeout_warning_given = False
        self.obj.msg_contents(("%s's turn ends - %s's turn begins!" % (currentchar, newchar)))
        self.start_turn(newchar)
        # Timed conditions tick down at turn changes.
        for fighter in self.db.fighters:
            condition_tickdown(fighter, newchar)

    def turn_end_check(self, character):
        """Advance the turn automatically once a character is out of actions."""
        if (not character.db.combat_actionsleft):
            self.next_turn()
            return

    def join_fight(self, character):
        """Add a late joiner just before the current fighter, so they act
        last in the current rotation."""
        self.db.fighters.insert(self.db.turn, character)
        self.db.turn += 1
        self.initialize_for_combat(character)
class F8_TestCase(FC3_TestCase):
    """Kickstart 'rootpw' command tests for the Fedora 8 syntax, which adds
    the --lock flag and mutually-overriding --plaintext/--iscrypted flags."""

    def runTest(self):
        # All FC3-era behavior must still hold.
        FC3_TestCase.runTest(self)
        # --lock defaults to False.
        self.assertFalse(F8_RootPw().lock)
        self.assert_parse('rootpw --lock secrethandshake', 'rootpw --lock --plaintext secrethandshake\n')
        self.assert_parse('rootpw --plaintext secrethandshake', 'rootpw --plaintext secrethandshake\n')
        # When both flags are given, the last one on the line wins.
        self.assert_parse('rootpw --plaintext --iscrypted secrethandshake', 'rootpw --iscrypted secrethandshake\n')
        self.assert_parse('rootpw --iscrypted --plaintext secrethandshake\n', 'rootpw --plaintext secrethandshake\n')
        self.assert_parse('rootpw --lock --plaintext secrethandshake', 'rootpw --lock --plaintext secrethandshake\n')
        self.assert_parse('rootpw --iscrypted --lock secrethandshake', 'rootpw --iscrypted --lock secrethandshake\n')
        self.assert_parse('rootpw --lock --iscrypted --plaintext secrethandshake', 'rootpw --lock --plaintext secrethandshake\n')
        self.assert_parse('rootpw --lock --plaintext --iscrypted secrethandshake', 'rootpw --iscrypted --lock secrethandshake\n')
        self.assert_parse('rootpw --plaintext --iscrypted --lock secrethandshake', 'rootpw --iscrypted --lock secrethandshake\n')
        self.assert_parse('rootpw --iscrypted --plaintext --lock secrethandshake', 'rootpw --lock --plaintext secrethandshake\n')
        # '#' inside a quoted password is not a comment.
        obj = self.assert_parse('rootpw --plaintext "comment#inpassword"', 'rootpw --plaintext "comment#inpassword"\n')
        self.assertEqual(obj.password, 'comment#inpassword')
        # Boolean flags must not take values.
        self.assert_parse_error('rootpw --plaintext=ISEEENGLAND secrethandshake')
        self.assert_parse_error('rootpw --lock=NOKEYSFORYOU secrethandshake')
        self.assert_parse_error('rootpw --plaintext')
        # Only in F8 itself is '--lock' without a password an error
        # (later releases relax this, so subclasses skip the check).
        if (self.__class__.__name__ == 'F8_TestCase'):
            self.assert_parse_error('rootpw --lock')
class PositionWiseFeedForward(nn.Module):
def __init__(self, model_size, inner_size, dropout=0.0, variational=False, activation='relu', glu=False, weight_drop=0.0, dropout_residual=False, res_dropout=0.0):
super().__init__()
self.model_size = model_size
self.inner_size = inner_size
self.dropout = dropout
self.bias = True
self.variational = variational
self.activation = activation
self.glu = glu
self.weight_drop = weight_drop
self.autograd = False
self.fused_dropout_add = False
self.dropout_residual = dropout_residual
self.res_dropout = res_dropout
if (self.activation == 'relu'):
if self.glu:
self.act = nn.ReLU()
else:
self.act = ReLUDropout(p=self.dropout, variational=self.variational, batch_first=False)
elif (self.activation == 'gelu'):
self.act = nn.GELU()
elif (self.activation == 'agelu'):
self.act = AGELU()
elif (self.activation in ['silu', 'swish']):
self.act = SiLU()
elif (self.activation in ['sigmoid']):
if self.glu:
self.act = nn.functional.glu
else:
print('Sigmoid activation function is recommended to be used with -glu')
raise NotImplementedError
self.in_proj_weight = Parameter(torch.Tensor((inner_size * (2 if glu else 1)), model_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, inner_size))
self.in_proj_bias = Parameter(torch.Tensor((inner_size * (2 if glu else 1))))
self.out_proj_bias = Parameter(torch.Tensor(model_size))
self.reset_parameters()
self.fused = False
if ((not self.glu) and (self.activation in ['relu', 'silu', 'swish', 'gelu', 'agelu']) and (not self.variational)):
if (self.activation == 'relu'):
from onmt.modules.mlp.mlp import mlp_relu_function
if (mlp_relu_function is not None):
self.fused_function = mlp_relu_function
self.fused = True
elif (self.activation in ['silu', 'swish']):
from onmt.modules.mlp.mlp import mlp_silu_function
if (mlp_silu_function is not None):
self.fused_function = mlp_silu_function
self.fused = True
elif (self.activation == 'gelu'):
if self.dropout_residual:
from onmt.modules.mlp.mlp import mlp_gelu_dropout_add_function
if (mlp_gelu_dropout_add_function is not None):
self.fused_function = mlp_gelu_dropout_add_function
self.fused = True
self.fused_dropout_add = True
if (not self.fused):
from onmt.modules.mlp.mlp import mlp_gelu_function
if (mlp_gelu_function is not None):
self.fused_function = mlp_gelu_function
self.fused = True
elif (self.activation == 'agelu'):
from onmt.modules.mlp.mlp import mlp_agelu_function
if (mlp_agelu_function is not None):
self.fused_function = mlp_agelu_function
self.fused = True
def reset_parameters(self, init='normal'):
if (init == 'normal'):
std_ = math.sqrt((2.0 / (self.model_size + self.inner_size)))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
else:
std_ = math.sqrt((6.0 / (self.model_size + self.inner_size)))
nn.init.uniform_(self.in_proj_weight, (- std_), std_)
nn.init.uniform_(self.out_proj_weight, (- std_), std_)
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj_bias, 0.0)
def convert_autograd(self):
if self.autograd:
return
with torch.no_grad():
self.autograd = True
self.linear_in = torch.nn.Linear(self.model_size, self.inner_size)
self.linear_out = torch.nn.Linear(self.inner_size, self.model_size)
self.linear_in.weight.copy_(self.in_proj_weight)
self.linear_in.bias.copy_(self.in_proj_bias)
self.linear_out.weight.copy_(self.out_proj_weight)
self.linear_out.bias.copy_(self.out_proj_bias)
del self.in_proj_weight
del self.in_proj_bias
del self.out_proj_weight
del self.out_proj_bias
def forward(self, input, *args, **kwargs):
    """Position-wise feed-forward pass.

    Two execution paths:
      * fused CUDA kernel path (``self.fused`` and CUDA input, no autograd
        conversion): the whole MLP (and optionally dropout+residual) runs
        in one fused call;
      * reference path: linear -> activation (optionally GLU-gated) ->
        dropout -> linear, using either converted ``nn.Linear`` modules or
        the raw flat parameters.

    ``input`` is indexed as (seq_len, bsz, model_size) — taken from the
    ``size(0..2)`` reads below.
    """
    if (self.fused and input.is_cuda and (not self.autograd)):
        # Fused kernel path: flatten (seq, batch) into one leading dim.
        weights = [self.in_proj_weight, self.out_proj_weight]
        biases = [self.in_proj_bias, self.out_proj_bias]
        (seq_len, bsz, hidden_size) = (input.size(0), input.size(1), input.size(2))
        # Dropout probabilities are zeroed at eval time.
        dropout = (self.dropout if self.training else 0.0)
        if self.fused_dropout_add:
            # This kernel variant also applies residual dropout + add.
            res_dropout = (self.res_dropout if self.training else 0.0)
            hidden = self.fused_function(dropout, res_dropout, input.view((seq_len * bsz), (- 1)), *weights, *biases)
        else:
            recompute = onmt.constants.recompute
            hidden = self.fused_function(dropout, recompute, input.view((seq_len * bsz), (- 1)), *weights, *biases)
        hidden = hidden.view(seq_len, bsz, hidden_size)
    else:
        # Reference path. First projection:
        if self.autograd:
            hidden = self.linear_in(input)
        else:
            hidden = F.linear(input, self.in_proj_weight, self.in_proj_bias)
        if (self.glu and (self.activation != 'sigmoid')):
            # GLU: split the doubled inner projection into value and gate.
            (hidden, gate) = hidden.chunk(2, dim=(- 1))
            hidden = (self.act(hidden) * gate)
        else:
            hidden = self.act(hidden)
        # The fused plain-ReLU case skips intermediate dropout; mirror that here.
        if (not ((not self.glu) and (self.activation == 'relu'))):
            if self.variational:
                hidden = variational_dropout(hidden, p=self.dropout, training=self.training, inplace=(self.activation in ['silu', 'relu', 'swish', 'gelu']))
            else:
                hidden = F.dropout(hidden, p=self.dropout, training=self.training, inplace=(self.activation in ['silu', 'relu', 'swish', 'gelu']))
        # Second projection back to model_size:
        if self.autograd:
            hidden = self.linear_out(hidden)
        else:
            hidden = F.linear(hidden, self.out_proj_weight, self.out_proj_bias)
    if self.dropout_residual:
        # Residual add, unless the fused kernel already performed it.
        if (not self.fused_dropout_add):
            if (not self.variational):
                hidden = (F.dropout(hidden, p=self.res_dropout, training=self.training) + input)
            else:
                # NOTE(review): this branch uses p=self.dropout while the
                # non-variational branch uses self.res_dropout — looks
                # inconsistent; confirm whether res_dropout was intended.
                hidden = (variational_dropout(hidden, p=self.dropout, training=self.training) + input)
    return hidden
def test_make_prompt():
    """Exercise make_prompt: passthrough string, no cap, a cap, and solutions."""
    import difflib

    def check_equal(expected, actual):
        # On mismatch, print a line diff before failing to ease debugging.
        if expected != actual:
            delta = difflib.ndiff(expected.splitlines(keepends=True), actual.splitlines(keepends=True))
            print(''.join(delta))
        assert expected == actual

    examples = [
        Example('What is 2+2?', '4', 'Adding 2 and 2'),
        Example('What is the capital of France?', 'Paris', 'The capital of France'),
        Example('What color is the sky?', 'Blue', 'The sky is generally'),
    ]
    config = PromptConfig(question_prefix='Q: ', answer_prefix='A: ', final_answer_prefix='The answer is ', intra_example_sep='\n', inter_example_sep='\n\n')

    # A raw PromptStr passes through untouched.
    assert make_prompt(PromptStr('This is a test prompt.'), config, 2, 42, False) == 'This is a test prompt.'

    # num_examples = -1 keeps every (seed-42-shuffled) example.
    check_equal('Q: What color is the sky?\nA: The answer is Blue\n\n\nQ: What is 2+2?\nA: The answer is 4\n\n\nQ: What is the capital of France?\nA: The answer is Paris\n\n\n', make_prompt(examples, config, (- 1), 42, False))

    # A positive cap keeps only the first N shuffled examples.
    check_equal('Q: What color is the sky?\nA: The answer is Blue\n\n\nQ: What is 2+2?\nA: The answer is 4\n\n\n', make_prompt(examples, config, 2, 42, False))

    # include_solution=True prepends the reasoning before the final answer.
    check_equal('Q: What color is the sky?\nA: The sky is generally The answer is Blue\n\n\nQ: What is 2+2?\nA: Adding 2 and 2 The answer is 4\n\n\n', make_prompt(examples, config, 2, 42, True))
_new_faces(MaterialGroup.ROOF)
def create_flat_roof(bm, faces, prop):
    """Create a flat roof slab; optionally inset a raised border (parapet)."""
    cap = extrude_and_outset(bm, faces, prop.thickness, prop.outset)
    if not prop.add_border:
        return
    # Inset the cap to form the parapet rim, then sink the inner region by
    # almost the full slab thickness (the 0.0011 offset avoids z-fighting
    # with the slab bottom).
    bmesh.ops.inset_region(bm, faces=cap, thickness=prop.border, use_even_offset=True)
    extruded = bmesh.ops.extrude_face_region(bm, geom=cap).get('geom')
    bmesh.ops.translate(bm, vec=(0, 0, -(prop.thickness - 0.0011)), verts=filter_geom(extruded, BMVert))
    bmesh.ops.delete(bm, geom=cap, context='FACES')
def test_html_configured_output_dir(testdir):
    """The HTML report lands in the directory set via .coveragerc's [html] section."""
    script = testdir.makepyfile(SCRIPT)
    testdir.tmpdir.join('.coveragerc').write('\n[html]\ndirectory = somewhere\n')
    result = testdir.runpytest('-v', f'--cov={script.dirpath()}', '--cov-report=html', script)
    expected = ['*- coverage: platform *, python * -*', 'Coverage HTML written to dir somewhere', '*10 passed*']
    result.stdout.fnmatch_lines(expected)
    # The configured directory must exist and contain the report entry page.
    output_dir = testdir.tmpdir.join('somewhere')
    assert output_dir.check(dir=True)
    assert output_dir.join('index.html').check()
    assert result.ret == 0
class TSongsMenuPlugins(TestCase):
    """Tests for SongsMenu plugin discovery, enabling, and invocation.

    Plugins are generated on the fly as temporary .py modules inside a
    scratch folder that the PluginManager scans.
    """
    def _confirmer(self, *args):
        # Shared stub for both handler confirmation callbacks; records that
        # a confirmation was requested.
        self.confirmed = True
    def setUp(self):
        # Fresh temp plugin dir + manager per test; starts with no plugins.
        self.tempdir = mkdtemp()
        self.pm = PluginManager(folders=[self.tempdir])
        self.confirmed = False
        self.handler = SongsMenuPluginHandler(self._confirmer, self._confirmer)
        self.pm.register_handler(self.handler)
        self.pm.rescan()
        self.assertEqual(self.pm.plugins, [])
        self.library = SongLibrary('foo')
    def tearDown(self):
        self.library.destroy()
        self.pm.quit()
        shutil.rmtree(self.tempdir)
    def create_plugin(self, id='', name='', desc='', icon='', funcs=None, mod=False):
        """Write a synthetic plugin module into the scratch folder.

        NOTE(review): the ``id`` parameter is unused — PLUGIN_ID is written
        from ``name`` (note the two identical ``if name:`` guards below);
        confirm whether that is intentional.
        """
        (fd, fn) = mkstemp(suffix='.py', text=True, dir=self.tempdir)
        file = os.fdopen(fd, 'w')
        if mod:
            # Module-level plugin: attributes/functions at top level.
            indent = ''
        else:
            # Class-based plugin: members are indented inside the class body.
            file.write('from quodlibet.plugins.songsmenu import SongsMenuPlugin\n')
            file.write(('class %s(SongsMenuPlugin):\n' % name))
            indent = '    '
        file.write(('%spass\n' % indent))
        if name:
            file.write(f'''{indent}PLUGIN_ID = {name!r}
''')
        if name:
            file.write(f'''{indent}PLUGIN_NAME = {name!r}
''')
        if desc:
            file.write(f'''{indent}PLUGIN_DESC = {desc!r}
''')
        if icon:
            file.write(f'''{indent}PLUGIN_ICON = {icon!r}
''')
        for f in (funcs or []):
            if (f in ['__init__']):
                # A plugin whose constructor raises — used by the
                # "broken plugin" test below.
                file.write(f'''{indent}def {f}(self, *args): super().__init__(*args); raise Exception("as expected")
''')
            else:
                file.write(f'''{indent}def {f}(*args): return args
''')
        file.flush()
        file.close()
    def test_empty_has_no_plugins(self):
        self.pm.rescan()
        self.assertEqual(self.pm.plugins, [])
    def test_name_and_desc_plus_func_is_one(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_song'])
        self.pm.rescan()
        self.assertEqual(len(self.pm.plugins), 1)
    def test_additional_functions_still_only_one(self):
        # Multiple plugin_* hooks on one class still count as one plugin.
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_song', 'plugin_songs'])
        self.pm.rescan()
        self.assertEqual(len(self.pm.plugins), 1)
    def test_two_plugins_are_two(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_song'])
        self.create_plugin(name='Name2', desc='Desc2', funcs=['plugin_albums'])
        self.pm.rescan()
        self.assertEqual(len(self.pm.plugins), 2)
    def test_disables_plugin(self):
        # Newly discovered plugins start disabled.
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_song'])
        self.pm.rescan()
        self.assertFalse(self.pm.enabled(self.pm.plugins[0]))
    def test_enabledisable_plugin(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_song'])
        self.pm.rescan()
        plug = self.pm.plugins[0]
        self.pm.enable(plug, True)
        self.assertTrue(self.pm.enabled(plug))
        self.pm.enable(plug, False)
        self.assertFalse(self.pm.enabled(plug))
    def test_ignores_broken_plugin(self):
        # The plugin's __init__ raises; the menu should end up empty rather
        # than propagating the exception.
        self.create_plugin(name='Broken', desc='Desc', funcs=['__init__', 'plugin_song'])
        self.pm.rescan()
        plug = self.pm.plugins[0]
        self.pm.enable(plug, True)
        with capture_output():
            menu = self.handler.menu(None, [AudioFile()])
        self.assertFalse((menu and menu.get_children()))
    def test_Menu(self):
        self.create_plugin(name='Name', desc='Desc', funcs=['plugin_song'])
        self.handler.menu(None, [AudioFile()])
    def test_handling_songs_without_confirmation(self):
        plugin = Plugin(FakeSongsMenuPlugin)
        self.handler.plugin_enable(plugin)
        # Exactly MAX_INVOCATIONS songs: no confirmation prompt expected.
        MAX = FakeSongsMenuPlugin.MAX_INVOCATIONS
        songs = [AudioFile({'~filename': ('/tmp/%s' % x), 'artist': 'foo'}) for x in range(MAX)]
        self.handler.handle(plugin.id, self.library, None, songs)
        self.assertFalse(self.confirmed, ("Wasn't expecting a confirmation for %d invocations" % len(songs)))
    def test_handling_lots_of_songs_with_confirmation(self):
        plugin = Plugin(FakeSongsMenuPlugin)
        self.handler.plugin_enable(plugin)
        # One song over the limit must trigger the confirmation callback.
        MAX = FakeSongsMenuPlugin.MAX_INVOCATIONS
        songs = [AudioFile({'~filename': ('/tmp/%s' % x), 'artist': 'foo'}) for x in range((MAX + 1))]
        self.handler.handle(plugin.id, self.library, None, songs)
        self.assertTrue(self.confirmed, ('Should have confirmed %d invocations (Max=%d).' % (len(songs), MAX)))
def main():
    """Interactive demo: build the Env for the chosen robot and open an IPython shell."""
    import argparse
    import IPython
    robot_models = ['franka_panda/panda_suction', 'franka_panda/panda_drl']
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--robot-model', default=robot_models[0], choices=robot_models, help=' ')
    args = parser.parse_args()
    env = Env(class_ids=[2, 3, 5, 11, 12, 15], robot_model=args.robot_model)
    env.reset()
    # Drop into an interactive shell with `env` in scope.
    IPython.embed()
class MobileViTASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head for MobileViT segmentation.

    Five parallel branches over the backbone features (a 1x1 projection,
    three dilated 3x3 convolutions, and a pooling branch) are concatenated
    channel-wise and projected back to ``aspp_out_channels``.
    """

    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__()
        in_channels = config.neck_hidden_sizes[-2]
        out_channels = config.aspp_out_channels
        if len(config.atrous_rates) != 3:
            raise ValueError('Expected 3 values for atrous_rates')
        branches = [MobileViTConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_activation='relu')]
        branches += [
            MobileViTConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=3, dilation=rate, use_activation='relu')
            for rate in config.atrous_rates
        ]
        branches.append(MobileViTASPPPooling(config, in_channels, out_channels))
        self.convs = nn.ModuleList(branches)
        # Five branch outputs are concatenated before the final projection.
        self.project = MobileViTConvLayer(config, in_channels=(5 * out_channels), out_channels=out_channels, kernel_size=1, use_activation='relu')
        self.dropout = nn.Dropout(p=config.aspp_dropout_prob)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        pyramid = torch.cat([branch(features) for branch in self.convs], dim=1)
        return self.dropout(self.project(pyramid))
class ImageDecoder(Decoder):
    """Base class for still-image decoders; animation decoding is unsupported."""

    def get_animation_file_extensions(self):
        # Still-image decoders handle no animation formats.
        return []

    def decode(self, filename, file):
        """Subclasses must implement still-image decoding."""
        raise NotImplementedError()

    def decode_animation(self, filename, file):
        raise ImageDecodeException('This decoder cannot decode animations.')

    def __repr__(self):
        extensions = self.get_animation_file_extensions() + self.get_file_extensions()
        return '{0}{1}'.format(self.__class__.__name__, extensions)
def mask(self, operation, destination_kind, x_offset, y_offset, source_bitmap):
    """Send a Mask request for this window via the extension's major opcode.

    NOTE(review): ``extname`` is not defined in this method or its
    parameters — it presumably must be a module-level constant naming the
    X extension; verify it exists at module scope, otherwise this call
    raises NameError at runtime.
    """
    Mask(display=self.display, opcode=self.display.get_extension_major(extname), destination_window=self, operation=operation, destination_kind=destination_kind, x_offset=x_offset, y_offset=y_offset, source_bitmap=source_bitmap)
def get_image_path(image_lists, label_name, index, image_dir, category):
    """Return the full filesystem path of the index-th image for a label.

    Args:
        image_lists: dict mapping label name -> {'dir': subdir,
            <category>: [filenames], ...}.
        label_name: key into image_lists.
        index: arbitrary integer; wrapped modulo the category list length.
        image_dir: root directory holding the per-label sub-directories.
        category: which split to pull from (e.g. 'training').
    """
    if label_name not in image_lists:
        tf.logging.fatal('Label does not exist %s.', label_name)
    label_lists = image_lists[label_name]
    if category not in label_lists:
        tf.logging.fatal('Category does not exist %s.', category)
    filenames = label_lists[category]
    if not filenames:
        tf.logging.fatal('Label %s has no images in the category %s.', label_name, category)
    # Wrap the index so any integer maps onto a valid list position.
    chosen = filenames[index % len(filenames)]
    return os.path.join(image_dir, label_lists['dir'], chosen)
def _set_ctrl_swap(ctrl_bit, bloq: CSwap):
    """Wrap a CSwap with its control prepared in |ctrl_bit> and projected out."""
    prep = (ZeroState(), OneState())[ctrl_bit]
    unprep = (ZeroEffect(), OneEffect())[ctrl_bit]
    bb = BloqBuilder()
    ctrl = bb.add(prep)
    x = bb.add_register('q1', bloq.bitsize)
    y = bb.add_register('q2', bloq.bitsize)
    # Apply the controlled swap, then discard the control wire.
    (ctrl, x, y) = bb.add(bloq, ctrl=ctrl, x=x, y=y)
    bb.add(unprep, q=ctrl)
    return bb.finalize(q1=x, q2=y)
class TestBloombergTickerMapper(TestCase):
    """Tests for BloombergTicker <-> FIGI translation, with and without caching."""

    def test_ticker_to_figi__no_data_preloading(self):
        tm = BloombergTickerMapper(data_caching=False)
        cases = [
            (BloombergTicker('SPX Index', SecurityType.INDEX), 'BBG000H4FSM0'),
            (BloombergTicker('SPY US Equity', SecurityType.STOCK), 'BBG000BDTBL9'),
            (BloombergTicker('USDCHF Curncy', SecurityType.FX), 'BBG0013HFN45'),
        ]
        for (ticker, figi) in cases:
            with self.subTest(f'Testing {ticker} to FIGI mapping.'):
                self.assertEqual(tm.ticker_to_contract(ticker), figi)

    def test_incorrect_ticker_to_figi__no_data_preloading(self):
        tm = BloombergTickerMapper(data_caching=False)
        bad_tickers = (BloombergTicker('Incorrect Index', SecurityType.INDEX), BloombergTicker('Hihihi', SecurityType.STOCK))
        for ticker in bad_tickers:
            with self.subTest(f'Testing {ticker} to FIGI mapping.'):
                self.assertIsNone(tm.ticker_to_contract(ticker))

    def test_incorrect_figi_to_ticker__no_data_preloading(self):
        tm = BloombergTickerMapper(data_caching=False)
        self.assertIsNone(tm.contract_to_ticker('HIHIHIHIHI'))

    def test_figi_to_ticker__no_data_preloading(self):
        tm = BloombergTickerMapper(data_caching=False)
        cases = [
            ('BBG000H4FSM0', BloombergTicker('SPX Index', SecurityType.INDEX)),
            ('BBG000BDTBL9', BloombergTicker('SPY US Equity', SecurityType.STOCK)),
            ('BBG0013HFN45', BloombergTicker('USDCHF Curncy', SecurityType.FX)),
        ]
        for (figi, ticker) in cases:
            with self.subTest(f'Testing {figi} to Bloomberg Ticker mapping.'):
                self.assertEqual(tm.contract_to_ticker(figi), ticker)

    def test_ticker_to_figi__with_data_preloading(self):
        tm = BloombergTickerMapper(data_caching=True)
        cases = [
            (BloombergTicker('SPX Index', SecurityType.INDEX), 'BBG000H4FSM0'),
            (BloombergTicker('SPY US Equity', SecurityType.STOCK), 'BBG000BDTBL9'),
            (BloombergTicker('USDCHF Curncy', SecurityType.FX), 'BBG0013HFN45'),
        ]
        tm.preload_tickers_mapping([ticker for (ticker, _) in cases])
        # With caching on, the preloaded mapping dict is inspected directly.
        for (ticker, figi) in cases:
            with self.subTest(f'Testing {ticker} to FIGI mapping.'):
                self.assertEqual(tm.ticker_to_contract_data[ticker], figi)

    def test_figi_to_ticker__with_data_preloading(self):
        tm = BloombergTickerMapper(data_caching=True)
        cases = [
            ('BBG000H4FSM0', BloombergTicker('SPX Index', SecurityType.INDEX)),
            ('BBG000BDTBL9', BloombergTicker('SPY US Equity', SecurityType.STOCK)),
            ('BBG0013HFN45', BloombergTicker('USDCHF Curncy', SecurityType.FX)),
        ]
        tm.preload_figi_mapping([figi for (figi, _) in cases])
        for (figi, ticker) in cases:
            with self.subTest(f'Testing {figi} to Bloomberg Ticker mapping.'):
                self.assertEqual(tm.contract_data_to_ticker[figi], ticker)
class BiRecurrentMapper(SequenceMapper):
    """Bidirectional RNN sequence mapper.

    Runs a forward and a backward recurrent cell over the input; the two
    output streams are either concatenated on the last axis (default) or
    combined by the supplied ``merge`` layer.
    """

    def __init__(self, fw, bw=None, merge: MergeLayer=None, swap_memory=False):
        self.fw = fw
        self.bw = bw  # when None, the forward cell spec is reused backward
        self.merge = merge
        self.swap_memory = swap_memory

    def apply(self, is_train, inputs, mask=None):
        fw_cell = self.fw(is_train)
        bw_cell = (self.fw if self.bw is None else self.bw)(is_train)
        outputs = bidirectional_dynamic_rnn(fw_cell, bw_cell, inputs, mask, swap_memory=self.swap_memory, dtype=tf.float32)[0]
        if self.merge is None:
            return tf.concat(outputs, 2)
        (fw_out, bw_out) = outputs
        return self.merge.apply(is_train, fw_out, bw_out)
class AttrVI_ATTR_USB_INTR_IN_PIPE(RangeAttribute):
    """VISA attribute VI_ATTR_USB_INTR_IN_PIPE for USB RAW resources."""

    resources = [(constants.InterfaceType.usb, 'RAW')]

    py_name = ''
    visa_name = 'VI_ATTR_USB_INTR_IN_PIPE'
    visa_type = 'ViInt16'
    default = NotAvailable

    # Readable, writable, and settable per-session (local).
    read = True
    write = True
    local = True

    # Valid endpoint range, with -1 as an accepted sentinel value.
    min_value = 129
    max_value = 143
    values = [-1]
class TestInlineQueryWithoutRequest(TestInlineQueryBase):
    """Offline tests for telegram.InlineQuery (no network traffic)."""
    def test_slot_behaviour(self, inline_query):
        # Every declared slot must be a real attribute, and no slot may be
        # duplicated anywhere in the MRO.
        for attr in inline_query.__slots__:
            assert (getattr(inline_query, attr, 'err') != 'err'), f"got extra slot '{attr}'"
        assert (len(mro_slots(inline_query)) == len(set(mro_slots(inline_query)))), 'duplicate slot'
    def test_de_json(self, bot):
        # Round-trip from a raw JSON dict; no unexpected api_kwargs left over.
        json_dict = {'id': self.id_, 'from': self.from_user.to_dict(), 'query': self.query, 'offset': self.offset, 'location': self.location.to_dict()}
        inline_query_json = InlineQuery.de_json(json_dict, bot)
        assert (inline_query_json.api_kwargs == {})
        assert (inline_query_json.id == self.id_)
        assert (inline_query_json.from_user == self.from_user)
        assert (inline_query_json.location == self.location)
        assert (inline_query_json.query == self.query)
        assert (inline_query_json.offset == self.offset)
    def test_to_dict(self, inline_query):
        # to_dict serialises nested objects ('from', 'location') as dicts.
        inline_query_dict = inline_query.to_dict()
        assert isinstance(inline_query_dict, dict)
        assert (inline_query_dict['id'] == inline_query.id)
        assert (inline_query_dict['from'] == inline_query.from_user.to_dict())
        assert (inline_query_dict['location'] == inline_query.location.to_dict())
        assert (inline_query_dict['query'] == inline_query.query)
        assert (inline_query_dict['offset'] == inline_query.offset)
    def test_equality(self):
        # Equality is keyed on id alone: c differs only in its user and still
        # compares equal to a; d differs in id and does not.
        a = InlineQuery(self.id_, User(1, '', False), '', '')
        b = InlineQuery(self.id_, User(1, '', False), '', '')
        c = InlineQuery(self.id_, User(0, '', False), '', '')
        d = InlineQuery(0, User(1, '', False), '', '')
        e = Update(self.id_)
        assert (a == b)
        assert (hash(a) == hash(b))
        assert (a is not b)
        assert (a == c)
        assert (hash(a) == hash(c))
        assert (a != d)
        assert (hash(a) != hash(d))
        assert (a != e)
        assert (hash(a) != hash(e))
    async def test_answer_error(self, inline_query):
        # auto_pagination and current_offset cannot be combined.
        with pytest.raises(ValueError, match='mutually exclusive'):
            (await inline_query.answer(results=[], auto_pagination=True, current_offset='foobar'))
    async def test_answer(self, monkeypatch, inline_query):
        # The shortcut must forward this query's id to Bot.answer_inline_query.
        async def make_assertion(*_, **kwargs):
            return (kwargs['inline_query_id'] == inline_query.id)
        assert check_shortcut_signature(InlineQuery.answer, Bot.answer_inline_query, ['inline_query_id'], ['auto_pagination'])
        assert (await check_shortcut_call(inline_query.answer, inline_query.get_bot(), 'answer_inline_query'))
        assert (await check_defaults_handling(inline_query.answer, inline_query.get_bot()))
        monkeypatch.setattr(inline_query.get_bot(), 'answer_inline_query', make_assertion)
        assert (await inline_query.answer(results=[]))
    async def test_answer_auto_pagination(self, monkeypatch, inline_query):
        # auto_pagination must pass the query's own offset as current_offset.
        async def make_assertion(*_, **kwargs):
            inline_query_id_matches = (kwargs['inline_query_id'] == inline_query.id)
            offset_matches = (kwargs.get('current_offset') == inline_query.offset)
            return (offset_matches and inline_query_id_matches)
        monkeypatch.setattr(inline_query.get_bot(), 'answer_inline_query', make_assertion)
        assert (await inline_query.answer(results=[], auto_pagination=True))
def parse_pyproject_toml(text, rootdir, name=None, *, tools=None, requirefiles=True):
    """Parse pyproject.toml text and normalise every known section in place.

    Each section listed in SECTIONS is replaced by its normalised form
    (missing sections become None). Any top-level section not in SECTIONS
    is an error.

    Returns:
        The data dict with all known sections normalised.
    Raises:
        ValueError: if the document contains unsupported sections.
    """
    data = tomllib.loads(text)
    leftover = set(data)
    for (section, normalize) in SECTIONS.items():
        if section in data:
            data[section] = normalize(data[section], name=name, tools=tools, rootdir=rootdir, requirefiles=requirefiles)
            leftover.discard(section)
        else:
            data[section] = None
    if leftover:
        raise ValueError(f"unsupported sections ({', '.join(sorted(leftover))})")
    return data
class DRN_A(nn.Module):
    """Dilated ResNet, variant A.

    A standard ResNet backbone in which the strides of stages 3 and 4 are
    replaced by dilations (2 and 4), keeping a higher output resolution.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(DRN_A, self).__init__()
        self.out_dim = 512 * block.expansion
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Stages 3/4 keep stride 1 and dilate instead of downsampling.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.avgpool = nn.AvgPool2d(28, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual blocks; project the identity when shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        layers.extend(block(self.inplanes, planes, dilation=(dilation, dilation)) for _ in range(1, blocks))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> four residual stages -> global average pool -> logits."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        x = self.avgpool(x)
        return self.fc(x.view(x.size(0), -1))
class Add(GateWithRegisters, cirq.ArithmeticGate):
    """In-place quantum addition: classically maps (a, b) -> (a, a + b).

    Decomposes into a ripple-carry adder using one And ancilla per carry
    bit, costing 4*bitsize - 4 T gates (see _t_complexity_).

    NOTE(review): ``bitsize`` is a bare annotation, so the class is
    presumably decorated (attrs/dataclass style) just above this chunk —
    confirm upstream.
    """
    # Width in bits of each of the two addend registers.
    bitsize: int
    def signature(self):
        # Two equally sized registers: 'a' (kept) and 'b' (overwritten).
        return Signature.build(a=self.bitsize, b=self.bitsize)
    def registers(self) -> Sequence[Union[(int, Sequence[int])]]:
        # cirq.ArithmeticGate protocol: each register as a list of qubit dimensions.
        return (([2] * self.bitsize), ([2] * self.bitsize))
    def with_registers(self, *new_registers) -> 'Add':
        return Add(len(new_registers[0]))
    def apply(self, *register_values: int) -> Union[(int, Iterable[int])]:
        # Classical action used by cirq for simulation.
        (a, b) = register_values
        return (a, (a + b))
    def on_classical_vals(self, a: 'ClassicalValT', b: 'ClassicalValT') -> Dict[(str, 'ClassicalValT')]:
        return {'a': a, 'b': (a + b)}
    def short_name(self) -> str:
        return 'a+b'
    def _circuit_diagram_info_(self, _) -> cirq.CircuitDiagramInfo:
        wire_symbols = (['In(x)'] * self.bitsize)
        wire_symbols += (['In(y)/Out(x+y)'] * self.bitsize)
        return cirq.CircuitDiagramInfo(wire_symbols=wire_symbols)
    def _has_unitary_(self):
        return True
    def _left_building_block(self, inp, out, anc, depth):
        # Forward sweep: recursively computes the carry bit for each depth
        # into anc[depth], stopping before the most significant bit.
        if (depth == (self.bitsize - 1)):
            return
        else:
            (yield cirq.CX(anc[(depth - 1)], inp[depth]))
            (yield cirq.CX(anc[(depth - 1)], out[depth]))
            # And() writes the new carry into the fresh ancilla anc[depth].
            (yield And().on(inp[depth], out[depth], anc[depth]))
            (yield cirq.CX(anc[(depth - 1)], anc[depth]))
            (yield from self._left_building_block(inp, out, anc, (depth + 1)))
    def _right_building_block(self, inp, out, anc, depth):
        # Reverse sweep: uncomputes each carry (adjoint And) and writes the
        # sum bits, walking back down to depth 0.
        if (depth == 0):
            return
        else:
            (yield cirq.CX(anc[(depth - 1)], anc[depth]))
            (yield And(adjoint=True).on(inp[depth], out[depth], anc[depth]))
            (yield cirq.CX(anc[(depth - 1)], inp[depth]))
            (yield cirq.CX(inp[depth], out[depth]))
            (yield from self._right_building_block(inp, out, anc, (depth - 1)))
    def decompose_from_registers(self, *, context: cirq.DecompositionContext, **quregs: NDArray[cirq.Qid]) -> cirq.OP_TREE:
        """Ripple-carry decomposition: seed carry, sweep up, fix MSB, sweep down."""
        # Reverse the qubit arrays so index 0 is the least significant bit.
        input_bits = quregs['a'][::(- 1)]
        output_bits = quregs['b'][::(- 1)]
        ancillas = context.qubit_manager.qalloc((self.bitsize - 1))[::(- 1)]
        (yield And().on(input_bits[0], output_bits[0], ancillas[0]))
        (yield from self._left_building_block(input_bits, output_bits, ancillas, 1))
        (yield cirq.CX(ancillas[(- 1)], output_bits[(- 1)]))
        (yield cirq.CX(input_bits[(- 1)], output_bits[(- 1)]))
        (yield from self._right_building_block(input_bits, output_bits, ancillas, (self.bitsize - 2)))
        (yield And(adjoint=True).on(input_bits[0], output_bits[0], ancillas[0]))
        (yield cirq.CX(input_bits[0], output_bits[0]))
        context.qubit_manager.qfree(ancillas)
    def _t_complexity_(self):
        # Gate counts follow the ripple-carry construction above.
        num_clifford = (((self.bitsize - 2) * 19) + 16)
        num_t_gates = ((4 * self.bitsize) - 4)
        return TComplexity(t=num_t_gates, clifford=num_clifford)
    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        num_clifford = (((self.bitsize - 2) * 19) + 16)
        num_toffoli = (self.bitsize - 1)
        return {(Toffoli(), num_toffoli), (ArbitraryClifford(n=1), num_clifford)}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.