code stringlengths 281 23.7M |
|---|
@pytest.mark.usefixtures('save_env')
class TestUtil:
    """Tests for the distutils-style ``util`` helpers: platform detection,
    path conversion, environment checks and small string utilities.

    The ``save_env`` fixture restores ``os``/``os.path`` attributes and the
    environment that several tests monkeypatch directly.
    """

    def test_get_host_platform(self):
        # On Windows the host platform is derived from ARM/ARM64 markers
        # embedded in the interpreter build string.
        with mock.patch('os.name', 'nt'):
            with mock.patch('sys.version', '... [... (ARM64)]'):
                assert get_host_platform() == 'win-arm64'
            with mock.patch('sys.version', '... [... (ARM)]'):
                assert get_host_platform() == 'win-arm32'
        # Otherwise the answer must agree with the stdlib's sysconfig.
        with mock.patch('sys.version_info', (3, 9, 0, 'final', 0)):
            assert get_host_platform() == stdlib_sysconfig.get_platform()

    def test_get_platform(self):
        # The *target* platform follows the VSCMD_ARG_TGT_ARCH variable set
        # by cross-compiling Visual Studio developer prompts.
        with mock.patch('os.name', 'nt'):
            with mock.patch.dict('os.environ', {'VSCMD_ARG_TGT_ARCH': 'x86'}):
                assert get_platform() == 'win32'
            with mock.patch.dict('os.environ', {'VSCMD_ARG_TGT_ARCH': 'x64'}):
                assert get_platform() == 'win-amd64'
            with mock.patch.dict('os.environ', {'VSCMD_ARG_TGT_ARCH': 'arm'}):
                assert get_platform() == 'win-arm32'
            with mock.patch.dict('os.environ', {'VSCMD_ARG_TGT_ARCH': 'arm64'}):
                assert get_platform() == 'win-arm64'

    def test_convert_path(self):
        # Unix separator: posix paths are returned unchanged.
        os.sep = '/'

        def _join(path):
            return '/'.join(path)
        os.path.join = _join
        assert convert_path('/home/to/my/stuff') == '/home/to/my/stuff'

        # Windows separator: absolute or trailing-slash posix paths cannot
        # be converted and must raise.
        os.sep = '\\'

        def _join(*path):
            return '\\'.join(path)
        os.path.join = _join
        with pytest.raises(ValueError):
            convert_path('/home/to/my/stuff')
        with pytest.raises(ValueError):
            convert_path('home/to/my/stuff/')
        assert convert_path('home/to/my/stuff') == 'home\\to\\my\\stuff'
        assert convert_path('.') == os.curdir

    def test_change_root(self):
        # Posix: root is prefixed onto the (relativized) path.
        os.name = 'posix'

        def _isabs(path):
            return path[0] == '/'
        os.path.isabs = _isabs

        def _join(*path):
            return '/'.join(path)
        os.path.join = _join
        assert change_root('/root', '/old/its/here') == '/root/old/its/here'
        assert change_root('/root', 'its/here') == '/root/its/here'

        # Windows: the drive letter is stripped before joining.
        os.name = 'nt'

        def _isabs(path):
            return path.startswith('c:\\')
        os.path.isabs = _isabs

        def _splitdrive(path):
            if path.startswith('c:'):
                return ('', path.replace('c:', ''))
            return ('', path)
        os.path.splitdrive = _splitdrive

        def _join(*path):
            return '\\'.join(path)
        os.path.join = _join
        assert change_root('c:\\root', 'c:\\old\\its\\here') == 'c:\\root\\old\\its\\here'
        assert change_root('c:\\root', 'its\\here') == 'c:\\root\\its\\here'

        # Unsupported platforms must raise.
        os.name = 'BugsBunny'
        with pytest.raises(DistutilsPlatformError):
            change_root('c:\\root', 'its\\here')

    def test_check_environ(self):
        # With HOME unset, check_environ must still define PLAT.
        util.check_environ.cache_clear()
        os.environ.pop('HOME', None)
        check_environ()
        assert os.environ['PLAT'] == get_platform()

    @pytest.mark.skipif("os.name != 'posix'")
    def test_check_environ_getpwuid(self):
        util.check_environ.cache_clear()
        os.environ.pop('HOME', None)
        import pwd
        # A pwd entry whose home directory should land in os.environ['HOME'].
        result = pwd.struct_passwd((None, None, None, None, None, '/home/distutils', None))
        with mock.patch.object(pwd, 'getpwuid', return_value=result):
            check_environ()
            assert os.environ['HOME'] == '/home/distutils'
        util.check_environ.cache_clear()
        os.environ.pop('HOME', None)
        # If the user has no pwd entry, HOME must stay unset.
        with mock.patch.object(pwd, 'getpwuid', side_effect=KeyError):
            check_environ()
            assert 'HOME' not in os.environ

    def test_split_quoted(self):
        assert split_quoted('""one"" "two" \'three\' \\four') == ['one', 'two', 'three', 'four']

    def test_strtobool(self):
        yes = ('y', 'Y', 'yes', 'True', 't', 'true', 'True', 'On', 'on', '1')
        no = ('n', 'no', 'f', 'false', 'off', '0', 'Off', 'No', 'N')
        for y in yes:
            assert strtobool(y)
        for n in no:
            assert not strtobool(n)

    def test_rfc822_escape(self):
        # Each newline in a header gets 8 spaces of continuation indent.
        header = 'I am a\npoor\nlonesome\nheader\n'
        res = rfc822_escape(header)
        wanted = 'I am a%(8s)spoor%(8s)slonesome%(8s)sheader%(8s)s' % {'8s': '\n' + (8 * ' ')}
        assert res == wanted

    def test_dont_write_bytecode(self):
        # byte_compile must refuse to run when bytecode writing is disabled.
        old_dont_write_bytecode = sys.dont_write_bytecode
        sys.dont_write_bytecode = True
        try:
            with pytest.raises(DistutilsByteCompileError):
                byte_compile([])
        finally:
            sys.dont_write_bytecode = old_dont_write_bytecode

    def test_grok_environment_error(self):
        exc = IOError('Unable to find batch file')
        msg = grok_environment_error(exc)
        assert msg == 'error: Unable to find batch file'
def load_dataset_stats(config):
    """Load the precomputed statistics file (e.g. for FID) matching the
    configured dataset.

    Raises:
        ValueError: if no stats file is known for ``config.data.dataset``.
    """
    data = config.data
    if data.dataset == 'CIFAR10':
        filename = 'assets/stats/cifar10_stats.npz'
    elif data.dataset == 'CELEBA':
        filename = 'assets/stats/celeba_stats.npz'
    elif data.dataset == 'LSUN':
        # LSUN stats are per category and image size.
        filename = f'assets/stats/lsun_{data.category}_{data.image_size}_stats.npz'
    elif data.dataset == 'ImageNet':
        filename = f'assets/stats/imagenet{data.image_size}_stats.npz'
    else:
        raise ValueError(f'Dataset {data.dataset} stats not found.')
    with tf.io.gfile.GFile(filename, 'rb') as fin:
        stats = np.load(fin)
    return stats
class TestHandlersApp(RapidTest):
    """Tests for HandlersApp's handler discovery and message dispatch."""

    def setUp(self):
        self.connection = self.create_connection()

    def test_init(self):
        """The app discovers the handlers of every installed app."""
        settings = {'INSTALLED_APPS': ['rapidsms.contrib.echo']}
        with override_settings(**settings):
            app = HandlersApp(self.router)
            self.assertEqual(len(app.handlers), 2)
            self.assertIn(EchoHandler, app.handlers)
            self.assertIn(PingHandler, app.handlers)

    def test_handle(self):
        """A matching handler produces a response and reports handled."""
        app = HandlersApp(self.router)
        app.handlers = [EchoKeywordHandler]
        msg = IncomingMessage(self.connection, 'hello world')
        self.assertTrue(app.handle(msg))
        self.assertEqual(len(msg.responses), 1)
        self.assertEqual(msg.responses[0]['text'], 'world')

    def test_no_handlers(self):
        """Without handlers, the message is left untouched."""
        app = HandlersApp(self.router)
        app.handlers = []
        msg = IncomingMessage(self.connection, 'hello world')
        self.assertIsNone(app.handle(msg))
        self.assertEqual(len(msg.responses), 0)
class DenseModule(nn.Module):
    """DenseNet-style block: each layer sees the concatenation of the input
    and all previous layer outputs, and contributes ``growth`` channels via
    a 1x1 bottleneck followed by a (dilated) 3x3 convolution."""

    def __init__(self, in_channels, growth, layers, bottleneck_factor=4, norm_act=ABN, dilation=1):
        super(DenseModule, self).__init__()
        self.in_channels = in_channels
        self.growth = growth
        self.layers = layers
        self.convs1 = nn.ModuleList()
        self.convs3 = nn.ModuleList()
        mid_channels = growth * bottleneck_factor
        channels = in_channels
        for _ in range(layers):
            # 1x1 bottleneck reducing the dense input to mid_channels.
            self.convs1.append(nn.Sequential(OrderedDict([
                ('bn', norm_act(channels)),
                ('conv', nn.Conv2d(channels, mid_channels, 1, bias=False)),
            ])))
            # 3x3 convolution producing this layer's `growth` new channels.
            self.convs3.append(nn.Sequential(OrderedDict([
                ('bn', norm_act(mid_channels)),
                ('conv', nn.Conv2d(mid_channels, growth, 3, padding=dilation, bias=False, dilation=dilation)),
            ])))
            channels += growth

    def out_channels(self):
        """Number of channels of the concatenated output tensor."""
        return self.in_channels + self.growth * self.layers

    def forward(self, x):
        features = [x]
        for conv1, conv3 in zip(self.convs1, self.convs3):
            x = conv3(conv1(torch.cat(features, dim=1)))
            features.append(x)
        return torch.cat(features, dim=1)
def tencent_trick(model: nn.Module) -> list:
    """Split trainable parameters into two optimizer groups: biases and
    other 1-D parameters (e.g. norm scales) get zero weight decay, all
    remaining parameters use the optimizer's default decay."""
    no_decay, decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            # Frozen parameters are excluded from both groups.
            continue
        is_no_decay = param.dim() == 1 or name.endswith('.bias')
        (no_decay if is_no_decay else decay).append(param)
    return [{'params': no_decay, 'weight_decay': 0.0}, {'params': decay}]
class Match(operator):
    """Query-plan operator representing a PythonQL ``match`` clause."""

    def __init__(self, exact, vars, pattern, expr):
        # Store the clause components verbatim.
        (self.exact, self.vars, self.pattern, self.expr) = (exact, vars, pattern, expr)

    def defined_vars(self):
        """Return the set of variables this clause binds."""
        return set(self.vars)

    def execute(self, table, prior_locs, prior_globs):
        """Delegate to the executor; imported lazily to avoid a circular
        import with pythonql.Executor."""
        from pythonql.Executor import processMatchClause
        return processMatchClause(self, table, prior_locs, prior_globs)
# NOTE(review): the decorator below appears truncated by extraction; upstream
# it is presumably a registry decorator (e.g. ``@HEADS.register_module()``)
# -- confirm against the original repository before relying on it.
_module()
class DBHead(HeadMixin, BaseModule):
    """Head for Differentiable Binarization (DB) text detection.

    Produces a probability map, a threshold map and a differentiable binary
    map, concatenated along the channel axis; each branch upsamples 4x via
    two stride-2 transposed convolutions and ends in a sigmoid.
    """

    def __init__(self, in_channels, with_bias=False, downsample_ratio=1.0, loss=dict(type='DBLoss'), postprocessor=dict(type='DBPostprocessor', text_repr_type='quad'), init_cfg=[dict(type='Kaiming', layer='Conv'), dict(type='Constant', layer='BatchNorm', val=1.0, bias=0.0001)], train_cfg=None, test_cfg=None, **kwargs):
        """
        Args:
            in_channels (int): number of input feature channels.
            with_bias (bool): whether the first conv of the binarization
                branch has a bias term.
            downsample_ratio (float): ratio between output map and input.
            loss (dict): loss config dict.
            postprocessor (dict): postprocessor config dict.
            init_cfg (list[dict]): weight-initialization config.
        """
        # Migrate deprecated top-level kwargs into the postprocessor config.
        old_keys = ['text_repr_type', 'decoding_type']
        for key in old_keys:
            if kwargs.get(key, None):
                postprocessor[key] = kwargs.get(key)
                # NOTE(review): the warning text appears to have lost its
                # URL ("See for details.") during extraction.
                warnings.warn(f'{key} is deprecated, please specify it in postprocessor config dict. See for details.', UserWarning)
        BaseModule.__init__(self, init_cfg=init_cfg)
        HeadMixin.__init__(self, loss, postprocessor)
        assert isinstance(in_channels, int)
        self.in_channels = in_channels
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.downsample_ratio = downsample_ratio
        # Probability-map branch: 3x3 conv -> 2x up -> 2x up -> sigmoid.
        self.binarize = Sequential(nn.Conv2d(in_channels, (in_channels // 4), 3, bias=with_bias, padding=1), nn.BatchNorm2d((in_channels // 4)), nn.ReLU(inplace=True), nn.ConvTranspose2d((in_channels // 4), (in_channels // 4), 2, 2), nn.BatchNorm2d((in_channels // 4)), nn.ReLU(inplace=True), nn.ConvTranspose2d((in_channels // 4), 1, 2, 2), nn.Sigmoid())
        # Threshold-map branch with the same layout (see _init_thr).
        self.threshold = self._init_thr(in_channels)

    def diff_binarize(self, prob_map, thr_map, k):
        # Differentiable binarization: 1 / (1 + exp(-k * (P - T))).
        return torch.reciprocal((1.0 + torch.exp(((- k) * (prob_map - thr_map)))))

    def forward(self, inputs):
        """Return prob, threshold and binary maps stacked on channel dim."""
        prob_map = self.binarize(inputs)
        thr_map = self.threshold(inputs)
        binary_map = self.diff_binarize(prob_map, thr_map, k=50)
        outputs = torch.cat((prob_map, thr_map, binary_map), dim=1)
        return outputs

    def _init_thr(self, inner_channels, bias=False):
        """Build the threshold-map branch (mirrors ``binarize``)."""
        in_channels = inner_channels
        seq = Sequential(nn.Conv2d(in_channels, (inner_channels // 4), 3, padding=1, bias=bias), nn.BatchNorm2d((inner_channels // 4)), nn.ReLU(inplace=True), nn.ConvTranspose2d((inner_channels // 4), (inner_channels // 4), 2, 2), nn.BatchNorm2d((inner_channels // 4)), nn.ReLU(inplace=True), nn.ConvTranspose2d((inner_channels // 4), 1, 2, 2), nn.Sigmoid())
        return seq
class LxmertConfig(PretrainedConfig):
    """Configuration for an LXMERT model (language, vision and
    cross-modality encoders plus pre-training task switches)."""

    model_type = 'lxmert'
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        """Store every hyper-parameter on the instance; remaining kwargs are
        forwarded to PretrainedConfig."""
        # Transformer body dimensions.
        self.vocab_size, self.hidden_size, self.num_attention_heads = vocab_size, hidden_size, num_attention_heads
        self.hidden_act, self.intermediate_size = hidden_act, intermediate_size
        self.hidden_dropout_prob, self.attention_probs_dropout_prob = hidden_dropout_prob, attention_probs_dropout_prob
        self.max_position_embeddings, self.type_vocab_size = max_position_embeddings, type_vocab_size
        self.initializer_range, self.layer_norm_eps = initializer_range, layer_norm_eps
        # Label-space sizes for the pre-training heads.
        self.num_qa_labels, self.num_object_labels, self.num_attr_labels = num_qa_labels, num_object_labels, num_attr_labels
        # Encoder depths (language / cross-modality / relational-vision).
        self.l_layers, self.x_layers, self.r_layers = l_layers, x_layers, r_layers
        # Visual feature dimensions and loss scaling.
        self.visual_feat_dim, self.visual_pos_dim = visual_feat_dim, visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        # Pre-training task switches.
        self.task_matched, self.task_mask_lm = task_matched, task_mask_lm
        self.task_obj_predict, self.task_qa = task_obj_predict, task_qa
        self.visual_obj_loss, self.visual_attr_loss, self.visual_feat_loss = visual_obj_loss, visual_attr_loss, visual_feat_loss
        # Per-encoder layer counts keyed the way the model expects.
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
class PartialPipelineData(unittest.TestCase):
    """Tests for the partial_pipeline_data helper."""

    def test_returns_partial_when_uid_and_email_do_match(self):
        """Matching uid/email keeps the stored partial intact."""
        email = ''
        backend = self._backend({'uid': email})
        backend.strategy.request_data.return_value = {backend.ID_KEY: email}
        extra_key, extra_val = 'foo', 'bar'
        partial = partial_pipeline_data(backend, None, **{extra_key: extra_val})
        self.assertIn(extra_key, partial.kwargs)
        self.assertEqual(partial.kwargs[extra_key], extra_val)
        self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 0)

    def test_clean_pipeline_when_uid_does_not_match(self):
        """A uid mismatch discards the partial and cleans the pipeline."""
        backend = self._backend({'uid': ''})
        backend.strategy.request_data.return_value = {backend.ID_KEY: ''}
        extra_key, extra_val = 'foo', 'bar'
        partial = partial_pipeline_data(backend, None, **{extra_key: extra_val})
        self.assertIsNone(partial)
        self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 1)

    def test_kwargs_included_in_result(self):
        backend = self._backend()
        extra_key, extra_val = 'foo', 'bar'
        partial = partial_pipeline_data(backend, None, **{extra_key: extra_val})
        self.assertIn(extra_key, partial.kwargs)
        self.assertEqual(partial.kwargs[extra_key], extra_val)
        self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 0)

    def test_update_user(self):
        """A freshly passed user overrides the None stored in the session."""
        user = object()
        backend = self._backend(session_kwargs={'user': None})
        partial = partial_pipeline_data(backend, user)
        self.assertIn('user', partial.kwargs)
        self.assertEqual(partial.kwargs['user'], user)
        self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 0)

    def _backend(self, session_kwargs=None):
        """Build a mocked backend + strategy with a prepared partial."""
        backend = Mock()
        backend.ID_KEY = 'email'
        backend.name = 'mock-backend'
        strategy = Mock()
        strategy.request = None
        strategy.request_data.return_value = {}
        strategy.session_get.return_value = object()
        strategy.partial_load.return_value = TestPartial.prepare(backend.name, 0, {'args': [], 'kwargs': (session_kwargs or {})})
        backend.strategy = strategy
        return backend
class NagiosPerfdataCollector(diamond.collector.Collector):
    """Diamond collector that reads Nagios/Icinga perfdata spool files and
    publishes the contained performance counters as metrics.

    Each spool file contains one perfdata record per line as tab-separated
    ``KEY::VALUE`` fields; the perfdata payload itself is parsed with
    ``TOKENIZER_RE``. Files are deleted after processing.
    """

    # Fields required for every record.
    GENERIC_FIELDS = ['DATATYPE', 'HOSTNAME', 'TIMET']
    # Additional fields for host perfdata records.
    HOST_FIELDS = ['HOSTPERFDATA']
    # Additional fields for service perfdata records.
    SERVICE_FIELDS = ['SERVICEDESC', 'SERVICEPERFDATA']
    # Nagios perfdata token: label=value[uom][;warn][;crit][;min][;max]
    TOKENIZER_RE = ("([^\\s]+|'[^']+')=([-.\\d]+)(c|s|ms|us|B|KB|MB|GB|TB|%)?" + '(?:;([-.\\d]+))?(?:;([-.\\d]+))?(?:;([-.\\d]+))?(?:;([-.\\d]+))?')

    def get_default_config_help(self):
        """Describe the collector-specific config options."""
        config_help = super(NagiosPerfdataCollector, self).get_default_config_help()
        config_help.update({'perfdata_dir': 'The directory containing Nagios perfdata files'})
        return config_help

    def get_default_config(self):
        """Return the default configuration for this collector."""
        config = super(NagiosPerfdataCollector, self).get_default_config()
        config.update({'path': 'nagiosperfdata', 'perfdata_dir': '/var/spool/diamond/nagiosperfdata'})
        return config

    def collect(self):
        """Process (and delete) every spool file in the perfdata directory."""
        perfdata_dir = self.config['perfdata_dir']
        try:
            filenames = os.listdir(perfdata_dir)
        except OSError:
            self.log.error("Cannot read directory `{dir}'".format(dir=perfdata_dir))
            return
        for filename in filenames:
            self._process_file(os.path.join(perfdata_dir, filename))

    def _extract_fields(self, line):
        """Split a tab-separated line of ``KEY::VALUE`` tokens into a dict.

        Malformed tokens (without exactly one '::') are silently skipped.
        """
        acc = {}
        for field_token in line.split('\t'):
            kv_tokens = field_token.split('::')
            if len(kv_tokens) == 2:
                key, value = kv_tokens
                acc[key] = value
        return acc

    def _fields_valid(self, d):
        """Return True if *d* carries all fields required by its DATATYPE."""
        if 'DATATYPE' not in d:
            return False
        datatype = d['DATATYPE']
        if datatype == 'HOSTPERFDATA':
            fields = self.GENERIC_FIELDS + self.HOST_FIELDS
        elif datatype == 'SERVICEPERFDATA':
            fields = self.GENERIC_FIELDS + self.SERVICE_FIELDS
        else:
            return False
        return all(field in d for field in fields)

    def _normalize_to_unit(self, value, unit):
        """Convert *value* to base units (seconds / bytes) where the unit is
        recognized; other units pass through unchanged."""
        if unit == 'ms':
            return value / 1000.0
        if unit == 'us':
            return value / 1000000.0
        if unit == 'KB':
            return value * 1024
        if unit == 'MB':
            return value * 1024 * 1024
        if unit == 'GB':
            return value * 1024 * 1024 * 1024
        if unit == 'TB':
            return value * 1024 * 1024 * 1024 * 1024
        return value

    def _parse_perfdata(self, s):
        """Parse a perfdata payload into a list of ``(name, value)`` pairs."""
        metrics = []
        counters = re.findall(self.TOKENIZER_RE, s)
        # re.findall always returns a list (the old `is None` check was
        # dead code); warn when a non-empty payload matched nothing.
        if not counters:
            if s.strip():
                self.log.warning('Failed to parse performance data: {s}'.format(s=s))
            return metrics
        # warn/crit/min/max thresholds are parsed but not published.
        for key, value, uom, _warn, _crit, _min, _max in counters:
            try:
                metrics.append((key, self._normalize_to_unit(float(value), uom)))
            except ValueError:
                self.log.warning("Couldn't convert value '{value}' to float".format(value=value))
        return metrics

    def _process_file(self, path):
        """Process every line of a spool file, then delete the file."""
        try:
            # `with` guarantees the handle is closed even if a line fails
            # (the original leaked the file descriptor on errors).
            with open(path) as f:
                for line in f:
                    self._process_line(line)
            os.remove(path)
        except IOError as ex:
            self.log.error("Could not open file `{path}': {error}".format(path=path, error=ex.strerror))

    def _process_line(self, line):
        """Publish all metrics found on a single perfdata line."""
        fields = self._extract_fields(line)
        if not self._fields_valid(fields):
            self.log.warning('Missing required fields for line: {line}'.format(line=line))
            # Bug fix: previously execution fell through and raised KeyError
            # on fields['HOSTNAME'] below for invalid lines.
            return
        metric_path_base = []
        graphite_prefix = fields.get('GRAPHITEPREFIX')
        if graphite_prefix:
            metric_path_base.append(graphite_prefix)
        metric_path_base.append(fields['HOSTNAME'].lower())
        datatype = fields['DATATYPE']
        if datatype == 'HOSTPERFDATA':
            metric_path_base.append('host')
        elif datatype == 'SERVICEPERFDATA':
            # Prefer an explicit graphite postfix over the service name.
            graphite_postfix = fields.get('GRAPHITEPOSTFIX')
            if graphite_postfix:
                metric_path_base.append(graphite_postfix)
            else:
                metric_path_base.append(fields.get('SERVICEDESC'))
        perfdata = fields[datatype]
        for counter, value in self._parse_perfdata(perfdata):
            metric_path = [self._sanitize(x) for x in metric_path_base + [counter]]
            self.publish('.'.join(metric_path), value)

    def _sanitize(self, s):
        """Replace anything that is not alphanumeric, '_' or '-' with '_'."""
        return re.sub('[^\\w-]', '_', s)
class ERB(nn.Module):
    """Edge Refinement Block: a 1x1 projection plus a conv-bn-relu-conv
    residual branch added back onto the projection, with an optional final
    ReLU."""

    def __init__(self, in_channels, out_channels):
        super(ERB, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU()
        self.bn = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x, relu=True):
        """Project x to out_channels and add the refined residual; apply a
        final ReLU unless ``relu`` is False."""
        projected = self.conv1(x)
        residual = self.conv3(self.relu(self.bn(self.conv2(projected))))
        merged = projected + residual
        return self.relu(merged) if relu else merged
def _nominal_center_frequency(center, fraction):
    """Return the nominal (preferred) centre frequency corresponding to an
    exact fractional-octave-band centre frequency.

    Follows the nominal-frequency rules of IEC 61672-1:2013: table lookup
    for octave (b=1) and third-octave (b=3) bands within the standard's
    tabulated range, recursion with decade/octave scaling outside it, and
    significant-digit rounding for other band fractions.

    Args:
        center: exact centre frequency in hertz.
        fraction: bandwidth designator b (1 = octave, 3 = third-octave, ...).

    Raises:
        NotImplementedError: for b > 24.
        ValueError: for other unsupported values of b.
    """

    def _roundn(x, n):
        # Round x to n significant digits; the decimal position is derived
        # from the order of magnitude of x.
        return round(x, ((- int(np.floor((np.sign(x) * np.log10(abs(x)))))) + n))
    b = fraction
    x = center
    if (b == 1):
        # Octave bands: direct table lookup inside the tabulated range,
        # otherwise recurse from the neighbouring band and scale by 2.
        n = index_of_frequency(x, b)
        if ((- 6) <= n < 5):
            return acoustics.standards.iec_61672_1_2013.NOMINAL_OCTAVE_CENTER_FREQUENCIES[(n + 6)]
        elif (n >= 5):
            return (2.0 * _nominal_center_frequency(exact_center_frequency((n - 1), b), b))
        else:
            return ((1.0 / 2.0) * _nominal_center_frequency(exact_center_frequency((n + 1), b), b))
    elif (b == 2):
        # Half-octave bands: round to 2 significant digits.
        return _roundn(x, 2)
    elif (b == 3):
        # Third-octave bands: table lookup or recursion scaled by a decade
        # (10 third-octave bands per decade).
        n = index_of_frequency(x, b)
        if ((- 20) <= n < 14):
            return acoustics.standards.iec_61672_1_2013.NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES[(n + 20)]
        elif (n >= 14):
            return (10.0 * _nominal_center_frequency(exact_center_frequency((n - 10), b), b))
        else:
            return ((1.0 / 10.0) * _nominal_center_frequency(exact_center_frequency((n + 10), b), b))
    elif (4 <= b <= 24):
        # Other fractions: precision depends on the most significant digit.
        msd = (x // (10.0 ** np.floor(np.log10(x))))
        if (msd < 5):
            return _roundn(x, 2)
        else:
            return _roundn(x, 1)
    elif (b > 24):
        raise NotImplementedError('b > 24 is not implemented')
    else:
        raise ValueError('Wrong value for b')
# NOTE(review): the decorator name was truncated by extraction; restored to
# the lm-eval harness's `positional_deprecated` -- confirm against upstream.
@positional_deprecated
def simple_evaluate(model, load='', args='', tasks=None, num_fewshot=0, batch_size=None, device=None, no_cache=False, limit=None, bootstrap_iters=100000, description_dict=None, check_integrity=False, decontamination_ngrams_path=None):
    """Run the lm-eval harness on ``model`` over ``tasks`` and return the
    results dict.

    Args:
        model: model name; routed to the 'opt', 'bloom' or 'llama' adapter
            by substring match.
        load: optional checkpoint path loaded into the model via torch.
        tasks: non-empty list of task names (required).
        num_fewshot / batch_size / device / limit / bootstrap_iters /
        description_dict / decontamination_ngrams_path: forwarded to
            ``evaluate``.
        check_integrity: if True, run the task test suite first.

    Returns:
        dict of evaluation results with a 'config' entry describing the run.
    """
    random.seed(1234)
    np.random.seed(1234)
    # Avoid the mutable-default-argument pitfall (was `tasks=[]`).
    if tasks is None:
        tasks = []
    assert tasks != [], 'No tasks specified'
    if 'opt' in model:
        lm = lm_eval.models.get_model('opt').create_from_arg_string('', {'model': model, 'batch_size': batch_size, 'device': device, 'args': args})
    elif 'bloom' in model:
        lm = lm_eval.models.get_model('bloom').create_from_arg_string('', {'model': model, 'batch_size': batch_size, 'device': device, 'args': args})
    else:
        # Default adapter for all other model names.
        lm = lm_eval.models.get_model('llama').create_from_arg_string('', {'model': model, 'batch_size': batch_size, 'device': device, 'args': args})
    if load:
        import torch
        print(f'Loading {model} checkpoint {load} ....')
        lm.model.load_state_dict(torch.load(load))
        print('Done.')
    task_dict = lm_eval.tasks.get_task_dict(tasks)
    print('get_task_dict')
    if check_integrity:
        run_task_tests(task_list=tasks)
    results = evaluate(lm=lm, task_dict=task_dict, num_fewshot=num_fewshot, limit=limit, bootstrap_iters=bootstrap_iters, description_dict=description_dict, decontamination_ngrams_path=decontamination_ngrams_path)
    # Record the run configuration alongside the metrics.
    results['config'] = {'model': (model + load), 'batch_size': batch_size, 'no_cache': no_cache, 'bootstrap_iters': bootstrap_iters}
    return results
class TestPredictiveFunctions():
    """Tests for sensitivity/specificity estimators and PPV/NPV converters.

    NOTE(review): the SAS reference values below (``sas_ci = (0., 0.)`` and
    ``sas_se = 0.``) look like placeholders or truncated literals -- confirm
    the real reference numbers against the original test data.
    """

    def test_correct_sensitivity(self):
        # 25 true positives of 50 positives -> point estimate 0.5.
        r = sensitivity(25, 50)
        assert (r[0] == 0.5)

    def test_sensitivity_match_sas_ci(self):
        # Wald confidence interval should match the SAS reference.
        sas_ci = (0., 0.)
        r = sensitivity(25, 50, confint='wald')
        npt.assert_allclose(r[1:3], sas_ci)

    def test_sensitivity_match_sas_se(self):
        # Standard error should match the SAS reference.
        sas_se = 0.
        r = sensitivity(25, 50, confint='wald')
        npt.assert_allclose(r[3], sas_se)

    def test_correct_specificity(self):
        # 25 true negatives of 50 negatives -> point estimate 0.5.
        r = specificity(25, 50)
        assert (r[0] == 0.5)

    def test_specificity_match_sas_ci(self):
        sas_ci = (0., 0.)
        r = specificity(25, 50, confint='wald')
        npt.assert_allclose(r[1:3], sas_ci)

    def test_specificity_match_sas_se(self):
        sas_se = 0.
        r = specificity(25, 50, confint='wald')
        npt.assert_allclose(r[3], sas_se)

    def test_ppv_conversion(self):
        # PPV via Bayes' theorem from sensitivity, specificity, prevalence.
        sens = 0.8
        spec = 0.8
        prev = 0.1
        ppv_formula = ((sens * prev) / ((sens * prev) + ((1 - spec) * (1 - prev))))
        ppv = ppv_converter(sens, spec, prev)
        npt.assert_allclose(ppv, ppv_formula)

    def test_npv_conversion(self):
        # NPV via Bayes' theorem (0.9 == 1 - prev here).
        sens = 0.8
        spec = 0.8
        prev = 0.1
        npv_formula = ((spec * 0.9) / ((spec * 0.9) + ((1 - sens) * prev)))
        npv = npv_converter(sens, spec, prev)
        npt.assert_allclose(npv, npv_formula)
class HandlerStates(int, enum.Enum):
    """Conversation states for the handler flow; END mirrors
    ConversationHandler.END so the enum can be returned directly."""

    END = ConversationHandler.END
    STATE_1 = 1
    STATE_2 = 2
    STATE_3 = 3
    STATE_4 = 4

    def next(self):
        """Return the state following this one in declaration order,
        wrapping around to the first member after the last."""
        members = list(type(self))
        return members[(members.index(self) + 1) % len(members)]
class TestEventletSemaphore(test_lock.TestSemaphore):
    """Re-run the generic semaphore tests against the eventlet handler."""

    def setUp(self):
        # The whole suite is meaningless without eventlet support.
        if (not EVENTLET_HANDLER_AVAILABLE):
            pytest.skip('eventlet handler not available.')
        super(TestEventletSemaphore, self).setUp()

    # NOTE(review): the three factories below deliberately take no ``self``
    # -- presumably the base class uses them as plain factory callables
    # rather than bound methods; confirm against test_lock.TestSemaphore.
    def make_condition():
        return threading.Condition()

    def make_event():
        return threading.Event()

    def make_thread(*args, **kwargs):
        return threading.Thread(*args, **kwargs)

    def _makeOne(self, *args):
        # Handler under test: the sequential eventlet handler.
        return eventlet_handler.SequentialEventletHandler(*args)

    def _get_client(self, **kwargs):
        # Build a KazooClient wired to the eventlet handler and remember it
        # so teardown can close it.
        kwargs['handler'] = self._makeOne()
        c = KazooClient(self.hosts, **kwargs)
        try:
            self._clients.append(c)
        except AttributeError:
            # NOTE(review): fallback assigns ``_client`` (singular) while the
            # happy path appends to ``_clients`` -- looks inconsistent;
            # confirm against the base class's attribute name.
            self._client = [c]
        return c
class CompareAction(actions.BaseAction):
    """Action comparing a source directory with a backup repository at a
    given time, using metadata, full content or file hashes."""

    name = 'compare'
    security = 'validate'
    parent_parsers = [actions.SELECTION_PARSER]

    # NOTE(review): decorator restored -- the `cls` signature indicates a
    # classmethod whose decorator was lost in extraction.
    @classmethod
    def add_action_subparser(cls, sub_handler):
        """Register the ``compare`` sub-command and its CLI options."""
        subparser = super().add_action_subparser(sub_handler)
        subparser.add_argument('--method', choices=['meta', 'full', 'hash'], default='meta', help='use metadata, complete file or hash to compare directories')
        subparser.add_argument('--at', metavar='TIME', default='now', help="compare with the backup at the given time, default is 'now'")
        subparser.add_argument('locations', metavar='[[]SERVER::]PATH', nargs=2, help='locations of SOURCE_DIR and backup REPOSITORY to compare (same order as for a backup)')
        return subparser

    def connect(self):
        """Create the directory and repository objects once connected."""
        conn_value = super().connect()
        if conn_value.is_connection_ok():
            self.dir = directory.ReadDir(self.connected_locations[0], self.values.force)
            self.repo = repository.Repo(self.connected_locations[1], self.values.force, must_be_writable=False, must_exist=True, can_be_sub_path=True)
        return conn_value

    def check(self):
        """Accumulate check results from action, directory and repository."""
        ret_code = super().check()
        ret_code |= self.dir.check()
        ret_code |= self.repo.check()
        return ret_code

    def setup(self):
        """Prepare directory/repository, selections, and resolve the
        comparison time; returns a Globals return code."""
        ret_code = super().setup()
        if ret_code & Globals.RET_CODE_ERR:
            return ret_code
        ret_code = self.dir.setup()
        if ret_code & Globals.RET_CODE_ERR:
            return ret_code
        ret_code = self.repo.setup(self.dir)
        if ret_code & Globals.RET_CODE_ERR:
            return ret_code
        select_opts, select_data = selection.get_prepared_selections(self.values.selections)
        self.dir.set_select(select_opts, select_data)
        self.action_time = self.repo.get_parsed_time(self.values.at)
        if self.action_time is None:
            # Bug fix: an unparseable time must be reported as an error.
            # The previous `ret_code & RET_CODE_ERR` always returned 0
            # (success) here, since ERR was excluded by the checks above.
            return ret_code | Globals.RET_CODE_ERR
        return ret_code

    def run(self):
        """Execute the comparison and print the resulting report."""
        ret_code = super().run()
        if ret_code & Globals.RET_CODE_ERR:
            return ret_code
        # Dispatch on the comparison method chosen on the command line.
        compare_funcs = {'meta': self._compare_meta, 'hash': self._compare_hash, 'full': self._compare_full}
        reports_iter = compare_funcs[self.values.method](self.action_time)
        ret_code |= self._print_reports(reports_iter, self.values.parsable_output)
        self.repo.finish_loop()
        return ret_code

    def _compare_meta(self, compare_time):
        """Compare using file metadata only (fastest)."""
        repo_iter = self.repo.init_and_get_loop(compare_time)
        return self.dir.compare_meta(repo_iter)

    def _compare_hash(self, compare_time):
        """Compare using the hashes stored in the repository."""
        repo_iter = self.repo.init_and_get_loop(compare_time)
        return self.dir.compare_hash(repo_iter)

    def _compare_full(self, compare_time):
        """Compare complete file contents (slowest, most thorough)."""
        src_iter = self.dir.get_select()
        repo_iter = self.repo.init_and_get_loop(compare_time, src_iter)
        return self.dir.compare_full(repo_iter)

    def _print_reports(self, report_iter, parsable=False):
        """Print one line (or one YAML record) per differing file.

        Returns RET_CODE_OK when no differences were found, else
        RET_CODE_FILE_WARN.
        """
        assert not Globals.server, "This function shouldn't run as server."
        changed_files_found = 0
        reason_verify_list = []
        for report in report_iter:
            changed_files_found += 1
            indexpath = (report.index and b'/'.join(report.index)) or b'.'
            indexpath = indexpath.decode(errors='replace')
            if parsable:
                reason_verify_list.append({'reason': report.reason, 'path': indexpath})
            else:
                print('{rr}: {ip}'.format(rr=report.reason, ip=indexpath))
        if parsable:
            print(yaml.safe_dump(reason_verify_list, explicit_start=True, explicit_end=True))
        if not changed_files_found:
            log.Log('No changes found. Directory matches backup data', log.NOTE)
            return Globals.RET_CODE_OK
        log.Log('Directory has {fd} file differences to backup'.format(fd=changed_files_found), log.WARNING)
        return Globals.RET_CODE_FILE_WARN
def eval(epoch, trainer, dataset_name, testset_loader, test_batch_generator):
    """Evaluate ``trainer.model`` on one test set for one epoch.

    NOTE(review): shadows the builtin ``eval``; relies on module-level
    globals ``best_dict`` and ``cfg`` defined elsewhere in this file, and
    requires CUDA. The aggregation after the loop assumes torch.distributed
    is initialized even though the loop handles the non-distributed case --
    confirm the non-distributed path is actually exercised.
    """
    trainer.model.eval()
    eval_result = {}  # metric name -> accumulated per-sample values
    cur_sample_idx = 0
    for (itr, (inputs, targets, meta_info)) in enumerate(tqdm(test_batch_generator)):
        # Move every input/target tensor to the GPU.
        inputs = {k: v.cuda() for (k, v) in inputs.items()}
        targets = {k: v.cuda() for (k, v) in targets.items()}
        with torch.no_grad():
            out = trainer.model(inputs, targets, meta_info, 'test')
        out = {k: v.cpu().numpy() for (k, v) in out.items()}
        key = list(out.keys())[0]
        batch_size = out[key].shape[0]
        # Split the batched output dict into one dict per sample.
        out = [{k: v[bid] for (k, v) in out.items()} for bid in range(batch_size)]
        if (not dist.is_initialized()):
            # Single-process: evaluate by the running sample index.
            cur_eval_result = testset_loader.evaluate(out, cur_sample_idx)
            for (k, v) in cur_eval_result.items():
                if (k in eval_result):
                    eval_result[k] += v
                else:
                    eval_result[k] = v
            cur_sample_idx += len(out)
        else:
            # Distributed: evaluate by the dataset indices of this batch.
            index_list = meta_info['idx'].flatten().long().tolist()
            cur_eval_result = testset_loader.random_idx_eval(out, index_list)
            for (k, v) in cur_eval_result.items():
                if (k in eval_result):
                    eval_result[k] += v
                else:
                    eval_result[k] = v
    # Rank-local means as 1-element CUDA tensors so they can be all-gathered.
    mpjpe = torch.tensor(np.mean(eval_result['mpjpe'])).float().cuda().flatten()
    pa_mpjpe = torch.tensor(np.mean(eval_result['pa_mpjpe'])).float().cuda().flatten()
    mpvpe = torch.tensor(np.mean(eval_result['mpvpe'])).float().cuda().flatten()
    samples = torch.tensor(len(eval_result['mpjpe'])).float().cuda().flatten()
    dist.barrier()
    gather_list = [torch.zeros_like(mpjpe) for _ in range(dist.get_world_size())]
    dist.all_gather(gather_list, mpjpe)
    mpjpe_pre_rank = torch.stack(gather_list).flatten()
    dist.all_gather(gather_list, pa_mpjpe)
    pa_mpjpe_pre_rank = torch.stack(gather_list).flatten()
    dist.all_gather(gather_list, mpvpe)
    mpvpe_pre_rank = torch.stack(gather_list).flatten()
    dist.all_gather(gather_list, samples)
    samples_pre_rank = torch.stack(gather_list).flatten()
    # Sample-count-weighted average of the per-rank means.
    all_samples = samples_pre_rank.sum()
    all_mpjpe = (mpjpe_pre_rank * samples_pre_rank)
    all_pa_mpjpe = (pa_mpjpe_pre_rank * samples_pre_rank)
    all_mpvpe = (mpvpe_pre_rank * samples_pre_rank)
    mean_mpjpe = (all_mpjpe.sum() / all_samples)
    mean_pa_mpjpe = (all_pa_mpjpe.sum() / all_samples)
    mean_mpvpe = (all_mpvpe.sum() / all_samples)
    result_dict = {'mpjpe': mean_mpjpe.item(), 'pa_mpjpe': mean_pa_mpjpe.item(), 'mpvpe': mean_mpvpe.item()}
    if (dist.get_rank() == 0):
        # Rank 0 logs the metrics and checkpoints the best model by MPJPE.
        print('{} {}'.format(dataset_name, epoch))
        for (k, v) in result_dict.items():
            trainer.writer.add_scalar(f'test/epoch/{dataset_name}_{k}', v, epoch)
            print(f'{k}: {v:.2f}')
        message = [f'{k}: {v:.2f}' for (k, v) in result_dict.items()]
        trainer.logger.info(('{} '.format(dataset_name) + ' '.join(message)))
        if (result_dict['mpjpe'] < best_dict[dataset_name]['best_MPJPE']):
            best_dict[dataset_name]['best_MPJPE'] = result_dict['mpjpe']
            trainer.logger.info('best model: {}, best mpjpe: {:.2f}'.format(epoch, result_dict['mpjpe']))
            torch.save(trainer.model.state_dict(), os.path.join(cfg.model_dir, '{}_best_ckpt.pth.tar'.format(dataset_name)))
    dist.barrier()
class TestMultiSceneGrouping:
    """Test dataset grouping across scenes in a MultiScene.

    NOTE(review): the bare ``()`` markers in the extracted source were most
    likely ``@pytest.fixture`` decorators; restored as such.
    """

    @pytest.fixture
    def scene1(self):
        """First scene holding datasets ds1/ds2."""
        from satpy import Scene
        scene = Scene()
        dsid1 = make_dataid(name='ds1', resolution=123, wavelength=(1, 2, 3), polarization='H')
        scene[dsid1] = _create_test_dataset(name='ds1')
        dsid2 = make_dataid(name='ds2', resolution=456, wavelength=(4, 5, 6), polarization='V')
        scene[dsid2] = _create_test_dataset(name='ds2')
        return scene

    @pytest.fixture
    def scene2(self):
        """Second scene holding datasets ds3/ds4 with slightly shifted ids."""
        from satpy import Scene
        scene = Scene()
        dsid1 = make_dataid(name='ds3', resolution=123.1, wavelength=(1.1, 2.1, 3.1), polarization='H')
        scene[dsid1] = _create_test_dataset(name='ds3')
        dsid2 = make_dataid(name='ds4', resolution=456.1, wavelength=(4.1, 5.1, 6.1), polarization='V')
        scene[dsid2] = _create_test_dataset(name='ds4')
        return scene

    @pytest.fixture
    def multi_scene(self, scene1, scene2):
        """MultiScene built from both test scenes."""
        from satpy import MultiScene
        return MultiScene([scene1, scene2])

    @pytest.fixture
    def groups(self):
        """Group queries mapping to member dataset names in each scene."""
        return {DataQuery(name='odd'): ['ds1', 'ds3'], DataQuery(name='even'): ['ds2', 'ds4']}

    def test_multi_scene_grouping(self, multi_scene, groups, scene1):
        """Grouping creates shared ids without mutating the input scenes."""
        multi_scene.group(groups)
        shared_ids_exp = {make_dataid(name='odd'), make_dataid(name='even')}
        assert multi_scene.shared_dataset_ids == shared_ids_exp
        assert DataQuery(name='odd') not in scene1
        xr.testing.assert_allclose(multi_scene.scenes[0]['ds1'], scene1['ds1'])

    def test_fails_to_add_multiple_datasets_from_the_same_scene_to_a_group(self, multi_scene):
        groups = {DataQuery(name='mygroup'): ['ds1', 'ds2']}
        multi_scene.group(groups)
        with pytest.raises(ValueError, match='Cannot add multiple datasets from a scene to the same group'):
            next(multi_scene.scenes)
class CmdSetHandler(object):
    """Tracks the stack of command sets (cmdsets) defined on a game object
    and merges them into the single ``current`` cmdset used for lookups."""

    def __init__(self, obj, init_true=True):
        """
        Args:
            obj: the game object this handler sits on.
            init_true: if True, build the merged cmdset immediately by
                loading the object's persistent cmdset storage.
        """
        self.obj = obj
        # Filled in by update(): key of / the merged current cmdset.
        self.key = None
        self.current = None
        # The stack always starts with the empty cmdset at the bottom.
        self.cmdset_stack = [_EmptyCmdSet(cmdsetobj=self.obj)]
        self.mergetype_stack = ['Union']
        self.permanent_paths = ['']
        if init_true:
            self.update(init_mode=True)
def __str__(self):
string = ''
mergelist = []
if (len(self.cmdset_stack) > 1):
for (snum, cmdset) in enumerate(self.cmdset_stack):
mergetype = self.mergetype_stack[snum]
permstring = 'non-perm'
if cmdset.permanent:
permstring = 'perm'
if (mergetype != cmdset.mergetype):
mergetype = ('%s^' % mergetype)
string += ('\n %i: <%s (%s, prio %i, %s)>: %s' % (snum, cmdset.key, mergetype, cmdset.priority, permstring, cmdset))
mergelist.append(str(snum))
string += '\n'
mergetype = self.mergetype_stack[(- 1)]
if (mergetype != self.current.mergetype):
merged_on = self.cmdset_stack[(- 2)].key
mergetype = _("custom {mergetype} on cmdset '{cmdset}'")
mergetype = mergetype.format(mergetype=mergetype, cmdset=merged_on)
if mergelist:
tmpstring = _(' <Merged {mergelist} {mergetype}, prio {prio}>: {current}')
string += tmpstring.format(mergelist='+'.join(mergelist), mergetype=mergetype, prio=self.current.priority, current=self.current)
else:
permstring = 'non-perm'
if self.current.permanent:
permstring = 'perm'
tmpstring = _(' <{key} ({mergetype}, prio {prio}, {permstring})>:\n {keylist}')
string += tmpstring.format(key=self.current.key, mergetype=mergetype, prio=self.current.priority, permstring=permstring, keylist=', '.join((cmd.key for cmd in sorted(self.current, key=(lambda o: o.key)))))
return string.strip()
def _import_cmdset(self, cmdset_path, emit_to_obj=None):
if (not emit_to_obj):
emit_to_obj = self.obj
return import_cmdset(cmdset_path, self.obj, emit_to_obj)
def update(self, init_mode=False):
if init_mode:
storage = self.obj.cmdset_storage
if storage:
self.cmdset_stack = []
for (pos, path) in enumerate(storage):
if ((pos == 0) and (not path)):
self.cmdset_stack = [_EmptyCmdSet(cmdsetobj=self.obj)]
elif path:
cmdset = self._import_cmdset(path)
if cmdset:
if (cmdset.key == '_CMDSET_ERROR'):
fallback_path = _CMDSET_FALLBACKS.get(path, None)
if fallback_path:
err = _ERROR_CMDSET_FALLBACK.format(path=path, fallback_path=fallback_path)
logger.log_err(err)
if _IN_GAME_ERRORS:
self.obj.msg(err)
cmdset = self._import_cmdset(fallback_path)
if (not cmdset):
err = _ERROR_CMDSET_NO_FALLBACK.format(fallback_path=fallback_path)
logger.log_err(err)
if _IN_GAME_ERRORS:
self.obj.msg(err)
continue
cmdset.permanent = (cmdset.key != '_CMDSET_ERROR')
self.cmdset_stack.append(cmdset)
new_current = None
self.mergetype_stack = []
for cmdset in self.cmdset_stack:
try:
new_current = (cmdset + new_current)
except TypeError:
continue
self.mergetype_stack.append(new_current.actual_mergetype)
self.current = new_current
def add(self, cmdset, emit_to_obj=None, permanent=False, default_cmdset=False):
if (not (isinstance(cmdset, str) or utils.inherits_from(cmdset, CmdSet))):
string = _('Only CmdSets can be added to the cmdsethandler!')
raise Exception(string)
if callable(cmdset):
cmdset = cmdset(self.obj)
elif isinstance(cmdset, str):
cmdset = self._import_cmdset(cmdset)
if (cmdset and (cmdset.key != '_CMDSET_ERROR')):
cmdset.permanent = permanent
if (permanent and (cmdset.key != '_CMDSET_ERROR')):
storage = (self.obj.cmdset_storage or [''])
if default_cmdset:
storage[0] = cmdset.path
else:
storage.append(cmdset.path)
self.obj.cmdset_storage = storage
if default_cmdset:
self.cmdset_stack[0] = cmdset
else:
self.cmdset_stack.append(cmdset)
self.update()
def add_default(self, cmdset, emit_to_obj=None, permanent=True):
self.add(cmdset, emit_to_obj=emit_to_obj, permanent=permanent, default_cmdset=True)
def remove(self, cmdset=None, default_cmdset=False):
if default_cmdset:
if self.cmdset_stack:
cmdset = self.cmdset_stack[0]
if cmdset.permanent:
storage = (self.obj.cmdset_storage or [''])
storage[0] = ''
self.obj.cmdset_storage = storage
self.cmdset_stack[0] = _EmptyCmdSet(cmdsetobj=self.obj)
else:
self.cmdset_stack = [_EmptyCmdSet(cmdsetobj=self.obj)]
self.update()
return
if (len(self.cmdset_stack) < 2):
return
if (not cmdset):
cmdset = self.cmdset_stack.pop()
if cmdset.permanent:
storage = self.obj.cmdset_storage
storage.pop()
self.obj.cmdset_storage = storage
else:
if (callable(cmdset) and hasattr(cmdset, 'path')):
delcmdsets = [cset for cset in self.cmdset_stack[1:] if (cset.path == cmdset.path)]
else:
delcmdsets = [cset for cset in self.cmdset_stack[1:] if ((cset.path == cmdset) or (cset.key == cmdset))]
storage = []
if any((cset.permanent for cset in delcmdsets)):
storage = self.obj.cmdset_storage
updated = False
for cset in delcmdsets:
if cset.permanent:
try:
storage.remove(cset.path)
updated = True
except ValueError:
pass
if updated:
self.obj.cmdset_storage = storage
for cset in delcmdsets:
try:
self.cmdset_stack.remove(cset)
except ValueError:
pass
self.update()
delete = remove
def remove_default(self):
self.remove(default_cmdset=True)
delete_default = remove_default
def get(self):
return self.cmdset_stack
all = get
def clear(self):
self.cmdset_stack = [self.cmdset_stack[0]]
storage = self.obj.cmdset_storage
if storage:
storage = storage[0]
self.obj.cmdset_storage = storage
self.update()
def has(self, cmdset, must_be_default=False):
if (callable(cmdset) and hasattr(cmdset, 'path')):
if must_be_default:
return (self.cmdset_stack and (self.cmdset_stack[0].path == cmdset.path))
else:
return any([cset for cset in self.cmdset_stack if (cset.path == cmdset.path)])
elif must_be_default:
return (self.cmdset_stack and ((self.cmdset_stack[0].key == cmdset) or (self.cmdset_stack[0].path == cmdset)))
else:
return any([cset for cset in self.cmdset_stack if ((cset.path == cmdset) or (cset.key == cmdset))])
has_cmdset = has
def reset(self):
new_cmdset_stack = []
for cmdset in self.cmdset_stack:
if (cmdset.key == '_EMPTY_CMDSET'):
new_cmdset_stack.append(cmdset)
else:
new_cmdset_stack.append(self._import_cmdset(cmdset.path))
self.cmdset_stack = new_cmdset_stack
self.update() |
def CreateDataLoader(opt):
    """Build the train, test, and evaluation dataloaders described by *opt*."""
    (train_dataset, test_dataset) = CreateDataset(opt)

    def _make_loader(ds, batch, shuffle, drop_last):
        # The sampler handles (possibly distributed) shuffling, so the
        # DataLoader itself never receives a shuffle flag.
        sampler = data_sampler(ds, shuffle=shuffle, distributed=opt.distributed)
        return torch.utils.data.DataLoader(ds, batch_size=batch, sampler=sampler, drop_last=drop_last)

    train_dl = _make_loader(train_dataset, opt.batch_size, True, True)
    test_dl = _make_loader(test_dataset, opt.batch_size, False, False)
    # Evaluation uses half the batch size (at least 1) to fit extra buffers.
    eval_batch = max(int(opt.batch_size // 2), 1)
    test_dl_for_eval = _make_loader(test_dataset, eval_batch, False, False)
    return (train_dl, test_dl, test_dl_for_eval)
class Flatc(iw.CustomCommand):
    """Custom build command compiling a .fbs schema to a C++ header via flatc."""

    def __init__(self, path, unit):
        # *unit* is part of the CustomCommand interface but unused here.
        self._path = path
        self._incl_dirs = ['$S', '$B']

    def descr(self):
        """Short progress-line description: tag, file, color."""
        return ('FL', self._path, 'light-green')

    def tools(self):
        return ['contrib/tools/flatc']

    def input(self):
        return common.make_tuples([self._path, '$S/build/scripts/stdout2stderr.py'])

    def output(self):
        # <builddir>/<schema-without-ext>.fbs.h
        stripped = common.stripext(self._path)
        return common.make_tuples([(common.tobuilddir(stripped) + '.fbs.h')])

    def run(self, binary):
        return self.do_run(binary, self._path)

    def do_run(self, binary, path):
        """Invoke flatc (via stdout2stderr wrapper) on *path*."""
        # Expand each include dir into a '-I <resolved>' argument pair.
        include_args = []
        for incl_dir in self._incl_dirs:
            include_args.extend(['-I', self.resolve_path(incl_dir)])
        output_dir = os.path.dirname(self.resolve_path(common.get(self.output, 0)))
        cmd = (common.get_interpreter_path() + ['$S/build/scripts/stdout2stderr.py', binary, '--cpp'] + include_args + ['-o', output_dir, path])
        self.call(cmd)
def get_dataloader(dataset='coco', img_size=128):
    """Build a batch-size-1 validation dataloader.

    Args:
        dataset: Which dataset to load, 'coco' or 'vg'.
        img_size: Square image side length for COCO; VG is fixed at 128.

    Returns:
        A torch.utils.data.DataLoader over the selected validation set.

    Raises:
        ValueError: If *dataset* is not a supported name. (Previously an
            unknown name fell through with `dataset` still a string and
            crashed inside DataLoader with a confusing error.)
    """
    if (dataset == 'coco'):
        dataset = CocoSceneGraphDataset(image_dir='./datasets/coco/images/val2017/', instances_json='./datasets/coco/annotations/instances_val2017.json', stuff_json='./datasets/coco/annotations/stuff_val2017.json', stuff_only=True, image_size=(img_size, img_size), left_right_flip=False)
    elif (dataset == 'vg'):
        with open('./datasets/vg/vocab.json', 'r') as read_file:
            vocab = json.load(read_file)
        # NOTE: VG size is fixed at 128x128 here; img_size only affects COCO.
        dataset = VgSceneGraphDataset(vocab=vocab, h5_path='./datasets/vg/val.h5', image_dir='./datasets/vg/images/', image_size=(128, 128), left_right_flip=False, max_objects=30)
    else:
        raise ValueError("unsupported dataset: {!r} (expected 'coco' or 'vg')".format(dataset))
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, drop_last=True, shuffle=False, num_workers=1)
    return dataloader
def test_no_matches(keyhint, config_stub):
    """A key prefix matching no binding shows no text and stays hidden."""
    config_stub.val.bindings.default = {}
    config_stub.val.bindings.commands = {
        'normal': {'aa': 'message-info cmd-aa', 'ab': 'message-info cmd-ab'},
    }
    keyhint.update_keyhint(usertypes.KeyMode.normal, 'z')
    assert not keyhint.text()
    assert not keyhint.isVisible()
def normalize_path(base, filename):
    """Join *filename* onto *base*, rejecting paths that escape *base*.

    Returns the normalized absolute path, or raises PathTraversalException
    when the result would land outside the base directory.
    """
    root = os.path.abspath(base)
    candidate = os.path.normpath(os.path.join(root, filename))
    # os.path.join(root, '') guarantees a trailing separator, so a sibling
    # like 'rootX/...' cannot masquerade as being inside 'root'.
    prefix = os.path.join(root, '')
    if not candidate.startswith(prefix):
        raise PathTraversalException('Path Traversal detected')
    return candidate
class TestReceiver():
    """Exported / intent-filter checks for receivers parsed from SAMPLE_PATH_13667.

    Bug fix: the test methods previously declared their first parameter as
    `SAMPLE_PATH_13667`, so pytest bound the test *instance* (self) to that
    name and `getReceivers` received the TestReceiver object instead of the
    module-level sample path. Each method now takes a proper `self` and
    reads the module-level SAMPLE_PATH_13667 constant directly.
    """

    def testHasNoIntentFilter(self):
        # Receiver at index 2 declares no <intent-filter>.
        receiver = getReceivers(SAMPLE_PATH_13667)[2]
        assert (receiver.hasIntentFilter() is False)

    def testHasIntentFilter(self):
        # Receiver at index 0 declares an <intent-filter>.
        receiver = getReceivers(SAMPLE_PATH_13667)[0]
        assert (receiver.hasIntentFilter() is True)

    def testIsNotExported(self):
        receiver = getReceivers(SAMPLE_PATH_13667)[2]
        assert (receiver.isExported() is False)

    def testIsExported(self):
        receiver = getReceivers(SAMPLE_PATH_13667)[0]
        assert (receiver.isExported() is True)
class HAN(nn.Module):
    """Holistic-attention style super-resolution network.

    A head conv, `n_resgroups` residual groups plus a trailing conv in the
    body, layer attention (LAM) across body stages, channel-spatial
    attention (CSAM) on the last stage, and an upsampling tail, with
    dataset mean shift applied at input and output.
    """

    def __init__(self, args, conv=common.default_conv):
        super(HAN, self).__init__()
        n_resgroups = args.n_resgroups
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        reduction = args.reduction
        # Only the first configured scale factor is used.
        scale = args.scale[0]
        act = nn.ReLU(True)
        # Dataset RGB statistics for the mean-shift layers.
        rgb_mean = (0.4488, 0.4371, 0.404)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        modules_head = [conv(args.n_colors, n_feats, kernel_size)]
        modules_body = [ResidualGroup(conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) for _ in range(n_resgroups)]
        modules_body.append(conv(n_feats, n_feats, kernel_size))
        modules_tail = [common.Upsampler(conv, scale, n_feats, act=False), conv(n_feats, args.n_colors, kernel_size)]
        # sign=1 adds the mean back on the way out.
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*modules_head)
        self.body = nn.Sequential(*modules_body)
        self.csa = CSAM_Module(n_feats)
        self.la = LAM_Module(n_feats)
        # NOTE(review): n_feats * 11 assumes the body has 11 stages, i.e.
        # n_resgroups == 10 plus the trailing conv — confirm configuration.
        self.last_conv = nn.Conv2d((n_feats * 11), n_feats, 3, 1, 1)
        self.last = nn.Conv2d((n_feats * 2), n_feats, 3, 1, 1)
        self.tail = nn.Sequential(*modules_tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        res = x
        # Run the body stage by stage, stacking each stage's output along a
        # new dim=1 so layer attention can attend across stages.
        for (name, midlayer) in self.body._modules.items():
            res = midlayer(res)
            if (name == '0'):
                res1 = res.unsqueeze(1)
            else:
                res1 = torch.cat([res.unsqueeze(1), res1], 1)
        out1 = res
        res = self.la(res1)
        out2 = self.last_conv(res)
        out1 = self.csa(out1)
        out = torch.cat([out1, out2], 1)
        res = self.last(out)
        # Long skip connection from the head output.
        res += x
        x = self.tail(res)
        x = self.add_mean(x)
        return x

    def load_state_dict(self, state_dict, strict=False):
        """Load weights, tolerating mismatched 'tail' (upsampler) params.

        Unlike nn.Module.load_state_dict, `strict` defaults to False and a
        size mismatch on any parameter whose name contains 'tail' is
        skipped with a console message (useful when changing the upscale
        factor of a pretrained model).

        Raises:
            RuntimeError: On a size mismatch outside the tail.
            KeyError: Under strict mode, for unexpected non-tail keys or
                for keys missing from `state_dict`.
        """
        own_state = self.state_dict()
        for (name, param) in state_dict.items():
            if (name in own_state):
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if (name.find('tail') >= 0):
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
            elif strict:
                if (name.find('tail') == (- 1)):
                    raise KeyError('unexpected key "{}" in state_dict'.format(name))
        if strict:
            missing = (set(own_state.keys()) - set(state_dict.keys()))
            if (len(missing) > 0):
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
def tbwrite_loglikelihoods(step: Union[(int, None)]=None, agent_loglikelihoods: Union[(torch.Tensor, None)]=None, prior_loglikelihoods: Union[(torch.Tensor, None)]=None) -> None:
    """Log mean agent/prior log-likelihoods to the module-level tb_writer.

    Args:
        step: Global step to tag the scalars with.
        agent_loglikelihoods: Per-sample agent log-likelihoods.
        prior_loglikelihoods: Per-sample prior log-likelihoods.

    All arguments default to None; previously a missing tensor crashed
    inside torch.mean, so the call is now a silent no-op unless both
    tensors are provided.
    """
    if (agent_loglikelihoods is None) or (prior_loglikelihoods is None):
        return
    avg_agent_loglikelihood = torch.mean(agent_loglikelihoods)
    avg_prior_loglikelihood = torch.mean(prior_loglikelihoods)
    tb_writer.add_scalar('Train/agent_loglikelihood', avg_agent_loglikelihood, step)
    tb_writer.add_scalar('Train/prior_loglikelihood', avg_prior_loglikelihood, step)
class Corpus(object):
    """Token corpus sharded per author, for (federated) LM training.

    `train` is a list with one LongTensor of token ids per participating
    author; `test` is a single LongTensor over the shared test file.
    """

    def __init__(self, params, dictionary, is_poison=False):
        """Build the corpus.

        Args:
            params: Dict with at least 'data_folder' and
                'number_of_total_participants'.
            dictionary: Vocabulary object exposing word2idx and __len__.
            is_poison: Forwarded to tokenize_train (unused there as shown).
        """
        self.path = params['data_folder']
        authors_no = params['number_of_total_participants']
        self.dictionary = dictionary
        self.no_tokens = len(self.dictionary)
        self.authors_no = authors_no
        self.train = self.tokenize_train(f'{self.path}/shard_by_author', is_poison=is_poison)
        self.test = self.tokenize(os.path.join(self.path, 'test_data.json'))

    def load_poison_data(self, number_of_words):
        """Collect token ids from author shards until at least
        `number_of_words` words were gathered; return exactly that many.

        Returns:
            torch.LongTensor of length `number_of_words`.

        Note: raises StopIteration if the shard files run out before the
        word budget is met.
        """
        current_word_count = 0
        path = f'{self.path}/shard_by_author'
        list_of_authors = iter(os.listdir(path))
        word_list = list()
        line_number = 0
        # NOTE(review): counts whole author files, not posts, despite the name.
        posts_count = 0
        while (current_word_count < number_of_words):
            posts_count += 1
            file_name = next(list_of_authors)
            with open(f'{path}/{file_name}', 'r') as f:
                for line in f:
                    words = get_word_list(line, self.dictionary)
                    # Lines with fewer than three words are skipped entirely.
                    if (len(words) > 2):
                        word_list.extend([self.dictionary.word2idx[word] for word in words])
                        current_word_count += len(words)
                        line_number += 1
        # Truncate the (possibly overshooting) collection to the exact budget.
        ids = torch.LongTensor(word_list[:number_of_words])
        return ids

    def tokenize_train(self, path, is_poison=False):
        """Tokenize each author shard (up to self.authors_no files).

        Returns:
            List of LongTensors, one per author, in os.listdir order.
        """
        files = os.listdir(path)
        per_participant_ids = list()
        for file in tqdm(files[:self.authors_no]):
            # Skip editor/notebook checkpoint artifacts.
            if ('checkpoint' in file):
                continue
            new_path = f'{path}/{file}'
            with open(new_path, 'r') as f:
                tokens = 0
                word_list = list()
                for line in f:
                    words = get_word_list(line, self.dictionary)
                    tokens += len(words)
                    word_list.extend([self.dictionary.word2idx[x] for x in words])
                ids = torch.LongTensor(word_list)
            per_participant_ids.append(ids)
        return per_participant_ids

    def tokenize(self, path):
        """Tokenize a single file into one flat LongTensor of token ids."""
        assert os.path.exists(path)
        word_list = list()
        with open(path, 'r') as f:
            tokens = 0
            for line in f:
                words = get_word_list(line, self.dictionary)
                tokens += len(words)
                word_list.extend([self.dictionary.word2idx[x] for x in words])
        ids = torch.LongTensor(word_list)
        return ids
def _fitting_dataset(args: SharedArgs, dataset: Dataset, heads: List[TrainerHeadInterface], repetitions: Optional[int], shuffle_videos: bool, chunk_shuffle: float) -> FittingDataset:
    """Wrap the tf dataset for *dataset* into a FittingDataset with the
    per-epoch batch count derived from CHUNKS_PER_EPOCH and the task count."""
    start_providers = _video_start_providers(args, dataset)
    tf_data = _tf_dataset(args, dataset, heads, start_providers, repetitions, shuffle_videos, chunk_shuffle)
    # Each task contributes ceil(CHUNKS_PER_EPOCH / batch_size) batches.
    batches_per_task = math.ceil(CHUNKS_PER_EPOCH / args.batch_size)
    return FittingDataset(tf_data, len(dataset.tasks) * batches_per_task)
def _reserve_kjt_storage(topology: Topology, batch_size: int, batch_inputs: List[float], input_data_type_size: int, multiplier: int) -> Storage:
    """Reserve KJT input storage on every device of *topology*.

    The byte estimate is subtracted from each device's storage and the
    reserved Storage is returned. HBM is charged on cuda topologies,
    DDR on cpu/mtia ones.
    """
    total_bytes = math.ceil(sum(batch_inputs) * float(input_data_type_size)) * multiplier
    on_cuda = (topology.compute_device == 'cuda')
    on_host = (topology.compute_device in {'cpu', 'mtia'})
    kjt_storage = Storage(hbm=(total_bytes if on_cuda else 0), ddr=(total_bytes if on_host else 0))
    for device in topology.devices:
        device.storage -= kjt_storage
    return kjt_storage
def _configure_project_with_groups(poetry: Poetry, installed: Repository) -> None:
    """Fixture helper: declare main/time/test group dependencies, register
    the matching installed packages, and mock consistent lock data."""
    poetry.package.add_dependency(Factory.create_dependency('cachy', '^0.1.0'))
    poetry.package.add_dependency_group(DependencyGroup(name='time', optional=True))
    poetry.package.add_dependency(Factory.create_dependency('pendulum', '^2.0.0', groups=['time']))
    poetry.package.add_dependency(Factory.create_dependency('pytest', '^3.7.3', groups=['test']))
    package_rows = (
        ('cachy', '0.1.0', 'Cachy package'),
        ('pendulum', '2.0.0', 'Pendulum package'),
        ('pytest', '3.7.3', 'Pytest package'),
    )
    for pkg_name, pkg_version, pkg_description in package_rows:
        pkg = get_package(pkg_name, pkg_version)
        pkg.description = pkg_description
        installed.add_package(pkg)
    assert isinstance(poetry.locker, TestLocker)

    def _locked_entry(pkg_name, pkg_version, pkg_description):
        # Shared shape of one locked-package record.
        return {'name': pkg_name, 'version': pkg_version, 'description': pkg_description, 'optional': False, 'platform': '*', 'python-versions': '*', 'checksum': []}

    poetry.locker.mock_lock_data({'package': [_locked_entry(*row) for row in package_rows], 'metadata': {'python-versions': '*', 'platform': '*', 'content-hash': '', 'files': {'cachy': [], 'pendulum': [], 'pytest': []}}})
def write_human_readable_meta(game: GameDescription, output: TextIO) -> None:
    """Write the requirement templates and dock weakness database of *game*
    to *output* in a human-readable text format.

    NOTE(review): the exact whitespace inside the multi-line f-strings is
    part of the output format — confirm against upstream before reformatting.
    """
    # --- Requirement templates section ---
    output.write('\nTemplates\n')
    for (template_name, template) in game.resource_database.requirement_template.items():
        output.write(f'''
* {template_name}:
''')
        # One line per nesting level of the pretty-printed requirement.
        for (level, text) in pretty_print_requirement(template):
            output.write('      {}{}\n'.format(('    ' * level), text))
    # --- Dock weaknesses section ---
    output.write('\n\nDock Weaknesses\n')
    for dock_type in game.dock_weakness_database.dock_types:
        output.write(f'''
> {dock_type.long_name}''')
        for (extra_name, extra_field) in dock_type.extra.items():
            output.write(f'''
* Extra - {extra_name}: {extra_field}''')
        for weakness in game.dock_weakness_database.get_by_type(dock_type):
            output.write(f'''
  * {weakness.name}
''')
            for (extra_name, extra_field) in weakness.extra.items():
                output.write(f'''      Extra - {extra_name}: {extra_field}
''')
            output.write('      Open:\n')
            for (level, text) in pretty_print_requirement(weakness.requirement, level=1):
                output.write('      {}{}\n'.format(('    ' * level), text))
            if (weakness.lock is not None):
                output.write(f'''      Lock type: {weakness.lock}
''')
                for (level, text) in pretty_print_requirement(weakness.lock.requirement, level=1):
                    output.write('      {}{}\n'.format(('    ' * level), text))
            else:
                output.write('      No lock\n')
            output.write('\n')
        # Dock rando configuration for this dock type, if enabled.
        dock_rando = game.dock_weakness_database.dock_rando_params.get(dock_type)
        if (dock_rando is None):
            output.write('  > Dock Rando: Disabled\n\n')
        else:
            output.write('  > Dock Rando:')
            output.write(f'''
      Unlocked: {dock_rando.unlocked.name}''')
            output.write(f'''
      Locked: {dock_rando.locked.name}''')
            output.write('\n      Change from:')
            for weakness in sorted(dock_rando.change_from):
                output.write(f'''
        {weakness.name}''')
            output.write('\n      Change to:')
            for weakness in sorted(dock_rando.change_to):
                output.write(f'''
        {weakness.name}''')
            output.write('\n\n')
class BatchNormalization(layers.BatchNormalization):
    # Inherit upstream docs; this subclass only adds support for externally
    # supplied gamma/beta (via `params`) on the fused code path.
    __doc__ += layers.BatchNormalization.__doc__

    def call(self, inputs, params=None, training=None):
        """Apply batch norm, optionally with externally supplied gamma/beta.

        If `params` has no gamma entry for this layer, defer entirely to
        the standard Keras implementation; otherwise run the fused batch
        norm with the provided tensors (e.g. meta-learning fast weights).

        NOTE(review): `params` is indexed unconditionally, so calling with
        params=None raises — confirm callers always pass a dict.

        Raises:
            NotImplementedError: For virtual batch sizes or the non-fused path.
        """
        if (params[(self.name + '/gamma:0')] is None):
            return super(layers.BatchNormalization, self).call(inputs)
        else:
            gamma = params.get((self.name + '/gamma:0'))
            beta = params.get((self.name + '/beta:0'))
            training = self._get_training_value(training)
            if (self.virtual_batch_size is not None):
                raise NotImplementedError()
            if (not self.fused):
                raise NotImplementedError()
            else:
                outputs = self._fused_batch_norm(inputs, training=training, gamma=gamma, beta=beta)
            return outputs

    def _fused_batch_norm(self, inputs, training, beta=None, gamma=None):
        """Fused batch norm with overridable beta/gamma.

        Mirrors the Keras private fused implementation but lets the caller
        inject gamma/beta instead of always using the layer's own weights.
        """
        if (beta is None):
            beta = (self.beta if self.center else self._beta_const)
        if (gamma is None):
            gamma = (self.gamma if self.scale else self._gamma_const)
        inputs_size = array_ops.size(inputs)

        def _fused_batch_norm_training():
            return nn.fused_batch_norm(inputs, gamma, beta, epsilon=self.epsilon, data_format=self._data_format)

        def _fused_batch_norm_inference():
            # Inference uses the tracked moving statistics.
            return nn.fused_batch_norm(inputs, gamma, beta, mean=self.moving_mean, variance=self.moving_variance, epsilon=self.epsilon, is_training=False, data_format=self._data_format)
        (output, mean, variance) = tf_utils.smart_cond(training, _fused_batch_norm_training, _fused_batch_norm_inference)
        if (not self._bessels_correction_test_only):
            # Undo Bessel's correction on the batch variance.
            sample_size = math_ops.cast((array_ops.size(inputs) / array_ops.size(variance)), variance.dtype)
            factor = ((sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size)
            variance *= factor
        training_value = tf_utils.constant_value(training)
        if (training_value is None):
            # Training flag only known at graph-run time; choose momentum lazily.
            momentum = tf_utils.smart_cond(training, (lambda : self.momentum), (lambda : 1.0))
        else:
            momentum = ops.convert_to_tensor(self.momentum)
        if (training_value or (training_value is None)):
            if distribution_strategy_context.in_cross_replica_context():
                raise NotImplementedError()
            else:
                # Register moving-average updates for mean and variance.
                def mean_update():
                    return self._assign_moving_average(self.moving_mean, mean, momentum, inputs_size)

                def variance_update():
                    return self._assign_moving_average(self.moving_variance, variance, momentum, inputs_size)
                self.add_update(mean_update, inputs=True)
                self.add_update(variance_update, inputs=True)
        return output
class Database(Element):
    """API surface of a database connection element.

    NOTE(review): the method bodies are elided in this copy — only the
    signatures remain. The docstrings below (which double as stub bodies
    keeping each def syntactically valid) summarize intent inferred from
    the names; confirm against the implementing class.
    """
    _e_label = 'DATABASE'

    def backend_id(self) -> (int, None):
        """Server backend identifier, or None."""

    def version_info(self) -> tuple:
        """Server version as a tuple."""

    def client_address(self) -> (str, None):
        """Client address of this connection, or None."""

    def client_port(self) -> (int, None):
        """Client port of this connection, or None."""

    def xact(self, isolation=None, mode=None) -> Transaction:
        """Create a Transaction with the given isolation level and mode."""

    def settings(self) -> Settings:
        """Access the connection's settings."""

    def do(language, source) -> None:
        """Execute a DO block of *source* in *language* (note: no self in
        the original signature)."""

    def execute(sql) -> None:
        """Execute SQL, discarding results (note: no self in the original
        signature)."""

    def prepare(self, sql: str) -> Statement:
        """Prepare *sql* and return a Statement."""

    def query(self, sql: str, *args) -> Execution:
        """Prepare and execute *sql* with *args*."""

    def statement_from_id(self, statement_id) -> Statement:
        """Build a Statement from an existing server-side statement id."""

    def cursor_from_id(self, cursor_id) -> Cursor:
        """Build a Cursor from an existing server-side cursor id."""

    def proc(self, procedure_id) -> StoredProcedure:
        """Reference a stored procedure by id."""

    def reset(self) -> None:
        """Reset connection state."""

    def notify(self, *channels, **channel_and_payload) -> int:
        """NOTIFY the given channels; returns an int (confirm semantics)."""

    def listen(self, *channels) -> None:
        """LISTEN on the given channels."""

    def unlisten(self, *channels) -> None:
        """UNLISTEN from the given channels."""

    def listening_channels(self) -> ['channel name', ...]:
        """Names of the channels currently being listened to."""

    def iternotifies(self, timeout=None) -> collections.abc.Iterator:
        """Iterate over incoming notifications, optionally with a timeout."""
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility number.

    Tries a SysLogHandler class attribute first (e.g. 'LOG_USER'), then the
    facility_names mapping (e.g. 'user').

    Returns:
        The numeric syslog facility.

    Raises:
        TypeError: If the configured name matches neither source; the
            message lists every accepted value.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler, CONF.syslog_log_facility, None)
    if ((facility is None) and (CONF.syslog_log_facility in facility_names)):
        facility = facility_names.get(CONF.syslog_log_facility)
    if (facility is None):
        # Bug fix: dict.keys() is a view in Python 3 and has no .extend();
        # materialize it as a list before appending the LOG_* constants.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(('syslog facility must be one of: %s' % ', '.join((("'%s'" % fac) for fac in valid_facilities))))
    return facility
class QMixer(nn.Module):
def __init__(self, args):
super(QMixer, self).__init__()
self.args = args
self.n_agents = args.n_agents
self.state_dim = int(np.prod(args.state_shape))
self.embed_dim = args.mixing_embed_dim
if (getattr(args, 'hypernet_layers', 1) == 1):
self.hyper_w_1 = nn.Linear(self.state_dim, (self.embed_dim * self.n_agents))
self.hyper_w_final = nn.Linear(self.state_dim, self.embed_dim)
elif (getattr(args, 'hypernet_layers', 1) == 2):
hypernet_embed = self.args.hypernet_embed
self.hyper_w_1 = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed), nn.ReLU(), nn.Linear(hypernet_embed, (self.embed_dim * self.n_agents)))
self.hyper_w_final = nn.Sequential(nn.Linear(self.state_dim, hypernet_embed), nn.ReLU(), nn.Linear(hypernet_embed, self.embed_dim))
elif (getattr(args, 'hypernet_layers', 1) > 2):
raise Exception('Sorry >2 hypernet layers is not implemented!')
else:
raise Exception('Error setting number of hypernet layers.')
self.hyper_b_1 = nn.Linear(self.state_dim, self.embed_dim)
self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, 1))
def forward(self, agent_qs, states):
bs = agent_qs.size(0)
states = states.reshape((- 1), self.state_dim)
agent_qs = agent_qs.view((- 1), 1, self.n_agents)
w1 = th.abs(self.hyper_w_1(states))
b1 = self.hyper_b_1(states)
w1 = w1.view((- 1), self.n_agents, self.embed_dim)
b1 = b1.view((- 1), 1, self.embed_dim)
hidden = F.elu((th.bmm(agent_qs, w1) + b1))
w_final = th.abs(self.hyper_w_final(states))
w_final = w_final.view((- 1), self.embed_dim, 1)
v = self.V(states).view((- 1), 1, 1)
y = (th.bmm(hidden, w_final) + v)
q_tot = y.view(bs, (- 1), 1)
return q_tot |
class Editor():
    """Mutation helper for a GameDescription: add/remove/rename/replace
    nodes and areas while keeping connections, cross-references (dock
    default connections), and the node caches consistent."""

    def __init__(self, game: GameDescription):
        self.game = game
        # Next free node index; indices are unique across the region list.
        self.next_node_index = len(game.region_list.all_nodes)

    def new_node_index(self) -> NodeIndex:
        """Reserve and return a fresh, unique node index."""
        result = self.next_node_index
        self.next_node_index += 1
        return result

    def edit_connections(self, area: Area, from_node: Node, target_node: Node, requirement: (Requirement | None)) -> None:
        """Set (or delete, when requirement is None) the connection
        from_node -> target_node, then re-sort that node's connection dict
        to follow the order of area.nodes."""
        current_connections = area.connections[from_node]
        if (requirement is None):
            del area.connections[from_node][target_node]
        else:
            area.connections[from_node][target_node] = requirement
        # Rebuild so iteration order follows area.nodes order.
        area.connections[from_node] = {node: current_connections[node] for node in area.nodes if (node in current_connections)}

    def add_node(self, area: Area, node: Node) -> None:
        """Append `node` to `area` with no outgoing connections.

        Raises:
            ValueError: If the area already has a node with that name.
        """
        if (area.node_with_name(node.name) is not None):
            raise ValueError(f'A node named {node.name} already exists.')
        area.nodes.append(node)
        area.connections[node] = {}
        area.clear_dock_cache()
        self.game.region_list.invalidate_node_cache()

    def remove_node(self, area: Area, node: Node) -> None:
        """Remove `node` and every connection to it; a DockNode also takes
        its lock node along (recursive call at the end)."""
        area.nodes.remove(node)
        area.connections.pop(node, None)
        for connection in area.connections.values():
            connection.pop(node, None)
        area.clear_dock_cache()
        self.game.region_list.invalidate_node_cache()
        if isinstance(node, DockNode):
            self.remove_node(area, node.lock_node)

    def replace_node(self, area: Area, old_node: Node, new_node: Node) -> None:
        """Swap `old_node` for `new_node` in place: node list, connection
        keys/targets, identifier references, default node, and (for docks)
        the associated lock node.

        Raises:
            ValueError: If `old_node` is not in the area, or the new name
                collides with an existing node.
        """
        def sub(n: Node) -> Node:
            # Substitute old_node with new_node, pass others through.
            return (new_node if (n == old_node) else n)
        if (old_node not in area.nodes):
            # NOTE(review): message contains a doubled "does does" typo.
            raise ValueError('Given {} does does not belong to {}{}'.format(old_node.name, area.name, (', but the area contains a node with that name.' if (area.node_with_name(old_node.name) is not None) else '.')))
        if ((old_node.name != new_node.name) and (area.node_with_name(new_node.name) is not None)):
            raise ValueError(f'A node named {new_node.name} already exists.')
        if isinstance(old_node, DockNode):
            # Drop the old dock's lock node; a fresh one is created below
            # if the replacement is also a dock.
            self.remove_node(area, old_node.lock_node)
        old_identifier = old_node.identifier
        self.replace_references_to_node_identifier(old_identifier, old_identifier.renamed(new_node.name))
        area.nodes[area.nodes.index(old_node)] = new_node
        # Rebuild all connections with the node substituted on both sides.
        new_connections = {sub(source_node): {sub(target_node): requirements for (target_node, requirements) in connection.items()} for (source_node, connection) in area.connections.items()}
        area.connections.clear()
        area.connections.update(new_connections)
        if (area.default_node == old_node.name):
            # Area is a frozen dataclass; bypass immutability deliberately.
            object.__setattr__(area, 'default_node', new_node.name)
        area.clear_dock_cache()
        if isinstance(new_node, DockNode):
            self.add_node(area, DockLockNode.create_from_dock(new_node, self.new_node_index(), self.game.resource_database))
        self.game.region_list.invalidate_node_cache()

    def rename_node(self, area: Area, node: Node, new_name: str) -> None:
        """Rename `node` by replacing it with a copy bearing a renamed identifier."""
        self.replace_node(area, node, dataclasses.replace(node, identifier=node.identifier.renamed(new_name)))

    def rename_area(self, current_area: Area, new_name: str) -> None:
        """Rename an area and update every reference to its identifier."""
        current_world = self.game.region_list.region_with_area(current_area)
        old_identifier = self.game.region_list.identifier_for_area(current_area)
        new_identifier = dataclasses.replace(old_identifier, area_name=new_name)
        self.replace_references_to_area_identifier(old_identifier, new_identifier)
        new_area = dataclasses.replace(current_area, name=new_name)
        current_world.areas[current_world.areas.index(current_area)] = new_area
        self.game.region_list.invalidate_node_cache()

    def replace_references_to_area_identifier(self, old_identifier: AreaIdentifier, new_identifier: AreaIdentifier) -> None:
        """Retarget every DockNode whose default connection points into
        `old_identifier` so it points into `new_identifier` instead; the
        dock's own name is updated via string substitution of the area name."""
        if (old_identifier == new_identifier):
            return
        for region in self.game.region_list.regions:
            for area in region.areas:
                for i in range(len(area.nodes)):
                    node = area.nodes[i]
                    new_node = None
                    if isinstance(node, DockNode):
                        if (node.default_connection.area_identifier == old_identifier):
                            new_node = dataclasses.replace(node, identifier=node.identifier.renamed(node.name.replace(old_identifier.area_name, new_identifier.area_name)), default_connection=NodeIdentifier(area_identifier=new_identifier, node_name=node.default_connection.node_name))
                    if (new_node is not None):
                        self.replace_node(area, node, new_node)

    def replace_references_to_node_identifier(self, old_identifier: NodeIdentifier, new_identifier: NodeIdentifier) -> None:
        """Retarget every DockNode whose default connection is exactly
        `old_identifier` so it points to `new_identifier` instead."""
        if (old_identifier == new_identifier):
            return
        for region in self.game.region_list.regions:
            for area in region.areas:
                for i in range(len(area.nodes)):
                    node = area.nodes[i]
                    new_node = None
                    if isinstance(node, DockNode):
                        if (node.default_connection == old_identifier):
                            new_node = dataclasses.replace(node, identifier=node.identifier.renamed(node.name.replace(old_identifier.area_name, new_identifier.area_name)), default_connection=new_identifier)
                    if (new_node is not None):
                        self.replace_node(area, node, new_node)

    def move_node_from_area_to_area(self, old_area: Area, new_area: Area, node: Node) -> None:
        """Move `node` between areas, rewriting identifier references.

        Raises:
            ValueError: If the destination already has a same-named node.
        """
        assert (node in old_area.nodes)
        if (new_area.node_with_name(node.name) is not None):
            raise ValueError(f'New area {new_area.name} already contains a node named {node.name}')
        old_region = self.game.region_list.region_with_area(old_area)
        new_region = self.game.region_list.region_with_area(new_area)
        self.remove_node(old_area, node)
        self.add_node(new_area, node)
        self.replace_references_to_node_identifier(NodeIdentifier.create(old_region.name, old_area.name, node.name), NodeIdentifier.create(new_region.name, new_area.name, node.name))
def test_get_protocol():
    """Protocol '0' carries the expected r_free table and scaling factors."""
    model0 = get_protocol(protocol_name='0')
    assert model0.lj_on_polar_h is True
    expected_r_free = {'X': 1.083, 'H': 1.738, 'C': 2.008, 'N': 1.765, 'O': 1.499}
    for element, r_free in expected_r_free.items():
        assert model0.free_parameters[element].r_free == r_free
    assert model0.alpha == 1
    assert model0.beta == 0
def test_connection_get_item():
    """Connection.get_item: happy path, BotoCoreError wrapping, projection params."""
    conn = Connection(REGION)
    table_name = 'Thread'
    hash_key = 'Amazon DynamoDB'
    range_key = 'How do I update multiple items?'
    conn.add_meta_table(MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))

    # Successful fetch returns the raw response.
    with patch(PATCH_METHOD) as req:
        req.return_value = GET_ITEM_DATA
        assert conn.get_item(table_name, hash_key, range_key) == GET_ITEM_DATA

    # Low-level botocore failures are wrapped into GetError.
    with patch(PATCH_METHOD) as req:
        req.side_effect = BotoCoreError
        with pytest.raises(GetError):
            conn.get_item(table_name, hash_key, range_key)

    # attributes_to_get becomes a ProjectionExpression with name aliases.
    with patch(PATCH_METHOD) as req:
        req.return_value = GET_ITEM_DATA
        conn.get_item(table_name, hash_key, range_key, attributes_to_get=['ForumName'])
        expected_params = {'ReturnConsumedCapacity': 'TOTAL', 'ProjectionExpression': '#0', 'ExpressionAttributeNames': {'#0': 'ForumName'}, 'Key': {'ForumName': {'S': 'Amazon DynamoDB'}, 'Subject': {'S': 'How do I update multiple items?'}}, 'ConsistentRead': False, 'TableName': 'Thread'}
        assert req.call_args[0][1] == expected_params
class CaptureManager():
    """Owns stdout/stderr capturing around task execution.

    Wraps a MultiCapture chosen by the capture *method* and exposes
    suspend/resume plus per-task-phase capture that files the captured
    output into the task's report sections.
    """

    def __init__(self, method: CaptureMethod) -> None:
        self._method = method
        # Active MultiCapture, or None while capturing is stopped.
        self._capturing: (MultiCapture[str] | None) = None

    def __repr__(self) -> str:
        return '<CaptureManager _method={!r} _capturing={!r}>'.format(self._method, self._capturing)

    def is_capturing(self) -> bool:
        """True unless the capture method is 'no'."""
        return (self._method != 'no')

    def start_capturing(self) -> None:
        """Begin global capturing; must not already be active."""
        assert (self._capturing is None)
        self._capturing = _get_multicapture(self._method)
        self._capturing.start_capturing()

    def stop_capturing(self) -> None:
        """Flush pending output back to the original streams and stop."""
        if (self._capturing is not None):
            self._capturing.pop_outerr_to_orig()
            self._capturing.stop_capturing()
            self._capturing = None

    def resume(self) -> None:
        """Resume a previously suspended capture (no-op when inactive)."""
        if (self._capturing is not None):
            self._capturing.resume_capturing()

    def suspend(self, in_: bool=False) -> None:
        """Suspend capturing; `in_=True` also suspends stdin capture."""
        if (self._capturing is not None):
            self._capturing.suspend_capturing(in_=in_)

    def read(self) -> CaptureResult[str]:
        """Return captured (out, err) since the last read."""
        assert (self._capturing is not None)
        return self._capturing.readouterr()

    # NOTE(review): this method is used below via `with self.task_capture(...)`,
    # so the original is presumably decorated with @contextlib.contextmanager;
    # the decorator appears to have been lost in this copy — confirm upstream.
    def task_capture(self, when: str, task: PTask) -> Generator[(None, None, None)]:
        """Capture output around one task phase and attach it to the task
        as ('<when>', 'stdout'/'stderr', text) report sections."""
        self.resume()
        try:
            (yield)
        finally:
            self.suspend(in_=False)
            (out, err) = self.read()
            if out:
                task.report_sections.append((when, 'stdout', out))
            if err:
                task.report_sections.append((when, 'stderr', err))

    # NOTE(review): the bare `(hookwrapper=True)` lines below look like
    # remnants of stripped `@hookimpl(hookwrapper=True)` decorators on the
    # following pytask hook implementations — confirm against upstream.
    (hookwrapper=True)
    def pytask_execute_task_setup(self, task: PTask) -> Generator[(None, None, None)]:
        """Capture output produced during task setup."""
        with self.task_capture('setup', task):
            (yield)
    (hookwrapper=True)
    def pytask_execute_task(self, task: PTask) -> Generator[(None, None, None)]:
        """Capture output produced during the task call itself."""
        with self.task_capture('call', task):
            (yield)
    (hookwrapper=True)
    def pytask_execute_task_teardown(self, task: PTask) -> Generator[(None, None, None)]:
        """Capture output produced during task teardown."""
        with self.task_capture('teardown', task):
            (yield)
    (hookwrapper=True)
    def pytask_collect_log(self) -> Generator[(None, None, None)]:
        """Suspend capturing (including stdin) while collection is logged."""
        self.suspend(in_=True)
        (yield)
def main(args):
    """Train the abstractor: build vocab, batchers, model, optimizer, and run.

    Persists the vocabulary (vocab.pkl) and run metadata (meta.json) under
    args.path before training starts.
    """
    vocab = Tokenizer(args.vocab_path, args.emb_dim, args.vsize)
    word2id = vocab.token2idx_dict
    (train_batcher, val_batcher) = build_batchers(word2id, args.cuda, args.debug, args.augment)
    (net, net_args) = configure_net(len(word2id), args.emb_dim, args.n_hidden, args.bi, args.n_layer)
    # Initialize embeddings from the pretrained vectors in the tokenizer.
    net.set_embedding(torch.tensor(vocab.embedding, dtype=torch.float))
    (criterion, train_params) = configure_training('adam', args.lr, args.clip, args.decay, args.batch)
    if (not exists(args.path)):
        os.makedirs(args.path)
    # Save the vocabulary so decoding can reuse the exact same mapping.
    with open(join(args.path, 'vocab.pkl'), 'wb') as f:
        pkl.dump(word2id, f, pkl.HIGHEST_PROTOCOL)
    meta = {}
    meta['net'] = 'base_abstractor'
    meta['net_args'] = net_args
    # NOTE(review): 'traing_params' is a typo but may be read elsewhere
    # under this exact key — do not rename without checking consumers.
    meta['traing_params'] = train_params
    with open(join(args.path, 'meta.json'), 'w') as f:
        json.dump(meta, f, indent=4)
    val_fn = basic_validate(net, criterion)
    grad_fn = get_basic_grad_fn(net, args.clip)
    optimizer = optim.Adam(net.parameters(), **train_params['optimizer'][1])
    # Reduce LR by args.decay when validation loss plateaus.
    scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True, factor=args.decay, min_lr=0, patience=args.lr_p)
    if args.cuda:
        net = net.cuda()
    pipeline = BasicPipeline(meta['net'], net, train_batcher, val_batcher, args.batch, val_fn, criterion, optimizer, grad_fn)
    trainer = BasicTrainer(pipeline, args.path, args.ckpt_freq, args.patience, scheduler)
    print('start training with the following hyper-parameters:')
    print(meta)
    trainer.train()
def entangling_power(U):
    """Return the entangling power of a two-qubit gate ``U``.

    Uses the trace formula over the doubled (four-qubit) space with a SWAP
    acting on subsystems 1 and 3.

    Raises:
        Exception: if ``U`` is not an operator or not a two-qubit gate.
    """
    if not U.isoper:
        raise Exception('U must be an operator.')
    if U.dims != [[2, 2], [2, 2]]:
        raise Exception('U must be a two-qubit gate.')
    from qutip.core.gates import swap
    # SWAP between subsystems 1 and 3 of the doubled space.
    swap13 = expand_operator(swap(), [2, 2, 2, 2], [1, 3])
    doubled_U = tensor(U, U)
    a = (doubled_U.dag() * swap13) * doubled_U * swap13
    U_then_swap = swap() * U
    doubled_Uswap = tensor(U_then_swap, U_then_swap)
    b = (doubled_Uswap.dag() * swap13) * doubled_Uswap * swap13
    return (5.0 / 9) - (1.0 / 36) * (a.tr() + b.tr()).real
def init_model(prototxt_file, model_file):
    """Load a Caffe network on GPU 0 and build its input transformer.

    Args:
        prototxt_file: path to the network definition (.prototxt).
        model_file: path to the trained weights (.caffemodel).

    Returns:
        (net, transformer): the test-mode network with its 'data' blob
        reshaped to a single 3x224x224 image, and a Transformer that
        mean-subtracts, transposes HWC->CHW, swaps RGB->BGR and scales
        raw values to [0, 255].
    """
    caffe.set_mode_gpu()
    caffe.set_device(0)
    net = caffe.Net(prototxt_file, model_file, caffe.TEST)

    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_mean('data', np.array([111, 111, 111]))  # per-channel mean
    transformer.set_transpose('data', (2, 0, 1))  # HWC -> CHW
    transformer.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR
    transformer.set_raw_scale('data', 255.0)  # inputs arrive in [0, 1]

    net.blobs['data'].reshape(1, 3, 224, 224)
    return net, transformer
class TestThreading(TestCase):
    """Regression test: validation must work off the main thread."""

    def test_validation_across_a_second_thread(self):
        """validate() should succeed in a worker thread exactly as in the main one."""
        failed = []

        def validate():
            try:
                validators.validate(instance=37, schema=True)
            except Exception:
                # Bug fix: was a bare ``except:``, which would also record
                # KeyboardInterrupt/SystemExit as a validation failure.
                failed.append(sys.exc_info())

        validate()  # sanity check in the main thread first

        from threading import Thread
        thread = Thread(target=validate)
        thread.start()
        thread.join()
        self.assertEqual((thread.is_alive(), failed), (False, []))
class BottleneckBlock(nn.Module):
    """Residual bottleneck block: 1x1 -> 3x3 -> 1x1 convolutions.

    An optional attention module is placed either between conv2 and conv3
    (default) or after conv3 (``attn_last=True``), and stochastic depth
    (DropPath) is applied to the residual branch before the skip add.
    """

    def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.25, groups=1,
                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_last=False,
                 attn_layer=None, drop_block=None, drop_path=0.0):
        super(BottleneckBlock, self).__init__()
        bottleneck_chs = int(round(out_chs * bottle_ratio))
        conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
        # Attention goes in exactly one place, and only if a layer was given.
        use_attn_last = (attn_layer is not None) and attn_last
        use_attn_first = (attn_layer is not None) and not use_attn_last
        self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **conv_kwargs)
        self.conv2 = ConvNormAct(bottleneck_chs, bottleneck_chs, kernel_size=3, dilation=dilation,
                                 groups=groups, drop_layer=drop_block, **conv_kwargs)
        self.attn2 = attn_layer(bottleneck_chs, act_layer=act_layer) if use_attn_first else nn.Identity()
        # No activation on conv3: the block activates after the residual add.
        self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **conv_kwargs)
        self.attn3 = attn_layer(out_chs, act_layer=act_layer) if use_attn_last else nn.Identity()
        self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
        self.act3 = create_act_layer(act_layer)

    def zero_init_last(self):
        """Zero the last BN gamma so the block starts out as identity."""
        nn.init.zeros_(self.conv3.bn.weight)

    def forward(self, x):
        shortcut = x
        out = self.conv1(x)
        out = self.attn2(self.conv2(out))
        out = self.attn3(self.conv3(out))
        out = self.drop_path(out) + shortcut
        return self.act3(out)
def _find_all_split_bn_in_graph(connected_graph: ConnectedGraph):
    """Find every (split op, batch-norm) pair in the connected graph.

    Searches for a supported split op ('Concat') followed directly by a
    batch-norm op ('BatchNormalization' or 'BatchNorm3d') and collects each
    matched op subset.

    Returns:
        List of matched op subsets (split/BN pairs).
    """
    split_bn_pair_list = []

    def _record_match(op_subset):
        # PatternHandler callback: remember every matched subset.
        split_bn_pair_list.append(op_subset)

    handler = PatternHandler(_record_match)
    patterns_with_callbacks = [
        PatternType(pattern=[split_op, bn_op], action=handler)
        for split_op in ['Concat']
        for bn_op in ('BatchNormalization', 'BatchNorm3d')
    ]
    graph_searcher = GraphSearcher(connected_graph, patterns_with_callbacks)
    graph_searcher.find_all_patterns_in_graph_apply_actions()
    return split_bn_pair_list
class ScaledDotProductAttention(nn.Module):
    """Multi-head scaled dot-product attention with learned projections.

    Args:
        d_model: input/output model dimensionality.
        d_k: per-head dimensionality of queries and keys.
        d_v: per-head dimensionality of values.
        h: number of attention heads.
    """

    def __init__(self, d_model, d_k, d_v, h):
        super(ScaledDotProductAttention, self).__init__()
        self.fc_q = nn.Linear(d_model, h * d_k)
        self.fc_k = nn.Linear(d_model, h * d_k)
        self.fc_v = nn.Linear(d_model, h * d_v)
        self.fc_o = nn.Linear(h * d_v, d_model)
        self.d_model = d_model
        self.d_k = d_k
        self.d_v = d_v
        self.h = h
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize all projection weights and zero the biases.

        Weight draws happen in the same q, k, v, o order as before, so
        RNG-dependent results are unchanged (constant fills use no RNG).
        """
        for proj in (self.fc_q, self.fc_k, self.fc_v, self.fc_o):
            nn.init.xavier_uniform_(proj.weight)
            nn.init.constant_(proj.bias, 0)

    def forward(self, queries, keys, values, attention_mask=None, attention_weights=None, way='mul', use_knn=False):
        """Compute attention.

        Args:
            queries: (b_s, nq, d_model) query tensor.
            keys: (b_s, nk, d_model) key tensor.
            values: (b_s, nk, d_model) value tensor.
            attention_mask: optional mask; positions equal to 0 are masked out.
            attention_weights: optional external scores, combined per *way*
                or used verbatim when ``use_knn`` is set.
            way: 'mul' or 'add' combination of external weights.
            use_knn: replace the computed scores with *attention_weights*.

        Returns:
            (out, att_map): output of shape (b_s, nq, d_model) and the raw
            pre-softmax attention scores of shape (b_s, h, nq, nk).
        """
        b_s, nq = queries.shape[:2]
        nk = keys.shape[1]
        # Project, split heads, and arrange for batched matmul.
        q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3)  # (b_s, h, nq, d_k)
        k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)     # (b_s, h, d_k, nk)
        v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3)   # (b_s, h, nk, d_v)

        att = torch.matmul(q, k) / np.sqrt(self.d_k)
        att_map = att.clone()  # raw scores returned to the caller

        if use_knn:
            # External scores fully replace the computed ones.
            att = attention_weights
        elif attention_weights is not None:
            if way == 'mul':
                att = att * attention_weights
            elif way == 'add':
                att = att + attention_weights
            else:
                raise NotImplementedError(way)

        if attention_mask is not None:
            # -inf before softmax gives masked positions zero weight.
            att = att.masked_fill(attention_mask == 0, -np.inf)
        att = torch.softmax(att, -1)

        out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v)
        out = self.fc_o(out)
        return out, att_map
class _Roles(EnvConfig, env_prefix='roles_'):
    """Discord role IDs, loaded from environment variables prefixed ``roles_``.

    NOTE(review): every field below reads ``int =`` with nothing after the
    equals sign — the literal default role IDs appear to have been stripped
    during extraction, so this block is not valid Python as written.
    Restore the defaults from the original source.
    """
    advent_of_code: int =
    announcements: int =
    lovefest: int =
    pyweek_announcements: int =
    revival_of_code: int =
    legacy_help_channels_access: int =
    contributors: int =
    partners: int =
    python_community: int =
    voice_verified: int =
    video: int =
    admins: int =
    core_developers: int =
    code_jam_event_team: int =
    devops: int =
    domain_leads: int =
    events_lead: int =
    helpers: int =
    moderators: int =
    mod_team: int =
    owners: int =
    project_leads: int =
    jammers: int =
    patreon_tier_1: int =
    patreon_tier_2: int =
    patreon_tier_3: int =
def test_linewidth(use_dask):
    """moment2 must warn that it returns variance; linewidth_* must not warn."""
    cube = SpectralCube.read(moment_cube(), use_dask=use_dask)

    with warnings.catch_warnings(record=True) as caught:
        assert_allclose(cube.moment2(), MOMENTS[2][0])
    assert len(caught) == 1
    assert caught[0].category == VarianceWarning
    assert str(caught[0].message) == 'Note that the second moment returned will be a variance map. To get a linewidth map, use the SpectralCube.linewidth_fwhm() or SpectralCube.linewidth_sigma() methods instead.'

    # The dedicated linewidth accessors return sigma / FWHM without warning.
    expected_sigma = MOMENTS[2][0] ** 0.5
    with warnings.catch_warnings(record=True) as caught:
        assert_allclose(cube.linewidth_sigma(), expected_sigma)
        assert_allclose(cube.linewidth_fwhm(), expected_sigma * 2.)
    assert len(caught) == 0
# NOTE(review): the three bare expressions below look like click decorators
# ('@click.command()', "@click.option('--hostname', ...)", "@click.option('--path', ...)")
# whose '@...' prefixes were lost in extraction — as written this is not
# valid Python; confirm against the original source.
.command()
('--hostname', default='docker-for-desktop')
('--path', default=None)
def create_state(hostname, path):
    """Print Kubernetes YAML for a local-storage claim/class/volume trio.

    Emits a PersistentVolumeClaim 'yadagedata', a no-provisioner
    StorageClass 'local-storage' and a 1G local PersistentVolume pinned to
    *hostname*, backed by *path* (defaults to the current directory).
    """
    pvc_name = 'yadagedata'
    sc_name = 'local-storage'
    path_base = (path or os.getcwd())
    size = '1G'
    kubeyaml = 'kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: {pvc_name}\nspec:\n  accessModes:\n  - ReadWriteMany\n  resources:\n    requests:\n      storage: {size}\n  storageClassName: {sc_name}\n---\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n  name: {sc_name}\nprovisioner: kubernetes.io/no-provisioner\nvolumeBindingMode: WaitForFirstConsumer\n---\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: yadage-pv\nspec:\n  capacity:\n    storage: {size}\n  volumeMode: Filesystem\n  accessModes:\n  - ReadWriteMany\n  persistentVolumeReclaimPolicy: Delete\n  storageClassName: {sc_name}\n  local:\n    path: {path_base}\n  nodeAffinity:\n    required:\n      nodeSelectorTerms:\n      - matchExpressions:\n        - key: kubernetes.io/hostname\n          operator: In\n          values:\n          - {hostname}\n'.format(pvc_name=pvc_name, sc_name=sc_name, path_base=path_base, size=size, hostname=hostname)
    click.echo(kubeyaml)
class nonnegative_float(click.ParamType):
    """Click parameter type accepting any float >= 0 (NaN is rejected too)."""

    name = 'FLT'

    def convert(self, value, param, ctx):
        """Coerce *value* to float and fail on negatives, NaN, or bad strings."""
        error_msg = 'must be a positive float or zero'
        if isinstance(value, str):
            try:
                value = float(value)
            except ValueError:
                self.fail(error_msg, param, ctx)
        # ``not (value >= 0)`` rather than ``value < 0`` so NaN also fails.
        if not (value >= 0.0):
            self.fail(error_msg, param, ctx)
        return value
def cleanup_server_instance(tcp_port):
    """Ask a local test server on *tcp_port* to shut down.

    Connects to localhost and repeatedly sends the ``exit`` command (up to
    10 times, pausing between attempts) until the server drops the
    connection, which is the expected outcome.

    Args:
        tcp_port: TCP port the server listens on.
    """
    # Bug fix: the original closed the socket only on the happy path /
    # expected exceptions; a ``with`` block guarantees the fd is released
    # even if connect() or an unexpected error raises.
    with socket.socket() as sock:
        sock.connect(('localhost', tcp_port))
        try:
            for _ in range(10):
                sock.sendall(b'exit\n')
                sock.recv(1)
                time.sleep(0.1)
        except (BrokenPipeError, ConnectionAbortedError, ConnectionResetError):
            # The server closing the connection means shutdown succeeded.
            pass
class F12Handler(BaseHandler):
    """Kickstart syntax handler for Fedora 12.

    Maps every kickstart command name to the command class that parses it
    for this release, and every data-object name to its data class.  Older
    FCx/Fy classes are reused where F12 introduced no syntax changes.
    """
    version = F12
    # Command name -> parser class.  Note: 'halt'/'poweroff'/'reboot'/'shutdown'
    # all share FC6_Reboot, and 'part'/'partition' are aliases.
    commandMap = {'auth': commands.authconfig.FC3_Authconfig, 'authconfig': commands.authconfig.FC3_Authconfig, 'autopart': commands.autopart.F12_AutoPart, 'autostep': commands.autostep.FC3_AutoStep, 'bootloader': commands.bootloader.F12_Bootloader, 'cdrom': commands.cdrom.FC3_Cdrom, 'clearpart': commands.clearpart.FC3_ClearPart, 'cmdline': commands.displaymode.FC3_DisplayMode, 'device': commands.device.F8_Device, 'deviceprobe': commands.deviceprobe.FC3_DeviceProbe, 'dmraid': commands.dmraid.FC6_DmRaid, 'driverdisk': commands.driverdisk.F12_DriverDisk, 'fcoe': commands.fcoe.F12_Fcoe, 'firewall': commands.firewall.F10_Firewall, 'firstboot': commands.firstboot.FC3_Firstboot, 'graphical': commands.displaymode.FC3_DisplayMode, 'group': commands.group.F12_Group, 'halt': commands.reboot.FC6_Reboot, 'harddrive': commands.harddrive.FC3_HardDrive, 'ignoredisk': commands.ignoredisk.F8_IgnoreDisk, 'install': commands.upgrade.F11_Upgrade, 'interactive': commands.interactive.FC3_Interactive, 'iscsi': commands.iscsi.F10_Iscsi, 'iscsiname': commands.iscsiname.FC6_IscsiName, 'keyboard': commands.keyboard.FC3_Keyboard, 'lang': commands.lang.FC3_Lang, 'logging': commands.logging.FC6_Logging, 'logvol': commands.logvol.F12_LogVol, 'mediacheck': commands.mediacheck.FC4_MediaCheck, 'method': commands.method.FC6_Method, 'monitor': commands.monitor.F10_Monitor, 'multipath': commands.multipath.FC6_MultiPath, 'network': commands.network.F9_Network, 'nfs': commands.nfs.FC6_NFS, 'part': commands.partition.F12_Partition, 'partition': commands.partition.F12_Partition, 'poweroff': commands.reboot.FC6_Reboot, 'raid': commands.raid.F12_Raid, 'reboot': commands.reboot.FC6_Reboot, 'repo': commands.repo.F11_Repo, 'rescue': commands.rescue.F10_Rescue, 'rootpw': commands.rootpw.F8_RootPw, 'selinux': commands.selinux.FC3_SELinux, 'services': commands.services.FC6_Services, 'shutdown': commands.reboot.FC6_Reboot, 'skipx': commands.skipx.FC3_SkipX, 'text': commands.displaymode.FC3_DisplayMode, 'timezone': 
commands.timezone.FC6_Timezone, 'updates': commands.updates.F7_Updates, 'upgrade': commands.upgrade.F11_Upgrade, 'url': commands.url.FC3_Url, 'user': commands.user.F12_User, 'vnc': commands.vnc.F9_Vnc, 'volgroup': commands.volgroup.FC3_VolGroup, 'xconfig': commands.xconfig.F10_XConfig, 'zerombr': commands.zerombr.F9_ZeroMbr, 'zfcp': commands.zfcp.F12_ZFCP}
    # Data-object name -> data class used by the commands above.
    dataMap = {'DriverDiskData': commands.driverdisk.F12_DriverDiskData, 'DeviceData': commands.device.F8_DeviceData, 'DmRaidData': commands.dmraid.FC6_DmRaidData, 'FcoeData': commands.fcoe.F12_FcoeData, 'GroupData': commands.group.F12_GroupData, 'IscsiData': commands.iscsi.F10_IscsiData, 'LogVolData': commands.logvol.F12_LogVolData, 'MultiPathData': commands.multipath.FC6_MultiPathData, 'NetworkData': commands.network.F8_NetworkData, 'PartData': commands.partition.F12_PartData, 'RaidData': commands.raid.F12_RaidData, 'RepoData': commands.repo.F11_RepoData, 'UserData': commands.user.F12_UserData, 'VolGroupData': commands.volgroup.FC3_VolGroupData, 'ZFCPData': commands.zfcp.F12_ZFCPData}
# NOTE(review): the two '.parametrize(...)' lines look like pytest decorators
# ('@pytest.mark.parametrize(...)') whose '@pytest.mark' prefixes were lost
# in extraction — confirm against the original source.
.parametrize(['alias', 'dtype'], zip(dtype_names, dtype_types), ids=[str(dtype) for dtype in dtype_names])
.parametrize('func', [rand_herm, rand_unitary, rand_dm, rand_ket, rand_stochastic, rand_super, rand_super_bcsz, rand_kraus_map])
def test_random_dtype(func, alias, dtype):
    """Random-object constructors should honour CoreOptions.default_dtype."""
    with CoreOptions(default_dtype=alias):
        object = func(2)
        if isinstance(object, Qobj):
            assert isinstance(object.data, dtype)
        else:
            # Some constructors return an iterable of Qobj; check each one.
            for obj in object:
                assert isinstance(obj.data, dtype)
class TableSection(Section):
    """Section whose payload is a table of fixed-layout blocks.

    Subclasses are expected to provide ``table_setup`` — a dict with
    'header' (versioned header layouts), 'cls' (block class), 'attribute'
    (constructor kwarg / attribute holding the blocks) and an optional
    'end' sentinel — plus the read_table/write_table/write_datatype
    machinery inherited from ``Section``.
    """
    # Whether a DataType header precedes the table payload.
    has_data_type_header = True

    # NOTE(review): defined with ``cls`` but without an @classmethod
    # decorator — the decorator was probably lost in extraction; confirm
    # against the original source.
    def read(cls, reader):
        """Parse the section: optional DataType header, then the table rows."""
        if cls.has_data_type_header:
            DataType.read(reader)
        ts = cls.table_setup
        # Pick the header layout matching this file's version/dialect.
        header = get_versioned(ts['header'], reader.version_dialect)
        blocks = list(cls.read_table(reader, header, ts['cls'], end=ts.get('end', end_section)))
        return cls(**{ts['attribute']: blocks})

    def write(self, writer):
        """Serialize the section: optional DataType header, then the table rows."""
        if self.has_data_type_header:
            self.write_datatype(writer)
        ts = self.table_setup
        header = get_versioned(ts['header'], writer.version_dialect)
        self.write_table(writer, header, getattr(self, ts['attribute']))
def test_upload(copy_sample):
    """Uploading a sample project POSTs both built artifacts to PyPI."""
    responses.add(responses.POST, upload.PYPI, status=200)
    sample_dir = copy_sample('module1_toml')
    with temp_pypirc(pypirc1) as pypirc, \
         patch('flit.upload.get_repository', return_value=repo_settings):
        upload.main(sample_dir / 'pyproject.toml', repo_name='pypi', pypirc_path=pypirc)
    # One POST per artifact (sdist + wheel).
    assert len(responses.calls) == 2
def decay_lr(step, boundaries, values, max_steps):
    """Return the learning rate for *step* under the configured decay schedule.

    Dispatches to the linear / cosine / sine decay implementation selected
    by ``FLAGS.decay_lr_type``.

    Args:
        step: current training step.
        boundaries: schedule boundary steps.
        values: learning-rate values associated with the boundaries.
        max_steps: total number of training steps.

    Returns:
        The decayed learning rate.

    Raises:
        ValueError: if FLAGS.decay_lr_type is not 'linear', 'cosine' or 'sine'.
    """
    if FLAGS.decay_lr_type == 'linear':
        decayed_lr = linear_decay_lr(step, boundaries, values, max_steps)
    elif FLAGS.decay_lr_type == 'cosine':
        decayed_lr = cos_decay_lr(step, boundaries, values, max_steps)
    elif FLAGS.decay_lr_type == 'sine':
        decayed_lr = sin_decay_lr(step, boundaries, values, max_steps)
    else:
        # Bug fix: the original raised with a bare '%s' placeholder and never
        # interpolated the offending value into the message.
        raise ValueError('decay_lr_type %s was not recognized.' % FLAGS.decay_lr_type)
    return decayed_lr
# NOTE(review): the bare call below looks like a decorator whose '@' prefix
# was lost in extraction ('@_SEG_HEADS_REGISTRY.register()') — confirm upstream.
_SEG_HEADS_REGISTRY.register()
class SemSegFPNHead(nn.Module):
    """Semantic-segmentation head over FPN features.

    Each input feature map gets its own "scale head" (3x3 conv + norm + ReLU
    blocks, upsampling x2 after each until the map reaches ``common_stride``);
    the per-level outputs are summed and a 1x1 conv predicts per-class logits.
    """

    def __init__(self, input_shape: Dict[(str, ShapeSpec)], *, num_classes: int, conv_dims: int, common_stride: int, loss_weight: float=1.0, norm: Optional[Union[(str, Callable)]]=None, ignore_value: int=(- 1)):
        """Build scale heads for every entry of *input_shape*.

        Args:
            input_shape: feature name -> ShapeSpec (stride/channels) of inputs.
            num_classes: number of semantic classes to predict.
            conv_dims: channel width of the scale-head convolutions.
            common_stride: output stride all features are brought to.
            loss_weight: multiplier applied to the cross-entropy loss.
            norm: normalization spec passed to get_norm (None/'' disables it).
            ignore_value: target label excluded from the loss.
        """
        super().__init__()
        # Process features shallowest-stride first so indices line up below.
        input_shape = sorted(input_shape.items(), key=(lambda x: x[1].stride))
        if (not len(input_shape)):
            raise ValueError('SemSegFPNHead(input_shape=) cannot be empty!')
        self.in_features = [k for (k, v) in input_shape]
        feature_strides = [v.stride for (k, v) in input_shape]
        feature_channels = [v.channels for (k, v) in input_shape]
        self.ignore_value = ignore_value
        self.common_stride = common_stride
        self.loss_weight = loss_weight
        self.scale_heads = []
        for (in_feature, stride, channels) in zip(self.in_features, feature_strides, feature_channels):
            head_ops = []
            # One conv(+upsample) block per factor-of-2 gap to common_stride.
            head_length = max(1, int((np.log2(stride) - np.log2(self.common_stride))))
            for k in range(head_length):
                norm_module = get_norm(norm, conv_dims)
                conv = Conv2d((channels if (k == 0) else conv_dims), conv_dims, kernel_size=3, stride=1, padding=1, bias=(not norm), norm=norm_module, activation=F.relu)
                weight_init.c2_msra_fill(conv)
                head_ops.append(conv)
                if (stride != self.common_stride):
                    head_ops.append(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False))
            self.scale_heads.append(nn.Sequential(*head_ops))
            # Register under the feature name so parameters get tracked.
            self.add_module(in_feature, self.scale_heads[(- 1)])
        self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
        weight_init.c2_msra_fill(self.predictor)

    # NOTE(review): takes ``cls`` but has no @classmethod decorator — the
    # decorator was probably lost in extraction; confirm upstream.
    def from_config(cls, cfg, input_shape: Dict[(str, ShapeSpec)]):
        """Translate a detectron2-style config into constructor kwargs."""
        return {'input_shape': {k: v for (k, v) in input_shape.items() if (k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES)}, 'ignore_value': cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, 'num_classes': cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, 'conv_dims': cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM, 'common_stride': cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, 'norm': cfg.MODEL.SEM_SEG_HEAD.NORM, 'loss_weight': cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT}

    def forward(self, features, targets=None):
        """Return (None, losses) in training; (logits, {}) in inference."""
        x = self.layers(features)
        if self.training:
            return (None, self.losses(x, targets))
        else:
            # Upsample logits to input resolution for inference.
            x = F.interpolate(x, scale_factor=self.common_stride, mode='bilinear', align_corners=False)
            return (x, {})

    def layers(self, features):
        """Sum the per-level scale-head outputs, then predict class logits."""
        for (i, f) in enumerate(self.in_features):
            if (i == 0):
                x = self.scale_heads[i](features[f])
            else:
                x = (x + self.scale_heads[i](features[f]))
        x = self.predictor(x)
        return x

    def losses(self, predictions, targets):
        """Cross-entropy at input resolution, scaled by ``loss_weight``."""
        # float() guards against half-precision logits in mixed-precision runs.
        predictions = predictions.float()
        predictions = F.interpolate(predictions, scale_factor=self.common_stride, mode='bilinear', align_corners=False)
        loss = F.cross_entropy(predictions, targets, reduction='mean', ignore_index=self.ignore_value)
        losses = {'loss_sem_seg': (loss * self.loss_weight)}
        return losses
def test_items_bounding_rect_given_items(view):
    """itemsBoundingRect(items=...) should span only the given items."""
    positions = [(4, -6), (-33, 22), (1000, 1000)]
    items = []
    for x, y in positions:
        item = BeePixmapItem(QtGui.QImage())
        view.scene.addItem(item)
        item.setSelected(True)
        item.setPos(x, y)
        items.append(item)
    item1, item2, item3 = items

    with patch.object(item1, 'bounding_rect_unselected',
                      return_value=QtCore.QRectF(0, 0, 100, 100)), \
         patch.object(item2, 'bounding_rect_unselected',
                      return_value=QtCore.QRectF(0, 0, 100, 100)):
        rect = view.scene.itemsBoundingRect(items=[item1, item2])

    # item3 at (1000, 1000) is excluded; the rect spans items 1 and 2 only.
    assert rect.topLeft().x() == -33
    assert rect.topLeft().y() == -6
    assert rect.bottomRight().x() == 104
    assert rect.bottomRight().y() == 122
# NOTE(review): '_on_failure' and the '.parametrize(...)' lines appear to be
# pytest decorators ('@raise_on_failure'?, '@pytest.mark.parametrize(...)')
# whose '@...' prefixes were lost in extraction — confirm upstream.
_on_failure
.parametrize('number_of_nodes', [2])
.parametrize('enable_rest_api', [True])
def test_api_payments_with_hash_no_secret(api_server_test_instance, raiden_network: List[RaidenService], token_addresses, pfs_mock):
    """A payment providing a secret_hash but no secret must be rejected (409)."""
    (_, app1) = raiden_network
    token_address = token_addresses[0]
    target_address = app1.address
    secret_hash = factories.make_secret_hash()
    pfs_mock.add_apps(raiden_network)
    # POST a payment that supplies 'secret_hash' without the matching 'secret'.
    request = grequests.post(api_url_for(api_server_test_instance, 'token_target_paymentresource', token_address=to_checksum_address(token_address), target_address=to_checksum_address(target_address)), json={'amount': DEFAULT_AMOUNT, 'identifier': DEFAULT_ID, 'secret_hash': to_hex(secret_hash)})
    response = request.send().response
    assert_proper_response(response, status_code=HTTPStatus.CONFLICT)
def calculate_sentence_transformer_embedding(text_to_encode, args):
    """Embed texts with a SentenceTransformer and mean-center the result.

    Args:
        text_to_encode: sequence of strings to embed.
        args: namespace providing ``embedding_model`` (model name/path).

    Returns:
        torch.Tensor of shape (len(text_to_encode), dim), centered so the
        mean embedding is zero.
    """
    num = len(text_to_encode)
    emb_model = SentenceTransformer(args.embedding_model)
    embeddings = []
    # Fix: iterate the tqdm object directly so progress tracks the loop and
    # the bar is closed when iteration ends (the original created the bar,
    # iterated a separate range, updated it by hand, and never closed it).
    for i in tqdm(range(0, num, 20), desc='calculate embeddings'):
        # Encode in batches of 20 texts.
        embeddings += emb_model.encode(text_to_encode[i:(i + 20)]).tolist()
    embeddings = torch.tensor(embeddings)
    # Zero-center across the corpus.
    mean_embeddings = torch.mean(embeddings, 0, True)
    embeddings = (embeddings - mean_embeddings)
    return embeddings
def GetClipboardFormats():
    """Return the list of format ids currently on the Windows clipboard.

    Returns:
        List of integer clipboard-format identifiers, in enumeration order.
    """
    win32clipboard.OpenClipboard()
    try:
        available_formats = []
        current_format = 0
        while True:
            # EnumClipboardFormats(0) starts enumeration; it returns 0 once
            # all formats have been listed.
            current_format = win32clipboard.EnumClipboardFormats(current_format)
            if not current_format:
                break
            available_formats.append(current_format)
    finally:
        # Bug fix: always release the clipboard, even if enumeration raises;
        # the original would otherwise leave it open and block other processes.
        win32clipboard.CloseClipboard()
    return available_formats
def _strategy_dispatch(T, limit):
    """Resolve a (type, limit) pair into a hypothesis search strategy.

    - an explicit SearchStrategy passes through unchanged;
    - list types recurse via bitslists, bitstruct classes via bitstructs;
    - plain Bits types map to bits(), optionally constrained by a
      step-1 ``range`` of allowed values.
    """
    # Caller already supplied a strategy: use it verbatim.
    if isinstance(limit, st.SearchStrategy):
        return limit
    # Container / struct types recurse into element strategies.
    if isinstance(T, list):
        return bitslists(T, limit)
    if is_bitstruct_class(T):
        return bitstructs(T, limit)
    # Leaf case: T must be a Bits type.
    assert issubclass(T, Bits)
    if limit is None:
        return bits(T.nbits)
    # Only a contiguous, ascending, step-1 range is accepted as a bound.
    assert isinstance(limit, range), f'We only accept range as min/max value specifier, not {type(limit)}'
    assert limit.step == 1, f'We only accept step=1 range, not {limit}.'
    assert limit.start < limit.stop, f'We only accept start < stop range, not {limit}'
    return bits(T.nbits, False, limit.start, limit.stop - 1)
# NOTE(review): '_db' looks like a pytest marker (e.g. '@pytest.mark.django_db')
# whose '@...' prefix was lost in extraction — confirm upstream.
_db
def test_query_job_board(rf, graphql_client, job_listing_factory, conference_factory):
    """Only listings of the queried conference are returned, with all fields."""
    listing = job_listing_factory()
    # A listing for a different conference must be filtered out of the result.
    job_listing_factory(conference=conference_factory())
    request = rf.get('/')
    resp = _query_job_board(graphql_client, conference=listing.conference.code)
    assert (len(resp['data']['jobListings']) == 1)
    assert ({'id': str(listing.id), 'title': str(listing.title), 'slug': str(listing.slug), 'description': str(listing.description), 'company': str(listing.company), 'companyLogo': get_image_url_from_request(request, listing.company_logo), 'applyUrl': str(listing.apply_url)} == resp['data']['jobListings'][0])
class BoundingBox():
    """A single ground-truth or detected bounding box for one image.

    Coordinates may be supplied in absolute pixels or relative to the image
    size, in either XYWH (x, y, width, height) or XYX2Y2 (x1, y1, x2, y2)
    format; internally both the corner and width/height representations are
    kept in absolute pixels.
    """

    def __init__(self, imageName, classId, x, y, w, h,
                 typeCoordinates=CoordinatesType.Absolute, imgSize=None,
                 bbType=BBType.GroundTruth, classConfidence=None,
                 format=BBFormat.XYWH):
        """Create a bounding box.

        Args:
            imageName: identifier of the image the box belongs to.
            classId: class label of the object.
            x, y, w, h: coordinates; meaning depends on *format* (XYWH:
                x, y, width, height; XYX2Y2: x1, y1, x2, y2) and on
                *typeCoordinates* (absolute pixels vs. relative values).
            typeCoordinates: CoordinatesType.Absolute or .Relative.
            imgSize: (width, height); required for relative coordinates.
            bbType: BBType.GroundTruth or BBType.Detected.
            classConfidence: detection confidence; required when Detected.
            format: BBFormat of the supplied coordinates.

        Raises:
            IOError: if a required companion argument is missing, or the
                format is unsupported for relative coordinates.
        """
        self._imageName = imageName
        self._typeCoordinates = typeCoordinates
        if (typeCoordinates == CoordinatesType.Relative) and (imgSize is None):
            raise IOError("Parameter 'imgSize' is required. It is necessary to inform the image size.")
        if (bbType == BBType.Detected) and (classConfidence is None):
            raise IOError("For bbType='Detection', it is necessary to inform the classConfidence value.")
        self._classConfidence = classConfidence
        self._bbType = bbType
        self._classId = classId
        self._format = format
        if typeCoordinates == CoordinatesType.Relative:
            # Relative input: convert to absolute pixel corners first.
            (self._x, self._y, self._w, self._h) = convertToAbsoluteValues(imgSize, (x, y, w, h))
            self._width_img = imgSize[0]
            self._height_img = imgSize[1]
            if format == BBFormat.XYWH:
                # convertToAbsoluteValues returns corners, so the third and
                # fourth values are really x2/y2; derive width/height.
                self._x2 = self._w
                self._y2 = self._h
                self._w = self._x2 - self._x
                self._h = self._y2 - self._y
            else:
                raise IOError('For relative coordinates, the format must be XYWH (x,y,width,height)')
        else:
            self._x = x
            self._y = y
            if format == BBFormat.XYWH:
                self._w = w
                self._h = h
                self._x2 = self._x + self._w
                self._y2 = self._y + self._h
            else:
                # XYX2Y2: w/h arguments actually carry the far corner.
                self._x2 = w
                self._y2 = h
                self._w = self._x2 - self._x
                self._h = self._y2 - self._y
        if imgSize is None:
            self._width_img = None
            self._height_img = None
        else:
            self._width_img = imgSize[0]
            self._height_img = imgSize[1]

    def getAbsoluteBoundingBox(self, format=BBFormat.XYWH):
        """Return absolute coordinates as XYWH or XYX2Y2 per *format*."""
        if format == BBFormat.XYWH:
            return (self._x, self._y, self._w, self._h)
        elif format == BBFormat.XYX2Y2:
            return (self._x, self._y, self._x2, self._y2)

    def getRelativeBoundingBox(self, imgSize=None):
        """Return coordinates relative to *imgSize* (or the stored image size)."""
        if (imgSize is None) and (self._width_img is None) and (self._height_img is None):
            raise IOError("Parameter 'imgSize' is required. It is necessary to inform the image size.")
        if imgSize is not None:
            return convertToRelativeValues((imgSize[0], imgSize[1]), (self._x, self._x2, self._y, self._y2))
        else:
            return convertToRelativeValues((self._width_img, self._height_img), (self._x, self._x2, self._y, self._y2))

    def getImageName(self):
        """Return the image identifier."""
        return self._imageName

    def getConfidence(self):
        """Return the detection confidence (None for ground truth)."""
        return self._classConfidence

    def getFormat(self):
        """Return the BBFormat the box was created with."""
        return self._format

    def getClassId(self):
        """Return the class label."""
        return self._classId

    def getImageSize(self):
        """Return (image width, image height); may be (None, None)."""
        return (self._width_img, self._height_img)

    def getCoordinatesType(self):
        """Return the CoordinatesType the box was created with."""
        return self._typeCoordinates

    def getBBType(self):
        """Return BBType.GroundTruth or BBType.Detected."""
        return self._bbType

    @staticmethod
    def compare(det1, det2):
        """Return True if two boxes match in class, confidence, coordinates and image size.

        Bug fix: the original compared ``det1.classConfidence`` (a nonexistent
        attribute) against ``det2.classConfidenc()`` (a typo), which raised
        AttributeError at runtime; both sides now use ``getConfidence()``.
        The ``@staticmethod`` decorator is restored so the method also works
        when called on an instance.
        """
        det1BB = det1.getAbsoluteBoundingBox()
        det1ImgSize = det1.getImageSize()
        det2BB = det2.getAbsoluteBoundingBox()
        det2ImgSize = det2.getImageSize()
        if ((det1.getClassId() == det2.getClassId()) and
                (det1.getConfidence() == det2.getConfidence()) and
                (det1BB[0] == det2BB[0]) and
                (det1BB[1] == det2BB[1]) and
                (det1BB[2] == det2BB[2]) and
                (det1BB[3] == det2BB[3]) and
                (det1ImgSize[0] == det2ImgSize[0]) and
                (det1ImgSize[1] == det2ImgSize[1])):
            return True
        return False

    @staticmethod
    def clone(boundingBox):
        """Return a new BoundingBox copying every attribute of *boundingBox*."""
        absBB = boundingBox.getAbsoluteBoundingBox(format=BBFormat.XYWH)
        newBoundingBox = BoundingBox(
            boundingBox.getImageName(), boundingBox.getClassId(),
            absBB[0], absBB[1], absBB[2], absBB[3],
            typeCoordinates=boundingBox.getCoordinatesType(),
            imgSize=boundingBox.getImageSize(),
            bbType=boundingBox.getBBType(),
            classConfidence=boundingBox.getConfidence(),
            format=BBFormat.XYWH)
        return newBoundingBox
def _encode_python_objects(obj):
    """Encode a Python object into a CF-compatible value.

    A flat (non-nested) list or tuple is encoded element-wise; anything else
    is serialized directly, falling back to JSON (via AttributeEncoder) when
    direct encoding fails.
    """
    is_flat_sequence = isinstance(obj, (list, tuple)) and not any(
        isinstance(item, (list, tuple)) for item in obj
    )
    if is_flat_sequence:
        return [_encode_to_cf(item) for item in obj]
    try:
        return _encode_object(obj)
    except ValueError:
        # Not directly encodable: decode first, then JSON-serialize and
        # strip the surrounding quotes JSON puts around scalar strings.
        decoded = _try_decode_object(obj)
        return json.dumps(decoded, cls=AttributeEncoder).strip('"')
def test_regex_reversal() -> None:
    """parse(p).reversed() must equal the parse of the reversed pattern."""
    cases = [
        ('b', 'b'),
        ('e*', 'e*'),
        ('bear', 'raeb'),
        ('beer', 'reeb'),
        ('abc|def|ghi', 'cba|fed|ihg'),
        ('(abc)*d', 'd(cba)*'),
    ]
    for pattern, expected in cases:
        assert parse(pattern).reversed() == parse(expected)
def DiffAugment(x, policy=None, channels_first=True):
    """Apply the differentiable augmentations named in *policy* to a batch.

    Args:
        x: image batch tensor.
        policy: iterable of AUGMENT_FNS keys; None returns x untouched.
        channels_first: if False, x is NHWC and is transposed to NCHW
            around the augmentation functions.

    Returns:
        The (contiguous) augmented batch, or x unchanged when policy is None.
    """
    if policy is None:
        return x
    if not channels_first:
        x = x.permute(0, 3, 1, 2)  # NHWC -> NCHW for the augment fns
    for name in policy:
        for fn in AUGMENT_FNS[name]:
            x = fn(x)
    if not channels_first:
        x = x.permute(0, 2, 3, 1)  # back to NHWC
    x = x.contiguous()
    return x
# NOTE(review): '_module()' looks like a registry decorator (likely
# '@DATASETS.register_module()') whose prefix was lost in extraction —
# confirm against the original source.
_module()
class Body3DH36MDataset(Kpt3dSviewKpt2dDataset):
    """Human3.6M dataset for single-view 3D body pose estimation.

    Supplies 2D keypoints (ground truth, precomputed detections, or
    pipeline-generated) together with 3D targets for the 17-joint H36M
    skeleton, builds temporal sample sequences per video, and evaluates
    with MPJPE / P-MPJPE / N-MPJPE.
    """

    # The 17 joints of the standard Human3.6M skeleton.
    JOINT_NAMES = ['Root', 'RHip', 'RKnee', 'RFoot', 'LHip', 'LKnee', 'LFoot', 'Spine', 'Thorax', 'NeckBase', 'Head', 'LShoulder', 'LElbow', 'LWrist', 'RShoulder', 'RElbow', 'RWrist']
    # Where 2D joints come from: dataset GT, a detection file, or the pipeline.
    SUPPORTED_JOINT_2D_SRC = {'gt', 'detection', 'pipeline'}
    ALLOWED_METRICS = {'mpjpe', 'p-mpjpe', 'n-mpjpe'}

    def __init__(self, ann_file, img_prefix, data_cfg, pipeline, dataset_info=None, test_mode=False):
        """Load dataset_info from the default H36M config when not provided."""
        if (dataset_info is None):
            warnings.warn('dataset_info is missing. Check for details.', DeprecationWarning)
            cfg = Config.fromfile('configs/_base_/datasets/h36m.py')
            dataset_info = cfg._cfg_dict['dataset_info']
        super().__init__(ann_file, img_prefix, data_cfg, pipeline, dataset_info=dataset_info, test_mode=test_mode)

    def load_config(self, data_cfg):
        """Read H36M-specific options (2D source, camera params, filters) from data_cfg."""
        super().load_config(data_cfg)
        self.joint_2d_src = data_cfg.get('joint_2d_src', 'gt')
        if (self.joint_2d_src not in self.SUPPORTED_JOINT_2D_SRC):
            raise ValueError(f'Unsupported joint_2d_src "{self.joint_2d_src}". Supported options are {self.SUPPORTED_JOINT_2D_SRC}')
        self.joint_2d_det_file = data_cfg.get('joint_2d_det_file', None)
        self.need_camera_param = data_cfg.get('need_camera_param', False)
        if self.need_camera_param:
            assert ('camera_param_file' in data_cfg)
            self.camera_param = self._load_camera_param(data_cfg['camera_param_file'])
        ann_info = {}
        ann_info['use_different_joint_weights'] = False
        # '_all_' keeps every action/subject; otherwise filter to the given set.
        actions = data_cfg.get('actions', '_all_')
        self.actions = set((actions if isinstance(actions, (list, tuple)) else [actions]))
        subjects = data_cfg.get('subjects', '_all_')
        self.subjects = set((subjects if isinstance(subjects, (list, tuple)) else [subjects]))
        self.ann_info.update(ann_info)

    def load_annotations(self):
        """Load annotations; replace 2D joints with detections if configured."""
        data_info = super().load_annotations()
        if (self.joint_2d_src == 'gt'):
            # Ground-truth 2D joints are already in data_info; keep them.
            data_info['joints_2d'] = data_info['joints_2d']
        elif (self.joint_2d_src == 'detection'):
            data_info['joints_2d'] = self._load_joint_2d_detection(self.joint_2d_det_file)
            assert (data_info['joints_2d'].shape[0] == data_info['joints_3d'].shape[0])
            assert (data_info['joints_2d'].shape[2] == 3)
        elif (self.joint_2d_src == 'pipeline'):
            # 2D joints will be generated inside the data pipeline.
            pass
        else:
            raise NotImplementedError(f'Unhandled joint_2d_src option {self.joint_2d_src}')
        return data_info

    # NOTE(review): takes only ``imgname`` (no self/cls) yet is called as
    # ``self._parse_h36m_imgname(imgname)`` — an @staticmethod decorator was
    # probably lost in extraction; confirm upstream.
    def _parse_h36m_imgname(imgname):
        """Split an H36M image name 'SUBJ_ACTION.CAMERA_...' into its parts."""
        (subj, rest) = osp.basename(imgname).split('_', 1)
        (action, rest) = rest.split('.', 1)
        (camera, rest) = rest.split('_', 1)
        return (subj, action, camera)

    def build_sample_indices(self):
        """Group frames per (subject, action, camera) video and cut sequences.

        Each sample is a list of ``seq_len`` frame indices spaced by
        ``seq_frame_interval``; with ``temporal_padding`` enabled, border
        frames are replicated so every frame yields a sample.  Finally a
        random contiguous ``subset`` fraction of the samples is kept.
        """
        video_frames = defaultdict(list)
        for (idx, imgname) in enumerate(self.data_info['imgnames']):
            (subj, action, camera) = self._parse_h36m_imgname(imgname)
            if (('_all_' not in self.actions) and (action not in self.actions)):
                continue
            if (('_all_' not in self.subjects) and (subj not in self.subjects)):
                continue
            video_frames[(subj, action, camera)].append(idx)
        sample_indices = []
        # Total temporal span covered by one sample, in frames.
        _len = (((self.seq_len - 1) * self.seq_frame_interval) + 1)
        _step = self.seq_frame_interval
        for (_, _indices) in sorted(video_frames.items()):
            n_frame = len(_indices)
            if self.temporal_padding:
                # Causal: all context frames precede the target frame.
                if self.causal:
                    frames_left = (self.seq_len - 1)
                    frames_right = 0
                else:
                    frames_left = ((self.seq_len - 1) // 2)
                    frames_right = frames_left
                for i in range(n_frame):
                    # Replicate the first/last frame where the window would
                    # run past the video boundaries.
                    pad_left = max(0, (frames_left - (i // _step)))
                    pad_right = max(0, (frames_right - (((n_frame - 1) - i) // _step)))
                    start = max((i % _step), (i - (frames_left * _step)))
                    end = min((n_frame - (((n_frame - 1) - i) % _step)), ((i + (frames_right * _step)) + 1))
                    sample_indices.append(((([_indices[0]] * pad_left) + _indices[start:end:_step]) + ([_indices[(- 1)]] * pad_right)))
            else:
                # No padding: only windows fully inside the video are used.
                seqs_from_video = [_indices[i:(i + _len):_step] for i in range(0, ((n_frame - _len) + 1))]
                sample_indices.extend(seqs_from_video)
        assert (0 < self.subset <= 1)
        subset_size = int((len(sample_indices) * self.subset))
        # Keep a random contiguous slice of the requested fraction.
        start = np.random.randint(0, ((len(sample_indices) - subset_size) + 1))
        end = (start + subset_size)
        return sample_indices[start:end]

    def _load_joint_2d_detection(self, det_file):
        """Load precomputed 2D joint detections from a .npy file as float32."""
        joints_2d = np.load(det_file).astype(np.float32)
        return joints_2d

    def evaluate(self, outputs, res_folder, metric='mpjpe', logger=None, **kwargs):
        """Dump predictions to result_keypoints.json and compute MPJPE metrics.

        Args:
            outputs: list of dicts with 'preds' and 'target_image_paths'.
            res_folder: directory the result json is written to.
            metric: one metric name or a list drawn from ALLOWED_METRICS.

        Returns:
            OrderedDict mapping metric names (overall and per action
            category) to their values.
        """
        metrics = (metric if isinstance(metric, list) else [metric])
        for _metric in metrics:
            if (_metric not in self.ALLOWED_METRICS):
                raise ValueError(f'Unsupported metric "{_metric}" for human3.6 dataset.Supported metrics are {self.ALLOWED_METRICS}')
        res_file = osp.join(res_folder, 'result_keypoints.json')
        kpts = []
        for output in outputs:
            preds = output['preds']
            image_paths = output['target_image_paths']
            batch_size = len(image_paths)
            for i in range(batch_size):
                target_id = self.name2id[image_paths[i]]
                kpts.append({'keypoints': preds[i], 'target_id': target_id})
        mmcv.dump(kpts, res_file)
        name_value_tuples = []
        for _metric in metrics:
            if (_metric == 'mpjpe'):
                _nv_tuples = self._report_mpjpe(kpts)
            elif (_metric == 'p-mpjpe'):
                _nv_tuples = self._report_mpjpe(kpts, mode='p-mpjpe')
            elif (_metric == 'n-mpjpe'):
                _nv_tuples = self._report_mpjpe(kpts, mode='n-mpjpe')
            else:
                raise NotImplementedError
            name_value_tuples.extend(_nv_tuples)
        return OrderedDict(name_value_tuples)

    def _report_mpjpe(self, keypoint_results, mode='mpjpe'):
        """Compute MPJPE (optionally Procrustes- or scale-aligned) overall and per action."""
        preds = []
        gts = []
        masks = []
        action_category_indices = defaultdict(list)
        for (idx, result) in enumerate(keypoint_results):
            pred = result['keypoints']
            target_id = result['target_id']
            # joints_3d stores xyz plus a visibility flag in the last column.
            (gt, gt_visible) = np.split(self.data_info['joints_3d'][target_id], [3], axis=(- 1))
            preds.append(pred)
            gts.append(gt)
            masks.append(gt_visible)
            # Bucket results by action category (e.g. 'Sitting' from 'Sitting_1').
            action = self._parse_h36m_imgname(self.data_info['imgnames'][target_id])[1]
            action_category = action.split('_')[0]
            action_category_indices[action_category].append(idx)
        preds = np.stack(preds)
        gts = np.stack(gts)
        masks = (np.stack(masks).squeeze((- 1)) > 0)
        err_name = mode.upper()
        if (mode == 'mpjpe'):
            alignment = 'none'
        elif (mode == 'p-mpjpe'):
            alignment = 'procrustes'
        elif (mode == 'n-mpjpe'):
            alignment = 'scale'
        else:
            raise ValueError(f'Invalid mode: {mode}')
        error = keypoint_mpjpe(preds, gts, masks, alignment)
        name_value_tuples = [(err_name, error)]
        for (action_category, indices) in action_category_indices.items():
            _error = keypoint_mpjpe(preds[indices], gts[indices], masks[indices])
            name_value_tuples.append((f'{err_name}_{action_category}', _error))
        return name_value_tuples

    def _load_camera_param(self, camera_param_file):
        """Load the (subject, camera) -> parameters mapping from file."""
        return mmcv.load(camera_param_file)

    def get_camera_param(self, imgname):
        """Return the camera parameters for the image's subject/camera pair."""
        assert hasattr(self, 'camera_param')
        (subj, _, camera) = self._parse_h36m_imgname(imgname)
        return self.camera_param[(subj, camera)]
_ARCH_REGISTRY.register()
class SSRCNN(nn.Module):
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.backbone = build_backbone(cfg)
self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
self.from_config(cfg)
self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
self.ss_head = build_ss_head(cfg, self.backbone.bottom_up.output_shape())
for i in range(len(self.ss_head)):
setattr(self, 'ss_head_{}'.format(i), self.ss_head[i])
self.to(self.device)
def from_config(self, cfg):
self.ss_only = cfg.MODEL.SS.ONLY
self.feat_level = cfg.MODEL.SS.FEAT_LEVEL
assert (len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD))
num_channels = len(cfg.MODEL.PIXEL_MEAN)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(num_channels, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(num_channels, 1, 1)
self.normalizer = (lambda x: ((x - pixel_mean) / pixel_std))
def forward(self, batched_inputs):
if (not self.training):
return self.inference(batched_inputs)
losses = {}
accuracies = {}
for i in range(len(self.ss_head)):
head = getattr(self, 'ss_head_{}'.format(i))
if (head.input != 'images'):
continue
(out, tar, ss_losses) = head(batched_inputs, self.backbone.bottom_up, self.feat_level)
losses.update(ss_losses)
acc = ((out.argmax(axis=1) == tar).float().mean().item() * 100)
accuracies['accuracy_ss_{}'.format(head.name)] = {'accuracy': acc, 'num': len(tar)}
images = self.preprocess_image(batched_inputs)
if ('instances' in batched_inputs[0]):
gt_instances = [x['instances'].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator:
(proposals, proposal_losses) = self.proposal_generator(images, features, gt_instances)
else:
assert ('proposals' in batched_inputs[0])
proposals = [x['proposals'].to(self.device) for x in batched_inputs]
proposal_losses = {}
(_, detector_losses) = self.roi_heads(images, features, proposals, gt_instances)
if isinstance(detector_losses, tuple):
(detector_losses, box_features) = detector_losses
for i in range(len(self.ss_head)):
head = getattr(self, 'ss_head_{}'.format(i))
if (head.input != 'ROI'):
continue
(ss_losses, acc) = head(box_features)
losses.update(ss_losses)
accuracies['accuracy_ss_{}'.format(head.name)] = {'accuracy': acc, 'num': 1}
losses.update(detector_losses)
losses.update(proposal_losses)
for (k, v) in losses.items():
assert (math.isnan(v) == False), batched_inputs
return losses
def det_inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
    """Detection inference returning ``(results, box_features)``.

    When ``detected_instances`` is given, only the per-box heads are run on
    those boxes; otherwise proposals come from the RPN (or from the inputs).
    With ``do_postprocess`` the instances are rescaled to each input's
    original height/width. ``box_features`` is the optional extra output of
    roi_heads (None when unavailable).
    """
    assert (not self.training)
    images = self.preprocess_image(batched_inputs)
    features = self.backbone(images.tensor)
    if (detected_instances is None):
        if self.proposal_generator:
            (proposals, _) = self.proposal_generator(images, features, None)
        else:
            # No RPN: precomputed proposals must be supplied with the inputs.
            assert ('proposals' in batched_inputs[0])
            proposals = [x['proposals'].to(self.device) for x in batched_inputs]
        (results, others) = self.roi_heads(images, features, proposals, None)
        # Some roi_heads variants also return pooled box features.
        if isinstance(others, tuple):
            (others, box_features) = others
        else:
            box_features = None
    else:
        # Re-use externally detected boxes; run only the per-box heads.
        detected_instances = [x.to(self.device) for x in detected_instances]
        results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
        box_features = None
    if do_postprocess:
        processed_results = []
        for (results_per_image, input_per_image, image_size) in zip(results, batched_inputs, images.image_sizes):
            # Fall back to the padded image size when the original
            # height/width are not recorded in the input dict.
            height = input_per_image.get('height', image_size[0])
            width = input_per_image.get('width', image_size[1])
            r = detector_postprocess(results_per_image, height, width)
            processed_results.append({'instances': r})
        return (processed_results, box_features)
    else:
        return (results, box_features)
def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
    """Run detection inference, discarding the auxiliary box features.

    Thin wrapper around :meth:`det_inference` that returns only the first
    element of its ``(results, box_features)`` pair.
    """
    results, _box_features = self.det_inference(
        batched_inputs, detected_instances, do_postprocess)
    return results
def preprocess_image(self, batched_inputs):
    """Move each input image to the model device, normalize it, and batch
    the results into an ImageList padded to the backbone's size divisibility."""
    normalized = [
        self.normalizer(entry['image'].to(self.device)) for entry in batched_inputs
    ]
    return ImageList.from_tensors(normalized, self.backbone.size_divisibility)
def prefetch_test(opt):
    """Run detection/tracking evaluation over a dataset with a prefetching loader.

    Builds the dataset and detector from ``opt``, optionally merges
    previously saved detections (``opt.load_results``), runs the detector
    image by image while reporting timing stats, optionally saves raw
    results, and finally calls the dataset's evaluation.
    """
    if (not opt.not_set_cuda_env):
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.test_dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    split = ('val' if (not opt.trainval) else 'test')
    dataset = Dataset(opt, split)
    detector = Detector(opt)
    if (opt.load_results != ''):
        load_results = json.load(open(opt.load_results, 'r'))
        # Down-weight loaded detections of ignored categories so they are
        # effectively discarded (score -1).
        for img_id in load_results:
            for k in range(len(load_results[img_id])):
                if ((load_results[img_id][k]['class'] - 1) in opt.ignore_loaded_cats):
                    load_results[img_id][k]['score'] = (- 1)
    else:
        load_results = {}
    data_loader = torch.utils.data.DataLoader(PrefetchDataset(opt, dataset, detector.pre_process), batch_size=1, shuffle=False, num_workers=1, pin_memory=True)
    results = {}
    num_iters = (len(data_loader) if (opt.num_iters < 0) else opt.num_iters)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'track']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    if opt.use_loaded_results:
        # Short-circuit: reuse the loaded detections and skip the loop below.
        for img_id in data_loader.dataset.images:
            results[img_id] = load_results['{}'.format(img_id)]
        num_iters = 0
    for (ind, (img_id, pre_processed_images)) in enumerate(data_loader):
        if (ind >= num_iters):
            break
        if (opt.tracking and ('is_first_frame' in pre_processed_images)):
            # New video: seed the tracker with loaded detections if present.
            if ('{}'.format(int(img_id.numpy().astype(np.int32)[0])) in load_results):
                pre_processed_images['meta']['pre_dets'] = load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print()
                print('No pre_dets for', int(img_id.numpy().astype(np.int32)[0]), '. Use empty initialization.')
                pre_processed_images['meta']['pre_dets'] = []
            detector.reset_tracking()
            print('Start tracking video', int(pre_processed_images['video_id']))
        if opt.public_det:
            # Public-detection mode: current-frame detections come from file.
            if ('{}'.format(int(img_id.numpy().astype(np.int32)[0])) in load_results):
                pre_processed_images['meta']['cur_dets'] = load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print('No cur_dets for', int(img_id.numpy().astype(np.int32)[0]))
                pre_processed_images['meta']['cur_dets'] = []
        ret = detector.run(pre_processed_images)
        results[int(img_id.numpy().astype(np.int32)[0])] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = (Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(t, tm=avg_time_stats[t]))
        if (opt.print_iter > 0):
            if ((ind % opt.print_iter) == 0):
                print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
        else:
            bar.next()
    bar.finish()
    if opt.save_results:
        print('saving results to', (opt.save_dir + '/save_results_{}{}.json'.format(opt.test_dataset, opt.dataset_version)))
        json.dump(_to_list(copy.deepcopy(results)), open((opt.save_dir + '/save_results_{}{}.json'.format(opt.test_dataset, opt.dataset_version)), 'w'))
    dataset.run_eval(results, opt.save_dir)
# Fix: the decorator lines lost their '@pytest.mark.' prefix, leaving
# module-level '.parametrize(...)' fragments that are a syntax error.
@pytest.mark.parametrize('in_memory_ds', [True, False])
@pytest.mark.filterwarnings('ignore::sgkit.io.vcfzarr_reader.DimensionNameForFixedFormatFieldWarning')
def test_write_vcf(shared_datadir, tmp_path, in_memory_ds):
    """Round-trip sample.vcf.gz through a zarr store back to VCF and check
    samples, the first variant's fields, and overall closeness to the input."""
    path = path_for_test(shared_datadir, 'sample.vcf.gz')
    intermediate = tmp_path.joinpath('intermediate.vcf.zarr').as_posix()
    output = tmp_path.joinpath('output.vcf').as_posix()
    # Size hints let vcf_to_zarr pick fixed array sizes for this file.
    kwargs = zarr_array_sizes(path)
    vcf_to_zarr(path, intermediate, fields=['INFO/*', 'FORMAT/*'], mixed_ploidy=True, **kwargs)
    ds = load_dataset(intermediate)
    if in_memory_ds:
        ds = ds.load()
    write_vcf(ds, output)
    # Spot-check the written VCF with cyvcf2.
    v = VCF(output)
    assert (v.samples == ['NA00001', 'NA00002', 'NA00003'])
    variant = next(v)
    assert (variant.CHROM == '19')
    assert (variant.POS == 111)
    assert (variant.ID is None)
    assert (variant.REF == 'A')
    assert (variant.ALT == ['C'])
    assert (variant.QUAL == pytest.approx(9.6))
    assert (variant.FILTER is None)
    # Third genotype element is the phased flag.
    assert (variant.genotypes == [[0, 0, True], [0, 0, True], [0, 1, False]])
    assert_array_equal(variant.format('HQ'), [[10, 15], [10, 10], [3, 3]])
    assert_vcfs_close(path, output)
class BaseFeaturesCollection():
    """Compute base grasp features for every labeled semantic-grasping object.

    Reads pickled object lists from ``<data>/labeled/<session>/*.pkl``,
    publishes visualization topics for each object, calls the
    ComputeBaseFeatures service, and pickles the responses under
    ``<data>/base_features/<session>/``.
    """

    def __init__(self):
        self.compute_base_features_topic = rospy.get_param('~compute_base_features_topic', '/base_features_computation_node/compute_base_features')
        self.data_dir = rospy.get_param('~data_dir_path', os.path.join(rospkg.RosPack().get_path('rail_semantic_grasping'), 'data'))
        self.labeled_data_dir = os.path.join(self.data_dir, 'labeled')
        self.base_features_dir = os.path.join(self.data_dir, 'base_features')
        # Validate the input folders before creating any output folder.
        if (not os.path.exists(self.data_dir)):
            rospy.loginfo('Data folder {} does not exist. Exiting!'.format(self.data_dir))
            exit()
        # Bug fix: this check previously re-tested self.data_dir, so a
        # missing labeled/ folder was never detected.
        if (not os.path.exists(self.labeled_data_dir)):
            rospy.loginfo('Labeled data folder {} does not exist. Exiting!'.format(self.labeled_data_dir))
            exit()
        if (not os.path.exists(self.base_features_dir)):
            os.mkdir(self.base_features_dir)
        # Latched publishers so the last message stays visible to late
        # subscribers (e.g. RViz).
        self.markers_pub = rospy.Publisher('~data_collection/markers', MarkerArray, queue_size=10, latch=True)
        self.grasp_pub = rospy.Publisher('~data_collection/grasp', PoseStamped, queue_size=10, latch=True)
        self.marker_pub = rospy.Publisher('~data_collection/marker', Marker, queue_size=10, latch=True)
        self.color_image_pub = rospy.Publisher('~data_collection/color_image', Image, queue_size=10, latch=True)
        self.pc_pub = rospy.Publisher('~data_collection/point_cloud', PointCloud2, queue_size=10, latch=True)
        rospy.wait_for_service(self.compute_base_features_topic)
        self.compute_base_features = rospy.ServiceProxy(self.compute_base_features_topic, ComputeBaseFeatures)

    def compute_features(self):
        """Iterate all sessions and object files, computing and saving features."""
        session_dirs = glob.glob(os.path.join(self.labeled_data_dir, '*'))
        for session_dir in session_dirs:
            # Mirror the session folder name under base_features/.
            base_features_session_dir = os.path.join(self.base_features_dir, os.path.basename(session_dir))
            if (not os.path.exists(base_features_session_dir)):
                os.mkdir(base_features_session_dir)
            object_files = glob.glob(os.path.join(session_dir, '*.pkl'))
            for object_file in object_files:
                with open(object_file, 'rb') as fh:
                    semantic_objects = pickle.load(fh)
                if (not semantic_objects.objects):
                    continue
                # Publish the first object's markers and images so the run
                # can be inspected visually.
                markers = MarkerArray()
                semantic_object = semantic_objects.objects[0]
                marker = semantic_object.marker
                for semantic_part in semantic_object.parts:
                    markers.markers.append(semantic_part.marker)
                    markers.markers.append(semantic_part.text_marker)
                self.markers_pub.publish(markers)
                self.color_image_pub.publish(semantic_object.color_image)
                self.pc_pub.publish(semantic_object.point_cloud)
                self.marker_pub.publish(marker)
                try:
                    resp = self.compute_base_features(semantic_objects)
                except rospy.ServiceException:
                    rospy.loginfo('Service call failed')
                    exit()
                base_features_file = os.path.join(base_features_session_dir, os.path.basename(object_file))
                with open(base_features_file, 'wb') as fh:
                    pickle.dump(resp.base_features_list, fh)
        rospy.loginfo('Computation of base features for all objects are finished.')
class AttenSepConvLSTM2DCell(DropoutRNNCellMixin, Layer):
    """Cell for a convolutional LSTM using depthwise-separable convolutions
    for all gates plus a soft spatial-attention gate on the input.

    Structure follows Keras' ``ConvLSTM2DCell``: four gates (i, f, c, o)
    whose input and recurrent transforms are separable convolutions, with an
    extra attention branch (the ``*_a`` weights) that produces a spatial
    softmax map used to re-weight the inputs before the gate convolutions.
    State is ``[h, c]``; ``state_size`` is ``(filters, filters)``.
    """

    def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), depth_multiplier=1, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, **kwargs):
        super(AttenSepConvLSTM2DCell, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
        self.depth_multiplier = depth_multiplier
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout rates into [0, 1].
        self.dropout = min(1.0, max(0.0, dropout))
        self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
        self.state_size = (self.filters, self.filters)

    def build(self, input_shape):
        """Create the gate, recurrent, attention and bias weights."""
        if (self.data_format == 'channels_first'):
            channel_axis = 1
        else:
            channel_axis = (- 1)
        if (input_shape[channel_axis] is None):
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        # Saved so attention() can reshape the softmax map back to 2D.
        self.feat_shape = input_shape
        input_dim = input_shape[channel_axis]
        # The four gates are fused along the last weight axis (hence * 4).
        depth_kernel_shape = (self.kernel_size + (input_dim, (self.depth_multiplier * 4)))
        point_kernel_shape = ((1, 1) + ((input_dim * self.depth_multiplier), (self.filters * 4)))
        depth_kernel_a_shape = (self.kernel_size + (input_dim, self.depth_multiplier))
        point_kernel_a_shape = ((1, 1) + ((input_dim * self.depth_multiplier), input_dim))
        self.depth_kernel_shape = depth_kernel_shape
        self.point_kernel_shape = point_kernel_shape
        recurrent_depth_kernel_shape = (self.kernel_size + (self.filters, (self.depth_multiplier * 4)))
        recurrent_point_kernel_shape = ((1, 1) + ((self.filters * self.depth_multiplier), (self.filters * 4)))
        recurrent_depth_kernel_a_shape = (self.kernel_size + (self.filters, self.depth_multiplier))
        recurrent_point_kernel_a_shape = ((1, 1) + ((self.filters * self.depth_multiplier), input_dim))
        # Bug fix: these previously recorded the *input* kernel shapes
        # (depth_kernel_shape / point_kernel_shape) instead of the
        # recurrent ones.
        self.recurrent_depth_kernel_shape = recurrent_depth_kernel_shape
        self.recurrent_point_kernel_shape = recurrent_point_kernel_shape
        self.depth_kernel = self.add_weight(shape=depth_kernel_shape, initializer=self.kernel_initializer, name='depth_kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        self.point_kernel = self.add_weight(shape=point_kernel_shape, initializer=self.kernel_initializer, name='point_kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        self.depth_kernel_a = self.add_weight(shape=depth_kernel_a_shape, initializer=self.kernel_initializer, name='depth_kernel_a', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        self.point_kernel_a = self.add_weight(shape=point_kernel_a_shape, initializer=self.kernel_initializer, name='point_kernel_a', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        self.recurrent_depth_kernel = self.add_weight(shape=recurrent_depth_kernel_shape, initializer=self.recurrent_initializer, name='recurrent_depth_kernel', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint)
        self.recurrent_point_kernel = self.add_weight(shape=recurrent_point_kernel_shape, initializer=self.recurrent_initializer, name='recurrent_point_kernel', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint)
        self.recurrent_depth_kernel_a = self.add_weight(shape=recurrent_depth_kernel_a_shape, initializer=self.recurrent_initializer, name='recurrent_depth_kernel_a', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint)
        self.recurrent_point_kernel_a = self.add_weight(shape=recurrent_point_kernel_a_shape, initializer=self.recurrent_initializer, name='recurrent_point_kernel_a', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint)
        self.attention_weight = self.add_weight(shape=(self.kernel_size + (input_dim, 1)), initializer=self.kernel_initializer, name='attention_weight', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        if self.use_bias:
            if self.unit_forget_bias:
                def bias_initializer(_, *args, **kwargs):
                    # Ones for the forget-gate slice, configured initializer
                    # for the other three gates (standard LSTM trick).
                    return K.concatenate([self.bias_initializer((self.filters,), *args, **kwargs), initializers.Ones()((self.filters,), *args, **kwargs), self.bias_initializer(((self.filters * 2),), *args, **kwargs)])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=((self.filters * 4),), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint)
            self.bias_a = self.add_weight(shape=(input_dim,), name='bias_a', initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint)
        else:
            self.bias = None
            # Bug fix: call() always reads self.bias_a, so it must exist
            # even when use_bias is False (previously raised AttributeError).
            self.bias_a = None
        self.built = True

    def call(self, inputs, states, training=None):
        """One LSTM step: attention-gate the inputs, then apply i/f/c/o gates."""
        h_tm1 = states[0]
        c_tm1 = states[1]
        dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
        rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(h_tm1, training, count=4)
        # Split the fused weights into the four per-gate kernels.
        (depth_kernel_i, depth_kernel_f, depth_kernel_c, depth_kernel_o) = array_ops.split(self.depth_kernel, 4, axis=3)
        (recurrent_depth_kernel_i, recurrent_depth_kernel_f, recurrent_depth_kernel_c, recurrent_depth_kernel_o) = array_ops.split(self.recurrent_depth_kernel, 4, axis=3)
        (point_kernel_i, point_kernel_f, point_kernel_c, point_kernel_o) = array_ops.split(self.point_kernel, 4, axis=3)
        (recurrent_point_kernel_i, recurrent_point_kernel_f, recurrent_point_kernel_c, recurrent_point_kernel_o) = array_ops.split(self.recurrent_point_kernel, 4, axis=3)
        if self.use_bias:
            (bias_i, bias_f, bias_c, bias_o) = array_ops.split(self.bias, 4)
        else:
            (bias_i, bias_f, bias_c, bias_o) = (None, None, None, None)
        if (0 < self.dropout < 1.0):
            inputs_i = (inputs * dp_mask[0])
        else:
            inputs_i = inputs
        if (0 < self.recurrent_dropout < 1.0):
            h_tm1_i = (h_tm1 * rec_dp_mask[0])
        else:
            h_tm1_i = h_tm1
        # Attention branch: combine input and recurrent projections, then
        # scale the raw inputs by the resulting spatial softmax map.
        x_a = self.input_conv(inputs_i, self.depth_kernel_a, self.point_kernel_a, self.bias_a, padding=self.padding)
        h_a = self.recurrent_conv(h_tm1_i, self.recurrent_depth_kernel_a, self.recurrent_point_kernel_a)
        inputs = (inputs * self.attention((x_a + h_a), self.attention_weight))
        if (0 < self.dropout < 1.0):
            inputs_f = (inputs * dp_mask[1])
            inputs_c = (inputs * dp_mask[2])
            inputs_o = (inputs * dp_mask[3])
        else:
            inputs_f = inputs
            inputs_c = inputs
            inputs_o = inputs
        if (0 < self.recurrent_dropout < 1.0):
            h_tm1_f = (h_tm1 * rec_dp_mask[1])
            h_tm1_c = (h_tm1 * rec_dp_mask[2])
            h_tm1_o = (h_tm1 * rec_dp_mask[3])
        else:
            h_tm1_f = h_tm1
            h_tm1_c = h_tm1
            h_tm1_o = h_tm1
        x_i = self.input_conv(inputs, depth_kernel_i, point_kernel_i, bias_i, padding=self.padding)
        x_f = self.input_conv(inputs_f, depth_kernel_f, point_kernel_f, bias_f, padding=self.padding)
        x_c = self.input_conv(inputs_c, depth_kernel_c, point_kernel_c, bias_c, padding=self.padding)
        x_o = self.input_conv(inputs_o, depth_kernel_o, point_kernel_o, bias_o, padding=self.padding)
        h_i = self.recurrent_conv(h_tm1, recurrent_depth_kernel_i, recurrent_point_kernel_i)
        h_f = self.recurrent_conv(h_tm1_f, recurrent_depth_kernel_f, recurrent_point_kernel_f)
        h_c = self.recurrent_conv(h_tm1_c, recurrent_depth_kernel_c, recurrent_point_kernel_c)
        h_o = self.recurrent_conv(h_tm1_o, recurrent_depth_kernel_o, recurrent_point_kernel_o)
        # Standard LSTM cell equations.
        i = self.recurrent_activation((x_i + h_i))
        f = self.recurrent_activation((x_f + h_f))
        c = ((f * c_tm1) + (i * self.activation((x_c + h_c))))
        o = self.recurrent_activation((x_o + h_o))
        h = (o * self.activation(c))
        return (h, [h, c])

    def input_conv(self, x, dw, pw, b=None, padding='valid'):
        """Separable convolution on the inputs with optional bias."""
        conv_out = K.separable_conv2d(x, dw, pw, strides=self.strides, padding=padding, data_format=self.data_format, dilation_rate=self.dilation_rate)
        if (b is not None):
            conv_out = K.bias_add(conv_out, b, data_format=self.data_format)
        return conv_out

    def recurrent_conv(self, x, dw, pw):
        """Separable convolution on the recurrent state (stride 1, 'same')."""
        conv_out = K.separable_conv2d(x, dw, pw, strides=(1, 1), padding='same', data_format=self.data_format)
        return conv_out

    def attention(self, x, w):
        """Spatial softmax attention map, broadcast over the channel axis."""
        z = K.conv2d(K.tanh(x), w, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate)
        shape_2d = tf.convert_to_tensor([(- 1), self.feat_shape[1], self.feat_shape[2], 1])
        shape_1d = tf.convert_to_tensor([(- 1), (self.feat_shape[1] * self.feat_shape[2])])
        # Softmax over all spatial positions, then restore the 2D layout.
        att = K.softmax(K.reshape(z, shape_1d))
        att = K.reshape(att, shape_2d)
        return K.repeat_elements(att, self.feat_shape[3], 3)

    def get_config(self):
        """Serialize constructor arguments for layer (de)serialization."""
        config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'depth_multiplier': self.depth_multiplier, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout}
        base_config = super(AttenSepConvLSTM2DCell, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
class Solution():
    """LeetCode 392: Is Subsequence."""

    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True if ``s`` is a subsequence of ``t``.

        Greedy single pass over ``t``: each character of ``s`` is matched
        against the shared iterator, so matching positions are consumed in
        order. Runs in O(len(t)) — the original scanned and re-sliced ``t``
        for every character, which was O(len(s) * len(t)) time and memory.
        """
        remaining = iter(t)
        # `ch in remaining` advances the iterator past the first match,
        # enforcing in-order matching.
        return all(ch in remaining for ch in s)
def test_exact():
    """With exact=True and n=2, exactly two variables must fall outside the set.

    The forbidden set is {'a', 'b', 'c'}; assignments may be partial.
    """
    constrainer = SomeNotInSetConstraint(set('abc'), n=2, exact=True)
    (v1, v2, v3) = variables = [Variable('v1'), Variable('v2'), Variable('v3')]
    satisfied_cases = [
        {v1: 'a', v2: 'y', v3: 'z'},  # two values outside the set
        {v1: 'a', v2: 'y'},           # partial assignment, still satisfiable
    ]
    violated_cases = [
        {v1: 'a', v2: 'b', v3: 'z'},  # only one value outside
        {v1: 'a', v2: 'b'},           # cannot reach two outside
        {v1: 'a', v2: 'b', v3: 'c'},  # zero outside
        {v1: 'x', v2: 'y', v3: 'z'},  # three outside exceeds exact n=2
    ]
    for assignments in satisfied_cases:
        assert constrainer(variables, {}, assignments)
    for assignments in violated_cases:
        assert not constrainer(variables, {}, assignments)
class InvCompress(Cheng2020Anchor):
    """Cheng2020Anchor variant whose analysis/synthesis transforms are an
    enhancement module + invertible network + attention module.

    ``g_s_func`` runs the same modules as ``g_a_func`` in reverse order with
    ``rev=True``, so synthesis is the (approximate) inverse of analysis. The
    parent's convolutional ``g_a``/``g_s`` are explicitly disabled.
    """

    def __init__(self, N=192, **kwargs):
        super().__init__(N=N)
        # The parent's transforms are replaced by the invertible pipeline.
        self.g_a = None
        self.g_s = None
        self.enh = EnhModule(64)
        self.inv = InvComp(M=N)
        self.attention = AttModule(N)

    def g_a_func(self, x):
        """Analysis transform: enhancement -> invertible net -> attention."""
        x = self.enh(x)
        x = self.inv(x)
        x = self.attention(x)
        return x

    def g_s_func(self, x):
        """Synthesis transform: exact reverse of g_a_func (rev=True)."""
        x = self.attention(x, rev=True)
        x = self.inv(x, rev=True)
        x = self.enh(x, rev=True)
        return x

    def forward(self, x):
        """Training forward pass returning reconstruction and likelihoods."""
        y = self.g_a_func(x)
        z = self.h_a(y)
        (z_hat, z_likelihoods) = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)
        # Noise quantization during training, rounding at eval time.
        y_hat = self.gaussian_conditional.quantize(y, ('noise' if self.training else 'dequantize'))
        ctx_params = self.context_prediction(y_hat)
        gaussian_params = self.entropy_parameters(torch.cat((params, ctx_params), dim=1))
        (scales_hat, means_hat) = gaussian_params.chunk(2, 1)
        (_, y_likelihoods) = self.gaussian_conditional(y, scales_hat, means=means_hat)
        x_hat = self.g_s_func(y_hat)
        return {'x_hat': x_hat, 'likelihoods': {'y': y_likelihoods, 'z': z_likelihoods}}

    # Bug fix: the first parameter is `cls` and the method is invoked on the
    # class (compressai convention), so it must be a classmethod.
    @classmethod
    def from_state_dict(cls, state_dict):
        """Build an instance sized from a checkpoint and load its weights."""
        N = state_dict['h_a.0.weight'].size(0)
        net = cls(N)
        net.load_state_dict(state_dict)
        return net

    def compress(self, x):
        """Entropy-code an image; returns strings, latent shape, and y."""
        if (next(self.parameters()).device != torch.device('cpu')):
            warnings.warn('Inference on GPU is not recommended for the autoregressive models (the entropy coder is run sequentially on CPU).')
        y = self.g_a_func(x)
        z = self.h_a(y)
        z_strings = self.entropy_bottleneck.compress(z)
        z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[(- 2):])
        params = self.h_s(z_hat)
        # h_s upsamples z by a factor of 4 back to the y resolution.
        s = 4
        kernel_size = 5
        padding = ((kernel_size - 1) // 2)
        y_height = (z_hat.size(2) * s)
        y_width = (z_hat.size(3) * s)
        # Pad so the autoregressive context window stays in bounds.
        y_hat = F.pad(y, (padding, padding, padding, padding))
        y_strings = []
        for i in range(y.size(0)):
            string = self._compress_ar(y_hat[i:(i + 1)], params[i:(i + 1)], y_height, y_width, kernel_size, padding)
            y_strings.append(string)
        return {'strings': [y_strings, z_strings], 'shape': z.size()[(- 2):], 'y': y}

    def decompress(self, strings, shape):
        """Decode [y_strings, z_strings] back into an image in [0, 1]."""
        assert (isinstance(strings, list) and (len(strings) == 2))
        if (next(self.parameters()).device != torch.device('cpu')):
            warnings.warn('Inference on GPU is not recommended for the autoregressive models (the entropy coder is run sequentially on CPU).')
        z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
        params = self.h_s(z_hat)
        s = 4
        kernel_size = 5
        padding = ((kernel_size - 1) // 2)
        y_height = (z_hat.size(2) * s)
        y_width = (z_hat.size(3) * s)
        # Decode into a padded buffer, then strip the padding.
        y_hat = torch.zeros((z_hat.size(0), self.M, (y_height + (2 * padding)), (y_width + (2 * padding))), device=z_hat.device)
        for (i, y_string) in enumerate(strings[0]):
            self._decompress_ar(y_string, y_hat[i:(i + 1)], params[i:(i + 1)], y_height, y_width, kernel_size, padding)
        y_hat = F.pad(y_hat, ((- padding), (- padding), (- padding), (- padding)))
        x_hat = self.g_s_func(y_hat).clamp_(0, 1)
        return {'x_hat': x_hat}
def resnet152(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNet-152 (Bottleneck blocks, layer config [3, 8, 36, 3]).

    When ``pretrained`` is True, weights are fetched via the model store
    under ``root`` and loaded non-strictly.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return model
    # Imported lazily so the model store is only needed for pretrained use.
    from ..models.model_store import get_model_file
    weights = torch.load(get_model_file('resnet152', root=root))
    model.load_state_dict(weights, strict=False)
    return model
class _ChunkResizer():
def __init__(self, adapter, chunk_size):
self.adapter = adapter
self.old_chunk_size = None
self.new_chunk_size = (int(chunk_size) if chunk_size else 0)
def __enter__(self):
if ((self.adapter.connection is not None) and hasattr(self.adapter.connection, 'chunk_size')):
if (self.new_chunk_size > self.adapter.connection.chunk_size):
self.old_chunk_size = self.adapter.connection.chunk_size
self.adapter.connection.chunk_size = self.new_chunk_size
def __exit__(self, exc_type, exc_val, exc_tb):
if (self.old_chunk_size is not None):
self.adapter.connection.chunk_size = self.old_chunk_size |
class HKPRO3(FinTS3Segment):
    """Status protocol request, version 3 (HKPRO, 'Statusprotokoll anfordern').

    All fields are optional filters; the German ``_d`` strings are the
    official field descriptions. Field declaration order defines the
    segment's wire format and must not change.
    """
    # From date ('Von Datum')
    date_start = DataElementField(type='dat', required=False, _d='Von Datum')
    # To date ('Bis Datum')
    date_end = DataElementField(type='dat', required=False, _d='Bis Datum')
    # Maximum number of entries to return ('Maximale Anzahl Eintrage')
    max_number_responses = DataElementField(type='num', max_length=4, required=False, _d='Maximale Anzahl Eintrage')
    # Pagination continuation marker ('Aufsetzpunkt')
    touchdown_point = DataElementField(type='an', max_length=35, required=False, _d='Aufsetzpunkt')
def setup(args, modify_exp_name=False):
    """Build and freeze a config from a file plus command-line overrides.

    Silently falls back to CPU when CUDA is requested but unavailable.
    With ``modify_exp_name``, appends a timestamp to EXP_NAME so repeated
    runs get distinct experiment names.
    """
    cfg = get_config()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    wants_gpu = cfg.MODEL.DEVICE != 'cpu'
    if wants_gpu and not torch.cuda.is_available():
        cfg.MODEL.DEVICE = 'cpu'
    if modify_exp_name:
        stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        cfg.EXP_NAME = f'{cfg.EXP_NAME}-{stamp}'
    cfg.freeze()
    return cfg
def ToStructure(device):
    """Convert a device description dict into a solcore ``Structure``.

    For each of the first ``device['numlayers']`` entries a material is
    built from the layer's composition at temperature ``device['T']`` and
    wrapped in a layer; every material is marked strained. The substrate
    material is attached to the resulting structure.
    """
    layer_objects = []
    materials = []
    for index in range(device['numlayers']):
        spec = device['layers'][index]
        material = ToSolcoreMaterial(spec['properties']['composition'], device['T'])
        materials.append(material)
        layer_objects.append(ToLayer(spec['properties']['width'], material, spec['label']))
        # NOTE: the flag is the string 'True', matching the original code.
        layer_objects[-1].material.strained = 'True'
    structure = Structure(layer_objects)
    structure.substrate = ToSolcoreMaterial(device['substrate'], device['T'], execute=True)
    return structure
def get_submodules_from_kwargs(kwargs):
    """Resolve the keras-style submodules (backend, layers, models, utils).

    Falls back to the module-level ``_KERAS_*`` defaults for any submodule
    not supplied in ``kwargs`` and patches the layer classes used by the
    model builders onto the ``layers`` module.

    Raises:
        TypeError: for any keyword other than backend/layers/models/utils.
    """
    # Validate before mutating the shared `layers` module, so a bad call
    # leaves no side effects.
    for key in kwargs:
        if key not in ('backend', 'layers', 'models', 'utils'):
            # Bug fix: the message was previously passed as a tuple
            # ('...%s', key) instead of being %-formatted.
            raise TypeError('Invalid keyword argument: %s' % key)
    backend = kwargs.get('backend', _KERAS_BACKEND)
    layers = kwargs.get('layers', _KERAS_LAYERS)
    # Patch in the (possibly wrapped) layer classes the builders expect.
    layers.Conv2D = Conv2D
    layers.BatchNormalization = BatchNormalization
    layers.Dense = Dense
    layers.DepthwiseConv2D = DepthwiseConv2D
    layers.SeparableConv2D = SeparableConv2D
    models = kwargs.get('models', _KERAS_MODELS)
    utils = kwargs.get('utils', _KERAS_UTILS)
    return (backend, layers, models, utils)
class SeekBar(Gtk.Box):
    """Playback seek bar: elapsed label | horizontal scale | remaining label.

    The scale tracks playback via a TimeTracker and forwards user drags to
    ``player.seek``. Programmatic updates wrap themselves in ``_inhibit`` so
    the user-seek handler does not fire for them. All times on the scale are
    in seconds; the player API uses milliseconds.
    """

    def __init__(self, player, library):
        super().__init__()
        self._elapsed_label = TimeLabel()
        self._remaining_label = TimeLabel()
        scale = Gtk.Scale(orientation=Gtk.Orientation.HORIZONTAL)
        # Adjustment(value, lower, upper, step_inc, page_inc, page_size);
        # NOTE(review): page increment of -15 looks intentional (page keys
        # move backwards?) — confirm against the original project.
        scale.set_adjustment(Gtk.Adjustment.new(0, 0, 0, 3, (- 15), 0))
        scale.set_draw_value(False)
        self._scale = scale
        self.pack_start(Align(self._elapsed_label, border=6), False, True, 0)
        self.pack_start(scale, True, True, 0)
        self.pack_start(Align(self._remaining_label, border=6), False, True, 0)
        for child in self.get_children():
            child.show_all()
        # Two handlers on the same signal: only the user-seek handler is
        # blocked by _inhibit (via its stored handler id).
        self._id = self._scale.connect('value-changed', self._on_user_changed, player)
        self._scale.connect('value-changed', self._on_scale_value_changed, player)
        self._tracker = TimeTracker(player)
        self._tracker.connect('tick', self._on_tick, player)
        connect_destroy(player, 'seek', self._on_player_seek)
        connect_destroy(player, 'song-started', self._on_song_started)
        connect_destroy(player, 'notify::seekable', self._on_seekable_changed)
        connect_destroy(library, 'changed', self._on_song_changed, player)
        self.connect('destroy', self._on_destroy)
        with self._inhibit():
            self._update(player)
        self._tracker.tick()

    def _on_destroy(self, *args):
        # Stop the periodic tick source when the widget goes away.
        self._tracker.destroy()

    def _inhibit(self):
        # Context manager that blocks the user-seek handler while the scale
        # value is changed programmatically.
        # NOTE(review): this is a generator used in `with` statements, so the
        # original presumably carried a stripped @contextlib.contextmanager
        # decorator — confirm against the upstream source.
        with GObject.signal_handler_block(self._scale, self._id):
            (yield)

    def _on_user_changed(self, scale, player):
        # User dragged the slider: seek (scale is seconds, player wants ms).
        if player.seekable:
            player.seek((scale.get_value() * 1000))

    def _on_scale_value_changed(self, scale, player):
        # Always refresh the labels, whatever caused the value change.
        self._update(player)

    def _on_tick(self, tracker, player):
        position = (player.get_position() // 1000)
        with self._inhibit():
            self._scale.set_value(position)

    def _on_seekable_changed(self, player, *args):
        with self._inhibit():
            self._update(player)

    def _on_song_changed(self, library, songs, player):
        # Refresh only if the currently playing song was among the changed.
        if (player.info in songs):
            with self._inhibit():
                self._update(player)

    def _on_player_seek(self, player, song, ms):
        with self._inhibit():
            self._scale.set_value((ms // 1000))
            self._update(player)

    def _on_song_started(self, player, song):
        with self._inhibit():
            self._scale.set_value(0)
            self._update(player)

    def _update(self, player):
        """Sync range, labels, and sensitivity with the player state."""
        if player.info:
            self._scale.set_range(0, player.info('~#length'))
        else:
            self._scale.set_range(0, 1)
        if (not player.seekable):
            self._scale.set_value(0)
        value = self._scale.get_value()
        max_ = self._scale.get_adjustment().get_upper()
        # Remaining time is shown as a negative offset from the end.
        remaining = (value - max_)
        self._elapsed_label.set_time(value)
        self._remaining_label.set_time(remaining)
        self._remaining_label.set_disabled((not player.seekable))
        self._elapsed_label.set_disabled((not player.seekable))
        self.set_sensitive(player.seekable)
class ItemParams(wx.Panel):
def __init__(self, parent, stuff, item, context=None):
wx.Panel.__init__(self, parent, size=(1000, 1000))
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE))
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.paramList = wx.lib.agw.hypertreelist.HyperTreeList(self, wx.ID_ANY, agwStyle=(((wx.TR_HIDE_ROOT | wx.TR_NO_LINES) | wx.TR_FULL_ROW_HIGHLIGHT) | wx.TR_HAS_BUTTONS))
self.paramList.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
mainSizer.Add(self.paramList, 1, (wx.ALL | wx.EXPAND), 0)
self.SetSizer(mainSizer)
self.toggleView = AttributeView.NORMAL
self.stuff = stuff
self.item = item
self.isStuffItem = ((stuff is not None) and (item is not None) and (getattr(stuff, 'item', None) == item))
self.isStuffCharge = ((stuff is not None) and (item is not None) and (getattr(stuff, 'charge', None) == item))
self.attrInfo = {}
self.attrValues = {}
self._fetchValues()
self.paramList.AddColumn(_t('Attribute'))
self.paramList.AddColumn(_t('Current Value'))
if (self.stuff is not None):
self.paramList.AddColumn(_t('Base Value'))
self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline, 0, wx.EXPAND)
bSizer = wx.BoxSizer(wx.HORIZONTAL)
self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, _t('View Raw Data'), wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.exportStatsBtn = wx.ToggleButton(self, wx.ID_ANY, _t('Export Item Stats'), wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.exportStatsBtn, 0, wx.ALIGN_CENTER_VERTICAL)
if (stuff is not None):
self.refreshBtn = wx.Button(self, wx.ID_ANY, _t('Refresh'), wx.DefaultPosition, wx.DefaultSize, wx.BU_EXACTFIT)
bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshValues)
mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)
self.imageList = wx.ImageList(16, 16)
self.PopulateList()
self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)
self.exportStatsBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ExportItemStats)
self.mainFrame.Bind(GE.ITEM_CHANGED_INPLACE, self.OnUpdateStuff)
def OnWindowClose(self):
self.mainFrame.Unbind(GE.ITEM_CHANGED_INPLACE)
def _fetchValues(self):
if (self.stuff is None):
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.item.attributes)
self.attrValues.update(self.item.attributes)
elif self.isStuffItem:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.stuff.item.attributes)
self.attrValues.update(self.stuff.itemModifiedAttributes)
elif self.isStuffCharge:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.stuff.charge.attributes)
self.attrValues.update(self.stuff.chargeModifiedAttributes)
else:
return
def UpdateList(self):
self.Freeze()
self.paramList.DeleteRoot()
self.PopulateList()
self.Thaw()
def RefreshValues(self, event):
self._fetchValues()
self.UpdateList()
if event:
event.Skip()
def ToggleViewMode(self, event):
self.toggleView *= (- 1)
self.UpdateList()
event.Skip()
def ExportItemStats(self, event):
    """Prompt for a CSV path and dump every attribute of the item.

    Columns: ID, internal name, friendly name, modified value, base value.
    Missing metadata fields are written as empty strings (or the raw
    attribute name for the internal-name column).
    """
    exportFileName = (((self.item.name + ' (') + str(self.item.ID)) + ').csv')
    with wx.FileDialog(self, _t('Save CSV file'), '', exportFileName, (_t('CSV files') + ' (*.csv)|*.csv'), (wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)) as dlg:
        if (dlg.ShowModal() == wx.ID_CANCEL):
            return
        # newline='' is required by the csv module; without it every row
        # is followed by a blank line on Windows.
        with open(dlg.GetPath(), 'w', newline='') as exportFile:
            writer = csv.writer(exportFile, delimiter=',')
            writer.writerow(['ID', 'Internal Name', 'Friendly Name', 'Modified Value', 'Base Value'])
            for attribute in self.attrValues:
                try:
                    attribute_modified_value = self.attrValues[attribute].value
                except (KeyError, AttributeError):
                    # Raw values (plain numbers) carry no .value wrapper.
                    attribute_modified_value = self.attrValues[attribute]
                writer.writerow([
                    self._attrInfoField(attribute, 'ID', ''),
                    self._attrInfoField(attribute, 'name', attribute),
                    self._attrInfoField(attribute, 'displayName', ''),
                    attribute_modified_value,
                    self._attrInfoField(attribute, 'value', ''),
                ])

def _attrInfoField(self, attribute, field, default):
    # Fetch one field of the attribute's metadata, falling back to
    # `default` when the attribute is unknown or lacks that field.
    try:
        return getattr(self.attrInfo[attribute], field)
    except (KeyError, AttributeError):
        return default
def OnUpdateStuff(self, event):
    """Swap our holder reference when the displayed module/charge holder
    is replaced in place elsewhere in the GUI."""
    if (self.stuff is event.old):
        self.stuff = event.new
def SetupImageList(self):
    """Reset the icon list and pre-load the placeholder icons."""
    self.imageList.RemoveAll()
    # Index of a fully transparent 16x16 icon, used when an attribute's
    # icon file cannot be loaded.
    self.blank_icon = self.imageList.Add(BitmapLoader.getBitmap('transparent16x16', 'gui'))
    # Generic fallback icon for attributes without icon metadata.
    self.unknown_icon = self.imageList.Add(BitmapLoader.getBitmap('0', 'icons'))
    self.paramList.AssignImageList(self.imageList)
def AddAttribute(self, parent, attr):
    """Append one attribute row under `parent`, unless already shown.

    `attr` may be a plain attribute name or an (attrName, displayLabel)
    tuple overriding the rendered label.
    """
    displayOverride = None
    if isinstance(attr, tuple):
        displayOverride = attr[1]
        attr = attr[0]
    # Skip attributes we have no value for, or have already rendered.
    if attr not in self.attrValues or attr in self.processed_attribs:
        return
    data = self.GetData(attr, displayOverride)
    if data is None:
        # GetData decided this attribute should be hidden.
        return
    icon, label, currentText, baseText = data
    row = self.paramList.AppendItem(parent, label)
    self.paramList.SetItemTextColour(row, wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))
    self.paramList.SetItemText(row, currentText, 1)
    if self.stuff is not None:
        # Base-value column is only meaningful when inspecting a fitted item.
        self.paramList.SetItemText(row, baseText, 2)
    self.paramList.SetItemImage(row, icon, which=wx.TreeItemIcon_Normal)
    self.processed_attribs.add(attr)
def ExpandOrDelete(self, item):
    """Expand a populated tree node; remove it entirely if it ended up empty."""
    if self.paramList.GetChildrenCount(item):
        self.paramList.Expand(item)
    else:
        self.paramList.Delete(item)
def PopulateList(self):
    """Fill the attribute tree: curated category groups first, then any
    remaining attributes alphabetically under the root."""
    self.SetupImageList()
    # Attributes already rendered; AddAttribute consults and extends this.
    self.processed_attribs = set()
    root = self.paramList.AddRoot('The Root Item')
    # NOTE(review): misc_parent is never used below.
    misc_parent = root
    # Pick the group layout for this item's category, with a generic fallback.
    order = CategoryGroups.get(self.item.category.name, [GuiAttrGroup.FITTING, GuiAttrGroup.SHIP_GROUP])
    for data in [AttrGroupDict[o] for o in order]:
        heading = data.get('label')
        header_item = self.paramList.AppendItem(root, heading)
        self.paramList.SetItemTextColour(header_item, wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))
        for attr in data.get('attributes', []):
            if (attr in GroupedAttributes):
                # Find the grouping this attribute belongs to; relies on
                # `grouping` leaking out of the loop after the break.
                for grouping in AttrGroups:
                    if (attr in grouping[0]):
                        break
                item = self.paramList.AppendItem(header_item, grouping[1])
                self.paramList.SetItemTextColour(item, wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))
                # Render every member of the grouping under its own node.
                for attr2 in grouping[0]:
                    self.AddAttribute(item, attr2)
                self.ExpandOrDelete(item)
                continue
            self.AddAttribute(header_item, attr)
        self.ExpandOrDelete(header_item)
    # Anything not claimed by a curated group above, alphabetically.
    names = list(self.attrValues.keys())
    names.sort()
    for name in names:
        if (name in GroupedAttributes):
            for grouping in AttrGroups:
                if (name in grouping[0]):
                    break
            item = self.paramList.AppendItem(root, grouping[1])
            self.paramList.SetItemTextColour(item, wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))
            for attr2 in grouping[0]:
                self.AddAttribute(item, attr2)
            self.ExpandOrDelete(item)
            continue
        self.AddAttribute(root, name)
    self.Layout()
    # Auto-size every column to its contents.
    for i in range(self.paramList.GetMainWindow().GetColumnCount()):
        self.paramList.SetColumnWidth(i, wx.LIST_AUTOSIZE)
def GetData(self, attr, displayOveride=None):
    """Resolve one attribute into (icon index, label, current text, base text).

    Returns None when the attribute should be hidden in normal view
    (unpublished, no metadata, zero-valued and ungrouped, or a
    required-skill attribute).  `displayOveride` replaces the metadata
    display name when given.
    """
    info = self.attrInfo.get(attr)
    att = self.attrValues[attr]
    valDefault = getattr(info, 'value', None)
    if (self.stuff is not None):
        # Prefer the fit's stored base value over the static item value.
        if self.isStuffItem:
            valDefault = self.stuff.getItemBaseAttrValue(attr, valDefault)
        elif self.isStuffCharge:
            valDefault = self.stuff.getChargeBaseAttrValue(attr, valDefault)
    valueDefault = (valDefault if (valDefault is not None) else att)
    val = getattr(att, 'value', None)
    value = (val if (val is not None) else att)
    # Hide uninteresting rows in normal view only; raw view shows everything.
    if ((self.toggleView == AttributeView.NORMAL) and (((attr not in GroupedAttributes) and (not (value or valueDefault))) or (info is None) or (not info.published) or (attr in RequiredSkillAttrs))):
        return None
    # Friendly name in normal view, internal name in raw view.
    if (info and info.displayName and (self.toggleView == AttributeView.NORMAL)):
        attrName = (displayOveride or info.displayName)
    else:
        attrName = attr
    if (info and config.debug):
        # Debug builds append the numeric attribute ID to the label.
        attrName += ' ({})'.format(info.ID)
    # Resolve the icon: metadata icon if loadable, else placeholders.
    if info:
        if (info.iconID is not None):
            iconFile = info.iconID
            icon = BitmapLoader.getBitmap(iconFile, 'icons')
            if (icon is None):
                attrIcon = self.blank_icon
            else:
                attrIcon = self.imageList.Add(icon)
        else:
            attrIcon = self.unknown_icon
    else:
        attrIcon = self.unknown_icon
    # Format the current value: raw repr, unit-aware format, or plain amount.
    if (self.toggleView == AttributeView.RAW):
        valueUnit = str(value)
    elif (info and info.unit):
        valueUnit = self.FormatValue(*info.unit.PreformatValue(value))
    else:
        valueUnit = formatAmount(value, 3, 0, 0)
    # Same formatting rules for the base (unmodified) value.
    if (self.toggleView == AttributeView.RAW):
        valueUnitDefault = str(valueDefault)
    elif (info and info.unit):
        valueUnitDefault = self.FormatValue(*info.unit.PreformatValue(valueDefault))
    else:
        valueUnitDefault = formatAmount(valueDefault, 3, 0, 0)
    return (attrIcon, attrName, valueUnit, valueUnitDefault)
def FormatValue(value, unit, rounding='prec', digits=3):
if (isinstance(value, (int, float)) and (rounding == 'prec')):
fvalue = formatAmount(value, digits, 0, 0)
elif (isinstance(value, (int, float)) and (rounding == 'dec')):
fvalue = roundDec(value, digits)
else:
fvalue = value
unitSuffix = (f' {unit}' if (unit is not None) else '')
return f'{fvalue}{unitSuffix}' |
class Effect4385(BaseEffect):
    """Recon Ships elite bonus: heavy assault missile velocity.

    Passive effect boosting maxVelocity of charges requiring the Heavy
    Assault Missiles skill, scaled by the hull's eliteBonusReconShip1
    attribute per level of the Recon Ships skill.
    """

    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): handler takes `fit` (not self) as its first
        # parameter, so it is declared static to match how effect
        # handlers are dispatched.
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Heavy Assault Missiles')), 'maxVelocity', ship.getModifiedItemAttr('eliteBonusReconShip1'), skill='Recon Ships', **kwargs)
class CascadedManager(nvCompManager):
    """Wrapper configuring an nvCOMP Cascaded compression manager."""

    def __init__(self, **kwargs):
        """Build the manager, filling in Cascaded-scheme defaults.

        Options not supplied by the caller fall back to the defaults
        below (chunk size, element dtype, number of RLE and delta
        passes, bitpacking).
        """
        super().__init__(kwargs)  # base class consumes the raw options dict
        default_options = {'chunk_size': (1 << 12), 'type': np.int32, 'num_RLEs': 2, 'num_deltas': 1, 'use_bp': True}
        # Apply a default only when the base class did not already set the
        # attribute from the caller's kwargs.
        for (k, v) in default_options.items():
            try:
                getattr(self, k)
            except Exception:
                setattr(self, k, v)
        self.options = {'chunk_size': self.chunk_size, 'type': self.type, 'num_RLEs': self.num_RLEs, 'num_deltas': self.num_deltas, 'use_bp': self.use_bp}
        # NOTE(review): the low-level manager is constructed from
        # default_options rather than self.options — verify this is
        # intentional, otherwise caller-supplied options are ignored here.
        self._manager = _lib._CascadedManager(default_options, self.stream, self.device_id)
class CategoricalMLPPolicy(StochasticPolicy, LasagnePowered, Serializable):
    """MLP policy over a discrete action space (rllab / Lasagne).

    The network maps flattened (possibly stacked) observations through an
    MLP to a softmax distribution over the environment's actions.
    """

    def __init__(self, env_spec, hidden_sizes=(32, 32), hidden_nonlinearity=NL.tanh, num_seq_inputs=1, prob_network=None):
        """Build (or adopt) the probability network and compile it.

        :param env_spec: environment specification; action space must be Discrete
        :param hidden_sizes: widths of the MLP hidden layers
        :param hidden_nonlinearity: activation for hidden layers
        :param num_seq_inputs: number of stacked observations fed as one input
        :param prob_network: optional pre-built network; constructed if None
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        if (prob_network is None):
            prob_network = MLP(input_shape=((env_spec.observation_space.flat_dim * num_seq_inputs),), output_dim=env_spec.action_space.n, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=NL.softmax)
        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer
        # Compiled Theano function: observations -> action probabilities.
        self._f_prob = ext.compile_function([prob_network.input_layer.input_var], L.get_output(prob_network.output_layer))
        self._dist = Categorical(env_spec.action_space.n)
        super(CategoricalMLPPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [prob_network.output_layer])

    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic distribution parameters for a batch of observations."""
        return dict(prob=L.get_output(self._l_prob, {self._l_obs: obs_var}))

    def dist_info(self, obs, state_infos=None):
        """Numeric distribution parameters for a batch of observations."""
        return dict(prob=self._f_prob(obs))

    def get_action(self, observation, deterministic=False):
        """Sample (or argmax, if deterministic) one action for one observation."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        if deterministic:
            action = np.argmax(prob)
        else:
            action = self.action_space.weighted_sample(prob)
        return (action, dict(prob=prob))

    def get_actions(self, observations):
        """Sample one action per observation in a batch."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return (actions, dict(prob=probs))

    def distribution(self):
        # NOTE(review): upstream rllab exposes this as a @property; the
        # decorator may have been lost — confirm whether callers access
        # policy.distribution or policy.distribution().
        return self._dist
# NOTE(review): the original had two bare string expressions (no-ops)
# where these patch decorators belong; the targets line up with the
# injected mock arguments (bottom decorator -> first argument).
# Assumes `patch` is imported from unittest.mock alongside MagicMock.
@patch('PyQt6.QtWidgets.QGraphicsTextItem.keyPressEvent')
@patch('beeref.items.BeeTextItem.exit_edit_mode')
def test_key_press_event_enter(exit_mock, key_press_mock, view):
    """Enter with no modifier in edit mode exits edit mode instead of
    inserting a newline."""
    item = BeeTextItem('foo bar')
    view.scene.addItem(item)
    view.scene.edit_item = item
    event = MagicMock()
    event.key.return_value = Qt.Key.Key_Enter
    event.modifiers.return_value = Qt.KeyboardModifier.NoModifier
    item.keyPressEvent(event)
    key_press_mock.assert_not_called()
    exit_mock.assert_called_once_with()
class Visualization(ScreenProgram):
    """Screen program rendering an audio visualization through the
    raveberry_visualization native module."""

    # Number of particles rendered by the visualization.
    NUM_PARTICLES = 400
    # Window (seconds) over which the native controller measures FPS.
    FPS_MEASURE_WINDOW = 20.0

    def get_variants() -> List[str]:
        """Return available visualization variants, or [] when unsupported.

        NOTE(review): takes no `self` — presumably intended as a
        @staticmethod (decorator may have been lost); confirm call sites.
        """
        try:
            with open('/proc/device-tree/model', encoding='utf-8') as model_file:
                model = model_file.read()
                # A Raspberry Pi 3 is too weak to run the visualization.
                if model.startswith('Raspberry Pi 3'):
                    return []
        except FileNotFoundError:
            # No device-tree (e.g. x86 host) — assume capable hardware.
            pass
        try:
            import raveberry_visualization
            controller = raveberry_visualization.Controller()
            return controller.get_variants()
        except ModuleNotFoundError:
            # Native module not installed: no variants available.
            return []

    def __init__(self, manager: 'DeviceManager', variant: str) -> None:
        super().__init__(manager, variant)
        import raveberry_visualization
        self.controller = raveberry_visualization.Controller()
        # Timestamp of the last FPS sample, used for throttling in compute().
        self.last_fps_check = time.time()

    def start(self) -> None:
        """Acquire the cava audio analyzer and launch the native renderer."""
        self.manager.utilities.cava.use()
        self.controller.start(self.name, self.manager.settings['ups'], Visualization.NUM_PARTICLES, Visualization.FPS_MEASURE_WINDOW)

    def compute(self) -> None:
        """Per-frame update: monitor FPS, adapt resolution, feed parameters."""
        now = time.time()
        # Only sample FPS every half measurement window to avoid churn.
        if ((now - self.last_fps_check) > (Visualization.FPS_MEASURE_WINDOW / 2)):
            self.last_fps_check = now
            current_fps = self.controller.get_fps()
            redis.put('current_fps', current_fps)
            if (self.manager.settings['dynamic_resolution'] and (current_fps < (0.9 * self.manager.settings['ups']))):
                # Too slow: drop resolution and restart the program.
                self.manager.devices.screen.lower_resolution()
                self.manager.restart_screen_program(sleep_time=2, has_lock=True)
            else:
                lights.update_state()
        if (not self.controller.is_active()):
            # The native renderer exited (e.g. window closed).
            raise ScreenProgramStopped
        self.controller.set_parameters(self.manager.utilities.alarm.factor, self.manager.utilities.cava.current_frame)

    def stop(self) -> None:
        """Stop the native renderer and release the audio analyzer."""
        self.controller.stop()
        self.manager.utilities.cava.release()
class TagViewTests(django.test.TestCase):
    """End-to-end tests for the tag pages: routing, context, rendering."""

    def setUp(self):
        super().setUp()
        # Every Tag row references a commit; share one across the tests.
        self.commit = Commit.objects.create(**TEST_COMMIT_KWARGS)

    def test_routing(self):
        """Tag, group, and grouped-tag URLs resolve to the right templates."""
        Tag.objects.create(name='example', last_commit=self.commit)
        Tag.objects.create(name='grouped-tag', group='group-name', last_commit=self.commit)
        cases = [('/pages/tags/example/', 'content/tag.html'), ('/pages/tags/group-name/', 'content/listing.html'), ('/pages/tags/group-name/grouped-tag/', 'content/tag.html')]
        for (url, template) in cases:
            with self.subTest(url=url):
                response = self.client.get(url)
                self.assertEqual(200, response.status_code)
                self.assertTemplateUsed(response, template)

    def test_valid_tag_returns_200(self):
        """An existing tag page renders its body with the tag template."""
        Tag.objects.create(name='example', body='This is the tag body.', last_commit=self.commit)
        response = self.client.get('/pages/tags/example/')
        self.assertEqual(200, response.status_code)
        self.assertIn('This is the tag body', response.content.decode('utf-8'))
        self.assertTemplateUsed(response, 'content/tag.html')

    def test_invalid_tag_404(self):
        """An unknown tag 404s even when the remote fetch is mocked out."""
        with mock.patch('pydis_site.apps.content.utils.fetch_tags', autospec=True):
            response = self.client.get('/pages/tags/non-existent/')
            self.assertEqual(404, response.status_code)

    def test_context_tag(self):
        """Context exposes title, rendered body (frontmatter stripped),
        the tag object, and breadcrumbs."""
        body = textwrap.dedent('\n ---\n unused: frontmatter\n ----\n Tag content here.\n ')
        tag = Tag.objects.create(name='example', body=body, last_commit=self.commit)
        response = self.client.get('/pages/tags/example/')
        expected = {'page_title': 'example', 'page': markdown.markdown('Tag content here.'), 'tag': tag, 'breadcrumb_items': [{'name': 'Pages', 'path': '.'}, {'name': 'Tags', 'path': 'tags'}]}
        for key in expected:
            self.assertEqual(expected[key], response.context.get(key), f'context.{key} did not match')

    def test_context_grouped_tag(self):
        """Grouped tags get an extra breadcrumb for their group."""
        Tag.objects.create(name='example', body='Body text', group='group-name', last_commit=self.commit)
        response = self.client.get('/pages/tags/group-name/example/')
        self.assertListEqual([{'name': 'Pages', 'path': '.'}, {'name': 'Tags', 'path': 'tags'}, {'name': 'group-name', 'path': 'tags/group-name'}], response.context.get('breadcrumb_items'))

    def test_group_page(self):
        """A group listing shows only its own tags, with bodies rendered."""
        Tag.objects.create(name='tag-1', body='Body 1', group='group-name', last_commit=self.commit)
        Tag.objects.create(name='tag-2', body='Body 2', group='group-name', last_commit=self.commit)
        Tag.objects.create(name='not-included', last_commit=self.commit)
        response = self.client.get('/pages/tags/group-name/')
        content = response.content.decode('utf-8')
        self.assertInHTML("<div class='level-left'>group-name</div>", content)
        self.assertInHTML(f"<a class='level-item fab fa-github' href='{Tag.URL_BASE}/group-name'>", content)
        self.assertIn('>tag-1</span>', content)
        self.assertIn('>tag-2</span>', content)
        self.assertNotIn('>not-included</span>', content, "Tags not in this group shouldn't be rendered.")
        self.assertInHTML('<p>Body 1</p>', content)

    def test_markdown(self):
        """Markdown bodies render fenced code blocks and emphasis."""
        body = textwrap.dedent('\n ```py\n Hello world!\n ```\n\n **This text is in bold**\n ')
        Tag.objects.create(name='example', body=body, last_commit=self.commit)
        response = self.client.get('/pages/tags/example/')
        content = response.content.decode('utf-8')
        self.assertInHTML('<code class="language-py">Hello world!</code>', content)
        self.assertInHTML('<strong>This text is in bold</strong>', content)

    def test_embed(self):
        """Embed frontmatter renders an image plus the remaining body."""
        # NOTE(review): the embed image URL appears to have been stripped
        # from this fixture during extraction — confirm against upstream.
        body = textwrap.dedent('\n ---\n embed:\n title: Embed title\n image:\n url: ---\n Tag body.\n ')
        Tag.objects.create(name='example', body=body, last_commit=self.commit)
        response = self.client.get('/pages/tags/example/')
        content = response.content.decode('utf-8')
        # NOTE(review): this assertion is garbled (unterminated string,
        # likely a stripped URL) — restore from the original source.
        self.assertInHTML('<img alt="Embed title" src=" content)
        self.assertInHTML('<p>Tag body.</p>', content)

    def test_embed_title(self):
        """The embed title overrides the page title."""
        body = textwrap.dedent('\n ---\n embed:\n title: Embed title\n ---\n ')
        Tag.objects.create(name='example', body=body, last_commit=self.commit)
        response = self.client.get('/pages/tags/example/')
        self.assertEqual('Embed title', response.context.get('page_title'), 'The page title must match the embed title.')

    def test_hyperlinked_item(self):
        """`!tags <name>` references become links to the other tag page."""
        (filler_before, filler_after) = ('empty filler text\n\n', 'more\nfiller')
        body = ((filler_before + '`!tags return`') + filler_after)
        Tag.objects.create(name='example', body=body, last_commit=self.commit)
        other_url = reverse('content:tag', kwargs={'location': 'return'})
        response = self.client.get('/pages/tags/example/')
        self.assertEqual(markdown.markdown(((filler_before + f'[`!tags return`]({other_url})') + filler_after)), response.context.get('page'))

    def test_hyperlinked_group(self):
        """`!tags <group> <tag>` references link to the grouped tag page."""
        Tag.objects.create(name='example', body='!tags group-name grouped-tag', last_commit=self.commit)
        Tag.objects.create(name='grouped-tag', group='group-name')
        other_url = reverse('content:tag', kwargs={'location': 'group-name/grouped-tag'})
        response = self.client.get('/pages/tags/example/')
        self.assertEqual(markdown.markdown(f'[!tags group-name grouped-tag]({other_url})'), response.context.get('page'))

    def test_hyperlinked_extra_text(self):
        """Only the tag reference is linked; trailing text is untouched."""
        Tag.objects.create(name='example', body='!tags other unrelated text', last_commit=self.commit)
        Tag.objects.create(name='other')
        other_url = reverse('content:tag', kwargs={'location': 'other'})
        response = self.client.get('/pages/tags/example/')
        self.assertEqual(markdown.markdown(f'[!tags other]({other_url}) unrelated text'), response.context.get('page'))

    def test_tags_have_no_edit_on_github_link(self):
        """Tag pages don't show the generic 'Edit on GitHub' footer."""
        Tag.objects.create(name='example', body='Joe William Banks', last_commit=self.commit)
        response = self.client.get('/pages/tags/example/')
        self.assertNotContains(response, 'Edit on GitHub')

    def test_tag_root_page(self):
        """The tag index lists every tag using the listing template."""
        Tag.objects.create(name='tag-1', last_commit=self.commit)
        Tag.objects.create(name='tag-2', last_commit=self.commit)
        Tag.objects.create(name='tag-3', last_commit=self.commit)
        response = self.client.get('/pages/tags/')
        content = response.content.decode('utf-8')
        self.assertTemplateUsed(response, 'content/listing.html')
        self.assertInHTML('<div class="level-left">Tags</div>', content)
        for tag_number in range(1, 4):
            self.assertIn(f'tag-{tag_number}</span>', content)
def filled_stream(stream, audio_source):
    """Connect a playback stream and prime it with up to 1 KiB of audio.

    Each stream operation is performed while holding the mainloop lock,
    releasing it between steps so the audio thread's callbacks can run.
    """
    with stream.mainloop.lock:
        stream.connect_playback()
    assert stream.is_ready
    with stream.mainloop.lock:
        writable_size = stream.get_writable_size()
    assert (writable_size > 0)
    # Cap the write so the fixture stays fast.
    nbytes = min(1024, writable_size)
    audio_data = audio_source.get_audio_data(nbytes)
    with stream.mainloop.lock:
        # audio_data must remain referenced while writing, since only its
        # raw pointer is handed over here.
        stream.write(audio_data.pointer, nbytes)
    assert stream.is_ready
    return stream
class TestUtils(unittest.TestCase):
    """Tests for line/column resolution from a flat string index."""

    def test_line_info_at(self):
        text = 'abc\ndef'
        # index -> expected (line, column), both zero-based; index 3 is
        # the newline itself, index 4 the first char of the second line.
        expected = {0: (0, 0), 2: (0, 2), 3: (0, 3), 4: (1, 0), 7: (1, 3)}
        for index, info in expected.items():
            self.assertEqual(line_info_at(text, index), info)
        # An index past the end of the string is rejected.
        with self.assertRaises(ValueError):
            line_info_at(text, 8)
class RSoftmax(nn.Module):
    """Radix softmax (Split-Attention): softmax across the radix dimension
    when radix > 1, plain sigmoid otherwise."""

    def __init__(self, radix, groups):
        super().__init__()
        self.radix = radix
        self.groups = groups

    def forward(self, x):
        if self.radix <= 1:
            # Degenerate case: a single split reduces to a sigmoid gate.
            return torch.sigmoid(x)
        batch = x.size(0)
        # (B, groups, radix, -1) -> (B, radix, groups, -1) so that
        # softmax normalizes over the radix splits.
        attn = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
        attn = F.softmax(attn, dim=1)
        return attn.reshape(batch, -1)
class Wav2Vec2ProcessorWithLM(ProcessorMixin):
    """Bundles a Wav2Vec2 feature extractor, a CTC tokenizer and a
    pyctcdecode beam-search decoder (with language model) into one
    processor for LM-boosted speech recognition.

    NOTE(review): several methods below look like they lost their
    decorators (@classmethod, @staticmethod, @property, @contextmanager)
    — flagged individually; confirm against upstream transformers.
    """

    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'Wav2Vec2CTCTokenizer'

    def __init__(self, feature_extractor: 'FeatureExtractionMixin', tokenizer: 'PreTrainedTokenizerBase', decoder: 'BeamSearchDecoderCTC'):
        """Wire up the three components, validating that the decoder's
        alphabet covers the tokenizer vocabulary.

        Raises ValueError on a wrong decoder type or missing tokens.
        """
        from pyctcdecode import BeamSearchDecoderCTC
        super().__init__(feature_extractor, tokenizer)
        if (not isinstance(decoder, BeamSearchDecoderCTC)):
            raise ValueError(f'`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}')
        # Ensure every tokenizer vocab token exists in the decoder alphabet.
        missing_decoder_tokens = self.get_missing_alphabet_tokens(decoder, tokenizer)
        if (len(missing_decoder_tokens) > 0):
            raise ValueError(f"The tokens {missing_decoder_tokens} are defined in the tokenizer's vocabulary, but not in the decoder's alphabet. Make sure to include {missing_decoder_tokens} in the decoder's alphabet.")
        self.decoder = decoder
        # current_processor switches between feature extractor and tokenizer
        # inside the (deprecated) target-processor context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def save_pretrained(self, save_directory):
        """Save feature extractor, tokenizer and decoder to a directory."""
        super().save_pretrained(save_directory)
        self.decoder.save_to_dir(save_directory)

    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load all three components from a local dir or the Hub.

        NOTE(review): takes `cls` — presumably a stripped @classmethod.
        """
        requires_backends(cls, 'pyctcdecode')
        from pyctcdecode import BeamSearchDecoderCTC
        (feature_extractor, tokenizer) = super()._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        if (os.path.isdir(pretrained_model_name_or_path) or os.path.isfile(pretrained_model_name_or_path)):
            decoder = BeamSearchDecoderCTC.load_from_dir(pretrained_model_name_or_path)
        else:
            # Hub download: restrict to the LM and alphabet files only.
            kwargs.pop('_from_auto', None)
            kwargs.pop('trust_remote_code', None)
            language_model_filenames = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, '*')
            alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
            allow_regex = [language_model_filenames, alphabet_filename]
            decoder = BeamSearchDecoderCTC.load_from_hf_hub(pretrained_model_name_or_path, allow_regex=allow_regex, **kwargs)
        # Apply any LM tuning parameters passed through kwargs.
        for attribute in ['alpha', 'beta', 'unk_score_offset', 'score_boundary']:
            value = kwargs.pop(attribute, None)
            if (value is not None):
                cls._set_language_model_attribute(decoder, attribute, value)
        # Same alphabet/vocab consistency check as in __init__.
        missing_decoder_tokens = cls.get_missing_alphabet_tokens(decoder, tokenizer)
        if (len(missing_decoder_tokens) > 0):
            raise ValueError(f"The tokens {missing_decoder_tokens} are defined in the tokenizer's vocabulary, but not in the decoder's alphabet. Make sure to include {missing_decoder_tokens} in the decoder's alphabet.")
        return cls(feature_extractor=feature_extractor, tokenizer=tokenizer, decoder=decoder)

    def _set_language_model_attribute(decoder: 'BeamSearchDecoderCTC', attribute: str, value: float):
        # NOTE(review): no `self`/`cls` — presumably a stripped @staticmethod.
        setattr(decoder.model_container[decoder._model_key], attribute, value)

    def language_model(self):
        """Return the decoder's underlying language model.

        NOTE(review): presumably a stripped @property.
        """
        return self.decoder.model_container[self.decoder._model_key]

    def get_missing_alphabet_tokens(decoder, tokenizer):
        """Return tokenizer vocab tokens absent from the decoder alphabet,
        after normalizing blank/word-delimiter/unk tokens.

        NOTE(review): no `self` — presumably a stripped @staticmethod.
        """
        from pyctcdecode.alphabet import BLANK_TOKEN_PTN, UNK_TOKEN, UNK_TOKEN_PTN
        # Map special tokenizer tokens to their pyctcdecode equivalents.
        tokenizer_vocab_list = list(tokenizer.get_vocab().keys())
        for (i, token) in enumerate(tokenizer_vocab_list):
            if BLANK_TOKEN_PTN.match(token):
                tokenizer_vocab_list[i] = ''
            if (token == tokenizer.word_delimiter_token):
                tokenizer_vocab_list[i] = ' '
            if UNK_TOKEN_PTN.match(token):
                tokenizer_vocab_list[i] = UNK_TOKEN
        missing_tokens = (set(tokenizer_vocab_list) - set(decoder._alphabet.labels))
        return missing_tokens

    def __call__(self, *args, **kwargs):
        """Forward audio to the feature extractor and/or text to the
        tokenizer; with both, the tokenized ids become inputs['labels']."""
        # Legacy path: inside as_target_processor, delegate everything.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if ('raw_speech' in kwargs):
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        if (len(args) > 0):
            # First positional argument is treated as audio.
            audio = args[0]
            args = args[1:]
        if ((audio is None) and (text is None)):
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if (audio is not None):
            inputs = self.feature_extractor(audio, *args, **kwargs)
        if (text is not None):
            encodings = self.tokenizer(text, **kwargs)
        if (text is None):
            return inputs
        elif (audio is None):
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def pad(self, *args, **kwargs):
        """Pad input features and/or labels, mirroring __call__'s routing."""
        # Legacy path: inside as_target_processor, delegate everything.
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if (len(args) > 0):
            input_features = args[0]
            args = args[1:]
        if (input_features is not None):
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if (labels is not None):
            labels = self.tokenizer.pad(labels, **kwargs)
        if (labels is None):
            return input_features
        elif (input_features is None):
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def batch_decode(self, logits: np.ndarray, num_processes: Optional[int]=None, beam_width: Optional[int]=None, beam_prune_logp: Optional[float]=None, token_min_logp: Optional[float]=None, hotwords: Optional[Iterable[str]]=None, hotword_weight: Optional[float]=None, alpha: Optional[float]=None, beta: Optional[float]=None, unk_score_offset: Optional[float]=None, lm_score_boundary: Optional[bool]=None, output_word_offsets: bool=False):
        """Beam-search decode a batch of logits in parallel worker processes.

        Returns a Wav2Vec2DecoderWithLMOutput with texts, logit scores,
        LM scores and (optionally) per-word offsets.
        """
        from pyctcdecode.constants import DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP
        # Fall back to pyctcdecode defaults for unset parameters.
        beam_width = (beam_width if (beam_width is not None) else DEFAULT_BEAM_WIDTH)
        beam_prune_logp = (beam_prune_logp if (beam_prune_logp is not None) else DEFAULT_PRUNE_LOGP)
        token_min_logp = (token_min_logp if (token_min_logp is not None) else DEFAULT_MIN_TOKEN_LOGP)
        hotword_weight = (hotword_weight if (hotword_weight is not None) else DEFAULT_HOTWORD_WEIGHT)
        self.decoder.reset_params(alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary)
        # Drop padded frames (rows that are entirely -100).
        logits_list = [array[(array != (- 100.0)).all(axis=(- 1))] for array in logits]
        pool = get_context('fork').Pool(num_processes)
        decoded_beams = self.decoder.decode_beams_batch(pool, logits_list=logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight)
        pool.close()
        (batch_texts, logit_scores, lm_scores, word_offsets) = ([], [], [], [])
        # Keep only the best beam (index 0) of each utterance.
        for d in decoded_beams:
            batch_texts.append(d[0][0])
            logit_scores.append(d[0][(- 2)])
            lm_scores.append(d[0][(- 1)])
            word_offsets.append([{'word': t[0], 'start_offset': t[1][0], 'end_offset': t[1][1]} for t in d[0][1]])
        word_offsets = (word_offsets if output_word_offsets else None)
        return Wav2Vec2DecoderWithLMOutput(text=batch_texts, logit_score=logit_scores, lm_score=lm_scores, word_offsets=word_offsets)

    def decode(self, logits: np.ndarray, beam_width: Optional[int]=None, beam_prune_logp: Optional[float]=None, token_min_logp: Optional[float]=None, hotwords: Optional[Iterable[str]]=None, hotword_weight: Optional[float]=None, alpha: Optional[float]=None, beta: Optional[float]=None, unk_score_offset: Optional[float]=None, lm_score_boundary: Optional[bool]=None, output_word_offsets: bool=False):
        """Beam-search decode a single utterance's logits; see batch_decode."""
        from pyctcdecode.constants import DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP
        beam_width = (beam_width if (beam_width is not None) else DEFAULT_BEAM_WIDTH)
        beam_prune_logp = (beam_prune_logp if (beam_prune_logp is not None) else DEFAULT_PRUNE_LOGP)
        token_min_logp = (token_min_logp if (token_min_logp is not None) else DEFAULT_MIN_TOKEN_LOGP)
        hotword_weight = (hotword_weight if (hotword_weight is not None) else DEFAULT_HOTWORD_WEIGHT)
        self.decoder.reset_params(alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary)
        decoded_beams = self.decoder.decode_beams(logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight)
        word_offsets = None
        if output_word_offsets:
            word_offsets = [{'word': word, 'start_offset': start_offset, 'end_offset': end_offset} for (word, (start_offset, end_offset)) in decoded_beams[0][2]]
        # Best beam only: text, then second-to-last/last are the scores.
        return Wav2Vec2DecoderWithLMOutput(text=decoded_beams[0][0], logit_score=decoded_beams[0][(- 2)], lm_score=decoded_beams[0][(- 1)], word_offsets=word_offsets)

    def as_target_processor(self):
        """Deprecated: temporarily swap current_processor to the tokenizer.

        NOTE(review): body contains a bare `yield` — presumably a stripped
        @contextlib.contextmanager.
        """
        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        (yield)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def pytest_addoption(parser: Parser) -> None:
    """Register the keyword/mark selection options and marker ini keys."""
    group = parser.getgroup('general')
    # -k: substring/boolean keyword expression matched against test names.
    group._addoption('-k', action='store', dest='keyword', default='', metavar='EXPRESSION', help="Only run tests which match the given substring expression. An expression is a Python evaluatable expression where all names are substring-matched against test names and their parent classes. Example: -k 'test_method or test_other' matches all test functions and classes whose name contains 'test_method' or 'test_other', while -k 'not test_method' matches those that don't contain 'test_method' in their names. -k 'not test_method and not test_other' will eliminate the matches. Additionally keywords are matched to classes and functions containing extra names in their 'extra_keyword_matches' set, as well as functions which have names assigned directly to them. The matching is case-insensitive.")
    # -m: boolean expression over test markers.
    group._addoption('-m', action='store', dest='markexpr', default='', metavar='MARKEXPR', help="Only run tests matching given mark expression. For example: -m 'mark1 and not mark2'.")
    group.addoption('--markers', action='store_true', help='show markers (builtin, plugin and per-project ones).')
    # Ini keys: project-declared markers and the empty-parameterset policy.
    parser.addini('markers', 'Markers for test functions', 'linelist')
    parser.addini(EMPTY_PARAMETERSET_OPTION, 'Default marker for empty parametersets')
class Fp16OptimizerHook(OptimizerHook):
    """Optimizer hook for mixed-precision (fp16) training with a static
    loss scale.

    Keeps fp32 master weights: the fp16 model computes a scaled loss,
    gradients are copied to fp32, unscaled, optionally allreduced and
    clipped, the optimizer steps on fp32, and the updated params are
    copied back to the fp16 model.
    """

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=(- 1), loss_scale=512.0, distributed=True):
        self.grad_clip = grad_clip  # config for clip_grads, or None to skip
        self.coalesce = coalesce  # coalesce tensors during allreduce
        self.bucket_size_mb = bucket_size_mb  # allreduce bucket size; -1 = single bucket
        self.loss_scale = loss_scale  # static loss scaling factor
        self.distributed = distributed  # allreduce gradients across workers

    def before_run(self, runner):
        """Snapshot fp32 master param groups, then convert the model to fp16."""
        # Deep-copy BEFORE wrapping so the optimizer keeps fp32 copies.
        runner.optimizer.param_groups = copy.deepcopy(runner.optimizer.param_groups)
        wrap_fp16_model(runner.model)

    def copy_grads_to_fp32(self, fp16_net, fp32_weights):
        """Copy gradients from the fp16 model into the fp32 master weights."""
        # Fix: `self` was missing from the signature although call sites
        # invoke this as self.copy_grads_to_fp32(...), which raised TypeError.
        for (fp32_param, fp16_param) in zip(fp32_weights, fp16_net.parameters()):
            if (fp16_param.grad is not None):
                if (fp32_param.grad is None):
                    # Lazily allocate the fp32 gradient buffer.
                    fp32_param.grad = fp32_param.data.new(fp32_param.size())
                fp32_param.grad.copy_(fp16_param.grad)

    def copy_params_to_fp16(self, fp16_net, fp32_weights):
        """Copy updated fp32 master weights back into the fp16 model."""
        # Fix: `self` was missing here as well (see copy_grads_to_fp32).
        for (fp16_param, fp32_param) in zip(fp16_net.parameters(), fp32_weights):
            fp16_param.data.copy_(fp32_param.data)

    def after_train_iter(self, runner):
        """One fp16 step: scale loss, backward, unscale, clip, step, copy back."""
        runner.model.zero_grad()
        runner.optimizer.zero_grad()
        scaled_loss = (runner.outputs['loss'] * self.loss_scale)
        scaled_loss.backward()
        # Gather the flat list of fp32 master weights.
        fp32_weights = []
        for param_group in runner.optimizer.param_groups:
            fp32_weights += param_group['params']
        self.copy_grads_to_fp32(runner.model, fp32_weights)
        if self.distributed:
            allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)
        # Undo the loss scaling on the fp32 gradients.
        for param in fp32_weights:
            if (param.grad is not None):
                param.grad.div_(self.loss_scale)
        if (self.grad_clip is not None):
            self.clip_grads(fp32_weights)
        runner.optimizer.step()
        self.copy_params_to_fp16(runner.model, fp32_weights)
def invert(model: torch.nn.Module) -> torch.nn.Module:
    """Build a module computing the inverse of `model` by walking its FX
    graph backwards and replacing each call with its inverse from
    invert_mapping (e.g. exp -> log)."""
    traced = fx.symbolic_trace(model)
    inverted = fx.Graph()
    # Maps original node names to the corresponding node in the new graph.
    value_map = {}
    for node in reversed(traced.graph.nodes):
        if node.op == 'call_function':
            # The inverse op consumes what the original node produced.
            replacement = inverted.call_function(invert_mapping[node.target], (value_map[node.name],))
            value_map[node.args[0].name] = replacement
        elif node.op == 'output':
            # The original output becomes the inverse graph's input.
            value_map[node.args[0].name] = inverted.placeholder(node.name)
        elif node.op == 'placeholder':
            # The original input becomes the inverse graph's output.
            inverted.output(value_map[node.name])
        else:
            raise RuntimeError('Not implemented')
    new_graph = inverted
    new_graph.lint()
    return fx.GraphModule(traced, new_graph)
def test_quantizable_mha_with_value():
    """MultiHeadAttention with explicit query/value/key inputs is wrapped
    by quantization, and deactivating the quantizers restores the
    original outputs exactly."""
    batch_size = 5
    target_len = 8
    source_len = 4
    query_in = keras.Input(shape=(target_len, 16))
    value_in = keras.Input(shape=(source_len, 16))
    key_in = keras.Input(shape=(source_len, 16))
    mha_out = keras.layers.MultiHeadAttention(key_dim=2, num_heads=2)(query_in, value_in, key_in)
    unquantized_model = keras.Model(inputs=[query_in, value_in, key_in], outputs=mha_out)
    quantized_model = QuantizationSimModel(unquantized_model)
    query = np.ones([batch_size, target_len, 16])
    value = np.ones([batch_size, source_len, 16])
    key = np.ones([batch_size, source_len, 16])
    baseline_out = unquantized_model([query, value, key])
    quantized_out = quantized_model.model([query, value, key])
    # With quantizers switched off, the sim model must match the baseline.
    for layer in quantized_model.model.layers:
        if isinstance(layer, QcQuantizableMultiHeadAttention):
            layer.deactivate_quantizers()
    passthrough_out = quantized_model.model([query, value, key])
    for layer in quantized_model.model.layers:
        if isinstance(layer, QcQuantizableMultiHeadAttention):
            layer.reactivate_quantizers()
    assert (baseline_out.shape == quantized_out.shape == passthrough_out.shape)
    assert tf.equal(baseline_out, passthrough_out).numpy().flatten().all()
    # Wrapping happens only in the sim model, never in the original.
    assert not any(isinstance(layer, QcQuantizableMultiHeadAttention) for layer in unquantized_model.layers)
    assert any(isinstance(layer, QcQuantizableMultiHeadAttention) for layer in quantized_model.model.layers)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.