code stringlengths 281 23.7M |
|---|
def rating_tamplate(pattern_identifier, context=None):
    """Build a one-column inline keyboard with bad/regular/good rating buttons.

    Each button's callback data is *pattern_identifier* followed by the
    numeric rating constant so the callback handler can recover both.
    (Function name kept as-is: the "tamplate" typo is part of the public API.)
    """
    rows = [
        [InlineButton(get_text(label, context),
                      callback_data=pattern_identifier + str(rating))]
        for label, rating in (
            ('bad', BAD_RATING),
            ('regular', REGULAR_RATING),
            ('good', GOOD_RATING),
        )
    ]
    return InlineKeyboardMarkup(rows)
def main():
    """Poll an attached arduino hat forever, printing events and baud changes."""
    print('initializing arduino')
    config = {
        'host': 'localhost',
        'hat': {'arduino': {'device': '/dev/spidev0.1', 'resetpin': '26'}},
        'actions': {},
        'arduino.nmea.baud': 4800,
        'arduino.nmea.in': False,
        'arduino.nmea.out': False,
        'arduino.ir': True,
        'arduino.debug': True,
        'arduino.adc_channels': [],
    }
    device = arduino(config)
    last_time = 0
    while True:
        now = time.monotonic()
        events = device.poll()
        if events:
            # Show the time delta since the previous batch of events.
            print(events, now, now - last_time)
            last_time = now
        baud_rate = device.get_baud_rate()
        if baud_rate:
            print('baud rate', baud_rate)
class UnexpectedPasswordHashVersion(InvalidPassword, WalletFileException):
    """Raised when a wallet file uses a password hash version we don't know."""

    def __init__(self, version):
        # The unknown hash version, echoed in the error message.
        self.version = version

    def __str__(self):
        unexpected = _('Unexpected password hash version')
        instruction = _('You are most likely using an outdated version of Electrum. Please update.')
        return '{unexpected}: {version}\n{instruction}'.format(
            unexpected=unexpected,
            version=self.version,
            instruction=instruction,
        )
def test_environment_only(hatch, helpers, temp_dir, config_file):
    # `hatch dep show table -e` should list only the environment's own
    # dependencies; the project template is created without a tests env.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    project = Project(project_path)
    # Inject a fake dependency into the default environment's config.
    helpers.update_project_environment(project, 'default', {'dependencies': ['foo-bar-baz']})
    with project_path.as_cwd():
        result = hatch('dep', 'show', 'table', '--ascii', '-e')
    assert (result.exit_code == 0), result.output
    # The rendered ASCII table must contain exactly the injected dependency.
    assert (helpers.remove_trailing_spaces(result.output) == helpers.dedent('\n Env: default\n ++\n | Name |\n ++\n | foo-bar-baz |\n ++\n '))
def _send_button_click_event(widget, **kwargs):
    """Synthesize a button press + release at the centre of *widget*.

    Extra keyword args are set verbatim on the GdkEventButton fields (e.g.
    ``button=1``). Returns True if either event was handled by the widget.
    """
    assert widget.get_realized()
    assert widget.get_visible()
    ev = Gdk.Event()
    window = widget.get_window()
    ev.any.window = window
    # Aim at the middle of the widget's window.
    ev.button.x = (window.get_width() / 2.0)
    ev.button.y = (window.get_height() / 2.0)
    for (key, value) in kwargs.items():
        # Fail loudly on typos rather than silently dropping a field.
        assert hasattr(ev.button, key)
        setattr(ev.button, key, value)
    # Deliver press then release, reusing the same event struct.
    ev.any.type = Gdk.EventType.BUTTON_PRESS
    handled = widget.event(ev)
    ev.any.type = Gdk.EventType.BUTTON_RELEASE
    handled |= widget.event(ev)
    return handled
def FlagsForFile(filename, **kwargs):
    """YouCompleteMe entry point: return the compile flags for *filename*.

    Prefers the compilation database when one was loaded; otherwise falls
    back to the static ``flags`` list, made absolute relative to this script.
    Returns None when the database has no entry for the file.
    """
    if not database:
        # No compilation database: use the hard-coded flag list.
        final_flags = MakeRelativePathsInFlagsAbsolute(flags, DirectoryOfThisScript())
        return {'flags': final_flags, 'do_cache': True}
    compilation_info = GetCompilationInfoForFile(filename)
    if not compilation_info:
        return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
        compilation_info.compiler_flags_,
        compilation_info.compiler_working_dir_,
    )
    return {'flags': final_flags, 'do_cache': True}
class MyTree(Tree):
    """Tree subclass that assigns each node a (start, end) leaf-index span."""

    def set_index(self, ind=0):
        """Recursively assign ``self._i = (start, end)`` leaf spans.

        *ind* is the index of the first leaf under this node; the return
        value is the index one past the last leaf assigned.
        """
        if len(self.leaves()) == 1:
            # Single-leaf node: span is exactly one leaf wide.
            self._i = (ind, ind + 1)
            if isinstance(self[0], MyTree):
                self[0].set_index(ind)
            return ind + 1
        self._i = (ind, ind + 1)
        for child in self:
            if isinstance(child, unicode):
                # Bare string leaf inside a multi-leaf node: stop here.
                return ind
            # BUG FIX (simplification): the original branched on
            # ``len(child.leaves()) == 1`` but both branches were byte-identical
            # calls to ``child.set_index(ind)``; collapsed the dead branch.
            ind = child.set_index(ind)
        # Widen this node's span to cover all children just visited.
        self._i = (self._i[0], ind)
        return ind

    def get_index(self):
        """Return the (start, end) leaf span assigned by set_index()."""
        return self._i
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab v3).

    Runs the input through a 1x1 conv branch, three atrous conv branches
    (one per rate in *atrous_rates*, which must have exactly three entries),
    and a global-pooling branch, then projects the channel-concatenated
    result back down to *out_channels*.
    """

    def __init__(self, in_channels, out_channels, atrous_rates, separable=False):
        super(ASPP, self).__init__()
        branch_cls = ASPPSeparableConv if separable else ASPPConv
        # 1x1 projection branch.
        branches = [
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(),
            )
        ]
        # Exactly three atrous rates are required (unpacking enforces this).
        rate1, rate2, rate3 = tuple(atrous_rates)
        branches.extend(
            branch_cls(in_channels, out_channels, rate)
            for rate in (rate1, rate2, rate3)
        )
        branches.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(branches)
        # 5 branches are concatenated channel-wise before projection.
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
        )

    def forward(self, x):
        features = [branch(x) for branch in self.convs]
        return self.project(torch.cat(features, dim=1))
class RemoteReceiveEvent(ModbusEvent):
    """Modbus remote-device "message receive" event (Get Comm Event Log).

    Bit layout of the one-byte event:
      bits 0-3  unused (zero here)
      bit 4     character overrun
      bit 5     currently in listen-only mode
      bit 6     broadcast received
      bit 7     always 1 (marks a receive event)
    """

    def __init__(self, **kwargs):
        self.overrun = kwargs.get('overrun', False)
        self.listen = kwargs.get('listen', False)
        self.broadcast = kwargs.get('broadcast', False)

    def encode(self) -> bytes:
        """Encode the event flags into a single byte."""
        # BUG FIX: was ``[False] * 3``, which shifted every flag down one
        # bit so encode() disagreed with decode() below (which reads
        # bits[4], bits[5], bits[6]) — the round-trip was broken.
        bits = [False] * 4
        bits += [self.overrun, self.listen, self.broadcast, True]
        packet = pack_bitstring(bits)
        return packet

    def decode(self, event: bytes) -> None:
        """Decode the event byte back into the flag attributes."""
        bits = unpack_bitstring(event)
        self.overrun = bits[4]
        self.listen = bits[5]
        self.broadcast = bits[6]
# NOTE(review): this bare call looks like the residue of a stripped
# registration decorator (upstream GraphGPS decorates the function below
# with ``@register_train('PCQM4Mv2-inference')``). Confirm against the
# original source before relying on this line as-is.
_train('PCQM4Mv2-inference')
def ogblsc_inference(loggers, loaders, model, optimizer=None, scheduler=None):
    """Run inference for OGB-LSC PCQM4Mv2 on valid / test-dev / test-challenge.

    The first split is labeled and is scored with the official evaluator
    (MAE); the two test splits are unlabeled and their predictions are
    written out as submission files. *optimizer*/*scheduler* are unused and
    exist only to match the training-loop signature.
    """
    from ogb.lsc import PCQM4Mv2Evaluator
    evaluator = PCQM4Mv2Evaluator()
    num_splits = 3
    split_names = ['valid', 'test-dev', 'test-challenge']
    assert (len(loaders) == num_splits), 'Expecting 3 particular splits.'
    # Sanity-check split identity: valid must be labeled, tests unlabeled.
    logging.info(f'0 ({split_names[0]}): {len(loaders[0].dataset)}')
    assert all([(not torch.isnan(d.y)[0]) for d in loaders[0].dataset])
    logging.info(f'1 ({split_names[1]}): {len(loaders[1].dataset)}')
    assert all([torch.isnan(d.y)[0] for d in loaders[1].dataset])
    logging.info(f'2 ({split_names[2]}): {len(loaders[2].dataset)}')
    assert all([torch.isnan(d.y)[0] for d in loaders[2].dataset])
    model.eval()
    for i in range(num_splits):
        all_true = []
        all_pred = []
        for batch in loaders[i]:
            batch.to(torch.device(cfg.accelerator))
            (pred, true) = model(batch)
            # Move results off the accelerator as they are produced.
            all_true.append(true.detach().to('cpu', non_blocking=True))
            all_pred.append(pred.detach().to('cpu', non_blocking=True))
        (all_true, all_pred) = (torch.cat(all_true), torch.cat(all_pred))
        if (i == 0):
            # Labeled validation split: report MAE via the official evaluator.
            input_dict = {'y_pred': all_pred.squeeze(), 'y_true': all_true.squeeze()}
            result_dict = evaluator.eval(input_dict)
            logging.info(f"{split_names[i]}: MAE = {result_dict['mae']}")
        else:
            # Unlabeled test splits: save predictions in submission format.
            input_dict = {'y_pred': all_pred.squeeze()}
            evaluator.save_test_submission(input_dict=input_dict, dir_path=cfg.run_dir, mode=split_names[i])
def get_class(module, superclass=None):
    """Return the single matching class found in *module*.

    Discovery is delegated to ``get_classes``. Raises AttributeError when
    the module defines zero or more than one matching class.
    """
    candidates = get_classes(module, superclass)
    if len(candidates) == 1:
        return candidates[0]
    # Build the description used by both error messages.
    if superclass:
        desc = 'subclasses of %s' % superclass.__name__
    else:
        desc = 'new-style classes'
    if len(candidates) > 1:
        names = ', '.join(cls.__name__ for cls in candidates)
        raise AttributeError(('Module %s contains multiple %s (%s).' % (module.__name__, desc, names)))
    raise AttributeError(('Module %s contains no %s.' % (module.__name__, desc)))
def docker_start(image, volumes=None, env_variables=None):
    """Start a detached container from *image* that just sleeps.

    volumes: mapping of container path -> host path, mounted read-write.
    env_variables: environment mapping passed straight to the container.
    Returns the started container object.
    """
    # BUG FIX (idiom): mutable default arguments ({}) replaced with None
    # sentinels; behavior for callers is unchanged.
    if volumes is None:
        volumes = {}
    if env_variables is None:
        env_variables = {}
    client = docker.from_env()
    # docker-py expects {host_path: {'bind': container_path, 'mode': ...}}.
    dvolumes = {host: {'bind': ctr, 'mode': 'rw'} for (ctr, host) in volumes.items()}
    logger.info('Starting container with image %r', image)
    con = client.containers.run(image, ['sleep', '10000'], detach=True, volumes=dvolumes, environment=env_variables)
    logger.info('Started container %s', con.id[:12])
    return con
def test_order_dependencies_no_auto_mark(no_dep_marks):
    # With automark_dependency disabled, --order-dependencies still orders
    # explicitly marked tests: one test is skipped (unmet dependency), the
    # other two pass.
    no_dep_marks.makefile('.ini', pytest='\n [pytest]\n automark_dependency = 0\n console_output_style = classic\n ')
    result = no_dep_marks.runpytest('-v', '--order-dependencies')
    result.assert_outcomes(passed=2, skipped=1)
    result.stdout.fnmatch_lines(['test_auto.py::test_a SKIPPED*', 'test_auto.py::test_b PASSED', 'test_auto.py::test_c PASSED'])
def parse(tokens):
    """Recursively parse a parenthesized token stream into nested dicts.

    The first token is an opener like ``(name``; subsequent tokens are
    either nested groups (start with ``(``) or a closer ``value)``.
    Quoted values of the form ``"#..#"`` are decoded as hexadecimal ints.
    Consumes tokens in place and returns ``(parsed, remaining_tokens)``.
    """
    key = tokens.pop(0)[1:]          # drop the leading '('
    result = {key: {}}
    while tokens:
        token = tokens.pop(0)
        if token.endswith(')'):
            body = token[:-1]
            if body:
                # Trailing scalar value, e.g. 'val)' or '"#ff#")'.
                value = body.strip('"')
                if value.startswith('#') and value.endswith('#'):
                    value = int(value[1:-1], 16)   # '#ff#' -> 255
                result[key] = value
            return (result, tokens)
        if token.startswith('('):
            # Nested group: recurse and merge its mapping into ours.
            (child, tokens) = parse([token] + tokens)
            result[key].update(child)
    return (result, [])
class FractionalCloudCover(metaclass=_EnumMeta):
    # Enumeration of cloud cover in oktas (eighths of sky covered), per the
    # OpenSCENARIO FractionalCloudCover type; every member requires minor
    # version >= 2. Values zero through eight are the standard okta scale;
    # nineOktas conventionally denotes sky obstructed / not observable.
    # (Comments only here: the _EnumMeta metaclass consumes the class body.)
    zeroOktas = _OscEnum('FractionalCloudCover', 'zeroOktas', min_minor_version=2)
    oneOktas = _OscEnum('FractionalCloudCover', 'oneOktas', min_minor_version=2)
    twoOktas = _OscEnum('FractionalCloudCover', 'twoOktas', min_minor_version=2)
    threeOktas = _OscEnum('FractionalCloudCover', 'threeOktas', min_minor_version=2)
    fourOktas = _OscEnum('FractionalCloudCover', 'fourOktas', min_minor_version=2)
    fiveOktas = _OscEnum('FractionalCloudCover', 'fiveOktas', min_minor_version=2)
    sixOktas = _OscEnum('FractionalCloudCover', 'sixOktas', min_minor_version=2)
    sevenOktas = _OscEnum('FractionalCloudCover', 'sevenOktas', min_minor_version=2)
    eightOktas = _OscEnum('FractionalCloudCover', 'eightOktas', min_minor_version=2)
    nineOktas = _OscEnum('FractionalCloudCover', 'nineOktas', min_minor_version=2)
class TTCON(TestCase):
    # Tests for ID3 TCON genre parsing: numeric references, parenthesized
    # refs, CR/RX specials, escapes, and null-separated multi-genre text.
    def _g(self, s):
        # Shorthand: parse *s* as TCON text and return the genre list.
        return TCON(text=s).genres
    def test_empty(self):
        self.assertEquals(self._g(''), [])
    def test_num(self):
        # Bare zero-padded numbers map straight into the GENRES table.
        for i in range(len(GENRES)):
            self.assertEquals(self._g(('%02d' % i)), [GENRES[i]])
    def test_parened_num(self):
        # ID3v1-style parenthesized references resolve the same way.
        for i in range(len(GENRES)):
            self.assertEquals(self._g(('(%02d)' % i)), [GENRES[i]])
    def test_unknown(self):
        # 255 and out-of-table values map to 'Unknown'; >255 does not.
        self.assertEquals(self._g('(255)'), ['Unknown'])
        self.assertEquals(self._g('199'), ['Unknown'])
        self.assertNotEqual(self._g('256'), ['Unknown'])
    def test_parened_multi(self):
        self.assertEquals(self._g('(00)(02)'), ['Blues', 'Country'])
    def test_coverremix(self):
        # Special CR/RX codes expand to Cover/Remix.
        self.assertEquals(self._g('CR'), ['Cover'])
        self.assertEquals(self._g('(CR)'), ['Cover'])
        self.assertEquals(self._g('RX'), ['Remix'])
        self.assertEquals(self._g('(RX)'), ['Remix'])
    def test_parened_text(self):
        # Free text after the refs becomes its own genre entry.
        self.assertEquals(self._g('(00)(02)Real Folk Blues'), ['Blues', 'Country', 'Real Folk Blues'])
    def test_escape(self):
        # '((' escapes a literal open paren in free text.
        self.assertEquals(self._g('(0)((A genre)'), ['Blues', '(A genre)'])
        self.assertEquals(self._g('(10)((20)'), ['New Age', '(20)'])
    def test_nullsep(self):
        self.assertEquals(self._g('0\x00A genre'), ['Blues', 'A genre'])
    def test_nullsep_empty(self):
        # A leading null separator is tolerated.
        self.assertEquals(self._g('\x000\x00A genre'), ['Blues', 'A genre'])
    def test_crazy(self):
        # Mixed refs, specials, empty fields and free text in one frame.
        self.assertEquals(self._g('(20)(CR)\x0030\x00\x00Another\x00(51)Hooray'), ['Alternative', 'Cover', 'Fusion', 'Another', 'Techno-Industrial', 'Hooray'])
    def test_repeat(self):
        # Text repeating the numeric ref is merged unless null-separated.
        self.assertEquals(self._g('(20)Alternative'), ['Alternative'])
        self.assertEquals(self._g('(20)\x00Alternative'), ['Alternative', 'Alternative'])
    def test_set_genre(self):
        gen = TCON(encoding=0, text='')
        self.assertEquals(gen.genres, [])
        gen.genres = ['a genre', 'another']
        self.assertEquals(gen.genres, ['a genre', 'another'])
    def test_set_string(self):
        # Assigning a bare string wraps it in a single-element list.
        gen = TCON(encoding=0, text='')
        gen.genres = 'foo'
        self.assertEquals(gen.genres, ['foo'])
    def test_nodoubledecode(self):
        # Round-tripping genres through the setter must not re-parse refs.
        gen = TCON(encoding=1, text=u'(255)genre')
        gen.genres = gen.genres
        self.assertEquals(gen.genres, [u'Unknown', u'genre'])
def get_normalized_dependency(requirement: Requirement) -> str:
    """Normalize a parsed requirement and render it as a canonical string.

    Normalizes the project name and any extras, lower-cases the specifier
    set, and renders with single quotes. Note that *requirement* is mutated
    in place.
    """
    from packaging.specifiers import SpecifierSet

    requirement.name = normalize_project_name(requirement.name)
    if requirement.specifier:
        lowered = str(requirement.specifier).lower()
        requirement.specifier = SpecifierSet(lowered)
    if requirement.extras:
        normalized_extras = {normalize_project_name(extra) for extra in requirement.extras}
        requirement.extras = normalized_extras
    return str(requirement).replace('"', "'")
class BatchNorm(nn.BatchNorm2d):
    # BatchNorm2d variant that, at eval time, can independently choose the
    # tracked running mean and/or running variance versus the current
    # batch's own statistics. Training always uses batch statistics.
    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, use_tracked_mean=True, use_tracked_var=True):
        nn.BatchNorm2d.__init__(self, num_features=num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
        # Eval-time statistic selection flags.
        self.use_tracked_mean = use_tracked_mean
        self.use_tracked_var = use_tracked_var
    def forward(self, x):
        self._check_input_dim(x)
        # Fold to (channels, everything-else) so per-channel stats are
        # simple row-wise reductions.
        y = x.transpose(0, 1)
        return_shape = y.shape
        y = y.contiguous().view(x.size(1), (- 1))
        mu = y.mean(dim=1)
        sigma2 = y.var(dim=1)
        if (self.training is not True):
            # Eval path: pick mean/variance source per flag.
            if self.use_tracked_mean:
                y = (y - self.running_mean.view((- 1), 1))
            else:
                y = (y - mu.view((- 1), 1))
            # NOTE(review): eps is added to the *standard deviation* here,
            # not inside the sqrt of the variance as stock BatchNorm does —
            # presumably intentional; confirm against the training recipe.
            if self.use_tracked_var:
                y = (y / ((self.running_var.view((- 1), 1) ** 0.5) + self.eps))
            else:
                y = (y / ((sigma2.view((- 1), 1) ** 0.5) + self.eps))
        elif (self.training is True):
            if (self.track_running_stats is True):
                # Update running stats with an exponential moving average.
                with torch.no_grad():
                    self.running_mean = (((1 - self.momentum) * self.running_mean) + (self.momentum * mu))
                    self.running_var = (((1 - self.momentum) * self.running_var) + (self.momentum * sigma2))
            y = (y - mu.view((- 1), 1))
            y = (y / ((sigma2.view((- 1), 1) ** 0.5) + self.eps))
        # Affine transform, then restore the original layout.
        y = ((self.weight.view((- 1), 1) * y) + self.bias.view((- 1), 1))
        return y.view(return_shape).transpose(0, 1)
class CustomRandomCrop(RandomCrop):
    """RandomCrop variant that clamps the crop size to the image size.

    If the requested crop is larger than the image along an axis, the crop
    shrinks to fit along that axis instead of requiring padding.
    """

    def forward(self, img):
        (width, height) = F.get_image_size(img)
        (want_h, want_w) = self.size
        # Never request more than the image actually has.
        crop_h = min(want_h, height)
        crop_w = min(want_w, width)
        (top, left, h, w) = self.get_params(img, (crop_h, crop_w))
        return F.crop(img, top, left, h, w)
class FinTSMessage(SegmentSequence):
    """A FinTS message: an ordered sequence of auto-numbered FinTS3 segments.

    Segments are appended with ``+=``, which stamps each with the next
    sequential segment number. DIRECTION is set by subclasses.
    """
    DIRECTION = None
    def __init__(self, dialog=None, *args, **kwargs):
        self.dialog = dialog
        # Segment numbering starts at 1 and increments per appended segment.
        self.next_segment_number = 1
        super().__init__(*args, **kwargs)
    def __iadd__(self, segment: FinTS3Segment):
        """Append *segment*, stamping it with the next segment number."""
        if (not isinstance(segment, FinTS3Segment)):
            raise TypeError('Can only append FinTS3Segment instances, not {!r}'.format(segment))
        segment.header.number = self.next_segment_number
        self.next_segment_number += 1
        self.segments.append(segment)
        return self
    def response_segments(self, ref, *args, **kwargs):
        """Yield matching segments whose header references *ref*'s number."""
        for segment in self.find_segments(*args, **kwargs):
            if (segment.header.reference == ref.header.number):
                (yield segment)
    def responses(self, ref, code=None):
        """Yield HIRMS2 responses for *ref*, optionally filtered by *code*."""
        for segment in self.response_segments(ref, HIRMS2):
            for response in segment.responses:
                if ((code is None) or (response.code == code)):
                    (yield response)
class Embedding(Layer):
    """Turns positive integer indices into dense vectors of fixed size.

    A trainable (input_dim, output_dim) lookup table. With
    ``mask_zero=True``, index 0 is treated as padding and a mask is emitted
    for downstream layers.
    """
    # NOTE(review): bare expression kept from the original; it looks like
    # residue of a stripped decorator/marker — confirm against upstream.
    _embedding_support

    def __init__(self, input_dim, output_dim, embeddings_initializer='uniform', embeddings_regularizer=None, activity_regularizer=None, embeddings_constraint=None, mask_zero=False, input_length=None, **kwargs):
        # Derive input_shape from input_length when the caller gave neither.
        if ('input_shape' not in kwargs):
            if input_length:
                kwargs['input_shape'] = (input_length,)
            else:
                kwargs['input_shape'] = (None,)
        super(Embedding, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.embeddings_constraint = constraints.get(embeddings_constraint)
        self.mask_zero = mask_zero
        self.input_length = input_length

    def build(self, input_shape):
        # The lookup table is the layer's only weight.
        self.embeddings = self.add_weight(shape=(self.input_dim, self.output_dim), initializer=self.embeddings_initializer, name='embeddings', regularizer=self.embeddings_regularizer, constraint=self.embeddings_constraint, dtype=self.dtype)
        self.built = True

    def compute_mask(self, inputs, mask=None):
        # With mask_zero, every non-zero index is "real" data.
        if (not self.mask_zero):
            return None
        else:
            return K.not_equal(inputs, 0)

    def compute_output_shape(self, input_shape):
        if (self.input_length is None):
            return (input_shape + (self.output_dim,))
        else:
            if isinstance(self.input_length, (list, tuple)):
                in_lens = list(self.input_length)
            else:
                in_lens = [self.input_length]
            if (len(in_lens) != (len(input_shape) - 1)):
                # BUG FIX: the original constructed this ValueError but
                # never raised it, silently falling through.
                raise ValueError(('"input_length" is %s, but received input has shape %s' % (str(self.input_length), str(input_shape))))
            else:
                for (i, (s1, s2)) in enumerate(zip(in_lens, input_shape[1:])):
                    if ((s1 is not None) and (s2 is not None) and (s1 != s2)):
                        # BUG FIX: likewise raised instead of discarded.
                        raise ValueError(('"input_length" is %s, but received input has shape %s' % (str(self.input_length), str(input_shape))))
                    elif (s1 is None):
                        # Fill unknown declared lengths from the actual shape.
                        in_lens[i] = s2
            return (((input_shape[0],) + tuple(in_lens)) + (self.output_dim,))

    def call(self, inputs):
        # Embedding lookups require int32 indices on most backends.
        if (K.dtype(inputs) != 'int32'):
            inputs = K.cast(inputs, 'int32')
        out = K.gather(self.embeddings, inputs)
        return out

    def get_config(self):
        config = {'input_dim': self.input_dim, 'output_dim': self.output_dim, 'embeddings_initializer': initializers.serialize(self.embeddings_initializer), 'embeddings_regularizer': regularizers.serialize(self.embeddings_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'embeddings_constraint': constraints.serialize(self.embeddings_constraint), 'mask_zero': self.mask_zero, 'input_length': self.input_length}
        base_config = super(Embedding, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
class TestAllForkSeq(uvm_sequence):
    # Virtual sequence: forks RandomSeq and MaxSeq on the shared sequencer
    # and waits for both to finish.
    async def body(self):
        seqr = ConfigDB().get(None, '', 'SEQR')
        random = RandomSeq('random')
        max = MaxSeq('max')
        # Launch both child sequences concurrently on the same sequencer.
        random_task = cocotb.start_soon(random.start(seqr))
        max_task = cocotb.start_soon(max.start(seqr))
        # Block until both forked sequences complete.
        (await Combine(Join(random_task), Join(max_task)))
class Expanding(Window):
    # Expanding (cumulative) window over a streaming collection.
    def aggregate(self, agg):
        # Reuses the generic window accumulator with the expanding-specific
        # diff function; emits an 'updating'-type stream carrying state.
        window = self.n
        diff = aggregations.diff_expanding
        return self.root.accumulate_partitions(aggregations.window_accumulator, diff=diff, window=window, agg=agg, start=self.start, returns_state=True, stream_type='updating', with_state=self.with_state)
    def groupby(self, other):
        # Grouped expanding aggregations are not supported.
        raise NotImplementedError
def multithread_compute_vali():
    """Evaluate validation metrics across cmd_args.num_thread worker threads.

    Workers (vali_eval) accumulate into the module-level vali_sum/vali_cnt;
    returns the three metric averages. (Python 2 code: uses ``xrange``.)
    """
    global vali_sum, vali_cnt
    vali_sum = [0.0, 0.0, 0.0]
    vali_cnt = 0
    threads = []
    for ii in xrange(cmd_args.num_thread):
        thread = threading.Thread(target=vali_eval, args=(1, ii))
        thread.start()
        threads.append(thread)
    # Wait for every worker before reading the shared accumulators.
    for thread in threads:
        thread.join()
    return ((vali_sum[0] / vali_cnt), (vali_sum[1] / vali_cnt), (vali_sum[2] / vali_cnt))
def _preprocess(data):
    """Normalize raw CLI entry fields in *data* in place.

    - parses 'date'/'start'/'end' into POCKET_DATE_FORMAT
    - splits 'filters' ("key=value" items) into a dict, mapping
      default-indicator values to None
    - expands a 'month' shorthand into a "YYYY-MM-" date filter
    - routes recurrent-looking entries to the recurrent table
    - strips and validates 'category'/'name'
    Raises exceptions.PreprocessingError on any malformed field.
    """
    for field in ['date', 'start', 'end']:
        date = data.get(field)
        # An 'end' equal to the unset indicator is deliberately left as-is.
        if ((field == 'end') and (date == UNSET_INDICATOR)):
            continue
        if (date is not None):
            try:
                date = time.strftime(POCKET_DATE_FORMAT, du_parser.parse(date, yearfirst=True).timetuple())
                data[field] = date
            except ValueError:
                raise exceptions.PreprocessingError('Invalid date format.')
    filter_items = data.get('filters')
    parsed_items = {}
    if (filter_items is not None):
        try:
            for item in filter_items:
                (key, value) = item.split('=')
                parsed_items[key] = value.lower()
            # Default-indicator values mean "filter for unset".
            for (field, indicator) in zip(['category', 'end'], [entries.CategoryEntry.DEFAULT_NAME, '']):
                try:
                    if (parsed_items[field] == indicator):
                        parsed_items[field] = None
                except KeyError:
                    pass
            data['filters'] = parsed_items
        except ValueError:
            # 'item' is the filter that failed to split into key=value.
            raise exceptions.PreprocessingError(f'Invalid filter format: {item}')
    month = data.pop('month', None)
    if (month is not None):
        today = datetime.today()
        if (month == 'current'):
            date = today
        else:
            date = None
            # Accept month name (%b/%B) or number (%m plus platform variants).
            for fmt in ['b', 'B', 'm', '-m', '#m']:
                try:
                    date = datetime.strptime(month, f'%{fmt}').replace(year=today.year)
                    break
                except ValueError:
                    continue
            if (date is None):
                raise exceptions.PreprocessingError(f'Invalid month: {month}')
        if (filter_items is None):
            data['filters'] = {}
        # Prefix filter matching every day in the chosen month.
        data['filters']['date'] = f"{date.strftime('%Y-%m')}-"
    if any([data.get(f) for f in ['frequency', 'start', 'end']]):
        data['table_name'] = RECURRENT_TABLE
    if any(((n in parsed_items) for n in ['start', 'end', 'frequency'])):
        data['recurrent_only'] = True
    for field_name in ['category', 'name']:
        field = data.get(field_name)
        if (field is not None):
            field = field.strip()
            if (not len(field)):
                raise exceptions.PreprocessingError(f'Empty {field_name} given.')
            data[field_name] = field
def set_question_optionset(apps, schema_editor):
    """Data migration: copy each question's attribute optionsets onto the
    question's own ``optionsets`` relation.

    Questions missing a link anywhere along the attribute_entity chain are
    skipped silently.
    """
    Question = apps.get_model('questions', 'Question')
    for question in Question.objects.all():
        try:
            related_optionsets = question.attribute_entity.attribute.optionsets.all()
            for related_optionset in related_optionsets:
                question.optionsets.add(related_optionset)
        except AttributeError:
            # attribute_entity or attribute may be absent on some questions.
            pass
def make_batches(lines, args, task, max_positions, encode_fn):
    """Yield Batch objects for interactive fairseq generation.

    When --constraints is enabled, each line may carry tab-separated
    constraint strings after the source text; these are encoded and packed
    into a tensor that rides along with the batch.
    """
    def encode_fn_target(x):
        return encode_fn(x)
    if args.constraints:
        # Strip the (tab-delimited) constraints off each input line.
        batch_constraints = [list() for _ in lines]
        for (i, line) in enumerate(lines):
            if ('\t' in line):
                (lines[i], *batch_constraints[i]) = line.split('\t')
        # Encode each constraint string with the target dictionary.
        for (i, constraint_list) in enumerate(batch_constraints):
            batch_constraints[i] = [task.target_dictionary.encode_line(encode_fn_target(constraint), append_eos=False, add_if_not_exist=False) for constraint in constraint_list]
    tokens = [task.source_dictionary.encode_line(encode_fn(src_str), add_if_not_exist=False).long() for src_str in lines]
    if args.constraints:
        constraints_tensor = pack_constraints(batch_constraints)
    else:
        constraints_tensor = None
    lengths = [t.numel() for t in tokens]
    # Single-epoch, order-preserving iterator over the inference dataset.
    itr = task.get_batch_iterator(dataset=task.build_dataset_for_inference(tokens, lengths, constraints=constraints_tensor), max_tokens=args.max_tokens, max_sentences=args.batch_size, max_positions=max_positions, ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test).next_epoch_itr(shuffle=False)
    for batch in itr:
        ids = batch['id']
        src_tokens = batch['net_input']['src_tokens']
        src_lengths = batch['net_input']['src_lengths']
        constraints = batch.get('constraints', None)
        (yield Batch(ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints))
class SeriesParser(Parser):
    # read_json parser that builds a pandas Series from decoded JSON.
    _default_orient = 'index'
    # Keys required in the decoded payload when orient == 'split'.
    _split_keys = ('name', 'index', 'data')
    def _parse_no_numpy(self):
        # Pure-Python decode path.
        json = self.json
        orient = self.orient
        if (orient == 'split'):
            # Keys may decode as non-strings; normalize before ** expansion.
            decoded = dict(((str(k), v) for (k, v) in compat.iteritems(loads(json, precise_float=self.precise_float))))
            self.check_keys_split(decoded)
            self.obj = Series(dtype=None, **decoded)
        else:
            self.obj = Series(loads(json, precise_float=self.precise_float), dtype=None)
    def _parse_numpy(self):
        # numpy-accelerated decode path.
        json = self.json
        orient = self.orient
        if (orient == 'split'):
            decoded = loads(json, dtype=None, numpy=True, precise_float=self.precise_float)
            decoded = dict(((str(k), v) for (k, v) in compat.iteritems(decoded)))
            self.check_keys_split(decoded)
            self.obj = Series(**decoded)
        elif ((orient == 'columns') or (orient == 'index')):
            # labelled=True yields (values, index) splatted into Series.
            self.obj = Series(*loads(json, dtype=None, numpy=True, labelled=True, precise_float=self.precise_float))
        else:
            self.obj = Series(loads(json, dtype=None, numpy=True, precise_float=self.precise_float))
    def _try_convert_types(self):
        # Post-parse dtype/date coercion; no-op if parsing failed.
        if (self.obj is None):
            return
        (obj, result) = self._try_convert_data('data', self.obj, convert_dates=self.convert_dates)
        if result:
            self.obj = obj
def test_arguments_default_value():
    # Arguments.default_value should resolve defaults for both regular and
    # keyword-only parameters, even with *args/**kwargs present.
    node = extract_node("def fruit(eat='please', *, peel='no', trim='yes', **kwargs): ...")
    assert (node.args.default_value('eat').value == 'please')
    node = extract_node("def fruit(seeds, flavor='good', *, peel='maybe'): ...")
    assert (node.args.default_value('flavor').value == 'good')
def binary_search2(fre, cand, level):
    """Binary-search sorted *fre* for the first entry whose prefix of length
    ``level - 1`` equals *cand*; return its index, or -1 when absent.

    Entries are compared on ``entry[0:level - 1]`` (a fixed-length prefix),
    not full equality.
    """
    lo, hi = 0, len(fre) - 1
    if lo > hi:
        # Empty input.
        return (- 1)
    cut = level - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = fre[mid][0:cut]
        if cand == probe:
            # Found *a* match; narrow left to the first matching position.
            if cand == fre[lo][0:cut]:
                return lo
            left, right = lo, mid
            while left < right:
                half = (left + right) // 2
                if cand == fre[half][0:cut]:
                    right = half
                else:
                    left = half + 1
            # Defensive clamp kept from the original implementation.
            if (left < 0) or (left >= len(fre)):
                left = 0
            return left
        if cand < probe:
            hi = mid - 1
        else:
            lo = mid + 1
    return (- 1)
def call_with_extended_paramz(f, args, keys, vals, env, cont):
    """Call *f* with *args* under a parameterization extended by keys/vals.

    Reads the current parameterization from the continuation marks, extends
    it with the given parameter keys/values, and resumes *f* through
    call_with_paramz_cont.
    """
    from pycket.values import parameterization_key
    paramz = cont.get_mark_first(parameterization_key)
    # A parameterization must already be installed on the continuation.
    assert isinstance(paramz, values_parameter.W_Parameterization)
    return paramz.extend(keys, vals, env, call_with_paramz_cont(f, args, env, cont))
class TestGridEngineCollector(CollectorTestCase):
    # Tests for GridEngineCollector's queue-statistics collection.
    def setUp(self):
        config = get_collector_config('GridEngineCollector', {})
        self.collector = GridEngineCollector(config, None)
        self.fixtures_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'fixtures'))
    def test_import(self):
        self.assertTrue(GridEngineCollector)
    # NOTE(review): the bare tuples below look like residue of stripped
    # ``@patch.object(...)`` decorators for the following test methods — as
    # written they are no-op expressions and the publish_mock/xml_mock
    # parameters are never injected. Confirm against the original source.
    (GridEngineCollector, '_queue_stats_xml')
    (Collector, 'publish')
    def test_queue_stats_should_work_with_real_data(self, publish_mock, xml_mock):
        # Feed a recorded qstat XML fixture and verify every derived metric.
        xml_mock.return_value = self.getFixture('queue_stats.xml').getvalue()
        self.collector._collect_queue_stats()
        published_metrics = {'queues.hadoop.load': 0.00532, 'queues.hadoop.used': 0, 'queues.hadoop.resv': 0, 'queues.hadoop.available': 0, 'queues.hadoop.total': 36, 'queues.hadoop.temp_disabled': 0, 'queues.hadoop.manual_intervention': 36, 'queues.primary_q.load': 0.20509, 'queues.primary_q.used': 1024, 'queues.primary_q.resv': 0, 'queues.primary_q.available': 1152, 'queues.primary_q.total': 2176, 'queues.primary_q.temp_disabled': 0, 'queues.primary_q.manual_intervention': 0, 'queues.secondary_q.load': 0.0046, 'queues.secondary_q.used': 145, 'queues.secondary_q.resv': 0, 'queues.secondary_q.available': 1007, 'queues.secondary_q.total': 1121, 'queues.secondary_q.temp_disabled': 1, 'queues.secondary_q.manual_intervention': 0}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=published_metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, published_metrics)
    (GridEngineCollector, '_queue_stats_xml')
    (Collector, 'publish')
    def test_707(self, publish_mock, xml_mock):
        # Regression fixture for issue 707: parsing must not raise.
        xml_mock.return_value = self.getFixture('707.xml').getvalue()
        self.collector._collect_queue_stats()
def rtn_write(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    """Hook for libc ``write``: emulate write(fd, buf, size).

    stdout/stderr data is forwarded to the host descriptor only when the
    corresponding pipe_* config flag is set; fd 0 is rejected; any other
    known fd is written directly. Returns the byte count claimed written
    (``size``), or 0 for fd 0 / unknown descriptors.
    """
    logger.debug('write hooked')
    fd = pstate.get_argument_value(0)
    buf = pstate.get_argument_value(1)
    size = pstate.get_argument_value(2)
    data = pstate.memory.read(buf, size)
    if pstate.file_descriptor_exists(fd):
        fdesc = pstate.get_file_descriptor(fd)
        if (fd == 0):
            # Writing to stdin is refused.
            return 0
        elif (fd == 1):
            # Forward to host stdout only when piping is enabled.
            if se.config.pipe_stdout:
                fdesc.fd.buffer.write(data)
                fdesc.fd.flush()
        elif (fd == 2):
            # Forward to host stderr only when piping is enabled.
            if se.config.pipe_stderr:
                fdesc.fd.buffer.write(data)
                fdesc.fd.flush()
        else:
            fdesc.fd.write(data)
    else:
        # Unknown descriptor: report nothing written.
        return 0
    # Note: size is reported even when piping was disabled above.
    return size
# NOTE(review): the expressions below look like residue of stripped click
# decorators (``@click.command()`` followed by ``@click.option(...)`` lines)
# for main(). As written they are not even valid expressions (keyword
# arguments inside tuple displays); confirm against the original source.
()
('--filename', default='samples/sample_wind_poitiers.csv', help='Input filename')
('--filename_out', default='windrose.pdf', help='Output filename')
('--dpi', default=DPI_DEFAULT, help='Dot per inch for plot generation')
('--figsize', default=S_FIGSIZE_DEFAULT, help=('Figure size x,y - default=%s' % S_FIGSIZE_DEFAULT))
('--bins_min', default=0.01, help='Bins minimum value')
('--bins_max', default=20, help='Bins maximum value')
('--bins_step', default=2, help='Bins step value')
('--fontname', default='Courier New', help='Font name')
('--show/--no-show', default=False, help='Show figure')
('--dt_from', default='', help='Datetime from')
('--dt_to', default='', help='Datetime to')
('--offset', default=0, help='Axe figure offset')
('--ncols', default=4, help='Number of columns per figure')
('--nrows', default=3, help='Number of rows per figure')
def main(filename, dt_from, dt_to, dpi, figsize, bins_min, bins_max, bins_step, ncols, nrows, fontname, show, filename_out, offset):
    """Plot monthly wind scatter (Vx, Vy) sheets from a CSV into a PDF.

    Each month gets one subplot; sheets of ncols x nrows subplots are
    written page by page into *filename_out*.
    """
    # 'W,H' string -> (float, float).
    figsize = figsize.split(',')
    figsize = tuple(map(float, figsize))
    (width, height) = figsize
    df_all = pd.read_csv(filename)
    df_all['Timestamp'] = pd.to_datetime(df_all['Timestamp'])
    df_all = df_all.set_index('Timestamp')
    df_all.index = df_all.index.tz_localize('UTC').tz_convert('UTC')
    # Default date range: the full extent of the data.
    if (dt_from == ''):
        dt_from = df_all.index[0]
    if (dt_to == ''):
        dt_to = df_all.index[(- 1)]
    df_all = df_all[dt_from:dt_to]
    direction_all = df_all['direction'].values
    var_all = df_all['speed'].values
    # Group rows by month.
    by_all = df_all.index.map(by_func_monthly)
    by_unique = np.unique(by_all)
    print(by_unique)
    with PdfPages(filename_out) as pdf:
        for (i, by_value) in enumerate(by_unique):
            print(('processing: %s' % str(by_value)))
            # Start a fresh sheet of subplots when the previous one is full.
            if ((((i + offset) % (ncols * nrows)) == 0) or (i == 0)):
                (fig, axs) = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize, dpi=dpi, facecolor='w', edgecolor='w')
                print(f'''{fig!r}
{fig.axes!r}
{axs!r}''')
            (i_sheet, sheet_pos) = divmod((i + offset), (ncols * nrows))
            (i_row, i_col) = divmod(sheet_pos, ncols)
            ax = fig.axes[sheet_pos]
            mask = (pd.Series(by_all) == by_value).values
            var = var_all[mask]
            direction = direction_all[mask]
            # Decompose speed/direction into cartesian wind components.
            Vx = (var * sin(((pi / 180) * direction)))
            Vy = (var * cos(((pi / 180) * direction)))
            ax.scatter(Vx, Vy, alpha=0.1)
            v = 40
            ax.set_xlim((- v), v)
            ax.set_ylim((- v), v)
            dt = datetime.date(by_value[0], by_value[1], 1)
            fmt = '%B'
            title = dt.strftime(fmt)
            ax.set_title(title, fontname=fontname)
            fig_title = dt.strftime('%Y')
            fig.suptitle(fig_title)
            # Flush the sheet to the PDF once it is full.
            remaining = (((i + offset) + 1) % (ncols * nrows))
            if (remaining == 0):
                save_figure(fig, pdf, show, fig_title)
        # Flush the last, partially filled sheet.
        if (remaining != 0):
            save_figure(fig, pdf, show, fig_title)
    print(("Save file to '%s'" % filename_out))
    print(('remaining: %d' % remaining))
class BasicBlock(nn.Module):
    """ResNet BasicBlock: two 3x3 convs with a residual connection.

    ``expansion`` must be 1 (a BasicBlock never widens); ``downsample``
    adapts the identity branch when stride/channels change; ``with_cp``
    enables gradient checkpointing of the residual body.
    """

    def __init__(self, in_channels, out_channels, expansion=1, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN')):
        # Deep-copy so neither the caller's dict nor the shared default
        # argument is ever mutated downstream.
        norm_cfg = copy.deepcopy(norm_cfg)
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.expansion = expansion
        assert (self.expansion == 1)
        assert ((out_channels % expansion) == 0)
        self.mid_channels = (out_channels // expansion)
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        (self.norm1_name, norm1) = build_norm_layer(norm_cfg, self.mid_channels, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(norm_cfg, out_channels, postfix=2)
        self.conv1 = build_conv_layer(conv_cfg, in_channels, self.mid_channels, 3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg, self.mid_channels, out_channels, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    @property
    def norm1(self):
        # BUG FIX: forward() calls ``self.norm1(out)``, which only works when
        # norm1/norm2 are properties returning the registered norm module;
        # the @property decorators were missing (plain methods would raise
        # "takes 1 positional argument but 2 were given").
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        return getattr(self, self.norm2_name)

    def forward(self, x):
        def _inner_forward(x):
            # conv -> norm -> relu -> conv -> norm, plus identity shortcut.
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            out = self.conv2(out)
            out = self.norm2(out)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out
        if (self.with_cp and x.requires_grad):
            # Trade compute for memory on the residual body.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
class ElectrodeSOHSolver():
def __init__(self, parameter_values, param=None, known_value='cyclable lithium capacity', options=None):
self.parameter_values = parameter_values
self.param = (param or pybamm.LithiumIonParameters(options))
self.known_value = known_value
self.options = (options or pybamm.BatteryModelOptions({}))
self.lims_ocp = self._get_lims_ocp()
self.OCV_function = None
self._get_electrode_soh_sims_full = lru_cache()(self.__get_electrode_soh_sims_full)
self._get_electrode_soh_sims_split = lru_cache()(self.__get_electrode_soh_sims_split)
def _get_lims_ocp(self):
parameter_values = self.parameter_values
if (self.options['open-circuit potential'] == 'MSMR'):
OCPp_data = False
OCPn_data = False
else:
OCPp_data = isinstance(parameter_values['Positive electrode OCP [V]'], tuple)
OCPn_data = isinstance(parameter_values['Negative electrode OCP [V]'], tuple)
if OCPp_data:
Up_sto = parameter_values['Positive electrode OCP [V]'][1][0]
y100_min = (max(np.min(Up_sto), 0) + 1e-06)
y0_max = (min(np.max(Up_sto), 1) - 1e-06)
else:
y100_min = 1e-06
y0_max = (1 - 1e-06)
if OCPn_data:
Un_sto = parameter_values['Negative electrode OCP [V]'][1][0]
x0_min = (max(np.min(Un_sto), 0) + 1e-06)
x100_max = (min(np.max(Un_sto), 1) - 1e-06)
else:
x0_min = 1e-06
x100_max = (1 - 1e-06)
return (x0_min, x100_max, y100_min, y0_max)
def __get_electrode_soh_sims_full(self):
if (self.options['open-circuit potential'] == 'MSMR'):
full_model = _ElectrodeSOHMSMR(param=self.param, known_value=self.known_value)
else:
full_model = _ElectrodeSOH(param=self.param, known_value=self.known_value)
return pybamm.Simulation(full_model, parameter_values=self.parameter_values)
def __get_electrode_soh_sims_split(self):
if (self.options['open-circuit potential'] == 'MSMR'):
x100_model = _ElectrodeSOHMSMR(param=self.param, solve_for=['Un_100'], known_value=self.known_value)
x0_model = _ElectrodeSOHMSMR(param=self.param, solve_for=['Un_0'], known_value=self.known_value)
else:
x100_model = _ElectrodeSOH(param=self.param, solve_for=['x_100'], known_value=self.known_value)
x0_model = _ElectrodeSOH(param=self.param, solve_for=['x_0'], known_value=self.known_value)
x100_sim = pybamm.Simulation(x100_model, parameter_values=self.parameter_values)
x0_sim = pybamm.Simulation(x0_model, parameter_values=self.parameter_values)
return [x100_sim, x0_sim]
def solve(self, inputs):
    """Solve the eSOH model for the stoichiometry limits at 0% and 100% SOC.

    Parameters
    ----------
    inputs : dict
        Must contain "Q_n" and "Q_p", plus "Q_Li" or "Q" depending on
        ``self.known_value``. Deprecated keys ("n_Li", "C_n", "C_p",
        "V_min", "V_max") are converted or dropped with a warning.

    Returns
    -------
    dict
        Scalar value for every model variable; non-MSMR results also get
        "Maximum theoretical energy [W.h]".
    """
    # --- Backwards compatibility: translate deprecated input keys in place ---
    if ('n_Li' in inputs):
        warnings.warn("Input 'n_Li' has been replaced by 'Q_Li', which is 'n_Li * F / 3600'. This will be automatically calculated for now. Q_Li can be read from parameters as 'param.Q_Li_particles_init'", DeprecationWarning)
        n_Li = inputs.pop('n_Li')
        # Convert moles of lithium to capacity in Ah (F coulombs/mol / 3600 s/h).
        inputs['Q_Li'] = ((n_Li * pybamm.constants.F.value) / 3600)
    if ('C_n' in inputs):
        warnings.warn("Input 'C_n' has been renamed to 'Q_n'", DeprecationWarning)
        inputs['Q_n'] = inputs.pop('C_n')
    if ('C_p' in inputs):
        warnings.warn("Input 'C_p' has been renamed to 'Q_p'", DeprecationWarning)
        inputs['Q_p'] = inputs.pop('C_p')
    # Voltage limits are no longer inputs; they come from the parameter set.
    if (inputs.pop('V_min', None) is not None):
        warnings.warn("V_min has been removed from the inputs. The 'Open-circuit voltage at 0% SOC [V]' parameter is now used automatically.", DeprecationWarning)
    if (inputs.pop('V_max', None) is not None):
        warnings.warn("V_max has been removed from the inputs. The 'Open-circuit voltage at 100% SOC [V]' parameter is now used automatically.", DeprecationWarning)
    # Initial guesses (possibly warm-started from cached solutions).
    ics = self._set_up_solve(inputs)
    try:
        # Try the fully coupled solve first.
        sol = self._solve_full(inputs, ics)
    except pybamm.SolverError:
        # Fall back to solving the 100% and 0% endpoints sequentially.
        try:
            sol = self._solve_split(inputs, ics)
        except pybamm.SolverError as split_error:
            # Diagnose infeasible voltage limits for a clearer error,
            # then re-raise the solver failure.
            self._check_esoh_feasible(inputs)
            raise split_error
    # Collapse each (single-point) solution variable to a plain scalar.
    sol_dict = {key: sol[key].data[0] for key in sol.all_models[0].variables.keys()}
    if (self.options['open-circuit potential'] != 'MSMR'):
        energy = self.theoretical_energy_integral(sol_dict)
        sol_dict.update({'Maximum theoretical energy [W.h]': energy})
    return sol_dict
def _set_up_solve(self, inputs):
    """Construct initial guesses for the eSOH solve.

    Prefers cached solutions (full simulation first, then the split pair);
    otherwise builds heuristic guesses from the stoichiometry limits.

    Returns
    -------
    dict
        Initial-condition values keyed by model variable name.
    """
    sim = self._get_electrode_soh_sims_full()
    if (sim.solution is not None):
        # Warm start from the previous full solve.
        if (self.options['open-circuit potential'] == 'MSMR'):
            Un_100_sol = sim.solution['Un(x_100)'].data
            Un_0_sol = sim.solution['Un(x_0)'].data
            Up_100_sol = sim.solution['Up(y_100)'].data
            Up_0_sol = sim.solution['Up(y_0)'].data
            # NOTE(review): keys below use 'Up(x_100)'/'Up(x_0)' while the
            # fresh-guess branch at the bottom returns 'Up(y_100)'/'Up(y_0)'
            # — confirm which names the MSMR model variables actually use.
            return {'Un(x_100)': Un_100_sol, 'Un(x_0)': Un_0_sol, 'Up(x_100)': Up_100_sol, 'Up(x_0)': Up_0_sol}
        else:
            x100_sol = sim.solution['x_100'].data
            x0_sol = sim.solution['x_0'].data
            y100_sol = sim.solution['y_100'].data
            y0_sol = sim.solution['y_0'].data
            return {'x_100': x100_sol, 'x_0': x0_sol, 'y_100': y100_sol, 'y_0': y0_sol}
    if (self.known_value == 'cyclable lithium capacity'):
        # Warm start from the previous split solves if both are available.
        (x100_sim, x0_sim) = self._get_electrode_soh_sims_split()
        if ((x100_sim.solution is not None) and (x0_sim.solution is not None)):
            if (self.options['open-circuit potential'] == 'MSMR'):
                Un_100_sol = x100_sim.solution['Un_100'].data
                Un_0_sol = x0_sim.solution['Un_0'].data
                Up_100_sol = x100_sim.solution['Up_100'].data
                Up_0_sol = x0_sim.solution['Up_0'].data
                # NOTE(review): same 'Up(x_...)' vs 'Up(y_...)' naming
                # inconsistency as above — confirm.
                return {'Un(x_100)': Un_100_sol, 'Un(x_0)': Un_0_sol, 'Up(x_100)': Up_100_sol, 'Up(x_0)': Up_0_sol}
            else:
                x100_sol = x100_sim.solution['x_100'].data
                x0_sol = x0_sim.solution['x_0'].data
                y100_sol = x100_sim.solution['y_100'].data
                y0_sol = x0_sim.solution['y_0'].data
                return {'x_100': x100_sol, 'x_0': x0_sol, 'y_100': y100_sol, 'y_0': y0_sol}
    # No cached solutions: build heuristic guesses from the feasible limits.
    (x0_min, x100_max, y100_min, y0_max) = self._get_lims(inputs)
    if (self.known_value == 'cyclable lithium capacity'):
        # Generic mid-range guesses clipped to the feasible window.
        x100_init = np.minimum(x100_max, 0.8)
        x0_init = np.maximum(x0_min, 0.2)
        y100_init = np.maximum(y100_min, 0.2)
        y0_init = np.minimum(y0_max, 0.8)
    elif (self.known_value == 'cell capacity'):
        # Guess endpoints consistent with the known capacity swing Q.
        Q = inputs['Q']
        Q_n = inputs['Q_n']
        Q_p = inputs['Q_p']
        x0_min = np.maximum(x0_min, 0.1)
        x100_max = np.minimum(x100_max, 0.9)
        y100_min = np.maximum(y100_min, 0.1)
        y0_max = np.minimum(y0_max, 0.9)
        x100_init = np.minimum((x0_min + (Q / Q_n)), 0.9)
        x0_init = np.maximum((x100_max - (Q / Q_n)), 0.1)
        y100_init = np.maximum((y0_max - (Q / Q_p)), 0.1)
        y0_init = np.minimum((y100_min + (Q / Q_p)), 0.9)
    if (self.options['open-circuit potential'] == 'MSMR'):
        # For MSMR the initial conditions are potentials, not stoichiometries:
        # invert the potential model at the guessed stoichiometries.
        msmr_pot_model = _get_msmr_potential_model(self.parameter_values, self.param)
        sol0 = pybamm.AlgebraicSolver().solve(msmr_pot_model, inputs={'x': x0_init, 'y': y0_init})
        sol100 = pybamm.AlgebraicSolver().solve(msmr_pot_model, inputs={'x': x100_init, 'y': y100_init})
        return {'Un(x_100)': sol100['Un'].data, 'Un(x_0)': sol0['Un'].data, 'Up(y_100)': sol100['Up'].data, 'Up(y_0)': sol0['Up'].data}
    else:
        return {'x_100': x100_init, 'x_0': x0_init, 'y_100': y100_init, 'y_0': y0_init}
def _solve_full(self, inputs, ics):
    """Solve the fully coupled eSOH model from the given initial conditions."""
    simulation = self._get_electrode_soh_sims_full()
    simulation.build()
    # Seed the algebraic solve with the supplied initial guesses.
    simulation.built_model.set_initial_conditions_from(ics)
    return simulation.solve([0], inputs=inputs)
def _solve_split(self, inputs, ics):
    """Solve the 100% SOC endpoint first, then the 0% endpoint.

    The fully-charged solution is injected into ``inputs`` so the second
    solve can treat it as known data. Returns the 0% SOC solution.
    """
    (sim_100, sim_0) = self._get_electrode_soh_sims_split()
    sim_100.build()
    sim_100.built_model.set_initial_conditions_from(ics)
    sol_100 = sim_100.solve([0], inputs=inputs)
    # Carry the fully-charged results forward as inputs for the 0% solve.
    if self.options['open-circuit potential'] == 'MSMR':
        carried = ('Un(x_100)', 'Up(y_100)')
    else:
        carried = ('x_100', 'y_100')
    for key in carried:
        inputs[key] = sol_100[key].data[0]
    sim_0.build()
    sim_0.built_model.set_initial_conditions_from(ics)
    return sim_0.solve([0], inputs=inputs)
def _get_lims(self, inputs):
Q_p = inputs['Q_p']
Q_n = inputs['Q_n']
(x0_min, x100_max, y100_min, y0_max) = self.lims_ocp
if (self.known_value == 'cyclable lithium capacity'):
Q_Li = inputs['Q_Li']
Q_Li_min = ((Q_n * x0_min) + (Q_p * y100_min))
Q_Li_max = ((Q_n * x100_max) + (Q_p * y0_max))
if (not (Q_Li_min <= Q_Li <= Q_Li_max)):
raise ValueError(f'Q_Li={Q_Li:.4f} Ah is outside the range of possible values [{Q_Li_min:.4f}, {Q_Li_max:.4f}].')
if (Q_Li > Q_p):
warnings.warn(f'Q_Li={Q_Li:.4f} Ah is greater than Q_p={Q_p:.4f} Ah.')
x100_max_from_y100_min = ((Q_Li - (y100_min * Q_p)) / Q_n)
x0_min_from_y0_max = ((Q_Li - (y0_max * Q_p)) / Q_n)
y100_min_from_x100_max = ((Q_Li - (x100_max * Q_n)) / Q_p)
y0_max_from_x0_min = ((Q_Li - (x0_min * Q_n)) / Q_p)
x100_max = min(x100_max_from_y100_min, x100_max)
x0_min = max(x0_min_from_y0_max, x0_min)
y100_min = max(y100_min_from_x100_max, y100_min)
y0_max = min(y0_max_from_x0_min, y0_max)
elif (self.known_value == 'cell capacity'):
Q = inputs['Q']
Q_max = min((Q_n * (x100_max - x0_min)), (Q_p * (y0_max - y100_min)))
if (Q > Q_max):
raise ValueError(f'Q={Q:.4f} Ah is larger than the maximum possible capacity Q_max={Q_max:.4f} Ah.')
if (not ((0 < x0_min < x100_max < 1) and (0 < y100_min < y0_max < 1))):
raise ValueError(f"'0 < x0_min < x100_max < 1' is False for x0_min={x0_min:.4f} and x100_max={x100_max:.4f} or '0 < y100_min < y0_max < 1' is False for y100_min={y100_min:.4f} and y0_max={y0_max:.4f}")
return (x0_min, x100_max, y100_min, y0_max)
def _check_esoh_feasible(self, inputs):
    """Check that the target voltage window is reachable within the
    stoichiometry limits, raising a descriptive ValueError if not.

    Used as a diagnostic after the eSOH solve fails, to turn an opaque
    solver error into an actionable message.
    """
    (x0_min, x100_max, y100_min, y0_max) = self._get_lims(inputs)
    # Lazily build and cache the OCV function and target voltage window.
    if (self.OCV_function is None):
        self.V_max = self.parameter_values.evaluate(self.param.ocp_soc_100_dimensional)
        self.V_min = self.parameter_values.evaluate(self.param.ocp_soc_0_dimensional)
        if (self.options['open-circuit potential'] == 'MSMR'):
            # Sentinel: MSMR evaluates the OCV via the potential model below.
            self.OCV_function = 'MSMR'
        else:
            T = self.parameter_values['Reference temperature [K]']
            x = pybamm.InputParameter('x')
            y = pybamm.InputParameter('y')
            # OCV(x, y) = U_p(y) - U_n(x) at the reference temperature.
            self.OCV_function = self.parameter_values.process_symbol((self.param.p.prim.U(y, T) - self.param.n.prim.U(x, T)))
    if (self.options['open-circuit potential'] == 'MSMR'):
        # Invert the MSMR potential model at both stoichiometry extremes.
        msmr_pot_model = _get_msmr_potential_model(self.parameter_values, self.param)
        sol0 = pybamm.AlgebraicSolver(tol=0.0001).solve(msmr_pot_model, inputs={'x': x0_min, 'y': y0_max})
        sol100 = pybamm.AlgebraicSolver(tol=0.0001).solve(msmr_pot_model, inputs={'x': x100_max, 'y': y100_min})
        Up0 = sol0['Up'].data[0]
        Un0 = sol0['Un'].data[0]
        Up100 = sol100['Up'].data[0]
        Un100 = sol100['Un'].data[0]
        V_lower_bound = float((Up0 - Un0))
        V_upper_bound = float((Up100 - Un100))
    else:
        # Achievable OCV at the fully-discharged and fully-charged extremes.
        V_lower_bound = float(self.OCV_function.evaluate(inputs={'x': x0_min, 'y': y0_max}).item())
        V_upper_bound = float(self.OCV_function.evaluate(inputs={'x': x100_max, 'y': y100_min}).item())
    if (V_lower_bound > self.V_min):
        raise ValueError(f'The lower bound of the voltage, {V_lower_bound:.4f}V, is greater than the target minimum voltage, {self.V_min:.4f}V. Stoichiometry limits are x:[{x0_min:.4f}, {x100_max:.4f}], y:[{y100_min:.4f}, {y0_max:.4f}].')
    if (V_upper_bound < self.V_max):
        raise ValueError(f'The upper bound of the voltage, {V_upper_bound:.4f}V, is less than the target maximum voltage, {self.V_max:.4f}V. Stoichiometry limits are x:[{x0_min:.4f}, {x100_max:.4f}], y:[{y100_min:.4f}, {y0_max:.4f}].')
def get_initial_stoichiometries(self, initial_value):
    """Return electrode stoichiometries (x, y) for a given initial state.

    Parameters
    ----------
    initial_value : float or str
        Either an SOC in [0, 1], or a string like "3.7V" giving an initial
        open-circuit voltage.

    Returns
    -------
    tuple
        ``(x, y)`` negative- and positive-electrode stoichiometries.

    Raises
    ------
    ValueError
        If the voltage is outside the limits or the SOC is not in [0, 1].
    """
    parameter_values = self.parameter_values
    param = self.param
    (x_0, x_100, y_100, y_0) = self.get_min_max_stoichiometries()
    if (isinstance(initial_value, str) and initial_value.endswith('V')):
        # Voltage-specified initial state: solve OCV(soc) = V_init for soc.
        V_init = float(initial_value[:(- 1)])
        V_min = parameter_values.evaluate(param.ocp_soc_0_dimensional)
        V_max = parameter_values.evaluate(param.ocp_soc_100_dimensional)
        if (not (V_min < V_init < V_max)):
            raise ValueError(f'Initial voltage {V_init}V is outside the voltage limits ({V_min}, {V_max})')
        soc_model = pybamm.BaseModel()
        soc = pybamm.Variable('soc')
        # Linear maps from SOC to stoichiometry (x increases, y decreases).
        x = (x_0 + (soc * (x_100 - x_0)))
        y = (y_0 - (soc * (y_0 - y_100)))
        if (self.options['open-circuit potential'] == 'MSMR'):
            # MSMR: the inverse maps x(U) are explicit, so solve for the
            # electrode potentials alongside soc as algebraic unknowns.
            xn = param.n.prim.x
            xp = param.p.prim.x
            Up = pybamm.Variable('Up')
            Un = pybamm.Variable('Un')
            soc_model.algebraic[Up] = (x - xn(Un))
            soc_model.algebraic[Un] = (y - xp(Up))
            soc_model.initial_conditions[Un] = 0
            soc_model.initial_conditions[Up] = V_max
            soc_model.algebraic[soc] = ((Up - Un) - V_init)
        else:
            Up = param.p.prim.U
            Un = param.n.prim.U
            T_ref = parameter_values['Reference temperature [K]']
            soc_model.algebraic[soc] = ((Up(y, T_ref) - Un(x, T_ref)) - V_init)
        # Linear-in-voltage initial guess for the SOC.
        soc_model.initial_conditions[soc] = ((V_init - V_min) / (V_max - V_min))
        soc_model.variables['soc'] = soc
        parameter_values.process_model(soc_model)
        initial_soc = pybamm.AlgebraicSolver().solve(soc_model, [0])['soc'].data[0]
    elif isinstance(initial_value, (int, float)):
        initial_soc = initial_value
        if (not (0 <= initial_soc <= 1)):
            raise ValueError('Initial SOC should be between 0 and 1')
    else:
        raise ValueError("Initial value must be a float between 0 and 1, or a string ending in 'V'")
    # Map the resolved SOC back onto the stoichiometry window.
    x = (x_0 + (initial_soc * (x_100 - x_0)))
    y = (y_0 - (initial_soc * (y_0 - y_100)))
    return (x, y)
def get_min_max_stoichiometries(self):
    """Solve the eSOH model with initial parameter values and return the
    stoichiometry window ``[x_0, x_100, y_100, y_0]``."""
    values = self.parameter_values
    param = self.param
    # Electrode capacities are always needed.
    solve_inputs = {
        'Q_n': values.evaluate(param.n.Q_init),
        'Q_p': values.evaluate(param.p.Q_init),
    }
    # Add whichever capacity quantity is the "known" one for this solver.
    if self.known_value == 'cyclable lithium capacity':
        solve_inputs['Q_Li'] = values.evaluate(param.Q_Li_particles_init)
    elif self.known_value == 'cell capacity':
        solve_inputs['Q'] = values.evaluate((param.Q / param.n_electrodes_parallel))
    sol = self.solve(solve_inputs)
    return [sol[key] for key in ('x_0', 'x_100', 'y_100', 'y_0')]
def get_initial_ocps(self, initial_value):
    """Return the electrode open-circuit potentials ``(Un, Up)`` at the
    given initial state (SOC in [0, 1] or a voltage string like "3.7V")."""
    values = self.parameter_values
    param = self.param
    (x, y) = self.get_initial_stoichiometries(initial_value)
    if self.options['open-circuit potential'] == 'MSMR':
        # MSMR potentials are implicit: invert the potential model at (x, y).
        pot_model = _get_msmr_potential_model(values, param)
        solution = pybamm.AlgebraicSolver().solve(pot_model, inputs={'x': x, 'y': y})
        Un = solution['Un'].data[0]
        Up = solution['Up'].data[0]
    else:
        # Explicit OCP functions: evaluate directly at reference temperature.
        T_ref = values['Reference temperature [K]']
        Un = values.evaluate(param.n.prim.U(x, T_ref))
        Up = values.evaluate(param.p.prim.U(y, T_ref))
    return (Un, Up)
def get_min_max_ocps(self):
    """Solve the eSOH model with initial parameter values and return the
    electrode OCPs at the window limits: ``[Un(x_0), Un(x_100), Up(y_100), Up(y_0)]``."""
    values = self.parameter_values
    param = self.param
    solve_inputs = {
        'Q_n': values.evaluate(param.n.Q_init),
        'Q_p': values.evaluate(param.p.Q_init),
    }
    # Add whichever capacity quantity is the "known" one for this solver.
    if self.known_value == 'cyclable lithium capacity':
        solve_inputs['Q_Li'] = values.evaluate(param.Q_Li_particles_init)
    elif self.known_value == 'cell capacity':
        solve_inputs['Q'] = values.evaluate((param.Q / param.n_electrodes_parallel))
    sol = self.solve(solve_inputs)
    return [sol[key] for key in ('Un(x_0)', 'Un(x_100)', 'Up(y_100)', 'Up(y_0)')]
def theoretical_energy_integral(self, inputs, points=1000):
    """Calculate the maximum theoretical energy [W.h] between the
    stoichiometry limits by integrating the OCV over capacity.

    Parameters
    ----------
    inputs : dict
        Must contain "x_0", "x_100", "y_0", "y_100" and "Q_p".
    points : int, optional
        Number of quadrature points along the SOC sweep (default 1000).

    Returns
    -------
    float
        Theoretical energy in W.h.
    """
    x_0 = inputs['x_0']
    y_0 = inputs['y_0']
    x_100 = inputs['x_100']
    y_100 = inputs['y_100']
    Q_p = inputs['Q_p']
    # Sample both stoichiometries on a uniform grid from 100% to 0% SOC.
    x_vals = np.linspace(x_100, x_0, num=points)
    y_vals = np.linspace(y_100, y_0, num=points)
    param = self.param
    T = param.T_amb_av(0)
    # Open-circuit voltage U_p(y) - U_n(x) at every sample point.
    Vs = self.parameter_values.evaluate((param.p.prim.U(y_vals, T) - param.n.prim.U(x_vals, T))).flatten()
    # Capacity swing of the positive electrode between the two limits.
    Q = (Q_p * (y_0 - y_100))
    dQ = (Q / (points - 1))
    # np.trapz was removed in NumPy 2.0 in favour of np.trapezoid;
    # support both so the code runs on either major version.
    _trapezoid = getattr(np, 'trapezoid', None)
    if _trapezoid is None:
        _trapezoid = np.trapz
    E = _trapezoid(Vs, dx=dQ)
    return E
def test_arguments_marker(testdir):
    """Stacked firefox_arguments markers (module- and test-level) accumulate."""
    generated_test = testdir.makepyfile("\n import pytest\n pytestmark = pytest.mark.firefox_arguments('baz')\n .nondestructive\n .firefox_arguments('foo', 'bar')\n def test_arguments(firefox_options):\n actual = sorted(firefox_options.arguments)\n expected = sorted(['baz', 'foo', 'bar'])\n assert actual == expected\n ")
    testdir.quick_qa(generated_test, passed=1)
class PrologLexer(RegexLexer):
    """Lexer for Prolog files.

    Note: the original ``url`` line was an unterminated string literal
    (a syntax error); restored to the Wikipedia article on Prolog.
    """
    name = 'Prolog'
    aliases = ['prolog']
    filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl']
    mimetypes = ['text/x-prolog']
    url = 'https://en.wikipedia.org/wiki/Prolog'
    version_added = ''
    tokens = {'root': [('/\\*', Comment.Multiline, 'nested-comment'), ('%.*', Comment.Single), ("0\\'.", String.Char), ('0b[01]+', Number.Bin), ('0o[0-7]+', Number.Oct), ('0x[0-9a-fA-F]+', Number.Hex), ("\\d\\d?\\'[a-zA-Z0-9]+", Number.Integer), ('(\\d+\\.\\d*|\\d*\\.\\d+)([eE][+-]?[0-9]+)?', Number.Float), ('\\d+', Number.Integer), ('[\\[\\](){}|.,;!]', Punctuation), (':-|-->', Punctuation), ('"(?:\\\\x[0-9a-fA-F]+\\\\|\\\\u[0-9a-fA-F]{4}|\\\\U[0-9a-fA-F]{8}|\\\\[0-7]+\\\\|\\\\["\\\\abcefnrstv]|[^\\\\"])*"', String.Double), ("'(?:''|[^'])*'", String.Atom), ('is\\b', Operator), ('(<|>|=<|>=|==|=:=|=|/|//|\\*|\\+|-)(?=\\s|[a-zA-Z0-9\\[])', Operator), ('(mod|div|not)\\b', Operator), ('_', Keyword), ('([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)), ('([a-z\\u00c0-\\u1fff\\u3040-\\ud7ff\\ue000-\\uffef][\\w$\\u00c0-\\u1fff\\u3040-\\ud7ff\\ue000-\\uffef]*)(\\s*)(:-|-->)', bygroups(Name.Function, Text, Operator)), ('([a-z\\u00c0-\\u1fff\\u3040-\\ud7ff\\ue000-\\uffef][\\w$\\u00c0-\\u1fff\\u3040-\\ud7ff\\ue000-\\uffef]*)(\\s*)(\\()', bygroups(Name.Function, Text, Punctuation)), ('[a-z\\u00c0-\\u1fff\\u3040-\\ud7ff\\ue000-\\uffef][\\w$\\u00c0-\\u1fff\\u3040-\\ud7ff\\ue000-\\uffef]*', String.Atom), ('[#&*+\\-./:<=>?\\\\^~\\u00a1-\\u00bf\\u2010-\\u303f]+', String.Atom), ('[A-Z_]\\w*', Name.Variable), ('\\s+|[\\u2000-\\u200f\\ufff0-\\ufffe\\uffef]', Text)], 'nested-comment': [('\\*/', Comment.Multiline, '#pop'), ('/\\*', Comment.Multiline, '#push'), ('[^*/]+', Comment.Multiline), ('[*/]', Comment.Multiline)]}

    def analyse_text(text):
        # A ':-' rule marker is a moderately strong hint of Prolog.
        if (':-' in text):
            return 0.5
        else:
            return 0
def _verify_patchelf() -> None:
    """Verify that ``patchelf`` is on PATH and is at least version 0.14.

    Raises
    ------
    ValueError
        If patchelf is missing, cannot be executed, or is too old.
    """
    if not which('patchelf'):
        raise ValueError('Cannot find required utility `patchelf` in PATH')
    try:
        version = check_output(['patchelf', '--version']).decode('utf-8')
    except CalledProcessError as e:
        # Chain the subprocess failure so it is not hidden from the traceback.
        raise ValueError('Could not call `patchelf` binary') from e
    # Escape the dot: the original pattern used an unescaped '.' which would
    # also match strings like "patchelf 0x14".
    m = re.match(r'patchelf\s+(\d+(\.\d+)?)', version)
    if m and tuple(int(x) for x in m.group(1).split('.')) >= (0, 14):
        return
    raise ValueError(f'patchelf {version} found. auditwheel repair requires patchelf >= 0.14.')
def _parse_set_implicit_union(source, info):
    """Parse an implicit union of set members, stopping before ']' or '&&'.

    A single non-Range member is returned as-is; otherwise the members are
    wrapped in a SetUnion.
    """
    members = [_parse_set_member(source, info)]
    while True:
        saved_pos = source.pos
        # Peek for a terminator; rewind so the caller can consume it.
        if source.match(u']') or source.match(u'&&'):
            source.pos = saved_pos
            break
        members.append(_parse_set_member(source, info))
    if len(members) == 1 and not isinstance(members[0], Range):
        return members[0]
    return SetUnion(info, members)
def get_data_loaders(cfg, transforms_3d, transforms_2d, transforms_val, transforms_img, rank, world_size, verbose=True):
    """Build the distributed train loaders (2D video, 3D video, image) and
    the (non-distributed) validation loader.

    A loader is None when its configured batch size is 0/falsy.

    Returns
    -------
    tuple
        ``(train_2d_loader, train_3d_loader, valid_loader, train_img_loader)``.
    """

    def get_2d_datasets(dataset_names):
        # Concatenate one VideoDataset per named 2D dataset.
        datasets = []
        for dataset_name in dataset_names:
            db = VideoDataset(dataset_name=dataset_name, set='train', transforms=transforms_2d, seqlen=cfg.DATASET.SEQLEN, overlap=cfg.DATASET.OVERLAP, sample_pool=cfg.DATASET.SAMPLE_POOL, random_sample=cfg.DATASET.RANDOM_SAMPLE, random_start=cfg.DATASET.RANDOM_START, verbose=verbose, debug=cfg.DEBUG)
            datasets.append(db)
        return ConcatDataset(datasets)

    def get_3d_datasets(dataset_names):
        # Same as 2D, but 3dpw uses a fixed overlap of 8.
        datasets = []
        for dataset_name in dataset_names:
            db = VideoDataset(dataset_name=dataset_name, set='train', transforms=transforms_3d, seqlen=cfg.DATASET.SEQLEN, overlap=(cfg.DATASET.OVERLAP if (dataset_name != '3dpw') else 8), sample_pool=cfg.DATASET.SAMPLE_POOL, random_sample=cfg.DATASET.RANDOM_SAMPLE, random_start=cfg.DATASET.RANDOM_START, verbose=verbose, debug=cfg.DEBUG)
            datasets.append(db)
        return ConcatDataset(datasets)

    def get_img_datasets(dataset_names):
        # Single-image datasets; mpii3d is subsampled to every 5th frame.
        datasets = []
        for dataset_name in dataset_names:
            db = ImageDataset(dataset_name=dataset_name, set='train', transforms=transforms_img, verbose=verbose, debug=cfg.DEBUG)
            if (dataset_name == 'mpii3d'):
                db = Subset(db, list(range(len(db)))[::5])
            datasets.append(db)
        return ConcatDataset(datasets)

    train_2d_dataset_names = cfg.TRAIN.DATASETS_2D
    data_2d_batch_size = cfg.TRAIN.BATCH_SIZE_2D
    if data_2d_batch_size:
        train_2d_db = get_2d_datasets(train_2d_dataset_names)
        train_2d_sampler = DistributedSampler(train_2d_db, rank=rank, num_replicas=world_size)
        train_2d_loader = DataLoader(dataset=train_2d_db, batch_size=data_2d_batch_size, num_workers=cfg.NUM_WORKERS, sampler=train_2d_sampler)
    else:
        train_2d_loader = None
    train_3d_dataset_names = cfg.TRAIN.DATASETS_3D
    data_3d_batch_size = cfg.TRAIN.BATCH_SIZE_3D
    if data_3d_batch_size:
        train_3d_db = get_3d_datasets(train_3d_dataset_names)
        train_3d_sampler = DistributedSampler(train_3d_db, rank=rank, num_replicas=world_size)
        train_3d_loader = DataLoader(dataset=train_3d_db, batch_size=data_3d_batch_size, num_workers=cfg.NUM_WORKERS, sampler=train_3d_sampler)
    else:
        train_3d_loader = None
    train_img_dataset_names = cfg.TRAIN.DATASETS_IMG
    data_img_batch_size = cfg.TRAIN.BATCH_SIZE_IMG
    if data_img_batch_size:
        train_img_db = get_img_datasets(train_img_dataset_names)
        train_img_sampler = DistributedSampler(train_img_db, rank=rank, num_replicas=world_size)
        train_img_loader = DataLoader(dataset=train_img_db, batch_size=data_img_batch_size, num_workers=cfg.NUM_WORKERS, sampler=train_img_sampler)
    else:
        train_img_loader = None
    # Removed an unused local (`eval_set`) that computed 'test'/'val' but was
    # never read: the validation split comes from cfg.TRAIN.EVAL_SET below.
    valid_db = VideoDataset(dataset_name=cfg.TRAIN.DATASET_EVAL, set=cfg.TRAIN.EVAL_SET, transforms=transforms_val, overlap=0, sample_pool=cfg.EVAL.SAMPLE_POOL, random_sample=False, random_start=False, verbose=verbose, debug=cfg.DEBUG)
    # Validation is not sharded across ranks.
    valid_sampler = None
    valid_loader = DataLoader(dataset=valid_db, batch_size=cfg.EVAL.BATCH_SIZE, shuffle=False, num_workers=cfg.NUM_WORKERS, sampler=valid_sampler)
    return (train_2d_loader, train_3d_loader, valid_loader, train_img_loader)
def calc_confusion_mat(predictions, data):
    """Tally exact / one-of accuracy and build confusion-matrix entries for
    head predictions against gold labels.

    Parameters
    ----------
    predictions : iterable of dict
        Each with a 'y_hat' predicted index.
    data : iterable of str
        JSON lines, each with 'tokens', 'head', and 'anchors_indices'.

    Returns
    -------
    tuple
        ``(errs, err_mat, preds_mat)`` — error examples, error-name strings,
        and (gold, pred) index pairs.
    """
    exact = 0
    one_of = 0
    errs = []
    err_mat = []
    preds_mat = []
    # NOTE(review): `one_errs` and `tokens` are assigned but never used here.
    one_errs = 0
    for (ind, (example, prediction)) in enumerate(zip(data, predictions)):
        example = json.loads(example)
        tokens = example['tokens']
        y_hat = prediction['y_hat']
        y = example['head']
        # A single implicit-class gold head is converted to its class index.
        # `implicit_classes` / `num_implicit` are module globals — assumed to
        # be the implicit label inventory and its size (TODO confirm).
        if ((len(y) == 1) and (y[0] in implicit_classes)):
            y = implicit_classes.index(y[0])
        anchors_indices = example['anchors_indices']
        if (y.__class__ == list):
            # Gold head is an explicit reference (list of token positions).
            closest_y = get_closest_head(y, anchors_indices)
            if (closest_y == y_hat):
                exact += 1
                one_of += 1
                preds_mat.append((6, 6))
            elif ((y_hat - 6) in y):
                # Prediction matches one of the gold heads (offset by the
                # number of implicit classes, presumably 6 — TODO confirm).
                one_of += 1
                preds_mat.append((6, 6))
            else:
                errs.append((example, prediction, ind))
                if (y_hat < num_implicit):
                    # Reference gold vs implicit-class prediction.
                    err_mat.append(('Ref_' + implicit_classes[y_hat]))
                    preds_mat.append((6, y_hat))
                else:
                    # Reference gold vs (wrong) reference prediction.
                    err_mat.append('Ref_Ref')
                    preds_mat.append((6, 7))
        elif (y == y_hat):
            # Implicit-class gold matched exactly.
            exact += 1
            one_of += 1
            preds_mat.append((y, y))
        else:
            errs.append((example, prediction, ind))
            if (y_hat < num_implicit):
                # Implicit gold vs different implicit prediction.
                err_mat.append(((implicit_classes[y] + '_') + implicit_classes[y_hat]))
                preds_mat.append((y, y_hat))
            else:
                # Implicit gold vs reference prediction.
                err_mat.append((implicit_classes[y] + '_Ref'))
                preds_mat.append((y, 7))
    # Print the distribution of error types for quick inspection.
    print(Counter(err_mat))
    return (errs, err_mat, preds_mat)
def construct_script(items: Sequence[Union[(str, int, bytes, opcodes)]]) -> str:
    """Assemble a script (as a hex string) from a sequence of opcodes,
    integers, raw bytes, and hex-string data pushes."""
    parts = []
    for element in items:
        if isinstance(element, opcodes):
            # Opcode: emit its byte directly.
            parts.append(element.hex())
        elif type(element) is int:
            # Plain int (exact type, so bools/opcodes don't match): encode
            # as a script number push.
            parts.append(add_number_to_script(element).hex())
        elif isinstance(element, (bytes, bytearray)):
            parts.append(push_script(element.hex()))
        elif isinstance(element, str):
            # Strings must already be hex-encoded data.
            assert is_hex_str(element)
            parts.append(push_script(element))
        else:
            raise Exception(f'unexpected item for script: {element!r}')
    return ''.join(parts)
def test_slots_unpickle_after_attr_added(frozen):
    """Unpickling into a redefined slotted class with an extra attribute
    must fill the surviving attributes and ignore the new one.

    Note: the original had a bare ``(slots=True, frozen=frozen)`` expression
    where the ``@attr.s`` decorator belongs — a syntax error; restored.
    """
    a = A(1, 2, 3)
    a_pickled = pickle.dumps(a)
    a_unpickled = pickle.loads(a_pickled)
    assert (a_unpickled == a)

    # Redefine the class with a new attribute "d" inserted before "c".
    @attr.s(slots=True, frozen=frozen)
    class NEW_A():
        x = attr.ib()
        b = attr.ib()
        d = attr.ib()
        c = attr.ib()

    with mock.patch(f'{__name__}.A', NEW_A):
        new_a = pickle.loads(a_pickled)

        assert (new_a.x == 1)
        assert (new_a.b == 2)
        assert (new_a.c == 3)
        assert (not hasattr(new_a, 'd'))
# The original had a bare `_end_docstrings(...)` call here — a garbled
# `@add_end_docstrings` decorator (as used throughout transformers); restored.
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """Fast DPR reader tokenizer: a BertTokenizerFast plus the DPR reader
    call/decode mixin, configured with the DPR reader pretrained maps."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DPRReaderTokenizer
def parse_args():
    """Parse the MMAction2 test-script command line and make sure
    LOCAL_RANK is exported for distributed launchers."""
    arg_parser = argparse.ArgumentParser(description='MMAction2 test (and eval) a model')
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument('checkpoint', help='checkpoint file')
    arg_parser.add_argument('--out', default=None, help='output result file in pickle format')
    arg_parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increasethe inference speed')
    arg_parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "top_k_accuracy", "mean_class_accuracy" for video dataset')
    arg_parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results')
    arg_parser.add_argument('--tmpdir', default='tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
    arg_parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
    arg_parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom evaluation options')
    arg_parser.add_argument('--average-clips', choices=['score', 'prob'], default='score', help='average type when averaging test clips')
    arg_parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    arg_parser.add_argument('--local_rank', type=int, default=0)
    parsed = arg_parser.parse_args()
    # Propagate the rank to the environment if the launcher did not set it.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def build_radare2():
    """Build the bundled radare2 submodule with meson and copy its shared
    libraries into LIBS_DIR.

    Configurable via environment variables: DEBUG, R2BUILDDIR, R2PREFIX,
    BACKEND. Raises RuntimeError when the submodule or meson is missing.
    """
    if (not radare2_exists()):
        raise RuntimeError('Fail to detect radare2 repository. Do you forget to init submodules?')
    if (not meson_exists()):
        raise RuntimeError('Fail to detect meson. Do you forget to install meson?')
    # The meson helper script must be run from inside the radare2 tree.
    os.chdir(RADARE2_DIR)
    DEBUG = os.getenv('DEBUG', '')
    BUILDDIR = os.getenv('R2BUILDDIR', 'pyr2build')
    PREFIX = os.getenv('R2PREFIX', str(((Path(ROOT_DIR) / 'radare2') / 'pyr2installdir')))
    BACKEND = os.getenv('BACKEND', 'ninja')
    args = []
    if (sys.platform == 'win32'):
        # On Windows the interpreter may not be named "python3"; locate it.
        py = detect_python_on_windows()
        if (py is None):
            raise RuntimeError("Can't find a python in your path!")
        args += py
    else:
        args += ['python3']
    # Invoke radare2's own meson wrapper with a shared local install.
    args += ['./sys/meson.py']
    if (not DEBUG):
        args += ['--release']
    args += ['--local']
    args += ['--dir', BUILDDIR]
    args += ['--shared']
    args += ['--backend', BACKEND]
    args += ['--prefix', PREFIX]
    args += ['--install']
    args += ['--options', 'debugger=false', 'sdb_cgen=false']
    subprocess.call(args)
    # Start from a clean library staging directory.
    if LIBS_DIR.exists():
        shutil.rmtree(LIBS_DIR)
    os.makedirs(LIBS_DIR, exist_ok=True)
    # Windows installs DLLs under bin/; other platforms under lib/.
    lib_install_dir = ((Path(PREFIX) / 'bin') if (sys.platform == 'win32') else (Path(PREFIX) / 'lib'))
    glob = {'linux': '*.so*', 'win32': '*.dll', 'darwin': '*.dylib'}.get(sys.platform, '*.so')
    for p in lib_install_dir.rglob(glob):
        if p.is_file():
            # macOS dylibs need their install-name paths rewritten before copy.
            if ((sys.platform == 'darwin') and (not p.is_symlink())):
                rewrite_dyld_path(p)
            shutil.copy(p, LIBS_DIR, follow_symlinks=False)
    # Restore the working directory for the rest of the build.
    os.chdir(ROOT_DIR)
class TestICDAR2015GWD(TestICDAR2015):
    """ICDAR2015 evaluation driver for the GWD detection network."""

    def eval(self):
        # The results file is named after the configured model version.
        results_file = '{}.txt'.format(self.cfgs.VERSION)
        image_list = self.get_test_image()
        detector = build_whole_network.DetectionNetworkGWD(cfgs=self.cfgs, is_training=False)
        self.test_icdar2015(det_net=detector, real_test_img_list=image_list, txt_name=results_file)
        # Keep the results file only when boxes are being visualised.
        if not self.args.show_box:
            os.remove(results_file)
def check_dummies(overwrite=False):
    """Check that the per-backend dummy-object files match what the main
    __init__ requires; rewrite them when ``overwrite`` is True, otherwise
    raise ValueError on any mismatch."""
    expected = create_dummy_files()
    # Some backends use a short alias in the file name (e.g. torch -> pt).
    short_names = {'torch': 'pt'}
    utils_dir = os.path.join(PATH_TO_TRANSFORMERS, 'utils')
    file_paths = {
        backend: os.path.join(utils_dir, f'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in expected.keys()
    }
    # Read the current on-disk contents (missing file counts as empty).
    on_disk = {}
    for (backend, file_path) in file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                on_disk[backend] = f.read()
        else:
            on_disk[backend] = ''
    for backend in expected.keys():
        if expected[backend] == on_disk[backend]:
            continue
        if overwrite:
            print(f'Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main __init__ has new objects.')
            with open(file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
                f.write(expected[backend])
        else:
            raise ValueError(f'The main __init__ has objects that are not present in transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` to fix this.')
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
if (self.task in ['default', 'seq2seq-lm']):
common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
if self.use_past:
common_inputs['decoder_input_ids'] = {0: 'batch'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction='inputs')
elif (self.task == 'causal-lm'):
common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
if self.use_past:
(num_encoder_layers, _) = self.num_layers
for i in range(num_encoder_layers):
common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
return common_inputs
def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
if (self.task in ['default', 'seq2seq-lm']):
common_outputs = super().outputs
else:
common_outputs = super(OnnxConfigWithPast, self).outputs
if self.use_past:
(num_encoder_layers, _) = self.num_layers
for i in range(num_encoder_layers):
common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
decoder_seq_length = (seq_length if (not self.use_past) else 1)
decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework)
decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if (not is_torch_available()):
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
(batch, encoder_seq_length) = common_inputs['input_ids'].shape
decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
(num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads
encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, (self._config.hidden_size // num_encoder_attention_heads))
decoder_past_length = (decoder_seq_length + 3)
decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, (self._config.hidden_size // num_decoder_attention_heads))
common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
common_inputs['past_key_values'] = []
(num_encoder_layers, num_decoder_layers) = self.num_layers
min_num_layers = min(num_encoder_layers, num_decoder_layers)
max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
for _ in range(min_num_layers):
common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
shape = (encoder_shape if (remaining_side_name == 'encoder') else decoder_shape)
for _ in range(min_num_layers, max_num_layers):
common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
if self.use_past:
if (not is_torch_available()):
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
(batch, seqlen) = common_inputs['input_ids'].shape
past_key_values_length = (seqlen + 2)
(num_encoder_layers, _) = self.num_layers
(num_encoder_attention_heads, _) = self.num_attention_heads
past_shape = (batch, num_encoder_attention_heads, past_key_values_length, (self._config.hidden_size // num_encoder_attention_heads))
mask_dtype = common_inputs['attention_mask'].dtype
common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)]
return common_inputs
def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
dummy_input = ([(' '.join([tokenizer.unk_token]) * seq_length)] * batch_size)
common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
return common_inputs
def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
    """Dispatch dummy-input generation to the helper matching ``self.task``.

    'default'/'seq2seq-lm' use the encoder/decoder generator, 'causal-lm'
    the decoder-only one; every other task falls back to the plain
    sequence-classification/QA generator.
    """
    if self.task in ('default', 'seq2seq-lm'):
        generator = self._generate_dummy_inputs_for_default_and_seq2seq_lm
    elif self.task == 'causal-lm':
        generator = self._generate_dummy_inputs_for_causal_lm
    else:
        generator = self._generate_dummy_inputs_for_sequence_classification_and_question_answering
    return generator(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
    """Flatten one past_key_values tensor into ``flattened_output``.

    Encoder/decoder tasks use the seq2seq naming scheme from the direct
    parent; other tasks skip ``OnnxSeq2SeqConfigWithPast`` in the MRO to get
    the single-stack naming. The parent helpers mutate ``flattened_output``
    in place, so no explicit return is needed here.
    """
    if (self.task in ['default', 'seq2seq-lm']):
        flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
    else:
        # Bypass the seq2seq parent to fall back to OnnxConfigWithPast naming.
        flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)
class Episode():
    """One recorded agent episode, renderable as an HTML report section."""

    def __init__(self, info, max_len=30):
        # info: dict with per-step lists ('script_tried', 'script_done',
        # 'graph', 'visible_ids', 'action_ids') plus episode-level metadata
        # ('episode', 'reward', 'success', 'target').
        self.info = info
        # Maximum rows per rendered HTML table.
        self.maxlen = max_len

    def render(self, exp_name=None):
        """Render the episode to ``(html_string, html_3d_plot)``.

        Also saves one 2-D graph plot per step under ``{exp_name}/plots/``.
        """
        goal_names = [obj['class_name'] for obj in self.info['target'][1][0]]
        episode_info = 'Episode {}.'.format(self.info['episode'])
        episode_info2 = 'Reward {}. Success {}, Target {}'.format(self.info['reward'], self.info['success'], '_'.join(goal_names))
        result_str = '<h3> {} </h3><br>'.format(episode_info)
        result_str += '<h7> {} </h7><br>'.format(episode_info2)
        titles = ['script_tried', 'script', 'close', 'grabbed']
        obj_close = []
        obj_grabbed = []
        episode = self.info['episode']
        n_steps = len(self.info['script_tried'])
        for step in range(n_steps):
            curr_graph = self.info['graph'][step]
            id2node = {node['id']: node for node in curr_graph['nodes']}
            visible_ids = self.info['visible_ids'][step]
            # Only actions on currently-visible objects are reported.
            action_ids = [t for t in self.info['action_ids'][step] if (t in visible_ids)]
            goal_ids = [node['id'] for node in curr_graph['nodes'] if (node['class_name'] in goal_names)]
            # Visible objects the agent (node id 1) is CLOSE to / HOLDS.
            nodes_close = [id2node[edge['to_id']] for edge in curr_graph['edges'] if ((edge['from_id'] == 1) and (edge['relation_type'] == 'CLOSE') and (edge['to_id'] in visible_ids))]
            nodes_grabbed = [id2node[edge['to_id']] for edge in curr_graph['edges'] if ((edge['from_id'] == 1) and ('HOLDS' in edge['relation_type']) and (edge['to_id'] in visible_ids))]
            close_str = ['{}.{}'.format(node['class_name'], node['id']) for node in nodes_close]
            grabbed_str = ['{}.{}'.format(node['class_name'], node['id']) for node in nodes_grabbed]
            obj_close.append(close_str)
            obj_grabbed.append(grabbed_str)
            save_graph_2d('{}/plots/plot_{}_{}.png'.format(exp_name, episode, step), self.info['graph'][step], visible_ids, action_ids, goal_ids)
        col_info = [self.info['script_tried'], self.info['script_done'], obj_close, obj_grabbed]
        result_str += html_table(titles, self.maxlen, col_info, ['width: 15%', 'width: 15%', '', ''])
        steps_title = ['step {}'.format(it) for it in range(n_steps)]
        # Collapsible per-step table (script line + plot image), toggled by
        # a JS toggle_visibility link keyed on the episode id.
        link_table = (('<a href=# onclick="toggle_visibility(\'plot_ep_' + str(self.info['episode'])) + '\');"> show results </a>')
        ep_info = [[self.info['script_done'][it], html_img('./plots/plot_{}_{}.png'.format(episode, it))] for it in range(n_steps)]
        result_str += (((('<br>' + link_table) + '<div id="plot_ep_{}" style="display: None">'.format(str(self.info['episode']))) + html_table(steps_title, 2, ep_info)) + '</div>')
        result_str += '</div><br>'
        # 3-D plot output is not implemented; kept for interface stability.
        html_3d_plot = ''
        return (result_str, html_3d_plot)
def get_quotedrpath(rp, separate_basename=0):
    """Wrap an rpath into a QuotedRPath.

    With ``separate_basename`` set, the basename is split off and unquoted
    into the index; this is only valid when ``rp`` has an empty index (i.e.
    quoting starts at the repository root).
    """
    if not separate_basename:
        return QuotedRPath(rp.conn, rp.base, rp.index, rp.data)
    assert (not rp.index), "Trying to start quoting '{rp}' in the middle.".format(rp=rp)
    dirname, basename = rp.dirsplit()
    # The unquoted basename becomes the single index component.
    return QuotedRPath(rp.conn, dirname, (unquote(basename),), rp.data)
def main(client, config):
    """TPCx-BB style query 25: cluster customers by recency/frequency/amount.

    Reads store_sales, web_sales and date_dim, keeps sales after
    ``q25_date``, aggregates per-customer stats from both channels, merges
    them, and runs k-means on the resulting (recency, frequency, amount)
    feature table.
    """
    (ss_ddf, ws_ddf, datedim_ddf) = benchmark(read_tables, config=config, compute_result=config['get_read_time'])
    datedim_ddf = datedim_ddf.map_partitions(convert_datestring_to_days)
    # Cutoff date as integer days-since-epoch, matching d_date after the
    # conversion above.
    min_date = np.datetime64(q25_date, 'D').astype(int)
    valid_dates_ddf = datedim_ddf[(datedim_ddf['d_date'] > min_date)].reset_index(drop=True)
    # Drop rows with no customer key before joining on the date dimension.
    f_ss_ddf = ss_ddf[ss_ddf['ss_customer_sk'].notnull()].reset_index(drop=True)
    f_ws_ddf = ws_ddf[ws_ddf['ws_bill_customer_sk'].notnull()].reset_index(drop=True)
    ss_merged_df = f_ss_ddf.merge(valid_dates_ddf, left_on='ss_sold_date_sk', right_on='d_date_sk', how='inner')
    ws_merged_df = f_ws_ddf.merge(valid_dates_ddf, left_on='ws_sold_date_sk', right_on='d_date_sk', how='inner')
    # Per customer: most recent sale date, total paid, and number of
    # distinct transactions (frequency).
    agg_store_sales_ddf = ss_merged_df.groupby('ss_customer_sk').agg({'ss_sold_date_sk': 'max', 'ss_net_paid': 'sum'})
    agg_store_sales_ddf['frequency'] = agg_count_distinct(ss_merged_df, 'ss_customer_sk', 'ss_ticket_number', client=client)
    agg_web_sales_ddf = ws_merged_df.groupby('ws_bill_customer_sk').agg({'ws_sold_date_sk': 'max', 'ws_net_paid': 'sum'})
    agg_web_sales_ddf['frequency'] = agg_count_distinct(ws_merged_df, 'ws_bill_customer_sk', 'ws_order_number', client=client)
    agg_store_sales_ddf = agg_store_sales_ddf.reset_index()
    agg_web_sales_ddf = agg_web_sales_ddf.reset_index()
    # Rename both channels to a shared schema so they can be concatenated.
    # (Relies on the agg output column order: key, max-date, sum-paid, freq.)
    shared_columns = ['cid', 'most_recent_date', 'amount', 'frequency']
    agg_store_sales_ddf.columns = shared_columns
    agg_web_sales_ddf.columns = shared_columns
    agg_sales_ddf = dask_cudf.concat([agg_store_sales_ddf, agg_web_sales_ddf])
    cluster_input_ddf = agg_sales_ddf.groupby('cid', as_index=False).agg({'most_recent_date': 'max', 'frequency': 'sum', 'amount': 'sum'})
    # Recency flag: purchased within 60 days of day number 37621 (the
    # query's fixed "today").
    cluster_input_ddf['recency'] = ((37621 - cluster_input_ddf['most_recent_date']) < 60)
    # Deterministic row order so the clustering output is reproducible.
    cluster_input_ddf = cluster_input_ddf.sort_values(['cid'])
    cluster_input_ddf = cluster_input_ddf.set_index('cid')
    cluster_input_ddf = cluster_input_ddf[['recency', 'frequency', 'amount']]
    cluster_input_ddf['recency'] = cluster_input_ddf['recency'].astype('int64')
    cluster_input_ddf['amount'] = cluster_input_ddf['amount'].astype('float64')
    # Materialize the feature table before handing it to k-means.
    cluster_input_ddf = cluster_input_ddf.persist()
    results_dict = get_clusters(client=client, kmeans_input_df=cluster_input_ddf)
    return results_dict
class VoltageChannel(Channel):
    """One SCPI-addressed output channel of the power supply.

    The ``{ch}`` placeholder is substituted by the Channel machinery; the
    ``dynamic=True`` validator ranges are defaults that concrete channels
    may override.
    """
    # Output voltage setpoint in volts (default range 0-25 V, overridable).
    voltage_setpoint = Channel.control('VOLT? ({ch})', 'VOLT %g, ({ch})', 'Control the output voltage of this channel, range depends on channel.', validator=strict_range, values=[0, 25], dynamic=True)
    # Current limit in amperes (default range 0-1 A, overridable).
    current_limit = Channel.control('CURR? ({ch})', 'CURR %g, ({ch})', 'Control the current limit of this channel, range depends on channel.', validator=strict_range, values=[0, 1], dynamic=True)
    # Measured (actual) output voltage.
    voltage = Channel.measurement('MEASure:VOLTage? ({ch})', 'Measure actual voltage of this channel.')
    # Measured (actual) output current.
    current = Channel.measurement('MEAS:CURRent? ({ch})', 'Measure the actual current of this channel.')
    # Output enable switch; Python booleans map to the instrument's 1/0.
    output_enabled = Channel.control('OUTPut? ({ch})', 'OUTPut %d, ({ch})', 'Control whether the channel output is enabled (boolean).', validator=strict_discrete_set, map_values=True, values={True: 1, False: 0})
def format_callable_args(arg_types: list[Type], arg_kinds: list[ArgKind], arg_names: list[str | None], format: Callable[[Type], str], verbosity: int) -> str:
    """Render a callable's argument list as a comma-separated string.

    Plain positional arguments (and, at verbosity 0, all positional kinds)
    are shown as bare types; everything else is wrapped in its ArgKind
    constructor, with the argument name included when it exists and the
    kind is not a star argument.
    """
    rendered = []
    for name, typ, kind in zip(arg_names, arg_types, arg_kinds):
        bare_positional = kind == ARG_POS and name is None
        if bare_positional or (verbosity == 0 and kind.is_positional()):
            rendered.append(format(typ))
            continue
        constructor = ARG_CONSTRUCTOR_NAMES[kind]
        if kind.is_star() or name is None:
            rendered.append(f'{constructor}({format(typ)})')
        else:
            rendered.append(f'{constructor}({format(typ)}, {repr(name)})')
    return ', '.join(rendered)
def mute_no_singing_parts(mono_output_path, mute_output_path):
    """Write a copy of the mono track with detected silent sections zeroed.

    Sections come from get_silence_sections() as (start, end) times in
    seconds; the corresponding sample ranges are set to zero before the
    result is written to ``mute_output_path``.
    """
    print(f'{ULTRASINGER_HEAD} Mute audio parts with no singing')
    silence_sections = get_silence_sections(mono_output_path)
    samples, sample_rate = librosa.load(mono_output_path, sr=None)
    for section in silence_sections:
        # Convert the section's start/end times to sample indices.
        begin = int(section[0] * sample_rate)
        end = int(section[1] * sample_rate)
        samples[begin:end] = 0
    sf.write(mute_output_path, samples, sample_rate)
def load_model_and_checkpoint_files(folder, folds=None, mixed_precision=None, checkpoint_name='model_best'):
    """Restore an nnU-Net trainer and collect checkpoint params for folds.

    Args:
        folder: base output folder of the trained model.
        folds: str ('all'), int (single fold), list/tuple of ints, or None
            (auto-discover 'fold*' subfolders).
        mixed_precision: forwarded to restore_model as fp16.
        checkpoint_name: checkpoint file stem to load from each fold.

    Returns:
        (trainer, all_params) where all_params is one state dict per fold.

    Raises:
        ValueError: if ``folds`` has an unsupported type.
    """
    if isinstance(folds, str):
        # Any string selects the 'all' training (consolidated folds).
        folds = [join(folder, 'all')]
        assert isdir(folds[0]), ('no output folder for fold %s found' % folds)
    elif isinstance(folds, (list, tuple)):
        if ((len(folds) == 1) and (folds[0] == 'all')):
            folds = [join(folder, 'all')]
        else:
            folds = [join(folder, ('fold_%d' % i)) for i in folds]
        assert all([isdir(i) for i in folds]), 'list of folds specified but not all output folders are present'
    elif isinstance(folds, int):
        folds = [join(folder, ('fold_%d' % folds))]
        assert all([isdir(i) for i in folds]), ('output folder missing for fold %d' % folds)
    elif (folds is None):
        print("folds is None so we will automatically look for output folders (not using 'all'!)")
        folds = subfolders(folder, prefix='fold')
        print('found the following folds: ', folds)
    else:
        # BUG FIX: the type was previously passed as a second ValueError
        # argument, so the %s placeholder was never substituted into the
        # message. Format it in explicitly instead.
        raise ValueError('Unknown value for folds. Type: %s. Expected: list of int, int, str or None' % str(type(folds)))
    # The trainer object only needs to be built once; fold 0's pickle is
    # representative for all folds.
    trainer = restore_model(join(folds[0], ('%s.model.pkl' % checkpoint_name)), fp16=mixed_precision, folder=folder)
    trainer.output_folder = folder
    trainer.output_folder_base = folder
    trainer.update_fold(0)
    # Build the network without loading data.
    trainer.initialize(False)
    all_best_model_files = [join(i, ('%s.model' % checkpoint_name)) for i in folds]
    print('using the following model files: ', all_best_model_files)
    # Load on CPU; the caller moves params to the right device.
    all_params = [torch.load(i, map_location=torch.device('cpu')) for i in all_best_model_files]
    return (trainer, all_params)
def _all_files(root: Union[(str, Path)], filter_function: Optional[Callable[([str], bool)]]=None) -> Set[str]:
all_files = set()
for (dirpath, _, filenames) in os.walk(root):
for filename in filenames:
if ((filter_function is not None) and (not filter_function(filename))):
continue
all_files.add(os.path.join(dirpath, filename))
return all_files |
def fuse_qkv(model, args):
    """Equalize quantizer scales across Q/K/V in every self-attention block.

    Sharing one amax (the max of the three) between the query/key/value
    quantizers lets the fused QKV kernel use a single quantization scale.
    Modules are selected by the '.attention.self' name suffix.
    """
    def fuse3(qq, qk, qv):
        # Skip silently-uncalibrated quantizers (no _amax buffer yet).
        for mod in [qq, qk, qv]:
            if (not hasattr(mod, '_amax')):
                print(' WARNING: NO AMAX BUFFER')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        # Use the widest range so no tensor is clipped after fusion.
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}')
    for (name, mod) in model.named_modules():
        if name.endswith('.attention.self'):
            logger.info(f'FUSE_QKV: {name:{name_width}}')
            # Always fuse the activation (matmul input) quantizers.
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            # Weight quantizers can only be fused for per-tensor quantization.
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
class Karaoke(GStreamerPlugin):
    """Quod Libet GStreamer plugin wrapping the `audiokaraoke` element to
    attenuate center-panned (lead vocal) audio."""
    PLUGIN_ID = _PLUGIN_ID
    PLUGIN_NAME = _('Karaoke')
    PLUGIN_DESC = _('Removes main vocals from audio.')
    PLUGIN_ICON = Icons.AUDIO_INPUT_MICROPHONE

    # NOTE(review): these methods take `cls` but carry no @classmethod
    # decorator — GStreamerPlugin's hooks are presumably classmethods and
    # the decorators look stripped in extraction. Confirm upstream.
    def setup_element(cls):
        """Create the audiokaraoke GStreamer element for this plugin."""
        return Gst.ElementFactory.make('audiokaraoke', cls.PLUGIN_ID)

    def update_element(cls, element):
        """Push the current user preferences into the element's properties."""
        element.set_property('level', get_cfg('level'))
        element.set_property('filter-band', get_cfg('band'))
        element.set_property('filter-width', get_cfg('width'))

    def PluginPreferences(cls, window):
        """Build the preferences widget; re-applies settings on change."""
        prefs = Preferences()
        prefs.connect('changed', (lambda *x: cls.queue_update()))
        return prefs
_torch
_vision
# NOTE(review): the two bare names above look like mangled @require_torch /
# @require_vision decorators lost in extraction — confirm against upstream.
class GLPNFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for GLPNFeatureExtractor: PIL, numpy and torch inputs must all
    yield pixel_values whose spatial dims are multiples of size_divisor."""
    feature_extraction_class = (GLPNFeatureExtractor if is_vision_available() else None)

    def setUp(self):
        self.feature_extract_tester = GLPNFeatureExtractionTester(self)

    # NOTE(review): referenced below as `self.feat_extract_dict` without a
    # call — upstream this is a @property; the decorator appears stripped.
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        # The extractor must expose all configurable attributes.
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'do_resize'))
        self.assertTrue(hasattr(feature_extractor, 'size_divisor'))
        self.assertTrue(hasattr(feature_extractor, 'resample'))
        self.assertTrue(hasattr(feature_extractor, 'do_rescale'))

    def test_batch_feature(self):
        # Intentionally skipped for GLPN.
        pass

    def test_call_pil(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # Random PIL images with unequal resolutions.
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        # Both spatial dimensions must be divisible by size_divisor.
        self.assertTrue(((encoded_images.shape[(- 1)] % self.feature_extract_tester.size_divisor) == 0))
        self.assertTrue(((encoded_images.shape[(- 2)] % self.feature_extract_tester.size_divisor) == 0))

    def test_call_numpy(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # Same check with numpy array inputs.
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(((encoded_images.shape[(- 1)] % self.feature_extract_tester.size_divisor) == 0))
        self.assertTrue(((encoded_images.shape[(- 2)] % self.feature_extract_tester.size_divisor) == 0))

    def test_call_pytorch(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # Same check with torch tensor inputs.
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(((encoded_images.shape[(- 1)] % self.feature_extract_tester.size_divisor) == 0))
        self.assertTrue(((encoded_images.shape[(- 2)] % self.feature_extract_tester.size_divisor) == 0))
class CpmBlock(nn.Module):
    """Stack of ConvModules used in a Convolutional Pose Machine stage.

    Each conv keeps the spatial size ('same' padding derived from its
    kernel); channel i feeds channel i+1.
    """

    def __init__(self, in_channels, channels=(128, 128, 128), kernels=(11, 11, 11), norm_cfg=None):
        super().__init__()
        assert len(channels) == len(kernels)
        modules = []
        prev_channels = in_channels
        for out_channels, kernel in zip(channels, kernels):
            # 'same' padding for an odd kernel: (k - 1) // 2.
            modules.append(ConvModule(prev_channels, out_channels, kernel, padding=(kernel - 1) // 2, norm_cfg=norm_cfg))
            prev_channels = out_channels
        self.model = nn.Sequential(*modules)

    def forward(self, x):
        """Apply the conv stack to ``x``."""
        return self.model(x)
.patch('bot.exts.info.information.constants')
# NOTE(review): the bare `.patch(...)` lines in this block look like mangled
# `@unittest.mock.patch(...)` decorators lost in extraction — confirm
# against the upstream repository before running these tests.
class UserCommandTests(unittest.IsolatedAsyncioTestCase):
    """Tests for the `!user` command's permission and channel rules."""

    def setUp(self):
        """Build a mock bot/cog plus the roles and members used across tests."""
        self.bot = helpers.MockBot()
        self.cog = information.Information(self.bot)
        self.moderator_role = helpers.MockRole(name='Moderators', id=2, position=10)
        self.flautist_role = helpers.MockRole(name='Flautists', id=3, position=2)
        self.bassist_role = helpers.MockRole(name='Bassists', id=4, position=3)
        self.author = helpers.MockMember(id=1, name='syntaxaire')
        self.moderator = helpers.MockMember(id=2, name='riffautae', roles=[self.moderator_role])
        self.target = helpers.MockMember(id=3, name='__fluzz__')
        self.bot_command_channel = helpers.MockTextChannel(id=constants.Channels.bot_commands)

    async def test_regular_member_cannot_target_another_member(self, constants):
        """Non-staff users may only run the command on themselves."""
        constants.MODERATION_ROLES = [self.moderator_role.id]
        ctx = helpers.MockContext(author=self.author)
        (await self.cog.user_info(self.cog, ctx, self.target))
        ctx.send.assert_called_once_with('You may not use this command on users other than yourself.')

    async def test_regular_member_cannot_use_command_outside_of_bot_commands(self, constants):
        """Non-staff users are restricted to the bot-commands channel."""
        constants.MODERATION_ROLES = [self.moderator_role.id]
        constants.STAFF_ROLES = [self.moderator_role.id]
        ctx = helpers.MockContext(author=self.author, channel=helpers.MockTextChannel(id=100))
        msg = 'Sorry, but you may only use this command within <#50>.'
        with self.assertRaises(InWhitelistCheckFailure, msg=msg):
            (await self.cog.user_info(self.cog, ctx))

    .patch('bot.exts.info.information.Information.create_user_embed')
    async def test_regular_user_may_use_command_in_bot_commands_channel(self, create_embed, constants):
        """Inside bot-commands the command succeeds for regular users."""
        constants.STAFF_ROLES = [self.moderator_role.id]
        ctx = helpers.MockContext(author=self.author, channel=self.bot_command_channel)
        (await self.cog.user_info(self.cog, ctx))
        create_embed.assert_called_once_with(ctx, self.author, False)
        ctx.send.assert_called_once()

    .patch('bot.exts.info.information.Information.create_user_embed')
    async def test_regular_user_can_explicitly_target_themselves(self, create_embed, _):
        """Explicitly naming yourself is equivalent to no target."""
        constants.STAFF_ROLES = [self.moderator_role.id]
        ctx = helpers.MockContext(author=self.author, channel=self.bot_command_channel)
        (await self.cog.user_info(self.cog, ctx, self.author))
        create_embed.assert_called_once_with(ctx, self.author, False)
        ctx.send.assert_called_once()

    .patch('bot.exts.info.information.Information.create_user_embed')
    async def test_staff_members_can_bypass_channel_restriction(self, create_embed, constants):
        """Staff can use the command in any channel."""
        constants.STAFF_PARTNERS_COMMUNITY_ROLES = [self.moderator_role.id]
        ctx = helpers.MockContext(author=self.moderator, channel=helpers.MockTextChannel(id=200))
        (await self.cog.user_info(self.cog, ctx))
        create_embed.assert_called_once_with(ctx, self.moderator, False)
        ctx.send.assert_called_once()

    .patch('bot.exts.info.information.Information.create_user_embed')
    async def test_moderators_can_target_another_member(self, create_embed, constants):
        """Moderators may inspect other members."""
        constants.MODERATION_ROLES = [self.moderator_role.id]
        constants.STAFF_PARTNERS_COMMUNITY_ROLES = [self.moderator_role.id]
        ctx = helpers.MockContext(author=self.moderator, channel=helpers.MockTextChannel(id=50))
        (await self.cog.user_info(self.cog, ctx, self.target))
        create_embed.assert_called_once_with(ctx, self.target, False)
        ctx.send.assert_called_once()
def freeze_model_weights(model):
    """Disable gradient tracking for every weight and bias in ``model``.

    Walks all submodules, sets ``requires_grad = False`` on any present
    weight/bias tensor and clears stale ``.grad`` buffers so the optimizer
    cannot keep updating frozen parameters.
    """
    print('=> Freezing model weights')
    for name, module in model.named_modules():
        for attr in ('weight', 'bias'):
            tensor = getattr(module, attr, None)
            if tensor is None:
                continue
            print(f'==> No gradient to {name}.{attr}')
            tensor.requires_grad = False
            if tensor.grad is not None:
                print(f'==> Setting gradient of {name}.{attr} to None')
                tensor.grad = None
def generate_inference_command(dataset_name_or_id: Union[int, str], configuration_name: str, plans_identifier: str = 'nnUNetPlans', trainer_name: str = 'nnUNetTrainer', folds: Union[List[int], Tuple[int, ...]] = (0, 1, 2, 3, 4), folder_with_segs_from_prev_stage: str = None, input_folder: str = 'INPUT_FOLDER', output_folder: str = 'OUTPUT_FOLDER', save_npz: bool = False):
    """Build the ``nnUNetv2_predict`` shell command for a trained model.

    For cascade configurations the previous stage's command is generated
    recursively and prepended (newline-separated), and its output folder is
    wired into this command via ``-prev_stage_predictions``.

    Returns:
        The full shell command string (possibly multi-line for cascades).
    """
    fold_str = ''
    for f in folds:
        fold_str += f' {f}'
    predict_command = ''
    trained_model_folder = get_output_folder(dataset_name_or_id, trainer_name, plans_identifier, configuration_name, fold=None)
    plans_manager = PlansManager(join(trained_model_folder, 'plans.json'))
    configuration_manager = plans_manager.get_configuration(configuration_name)
    # NOTE(review): this membership test looks for a configuration literally
    # named 'previous_stage'; it looks like it should ask whether
    # `configuration_manager` defines a previous stage — confirm upstream.
    if 'previous_stage' in plans_manager.available_configurations:
        prev_stage = configuration_manager.previous_stage_name
        predict_command += (generate_inference_command(dataset_name_or_id, prev_stage, plans_identifier, trainer_name, folds, None, output_folder='OUTPUT_FOLDER_PREV_STAGE') + '\n')
        folder_with_segs_from_prev_stage = 'OUTPUT_FOLDER_PREV_STAGE'
    # BUG FIX: this used plain assignment, which discarded the recursively
    # generated previous-stage command appended just above.
    predict_command += f'nnUNetv2_predict -d {dataset_name_or_id} -i {input_folder} -o {output_folder} -f {fold_str} -tr {trainer_name} -c {configuration_name} -p {plans_identifier}'
    if folder_with_segs_from_prev_stage is not None:
        predict_command += f' -prev_stage_predictions {folder_with_segs_from_prev_stage}'
    if save_npz:
        # Also export softmax probabilities (needed for ensembling).
        predict_command += ' --npz'
    return predict_command
def test_get_username_keyring_runtime_error_logged(entered_username, monkeypatch, config, caplog):
    """A RuntimeError from keyring is logged and username falls back to prompt."""

    class FailKeyring():
        # NOTE(review): no `self` / @staticmethod — the function is reached
        # through the class object (auth.keyring.get_credential), so
        # `system` receives the first positional argument; this works, but
        # looks like a stripped @staticmethod. Confirm upstream.
        def get_credential(system, username):
            raise RuntimeError('fail!')

    # Replace the keyring module with the always-failing stand-in.
    monkeypatch.setattr(auth, 'keyring', FailKeyring)
    # Keyring failed, so the resolver must fall back to the typed username.
    assert (auth.Resolver(config, auth.CredentialInput()).username == 'entered user')
    # The failure must be logged together with its traceback.
    assert re.search('Error getting username from keyring.+Traceback.+RuntimeError: fail!', caplog.text, re.DOTALL)
class CocoaAlternateEventLoop(EventLoop):
    """pyglet EventLoop variant that hands control to the native
    NSApplication run loop instead of pyglet's own dispatch loop."""

    def run(self, interval=(1 / 60)):
        """Start the loop; redraw every ``interval`` seconds (falsy = every tick)."""
        if (not interval):
            self.clock.schedule(self._redraw_windows)
        else:
            self.clock.schedule_interval(self._redraw_windows, interval)
        self.has_exit = False
        from pyglet.window import Window
        # NSApp dispatches events itself, so windows must not queue them.
        Window._enable_event_queue = False
        # Flush anything queued before the hand-over to Cocoa.
        for window in app.windows:
            window.switch_to()
            window.dispatch_pending_events()
        self.platform_event_loop = app.platform_event_loop
        self.dispatch_event('on_enter')
        self.is_running = True
        # Blocks inside the native Cocoa run loop until nsapp_stop().
        self.platform_event_loop.nsapp_start(interval)

    def exit(self):
        """Request shutdown and stop the native NSApplication loop."""
        self.has_exit = True
        # Wake the loop so it notices has_exit.
        self.platform_event_loop.notify()
        self.is_running = False
        self.dispatch_event('on_exit')
        self.platform_event_loop.nsapp_stop()
def _prepare_onnx_paddings(g, dim, pad):
    """Convert PyTorch pad format to ONNX Pad format inside graph ``g``.

    PyTorch pads are ordered last-dim-first as [begin, end] pairs and may
    cover fewer than ``dim`` dimensions; ONNX expects
    [x1_begin, x2_begin, ..., x1_end, x2_end, ...] over all dims. Returns
    the Long-cast paddings tensor node.
    """
    # Number of pad values actually supplied.
    pad_len = torch.onnx.symbolic_opset9.size(g, pad, g.op('Constant', value_t=torch.tensor([0])))
    # extension = 2*dim - len(pad): zeros to append for unpadded dims.
    extension = g.op('Sub', g.op('Mul', g.op('Constant', value_t=torch.tensor(dim, dtype=torch.int64)), g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))), pad_len)
    pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    paddings = g.op('Concat', pad, g.op('ConstantOfShape', extension, value_t=torch.tensor([0], dtype=torch.int64)), axis_i=0)
    # Group into (-1, 2) [begin, end] pairs, flip to first-dim-first order,
    # transpose so all begins precede all ends, then flatten to 1-D.
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1), 2])))
    paddings = g.op('Transpose', torch.onnx.symbolic_opset10.flip(g, paddings, [0]), perm_i=[1, 0])
    paddings = g.op('Reshape', paddings, g.op('Constant', value_t=torch.tensor([(- 1)])))
    padding_c = g.op('Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    return padding_c
def test_logout(flask_app, mocker: MockerFixture):
    """logout() must clear the server-side session and leave all rooms."""
    mock_leave_all_rooms = mocker.patch('randovania.server.multiplayer.session_common.leave_all_rooms', autospec=True)
    session = {'user-id': 1234, 'discord-access-token': 'access_token'}
    sa = MagicMock()
    # sa.session() is a context manager yielding the mutable session dict.
    sa.session.return_value.__enter__.return_value = session
    with flask_app.test_request_context():
        user_session.logout(sa)
    # Every key must have been removed from the session in place...
    assert (session == {})
    # ...and all multiplayer rooms left exactly once.
    mock_leave_all_rooms.assert_called_once_with(sa)
class Feed(list):
    """A podcast feed: a list of episode RemoteFiles plus channel metadata.

    The list contents are episode songs, newest first. ``parse()``
    (re)fetches the feed and synchronizes the contents with the remote
    channel.
    """

    def __init__(self, uri):
        self.name = _('Unknown')
        self.uri = uri
        self.changed = False
        self.website = ''
        # Timestamp of the last successful parse (see get_age()).
        self.__lastgot = 0

    def get_age(self):
        """Return seconds elapsed since the feed was last fetched."""
        return (time.time() - self.__lastgot)

    # BUG FIX: restored @staticmethod. This helper is invoked as
    # ``self.__fill_af(channel, af)`` with exactly two explicit arguments;
    # as a plain method the implicit ``self`` made that a 3-argument call
    # against a 2-parameter function (TypeError at runtime).
    @staticmethod
    def __fill_af(feed, af):
        """Copy metadata from a feedparser channel/entry into AudioFile ``af``.

        Every field is optional in feedparser's object model, hence the
        per-field try/except blocks; duplicates are never added twice.
        """
        try:
            af['title'] = (feed.title or _('Unknown'))
        except (TypeError, AttributeError):
            af['title'] = _('Unknown')
        try:
            af['date'] = ('%04d-%02d-%02d' % feed.modified_parsed[:3])
        except (AttributeError, TypeError):
            pass
        # Simple 1:1 text fields: song key <- feed attribute.
        for (songkey, feedkey) in [('website', 'link'), ('description', 'tagline'), ('language', 'language'), ('copyright', 'copyright'), ('organization', 'publisher'), ('license', 'license')]:
            try:
                value = getattr(feed, feedkey)
            except AttributeError:
                pass
            else:
                if (value and (value not in af.list(songkey))):
                    af.add(songkey, value)
        try:
            af.add('artwork_url', feed.image['href'])
        except (AttributeError, KeyError):
            pass
        # Prefer the structured author_detail; fall back to the plain
        # author string when it is missing.
        try:
            author = feed.author_detail
        except AttributeError:
            try:
                author = feed.author
            except AttributeError:
                pass
            else:
                if (author and (author not in af.list('artist'))):
                    af.add('artist', author)
        else:
            try:
                if (author.email and (author.email not in af.list('contact'))):
                    af.add('contact', author.email)
            except AttributeError:
                pass
            try:
                if (author.name and (author.name not in af.list('artist'))):
                    af.add('artist', author.name)
            except AttributeError:
                pass
        try:
            values = feed.contributors
        except AttributeError:
            pass
        else:
            for value in values:
                try:
                    value = value.name
                except AttributeError:
                    pass
                else:
                    if (value and (value not in af.list('performer'))):
                        af.add('performer', value)
        try:
            af['~#length'] = util.parse_time(feed.itunes_duration)
        except (AttributeError, ValueError):
            pass
        try:
            values = dict(feed.categories).values()
        except AttributeError:
            pass
        else:
            for value in values:
                if (value and (value not in af.list('genre'))):
                    af.add('genre', value)

    def parse(self):
        """Fetch and parse the feed, syncing episodes into this list.

        Returns True when at least one episode URI is present afterwards,
        False on any network or parse failure.
        """
        # Probe with HEAD first: reject HTTP errors and direct audio
        # streams (radio stations) without downloading the payload.
        req = Request(self.uri, method='HEAD')
        try:
            with urlopen(req, timeout=5) as head:
                if hasattr(head, 'status'):
                    print_d(f"Feed URL {self.uri!r} ({head.url}) returned HTTP {head.status}, with content {head.headers.get('Content-Type')}")
                    if (head.status and (head.status >= 400)):
                        return False
                if head.headers.get('Content-Type').lower().startswith('audio'):
                    print_w('Looks like an audio stream / radio, not a audio feed.')
                    return False
            content = urlopen(self.uri, timeout=15).read()
        except OSError as e:
            print_w(f"Couldn't fetch content from {self.uri} ({e})")
            return False
        try:
            doc = feedparser.parse(content)
        except Exception as e:
            print_w(f"Couldn't parse feed: {self.uri} ({e})")
            return False
        try:
            album = doc.channel.title
        except AttributeError:
            print_w(('No channel title in %s' % doc))
            return False
        if album:
            self.name = album
        else:
            self.name = _('Unknown')
        # Channel-level metadata shared by every episode song.
        defaults = AudioFile({'feed': self.uri})
        try:
            self.__fill_af(doc.channel, defaults)
        except Exception as e:
            print_w(f'Error creating feed data: {self.uri} ({e})')
            return False
        entries = []
        uris = set()
        print_d(('Found %d entries in channel' % len(doc.entries)))
        for entry in doc.entries:
            try:
                for enclosure in entry.enclosures:
                    try:
                        if (('audio' in enclosure.type) or ('ogg' in enclosure.type) or formats.filter(enclosure.url)):
                            uri = enclosure.url
                            if (not isinstance(uri, str)):
                                uri = uri.decode('utf-8')
                            try:
                                size = float(enclosure.length)
                            except (AttributeError, ValueError):
                                size = 0
                            entries.append((uri, entry, size))
                            uris.add(uri)
                            # Only the first usable enclosure per entry.
                            break
                    except AttributeError:
                        pass
            except AttributeError:
                print_d(('No enclosures found in %s' % entry))
        # Drop episodes that vanished from the feed; the URIs left in
        # `uris` afterwards are the genuinely new episodes.
        for entry in list(self):
            if (entry['~uri'] not in uris):
                self.remove(entry)
            else:
                uris.remove(entry['~uri'])
        print_d(('Successfully got %d episodes in channel' % len(entries)))
        # Entries arrive newest-first; reverse so insert(0, ...) below
        # restores newest-first order for the new songs.
        entries.reverse()
        for (uri, entry, size) in entries:
            if (uri in uris):
                song = RemoteFile(uri)
                song['~#size'] = size
                song.fill_metadata = False
                song.update(defaults)
                song['album'] = self.name
                try:
                    self.__fill_af(entry, song)
                except Exception as e:
                    print_d(f"Couldn't convert {uri} to AudioFile ({e})")
                else:
                    self.insert(0, song)
        self.__lastgot = time.time()
        return bool(uris)
class MTL_Masker():
    """Applies per-task pruning masks to TF model weights
    (multi-task lottery-ticket style pruning)."""

    def __init__(self, sess, mask_dic, masks_dir, pruning_param_names, save_checkpoints_dir):
        # `masks` starts out as the directory path (or None) and is replaced
        # by the loaded mask arrays in masks_init().
        self.masks = masks_dir
        self.weights = []
        self._sess = sess
        self._log = Logger(__name__)
        # Names of trainable variables subject to pruning.
        self.pruning_names = set(pruning_param_names)
        self.backup_values = []
        self._save_checkpoints_dir = save_checkpoints_dir
        # Mapping: mask placeholder name -> TF placeholder.
        self.masks_dic = mask_dic
        self.masks_init()

    def masks_init(self):
        """Load masks from disk, or build all-ones masks when none are given.

        Also precomputes one feed_dict per task in ``self._feed_masks``.
        """
        if (self.masks is None):
            # No masks on disk: start fully dense (all weights kept).
            mask = {p.name: np.ones(p.shape).astype('float32') for p in tf.trainable_variables() if (p.name in self.pruning_names)}
            self.masks = mask
        else:
            self.masks = self.load_masks(self.masks)
        self._log.info('has masks {},tpye {}'.format(len(self.masks), type(self.masks)))
        self._feed_masks = []
        # NOTE(review): when self.masks is a dict (the None branch above),
        # enumerate() iterates its KEYS, so get_cur_rate receives a string —
        # looks inconsistent with the list-of-dicts case; confirm intent.
        for (i, mask) in enumerate(self.masks):
            self.get_cur_rate(mask, i)
            feed_mask = self.apply_mask(i)
            self._feed_masks.append(feed_mask)

    def get_cur_rate(self, mask, task_id):
        """Log the fraction of surviving (non-pruned) parameters for a task."""
        cur_m = sum((m.sum().item() for m in mask.values()))
        total_m = 0
        for (name, p) in mask.items():
            total_m += p.size
        cur_rate = round(((100.0 * cur_m) / total_m), 2)
        self._log.info('Task_id #{} , remain params {},total params {} remain percent {}%'.format(task_id, cur_m, total_m, cur_rate))

    def load_masks(self, masks_dir):
        """Load pickled per-task masks, sorted by their numeric filename."""
        # Skip 'init*' files; sort remaining files by the integer before '.pkl'.
        masks_path = [os.path.join(masks_dir, f) for f in os.listdir(masks_dir) if (not f.startswith('init'))]
        masks_path = list(sorted(filter((lambda f: os.path.isfile(f)), masks_path), key=(lambda s: int(os.path.basename(s).split('.pkl')[0]))))
        masks = []
        self._log.info('loading masks')
        for path in masks_path:
            with open(path, 'rb') as f:
                dump = pickle.load(f)
            assert (('mask' in dump) and ('pruning_time' in dump))
            self._log.info('loading pruning_time {}'.format(dump['pruning_time']))
            masks.append(dump['mask'])
        assert (len(masks) == len(masks_path))
        return masks

    def _restore_init_checkout(self, params):
        """Restore the initial (pre-training) checkpoint into the session."""
        checkpoint_saver = tf.train.Saver(var_list=params, max_to_keep=None)
        checkpoint_path = os.path.join(self._save_checkpoints_dir, 'ckpt_init-0')
        self._log.info('init_weight success form{}'.format(checkpoint_path))
        checkpoint_saver.restore(self._sess, checkpoint_path)

    def _save_checkpoint(self, step, params, prefix='ckpt_epoch'):
        """Save a checkpoint of ``params`` at ``step`` if a directory is set."""
        if self._save_checkpoints_dir:
            checkpoint_saver = tf.train.Saver(var_list=params, max_to_keep=None)
            checkpoint_path = os.path.join(self._save_checkpoints_dir, prefix)
            checkpoint_saver.save(self._sess, checkpoint_path, global_step=step)

    def before_forward(self, task_id):
        """Return the precomputed mask feed_dict for this task's forward pass."""
        return self._feed_masks[task_id]

    def map_mask_name_to_w(self, mask_name):
        """Translate a mask placeholder name ('..._m*') to its weight name ('..._w:0')."""
        return (mask_name.split('_m')[0] + '_w:0')

    def apply_mask(self, task_id):
        """Build the feed_dict mapping mask placeholders to a task's arrays."""
        # A single shared dict serves all tasks; a list holds per-task masks.
        if isinstance(self.masks, dict):
            remain_mask = self.masks
        else:
            remain_mask = self.masks[task_id]
        feed_dict = {}
        for (name, p) in self.masks_dic.items():
            w_name = self.map_mask_name_to_w(name)
            if (w_name in remain_mask):
                feed_dict[p] = remain_mask[w_name]
        return feed_dict
class TestEmptyList(unittest.TestCase):
    """Empty lists must validate against every list-shaped schema, and any
    list must validate against an empty schema."""

    def test_empty_data(self):
        # An empty list passes the string alias, an empty schema list, and
        # a typed element schema alike.
        empty = []
        for schema in ('list', [], ['int']):
            self.assertTrue(validator.validate(empty, schema))

    def test_empty_schema(self):
        # An empty schema list accepts arbitrary list contents.
        self.assertTrue(validator.validate(['alex', 'rustic'], []))
class BotogramRunner():
    """Orchestrates a set of botogram bots.

    Spawns one IPC server process, a pool of worker processes and one
    updater process per bot, then loops scheduling periodic tasks until
    stopped (Ctrl+C or stop()).
    """

    def __init__(self, *bots, workers=2):
        # Freeze the bots so they can be shared safely across processes.
        self._bots = {bot._bot_id: bot.freeze() for bot in bots}
        self._updater_processes = {}
        self._worker_processes = []
        self._ipc_process = None
        self.ipc = None
        self.running = False
        self._stop = False
        self._started_at = None
        # Last whole second at which scheduled tasks were checked.
        self._last_scheduled_checks = -1
        self._ipc_server = ipc.IPCServer()
        self.ipc_port = self._ipc_server.port
        self.ipc_auth_key = self._ipc_server.auth_key
        self._ipc_stop_key = self._ipc_server.stop_key
        # Shared memory must use the multiprocessing driver in runner mode.
        for bot in self._bots.values():
            bot._shared_memory.switch_driver(shared.MultiprocessingDriver())
        self._workers_count = workers
        self.logger = logbook.Logger('botogram runner')

    def run(self):
        """Boot all processes and loop until a stop is requested.

        Raises:
            RuntimeError: if the runner is already running.
        """
        if self.running:
            raise RuntimeError('Server already running')
        self.logger.debug('Booting up the botogram runner...')
        self.logger.debug('IPC address: 127.0.0.1:%s' % self.ipc_port)
        self.logger.debug('IPC auth key: %s' % self.ipc_auth_key)
        self.running = True
        self._started_at = time.time()
        self._enable_signals()
        to_updaters = self._boot_processes()
        self.logger.info('Your bot is now running!')
        self.logger.info('Press Ctrl+C to exit.')
        try:
            while not self._stop:
                self._loop()
                time.sleep(0.1)
        except (KeyboardInterrupt, InterruptedError):
            pass
        self._shutdown_processes(to_updaters)
        self.running = False
        self._started_at = None
        self._last_scheduled_checks = -1

    def _loop(self):
        """One iteration of the main loop: enqueue due scheduled tasks."""
        now = int(time.time())
        # Only check scheduled tasks at most once per second.
        if now > self._last_scheduled_checks:
            self._last_scheduled_checks = now
            jobs_list = []
            for bot in self._bots.values():
                for task in bot.scheduled_tasks(current_time=now, wrap=False):
                    jobs_list.append(jobs.Job(bot._bot_id, jobs.process_task, {'task': task}))
            if jobs_list:
                self.ipc.command('jobs.bulk_put', jobs_list)

    def stop(self, *__):
        """Request a graceful stop (also used as a signal handler)."""
        self._stop = True

    def _boot_processes(self):
        """Start the IPC server, workers and per-bot updaters.

        Returns the queue used to send commands to the updater processes.
        """
        upd_commands = multiprocessing.Queue()
        ipc_process = processes.IPCProcess(None, self._ipc_server)
        ipc_process.start()
        self._ipc_process = ipc_process
        ipc_info = (self.ipc_port, self.ipc_auth_key)
        # Retry until the IPC server process starts accepting connections.
        while True:
            try:
                self.ipc = ipc.IPCClient(*ipc_info)
                break
            except ConnectionRefusedError:
                time.sleep(0.1)
        for i in range(self._workers_count):
            worker = processes.WorkerProcess(ipc_info, self._bots)
            worker.start()
            self._worker_processes.append(worker)
        for bot_id, bot in self._bots.items():
            updater = processes.UpdaterProcess(ipc_info, bot, upd_commands)
            updater.start()
            # BUG FIX: previously keyed on the `id` builtin, collapsing all
            # bots' updaters into a single dict entry (so shutdown sent too
            # few 'stop' commands and could not join every process).
            self._updater_processes[bot_id] = updater
        return upd_commands

    def _shutdown_processes(self, to_updaters):
        """Stop updaters, workers and the IPC server, in that order."""
        self.logger.info('Shutting down the runner...')
        # One 'stop' command per updater process.
        for i in range(len(self._updater_processes)):
            to_updaters.put('stop')
        for process in self._updater_processes.values():
            process.join()
        # BUG FIX: this previously assigned to the misspelled attribute
        # `_updaters_processes`, leaving the real dict populated with dead
        # process handles.
        self._updater_processes = {}
        # Workers exit once the job queue shuts down.
        self.ipc.command('jobs.shutdown', None)
        for worker in self._worker_processes:
            worker.join()
        self._worker_processes = []
        self.ipc.command('__stop__', self._ipc_stop_key)
        self._ipc_process.join()
        self.ipc = None

    def _enable_signals(self):
        """Route SIGINT/SIGTERM and interpreter exit to stop()."""
        atexit.register(self.stop)
        for one in (signal.SIGINT, signal.SIGTERM):
            signal.signal(one, self.stop)
def parse_markers(x: ((dict[(str, str)] | list[str]) | tuple[(str, ...)])) -> dict[(str, str)]:
if isinstance(x, (list, tuple)):
mapping = {name.strip(): '' for name in x}
elif isinstance(x, dict):
mapping = {name.strip(): description.strip() for (name, description) in x.items()}
else:
msg = "'markers' must be a mapping from markers to descriptions."
raise click.BadParameter(msg)
for name in mapping:
if (not name.isidentifier()):
msg = f'{name} is not a valid Python name and cannot be used as a marker.'
raise click.BadParameter(msg)
return mapping |
def get_fingerprint(mol, hparams):
    """Return a Morgan fingerprint of ``mol`` as a 1-D numpy array.

    Args:
        mol: an RDKit Mol, a SMILES string, or None.
        hparams: dict with 'fingerprint_length' and 'fingerprint_radius'.

    Returns:
        np.ndarray of shape (fingerprint_length,); all zeros when the
        molecule is None or the SMILES cannot be parsed.
    """
    n_bits = hparams['fingerprint_length']
    radius = hparams['fingerprint_radius']
    if isinstance(mol, str):
        mol = Chem.MolFromSmiles(mol)
    if mol is None:
        # Unparseable SMILES (or explicit None): zero fingerprint sentinel.
        return np.zeros((n_bits,))
    bit_vect = AllChem.GetMorganFingerprintAsBitVect(mol, radius, n_bits)
    out = np.zeros((1,))
    DataStructs.ConvertToNumpyArray(bit_vect, out)
    return out
def balance_classes(dataset, num_classes=2):
    """Resample ``dataset`` so every class contributes the same example count.

    Each class is sampled with replacement to num_train // num_classes
    examples, then the combined set is shuffled.

    Returns:
        A new torch.utils.data.TensorDataset of the balanced, shuffled data.
    """
    inputs, targets = dataset[:]
    # Equal quota per class; sampling with replacement fills it even for
    # classes smaller than the quota.
    per_class = inputs.size(0) // num_classes
    sampled_inputs = []
    sampled_targets = []
    for label in range(num_classes):
        class_mask = targets == label
        class_inputs = inputs[class_mask]
        class_targets = targets[class_mask]
        picks = torch.randint(class_inputs.size(0), (per_class,))
        sampled_inputs.append(class_inputs[picks])
        sampled_targets.append(class_targets[picks])
    all_inputs = torch.cat(sampled_inputs)
    all_targets = torch.cat(sampled_targets)
    shuffle = torch.randperm(all_inputs.size(0))
    return torch.utils.data.TensorDataset(all_inputs[shuffle], all_targets[shuffle])
def set_articulation_state(articulation: sapien.Articulation, state: np.ndarray):
    """Restore a full articulation state from a flat state vector.

    Expected layout (assumed from the slicing below — confirm against the
    matching serializer): [root_pos(3), root_quat(4), lin_vel(3), ang_vel(3),
    qpos, qvel] with qpos and qvel of equal length.
    """
    articulation.set_root_pose(Pose(state[:3], state[3:7]))
    articulation.set_root_velocity(state[7:10])
    articulation.set_root_angular_velocity(state[10:13])
    qpos, qvel = np.split(state[13:], 2)
    articulation.set_qpos(qpos)
    articulation.set_qvel(qvel)
# NOTE(review): the leading `.parametrize(...)` looks like a truncated
# `@pytest.mark.parametrize` decorator — confirm against the original file.
.parametrize('search, documents, k', [pytest.param((ranker_a | ranker_b), documents(), k, id=f'Union rankers: {ranker_a.__class__.__name__} | {ranker_b.__class__.__name__} k: {k}') for k in [None, 3, 4] for ranker_b in cherche_rankers(key='id', on='article') for ranker_a in cherche_rankers(key='id', on='title')])
def test_ranker_union(search, documents: list, k: int):
    """Union of two rankers (title-based | article-based) over sample documents.

    Checks that: the answer count is min(k, len(documents)) (all documents
    when k is None); every hit keeps its full payload (title/article/author);
    the expected best document ranks first for two sample queries; and an
    empty document list yields no answers.
    """
    search.add(documents)
    answers = search(q='Eiffel tower France', documents=documents, k=k)
    if (k is not None):
        assert (len(answers) == min(k, len(documents)))
    else:
        assert (len(answers) == len(documents))
    for (index, sample) in enumerate(answers):
        # Union must preserve the full document payload, not just scores/ids.
        for key in ['title', 'article', 'author']:
            assert (key in sample)
        if (index == 0):
            assert (sample['title'] == 'Eiffel tower')
    answers = search(q='Canada', documents=documents, k=k)
    if (k is None):
        assert (answers[0]['title'] == 'Montreal')
    elif (k >= 1):
        assert (answers[0]['title'] == 'Montreal')
    else:
        assert (len(answers) == 0)
    # No candidate documents: must return an empty result, not raise.
    answers = search(q='Paris', documents=[], k=k)
    assert (len(answers) == 0)
def _ssim_3D(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv3d(img1, window, padding=(window_size // 2), groups=channel)
mu2 = F.conv3d(img2, window, padding=(window_size // 2), groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = (mu1 * mu2)
sigma1_sq = (F.conv3d((img1 * img1), window, padding=(window_size // 2), groups=channel) - mu1_sq)
sigma2_sq = (F.conv3d((img2 * img2), window, padding=(window_size // 2), groups=channel) - mu2_sq)
sigma12 = (F.conv3d((img1 * img2), window, padding=(window_size // 2), groups=channel) - mu1_mu2)
C1 = (0.01 ** 2)
C2 = (0.03 ** 2)
ssim_map = ((((2 * mu1_mu2) + C1) * ((2 * sigma12) + C2)) / (((mu1_sq + mu2_sq) + C1) * ((sigma1_sq + sigma2_sq) + C2)))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1) |
def optimise_grid_point(geometry_optimiser: GeometryOptimiser, molecule: 'Ligand', qc_spec: 'QCOptions', local_options: 'LocalResource', coordinates: List[float], dihedral: Tuple[(int, int, int, int)], dihedral_angle: int, job_id: int) -> GridPointResult:
    """Optimise the molecule with one dihedral constrained to a grid angle.

    Runs inside a per-grid-point working folder. Input coordinates arrive as
    a flat list in Bohr; the result geometry is returned flat in Bohr too.
    """
    workdir = f'grid_point_{dihedral_angle}_job_{job_id}'
    with folder_setup(folder_name=workdir):
        extras = _build_optimiser_settings(dihedral=dihedral, dihedral_angle=dihedral_angle)
        # Work on a copy so the caller's molecule is never mutated.
        target = copy.deepcopy(molecule)
        flat_bohr = np.array(coordinates)
        target.coordinates = (flat_bohr * constants.BOHR_TO_ANGS).reshape((target.n_atoms, 3))
        result_mol, full_result = geometry_optimiser.optimise(
            molecule=target,
            qc_spec=qc_spec,
            local_options=local_options,
            allow_fail=False,
            return_result=True,
            extras=extras,
        )
        return GridPointResult(
            dihedral_angle=dihedral_angle,
            input_geometry=coordinates,
            final_geometry=(result_mol.coordinates * constants.ANGS_TO_BOHR).ravel().tolist(),
            final_energy=full_result.energies[-1],
        )
# NOTE(review): the bare `_dtype_float_test(...)` call looks like a truncated
# parametrizing decorator — confirm against the original file.
_dtype_float_test(only64=True, include_complex=True, additional_kwargs={'bias_is_tensor': [True, False]})
def test_rootfinder_with_params(dtype, device, bias_is_tensor):
    """Rootfinder with extra parameters: check the root and its gradients.

    Solves f(y; A, diag, bias) = 0 with Broyden's method, verifies the
    residual is ~0 at the solution, then gradchecks the solve w.r.t. all
    differentiable inputs (skipped for complex y0 with scalar bias).
    """
    # Fixed seeds keep the random problem instance deterministic.
    torch.manual_seed(100)
    random.seed(100)
    nr = 2
    nbatch = 2
    fwd_options = {'method': 'broyden1', 'f_tol': 1e-09, 'alpha': (- 0.5)}
    clss = DummyModuleExplicit
    A = (torch.randn((nr, nr)) * 0.5).to(dtype).requires_grad_()
    diag = torch.randn((nbatch, nr)).to(dtype).requires_grad_()
    if bias_is_tensor:
        bias = torch.zeros((nbatch, nr)).to(dtype).requires_grad_()
    else:
        bias = 0.0
    y0 = torch.randn((nbatch, nr)).to(dtype)
    model = clss(addx=True)
    y = rootfinder(model.forward, y0, (A, diag, bias), **fwd_options)
    f = model.forward(y, A, diag, bias)
    # The residual at the root must vanish (compare against a zero tensor of
    # matching shape/dtype).
    assert torch.allclose((f * 0), f)
    def getloss(y0, A, diag, bias):
        model = clss(addx=True)
        y = rootfinder(model.forward, y0, (A, diag, bias), **fwd_options)
        return y
    # Gradcheck is skipped for complex dtype with a scalar (non-tensor) bias.
    checkgrad = ((not torch.is_complex(y0)) or bias_is_tensor)
    if checkgrad:
        gradcheck(getloss, (y0, A, diag, bias))
        gradgradcheck(getloss, (y0, A, diag, bias))
def create_branch(version):
    """Create and check out a ``release-<version>`` branch from upstream main.

    Raises:
        RuntimeError: if the working tree has uncommitted or untracked changes.

    Returns:
        The git.Repo object for the current directory.
    """
    repo = Repo.init('.')
    if repo.is_dirty(untracked_files=True):
        raise RuntimeError('Repository is dirty, please commit/stash your changes.')
    branch_name = f'release-{version}'
    print(f'{Fore.CYAN}Create {branch_name} branch from upstream main')
    upstream = get_upstream(repo)
    # Make sure upstream/main is current before branching from it.
    upstream.fetch()
    repo.create_head(branch_name, upstream.refs.main, force=True).checkout()
    return repo
# NOTE(review): the leading `.parametrize(...)` looks like a truncated
# `@pytest.mark.parametrize` decorator — confirm against the original file.
.parametrize('golden', COFFEE_SLASH_GOLDEN)
def test_coffee_slashes(lexer, golden):
    """Each golden case says whether '/' in that snippet lexes as a regex.

    Every token containing a slash must be tagged String.Regex exactly when
    the golden case expects regex context.
    """
    (input_str, slashes_are_regex_here) = golden
    output = list(lexer.get_tokens(input_str))
    print(output)
    for (t, s) in output:
        if ('/' in s):
            is_regex = (t is Token.String.Regex)
            assert (is_regex == slashes_are_regex_here), (t, s)
def format_data(value='', datatypes=int, nullable=True, pre_format_function=None, format_function=to_int, post_format_function=None, **kwargs) -> Any:
    """Run ``value`` through an optional pre/convert/post pipeline.

    Order of operations:
      1. ``pre_format_function(value)`` if provided.
      2. If ``nullable`` and the value is none-like, the result is None and
         conversion is skipped.
      3. Otherwise ``format_function(value, **kwargs)``; conversion failures
         are swallowed and the unconverted value kept (best-effort).
      4. ``post_format_function`` runs only when the result is an instance of
         ``datatypes``.
    """
    if pre_format_function:
        value = pre_format_function(value)
    if nullable and is_none_like(value):
        # None-like input short-circuits conversion entirely.
        result = None
    else:
        result = value
        try:
            result = format_function(value, **kwargs)
        except Exception:
            # Deliberate best-effort: keep the original value on failure.
            pass
    if post_format_function and isinstance(result, datatypes):
        result = post_format_function(result)
    return result
def _zip_pseudo_fifty_mbytes(file_buffer_list: list, zip_bytes_io: io.BytesIO):
    """Pseudonymise a batch of DICOM file buffers into one in-memory zip.

    Each buffer is parsed with pydicom, anonymised in place (private and
    unknown tags dropped; default identifying keywords minus PatientSex),
    and written to ``zip_bytes_io`` under a pseudonymised file name.

    Returns:
        True if any file failed to process (the whole batch is aborted at
        the first failure), False if every file was written.
    """
    bad_data = False
    file_count = 0
    # PatientSex is removed from the identifying keywords, i.e. it is kept
    # as-is in the output rather than pseudonymised.
    keywords = pseudonymisation_api.get_default_pseudonymisation_keywords()
    keywords.remove('PatientSex')
    strategy = pseudonymisation_api.pseudonymisation_dispatch
    zip_stream = zip_bytes_io
    with ZipFile(zip_stream, mode='w', compression=ZIP_DEFLATED) as myzip:
        for uploaded_file_buffer in file_buffer_list:
            file_count += 1
            original_file_name = None
            try:
                original_file_name = uploaded_file_buffer.name
                # force=True: accept files without a valid DICOM preamble.
                ds_input: pydicom.FileDataset = pydicom.dcmread(uploaded_file_buffer, force=True)
                anonymise_dataset(ds_input, delete_private_tags=True, delete_unknown_tags=True, copy_dataset=False, identifying_keywords=keywords, replacement_strategy=strategy)
                temp_anon_filepath = build_pseudonymised_file_name(ds_input)
                in_memory_temp_file = io.BytesIO()
                anon_filename = pathlib.Path(temp_anon_filepath).name
                pydicom.dcmwrite(in_memory_temp_file, ds_input)
            except (KeyError, OSError, ValueError) as e_info:
                print(e_info)
                print(f'While processing {original_file_name}')
                bad_data = True
                # Abort the whole batch on the first bad file.
                break
            myzip.writestr(anon_filename, in_memory_temp_file.getvalue(), compress_type=ZIP_DEFLATED)
            in_memory_temp_file.close()
    return bad_data
def masks_to_bboxes(masks):
    """Convert boolean mask(s) to bounding boxes [y1, x1, y2, x2].

    y2/x2 are exclusive (max index + 1). An all-False mask yields an
    all-zero box.

    Args:
        masks: bool array of shape (H, W) or (num_masks, H, W).

    Returns:
        float64 array of shape (4,) for a single mask, (num_masks, 4) for a
        stack.
    """
    masks = np.asarray(masks)
    assert masks.dtype == bool
    ndim = masks.ndim
    assert ndim in [2, 3], 'masks must be 2 or 3 dimensional'
    single = ndim == 2
    if single:
        masks = masks[None]
    bboxes = np.zeros((len(masks), 4), dtype=np.float64)
    for idx, mask in enumerate(masks):
        coords = np.argwhere(mask)
        if coords.size == 0:
            # Empty mask: leave the zero box in place.
            continue
        y1, x1 = coords.min(0)
        y2, x2 = coords.max(0) + 1
        bboxes[idx] = (y1, x1, y2, x2)
    return bboxes[0] if single else bboxes
class _packbits(Function):
    """Autograd wrapper that packs a float density grid into a uint8 bitfield.

    Forward-only: no backward is defined here, so gradients do not flow
    through the packing.
    """

    # NOTE(review): the bare `_fwd(...)` call below looks like a truncated
    # `@..._fwd(cast_inputs=...)` decorator — confirm against the original.
    _fwd(cast_inputs=torch.float32)
    def forward(ctx, grid, thresh, bitfield=None):
        """Pack cells of `grid` >= `thresh` into bits via the CUDA backend.

        Returns a uint8 tensor of C*H3 // 8 bytes (one bit per grid cell).
        """
        if (not grid.is_cuda):
            # The backend kernel requires device memory.
            grid = grid.cuda()
        grid = grid.contiguous()
        C = grid.shape[0]
        H3 = grid.shape[1]
        # One output byte per 8 grid cells.
        N = ((C * H3) // 8)
        if (bitfield is None):
            bitfield = torch.empty(N, dtype=torch.uint8, device=grid.device)
        _backend.packbits(grid, N, thresh, bitfield)
        return bitfield
# NOTE(review): the bare statements below look like truncated click
# decorators (`@_benchmark.command(...)`, `@_option(...)`) — confirm
# against the original file.
_benchmark.command(name='collect')
_option
('--force', '-f', help='Force collect results even if workflows are still running', default=False, is_flag=True)
def collect_command(workflow: str, force: bool) -> NoReturn:
    """CLI entry point: collect benchmark results for ``workflow``.

    Errors are logged rather than re-raised so the CLI exits gracefully.
    """
    try:
        collect(workflow, force)
    except Exception as e:
        logger.error(f'Something went wrong when collecting results: {e}')
class SubscriptionAddOn(Resource):
    """Recurly subscription add-on resource.

    ``schema`` maps attribute names to their deserialized types; a one-element
    list value denotes a collection of the named resource type, and a string
    names another resource class.
    """

    schema = {
        'add_on': 'AddOnMini',
        'add_on_source': str,
        'created_at': datetime,
        'expired_at': datetime,
        'id': str,
        'object': str,
        'percentage_tiers': ['SubscriptionAddOnPercentageTier'],
        'quantity': int,
        'revenue_schedule_type': str,
        'subscription_id': str,
        'tier_type': str,
        'tiers': ['SubscriptionAddOnTier'],
        'unit_amount': float,
        'unit_amount_decimal': str,
        'updated_at': datetime,
        'usage_calculation_type': str,
        'usage_percentage': float,
        'usage_timeframe': str,
    }
class _TzCache():
    """Lazily-initialised persistent key-value cache (peewee-backed).

    If the database cannot be created the cache switches to a no-op "dummy"
    mode, so cache failures never break lookups.
    """

    def __init__(self):
        # Initialisation state: -1 = not attempted, 0 = failed, 1 = ready.
        self.initialised = (- 1)
        self.db = None
        # When True, all cache operations are no-ops.
        self.dummy = False

    def get_db(self):
        """Return the database handle, creating it on first use.

        Returns None (and flips to dummy mode) if creation fails.
        """
        if (self.db is not None):
            return self.db
        try:
            self.db = _TzDBManager.get_database()
        except _TzCacheException as err:
            get_yf_logger().info(f"Failed to create TzCache, reason: {err}. TzCache will not be used. Tip: You can direct cache to use a different location with 'set_tz_cache_location(mylocation)'")
            self.dummy = True
            return None
        return self.db

    def initialise(self):
        """Connect the database and create tables; runs at most once."""
        if (self.initialised != (- 1)):
            return
        db = self.get_db()
        if (db is None):
            self.initialised = 0
            return
        db.connect()
        tz_db_proxy.initialize(db)
        db.create_tables([_KV])
        self.initialised = 1

    def lookup(self, key):
        """Return the cached value for ``key``, or None if absent or disabled."""
        if self.dummy:
            return None
        if (self.initialised == (- 1)):
            self.initialise()
        if (self.initialised == 0):
            return None
        try:
            return _KV.get((_KV.key == key)).value
        except _KV.DoesNotExist:
            return None

    def store(self, key, value):
        """Store ``value`` under ``key``; value=None deletes the entry.

        On a duplicate key, updates the row only if the value changed.
        """
        if self.dummy:
            return
        if (self.initialised == (- 1)):
            self.initialise()
        if (self.initialised == 0):
            return
        db = self.get_db()
        if (db is None):
            return
        try:
            if (value is None):
                # None acts as a tombstone: remove any existing row.
                q = _KV.delete().where((_KV.key == key))
                q.execute()
                return
            with db.atomic():
                _KV.insert(key=key, value=value).execute()
        except _peewee.IntegrityError:
            # Key already exists: fall back to an update when the value differs.
            old_value = self.lookup(key)
            if (old_value != value):
                get_yf_logger().debug(f'Value for key {key} changed from {old_value} to {value}.')
                with db.atomic():
                    q = _KV.update(value=value).where((_KV.key == key))
                    q.execute()
def test_doesnt_raise_deprecation_warning():
    """A FlaskInjector-wired view must emit no warnings when served.

    Binds a request-scoped CallableProvider and asserts that handling a GET
    request records zero warnings.
    """
    app = Flask(__name__)
    def provide_str():
        return 'this is string'
    def configure(binder):
        binder.bind(str, to=CallableProvider(provide_str), scope=request)
    # NOTE(review): the bare `('/')` below looks like a truncated
    # `@app.route('/')` decorator — confirm against the original file.
    ('/')
    def index(s: str):
        return s
    FlaskInjector(app=app, modules=[configure])
    with warnings.catch_warnings(record=True) as w:
        # Record every warning, including ones already triggered this run.
        warnings.simplefilter('always')
        with app.test_client() as c:
            c.get('/')
        assert (len(w) == 0), map(str, w)
class FatFileSystem(MountFileSystem):
    """FAT family filesystem (incl. EFI system partition), mounted as vfat."""

    type = 'fat'
    # Alternate names this filesystem may be detected under.
    aliases = ['efi system partition', 'vfat', 'fat12', 'fat16']
    _mount_type = 'vfat'

    def detect(cls, source, description):
        """Score detection; 'DOS FAT' descriptions down-score VolumeSystemFileSystem.

        NOTE(review): the first parameter is `cls` but no @classmethod
        decorator is visible here — confirm it wasn't lost in extraction.
        """
        res = super().detect(source, description)
        if ('DOS FAT' in description):
            res.update({VolumeSystemFileSystem: (- 50)})
        return res
class MappingPattern(Pattern):
    """AST node for a mapping match pattern: ``{key: subpattern, ..., **rest}``.

    ``keys`` and ``values`` are parallel lists; ``rest`` is the capture name
    for ``**rest`` or None when absent.
    """

    keys: list[Expression]
    values: list[Pattern]
    rest: NameExpr | None

    def __init__(self, keys: list[Expression], values: list[Pattern], rest: NameExpr | None) -> None:
        super().__init__()
        # Keys and their value sub-patterns must stay in lockstep.
        assert len(keys) == len(values)
        self.keys = keys
        self.values = values
        self.rest = rest

    def accept(self, visitor: PatternVisitor[T]) -> T:
        return visitor.visit_mapping_pattern(self)
class CreateDatabase(Migration):
    """Initial schema: session data plus its web-session/exception/file/input children."""

    def schedule_upgrades(self):
        """Schedule table creation, foreign keys, and indexes (PostgreSQL only)."""
        self.orm_control.assert_dialect(self, 'postgresql')
        # Base table: one row per piece of per-session data.
        self.schedule('alter', op.create_table, 'sessiondata', Column('id', Integer(), nullable=False), Column('web_session_id', Integer(), nullable=True), Column('region_name', Text(), nullable=False), Column('channel_name', Text(), nullable=False), Column('row_type', String(length=40), nullable=True), PrimaryKeyConstraint('id', name='sessiondata_pkey'))
        self.schedule('create_fk', op.create_foreign_key, 'sessiondata_web_session_id_fk', 'sessiondata', 'usersession', ['web_session_id'], ['id'], ondelete='CASCADE')
        self.schedule('indexes', op.create_index, 'ix_sessiondata_web_session_id', 'sessiondata', ['web_session_id'], unique=False)
        # Web-specific extension of usersession (shares its primary key).
        self.schedule('alter', op.create_table, 'webusersession', Column('usersession_id', Integer(), nullable=False), Column('salt', String(length=40), nullable=False), Column('secure_salt', String(length=40), nullable=False), PrimaryKeyConstraint('usersession_id', name='webusersession_pkey'))
        self.schedule('create_fk', op.create_foreign_key, 'webusersession_usersession_id_fkey', 'webusersession', 'usersession', ['usersession_id'], ['id'], ondelete='CASCADE')
        # Child tables of sessiondata, all cascading on delete.
        self.schedule('alter', op.create_table, 'persistedexception', Column('sessiondata_id', Integer(), nullable=False), Column('exception', LargeBinary(), nullable=False), Column('input_name', Text(), nullable=True), PrimaryKeyConstraint('sessiondata_id', name='persistedexception_pkey'))
        self.schedule('create_fk', op.create_foreign_key, 'persistedexception_sessiondata_id_fkey', 'persistedexception', 'sessiondata', ['sessiondata_id'], ['id'], ondelete='CASCADE')
        self.schedule('alter', op.create_table, 'persistedfile', Column('sessiondata_id', Integer(), nullable=False), Column('input_name', Text(), nullable=False), Column('filename', Text(), nullable=False), Column('file_data', LargeBinary(), nullable=False), Column('content_type', Text(), nullable=False), Column('size', BigInteger(), nullable=False), PrimaryKeyConstraint('sessiondata_id', name='persistedfile_pkey'))
        self.schedule('create_fk', op.create_foreign_key, 'persistedfile_sessiondata_id_fkey', 'persistedfile', 'sessiondata', ['sessiondata_id'], ['id'], ondelete='CASCADE')
        self.schedule('alter', op.create_table, 'userinput', Column('sessiondata_id', Integer(), nullable=False), Column('key', Text(), nullable=False), Column('value', Text(), nullable=False), PrimaryKeyConstraint('sessiondata_id', name='userinput_pkey'))
        self.schedule('create_fk', op.create_foreign_key, 'userinput_sessiondata_id_fkey', 'userinput', 'sessiondata', ['sessiondata_id'], ['id'], ondelete='CASCADE')
def interleave(seqs):
    """Yield one item from each sequence in turn until all are exhausted.

    Unlike zip-based interleaving, shorter sequences simply drop out:
    interleave([[1, 2, 3], [4, 5], [6]]) -> 1, 4, 6, 2, 5, 3.
    """
    active = itertools.cycle(map(iter, seqs))
    while True:
        try:
            for source in active:
                yield next(source)
        except StopIteration:
            # `source` just ran dry: rebuild the cycle without it. The cycle
            # resumes right after the dropped iterator, preserving order.
            survivors = itertools.takewhile(partial(operator.is_not, source), active)
            active = itertools.cycle(survivors)
        else:
            # The cycle itself is empty (all iterators dropped): we're done.
            return
def _build_lambda_role(self, db: dynamodb.Table) -> iam.Role:
    """Build the execution role assumed by the service Lambda.

    Grants read access to AppConfig dynamic configuration, Put/Get on the
    given DynamoDB table, and the AWS basic Lambda execution managed policy.
    """
    appconfig_policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(
            actions=['appconfig:GetLatestConfiguration', 'appconfig:StartConfigurationSession'],
            resources=['*'],
            effect=iam.Effect.ALLOW,
        )
    ])
    table_policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(
            actions=['dynamodb:PutItem', 'dynamodb:GetItem'],
            resources=[db.table_arn],
            effect=iam.Effect.ALLOW,
        )
    ])
    basic_execution = iam.ManagedPolicy.from_aws_managed_policy_name(
        managed_policy_name=f'service-role/{constants.LAMBDA_BASIC_EXECUTION_ROLE}'
    )
    return iam.Role(
        self,
        constants.SERVICE_ROLE,
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={'dynamic_configuration': appconfig_policy, 'dynamodb_db': table_policy},
        managed_policies=[basic_execution],
    )
def main():
    """Train the demo->predicate model, then optionally rerun in inference mode.

    Builds loaders from the demo graph dataset, instantiates a graph- or
    action-input encoder-decoder per ``args.inputtype``, optionally resumes
    from a checkpoint, and calls ``train``.
    """
    (args, checkpoint_dir, writer, model_config) = setup(train=True)
    print(args)
    # Imported here (not at module top) — presumably heavy or dependent on
    # setup() side effects; confirm before hoisting.
    from predicate.demo_dataset_graph import get_dataset
    from predicate.demo_dataset_graph import collate_fn
    from predicate.demo_dataset_graph import to_cuda_fn
    (train_dset, test_dset, new_test_dset) = get_dataset(args, train=True)
    train_loader = DataLoader(dataset=train_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_workers, collate_fn=collate_fn, drop_last=True)
    # args.single selects evaluation on the held-out split of the same
    # dataset; otherwise a separate "new" test set is used for both val/test.
    if args.single:
        test_loader = DataLoader(dataset=test_dset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_fn, drop_last=False)
        val_loader = DataLoader(dataset=test_dset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_fn, drop_last=False)
    else:
        test_loader = DataLoader(dataset=new_test_dset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_fn, drop_last=False)
        val_loader = DataLoader(dataset=new_test_dset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_fn, drop_last=False)
    # Model choice follows the demonstration encoding.
    if (args.inputtype == 'graphinput'):
        from network.encoder_decoder import GraphDemo2Predicate
        model = GraphDemo2Predicate(args, train_dset, **model_config)
    elif (args.inputtype == 'actioninput'):
        from network.encoder_decoder import ActionDemo2Predicate
        model = ActionDemo2Predicate(args, train_dset, **model_config)
    if (args.resume != ''):
        model.load(args.resume, True)
    # Optimise only trainable parameters.
    optim = torch.optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), args.model_lr_rate)
    if (args.gpu_id is not None):
        model.cuda()
        model.set_to_cuda_fn(to_cuda_fn)
    train(args, model, optim, train_loader, test_loader, val_loader, checkpoint_dir, writer, train_dset, test_dset)
    # After training, flip to inference mode and run the loop once more.
    if (args.inference != 1):
        args.inference = 1
        train(args, model, optim, train_loader, test_loader, val_loader, checkpoint_dir, writer, train_dset, test_dset)
# NOTE(review): the bare string below looks like a truncated
# `@patch('beeref.selection.SelectableMixin.hoverMoveEvent')` decorator —
# confirm against the original file.
('beeref.selection.SelectableMixin.hoverMoveEvent')
def test_hover_move_event_crop_mode_inside_edge(hover_mock, qapp, item):
    """In crop mode, hovering near the left crop edge shows a horizontal
    resize cursor and does not forward the event to the mixin handler."""
    item.crop_mode = True
    item.crop_temp = QtCore.QRectF(0, 0, 100, 80)
    event = MagicMock()
    event.pos.return_value = QtCore.QPointF(5, 40)
    item.hoverMoveEvent(event)
    # BUG FIX: the comparison result was computed and discarded, so the test
    # could never fail on a wrong cursor; assert it.
    assert item.cursor() == Qt.CursorShape.SizeHorCursor
    hover_mock.assert_not_called()
def _assign_rank_write_loads(rank_to_write_loads: List[Dict[(str, List[_WriteLoad])]], rank_to_size: List[int], ranks_to_choose: List[int], logical_path: str, size: int, partition_result: List[List[_WriteLoad]]) -> None:
chosen_rank = min(ranks_to_choose, key=(lambda rank: rank_to_size[rank]))
partition_result[chosen_rank].extend(rank_to_write_loads[chosen_rank][logical_path])
rank_to_size[chosen_rank] += size |
def _build(polygons, criterion='rook', ids=None):
    """Compute the contiguity neighbor graph for a set of polygons.

    Args:
        polygons: polygon collection (or an open shapefile handle).
        criterion: contiguity rule name, e.g. 'rook' or 'queen'.
        ids: optional per-polygon identifiers; when given, the returned graph
            is keyed by these instead of positional indices.

    Returns:
        (neighbors, ids) where neighbors maps each id to a list of adjacent ids.

    Raises:
        ValueError: if ``ids`` contains duplicates.
    """
    if ids and len(ids) != len(set(ids)):
        raise ValueError('The argument to the ids parameter contains duplicate entries.')
    wttype = WT_TYPE[criterion.lower()]
    geo = polygons
    if issubclass(type(geo), FileIO):
        # Rewind in case the file handle was already consumed.
        geo.seek(0)
    raw_neighbors = ContiguityWeightsLists(polygons, wttype=wttype).w
    if ids:
        # Re-key from positional indices to the caller-supplied ids; merge
        # entries that map to the same id.
        neighbors = {}
        for key, adjacent in raw_neighbors.items():
            neighbors.setdefault(ids[key], set()).update(ids[x] for x in adjacent)
    else:
        neighbors = {key: set(adjacent) for key, adjacent in raw_neighbors.items()}
    as_lists = map(list, neighbors.values())
    return (dict(zip(neighbors.keys(), as_lists, strict=True)), ids)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.