code stringlengths 281 23.7M |
|---|
def test_control_check_get_errors(fake, caplog):
    # Verifies that a custom `check_get_errors` hook is invoked when a
    # control property is read, and that the error it returns gets logged.
    def checking():
        # Side effect lets the test observe that the hook actually ran.
        fake.error = True
        return [(7, 'some error')]
    fake.check_get_errors = checking
    # Reading the property triggers the get + error check (command 'ge').
    fake.fake_ctrl_errors
    assert (fake.error is True)
    # The most recent log record must report the error tuple returned above.
    assert (caplog.record_tuples[(- 1)] == ('pymeasure.instruments.common_base', logging.ERROR, "Error received after trying to get a property with the command 'ge': '(7, 'some error')'.")) |
def read_data(train_data_dir, test_data_dir):
    """Load federated train/test data from directories of JSON files.

    Each JSON file is expected to contain a ``'users'`` list, a
    ``'user_data'`` mapping, and optionally a ``'hierarchies'`` list.

    Args:
        train_data_dir: directory containing training ``.json`` files.
        test_data_dir: directory containing test ``.json`` files.

    Returns:
        Tuple ``(clients, groups, train_data, test_data)`` where ``clients``
        is the sorted list of user ids collected from the training files.
    """
    clients = []
    groups = []
    train_data = {}
    test_data = {}
    train_files = [f for f in os.listdir(train_data_dir) if f.endswith('.json')]
    for f in train_files:
        file_path = os.path.join(train_data_dir, f)
        with open(file_path, 'r') as inf:
            cdata = json.load(inf)
        clients.extend(cdata['users'])
        if ('hierarchies' in cdata):
            groups.extend(cdata['hierarchies'])
        train_data.update(cdata['user_data'])
    test_files = [f for f in os.listdir(test_data_dir) if f.endswith('.json')]
    for f in test_files:
        file_path = os.path.join(test_data_dir, f)
        with open(file_path, 'r') as inf:
            cdata = json.load(inf)
        test_data.update(cdata['user_data'])
    # Bug fix: the original reassigned `clients = sorted(cdata['users'])`
    # inside the test-file loop, discarding the accumulated train users and
    # keeping only the *last* test file's users. Sort the accumulated list.
    clients = sorted(clients)
    return (clients, groups, train_data, test_data)
def test_old_style():
    # Pins the exact Shape (input fields/params and output fields, including
    # overriden_types) that get_attrs_shape produces for the attrs
    # "old style" fixture class; ANY matches the untracked `original` objects.
    assert (get_attrs_shape(OldStyle) == Shape(input=InputShape(constructor=OldStyle, kwargs=None, fields=(InputField(type=Any, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=int, id='b', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY)), params=(Param(field_id='a', name='a', kind=ParamKind.POS_OR_KW), Param(field_id='b', name='b', kind=ParamKind.POS_OR_KW)), overriden_types=frozenset({'a', 'b'})), output=OutputShape(fields=(OutputField(type=Any, id='a', default=NoDefault(), accessor=create_attr_accessor('a', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=int, id='b', default=NoDefault(), accessor=create_attr_accessor('b', is_required=True), metadata=MappingProxyType({}), original=ANY)), overriden_types=frozenset({'a', 'b'})))) |
def compute_cost(num_spin_orbs: int, lambda_tot: float, thc_dim: int, kmesh: list[int], dE_for_qpe: float=0.0016, chi: int=10, beta: Union[(int, None)]=None) -> ResourceEstimates:
    """Estimate fault-tolerant resources for a THC-factorized Hamiltonian.

    Thin wrapper around ``_compute_cost`` that unpacks the returned
    (Toffolis per step, total Toffolis, logical qubits) triple into a
    ResourceEstimates record. ``kmesh`` supplies the k-point grid as
    ``[Nkx, Nky, Nkz]``; ``stps`` is fixed at 20000 iterations.
    """
    per_step, total, qubits = _compute_cost(
        n=num_spin_orbs,
        lam=lambda_tot,
        dE=dE_for_qpe,
        chi=chi,
        beta=beta,
        M=thc_dim,
        Nkx=kmesh[0],
        Nky=kmesh[1],
        Nkz=kmesh[2],
        stps=20000,
    )
    return ResourceEstimates(toffolis_per_step=per_step, total_toffolis=total, logical_qubits=qubits)
def test():
    """Visual smoke test: run a 40 Hz + 20 Hz mixture through two low-pass
    filters (cutoffs 40 and 10) and plot input and both outputs."""
    t = np.linspace(0, 4, 400)
    # Sum of a 40 Hz and a 20 Hz sine sampled on t (expression kept verbatim).
    signal = (np.sin((((t * 40) * np.pi) * 2)) + np.sin((((t * 20) * np.pi) * 2)))
    lp_wide = LPFilter(100, 40)
    lp_narrow = LPFilter(100, 10)
    out_wide = [lp_wide.next(sample) for sample in signal]
    out_narrow = [lp_narrow.next(sample) for sample in signal]
    import matplotlib.pyplot as plt
    # One row per trace: raw input, 40 Hz-cutoff output, 10 Hz-cutoff output.
    for row, trace in enumerate((signal, out_wide, out_narrow), start=1):
        plt.subplot(3, 1, row)
        plt.plot(t, trace)
    plt.show()
def open_file(path, sep=' ', mode='train'):
    """Read a token-per-line tagged file into parallel source/target lists.

    Lines of the form ``<src><sep><tgt>`` extend the current sentence; any
    other line (typically a blank separator) closes it. Sentences become
    parallel lists of token lists.

    Args:
        path: file to read (utf8).
        sep: separator between source token and target tag.
        mode: unused; kept for interface compatibility with callers.

    Returns:
        Tuple ``(src, tgt)`` of lists of sentences (each a list of strings).
    """
    src = []
    tgt = []
    tmp_src = []
    tmp_tgt = []
    with open(path, 'r', encoding='utf8') as f:
        for line in f:
            parts = line.strip().split(sep)
            if len(parts) == 2:
                tmp_src.append(parts[0])
                tmp_tgt.append(parts[1])
            elif tmp_src:
                # Non-pair line closes the current sentence.
                src.append(tmp_src)
                tgt.append(tmp_tgt)
                tmp_src = []
                tmp_tgt = []
    # Bug fix: the original only flushed on a non-pair last line, so a file
    # whose final line is a valid pair silently dropped its last sentence.
    if tmp_src:
        src.append(tmp_src)
        tgt.append(tmp_tgt)
    return (src, tgt)
class ProjectViewSet(ModelViewSet):
    """CRUD viewset for projects plus per-project helper endpoints.

    NOTE(review): the extra endpoints below had lost their decorator name in
    the source (bare ``(detail=True, ...)`` expression lines); they are
    restored as DRF ``@action`` decorators, which the argument signatures
    imply. Confirm ``action`` is imported from rest_framework.decorators.
    """
    permission_classes = ((HasModelPermission | HasProjectsPermission),)
    serializer_class = ProjectSerializer
    filter_backends = (DjangoFilterBackend,)
    filterset_fields = ('title', 'user', 'user__username', 'catalog', 'catalog__uri')

    def get_queryset(self):
        # Only projects visible to the requesting user; the catalog is
        # needed by every action, so fetch it in the same query.
        return Project.objects.filter_user(self.request.user).select_related('catalog')

    @action(detail=True, permission_classes=((HasModelPermission | HasProjectPermission),))
    def overview(self, request, pk=None):
        """Return a condensed overview serialization of a single project."""
        project = self.get_object()
        serializer = ProjectOverviewSerializer(project, context={'request': request})
        return Response(serializer.data)

    @action(detail=True, url_path='navigation/(?P<section_id>\\d+)', permission_classes=((HasModelPermission | HasProjectPermission),))
    def navigation(self, request, pk=None, section_id=None):
        """Compute the navigation tree for one catalog section of a project."""
        project = self.get_object()
        try:
            section = project.catalog.sections.get(pk=section_id)
        except ObjectDoesNotExist as e:
            raise NotFound() from e
        project.catalog.prefetch_elements()
        navigation = compute_navigation(section, project)
        return Response(navigation)

    @action(detail=True, permission_classes=((HasModelPermission | HasProjectPermission),))
    def resolve(self, request, pk=None):
        """Evaluate the conditions of a page/questionset/question/optionset/
        condition against the project's values for one snapshot/set.

        Returns ``{'result': True}`` as soon as any requested element's
        conditions hold, otherwise ``{'result': False}``.
        """
        snapshot_id = request.GET.get('snapshot')
        set_prefix = request.GET.get('set_prefix')
        set_index = request.GET.get('set_index')
        values = self.get_object().values.filter(snapshot_id=snapshot_id).select_related('attribute', 'option')
        # The four element kinds share the same lookup/check pattern;
        # iterate instead of repeating the block four times.
        for param, model in (('page', Page), ('questionset', QuestionSet), ('question', Question), ('optionset', OptionSet)):
            element_id = request.GET.get(param)
            if element_id:
                try:
                    element = model.objects.get(id=element_id)
                    conditions = element.conditions.select_related('source', 'target_option')
                    if check_conditions(conditions, values, set_prefix, set_index):
                        return Response({'result': True})
                except model.DoesNotExist:
                    pass
        condition_id = request.GET.get('condition')
        if condition_id:
            try:
                condition = Condition.objects.select_related('source', 'target_option').get(id=condition_id)
                if check_conditions([condition], values, set_prefix, set_index):
                    return Response({'result': True})
            except Condition.DoesNotExist:
                pass
        return Response({'result': False})

    @action(detail=True, permission_classes=((HasModelPermission | HasProjectPermission),))
    def options(self, request, pk=None):
        """Return provider-backed options for an optionset used by this
        project's catalog; 404 for unknown or non-provider optionsets."""
        project = self.get_object()
        try:
            try:
                optionset_id = request.GET.get('optionset')
                optionset = OptionSet.objects.get(pk=optionset_id)
            except (ValueError, OptionSet.DoesNotExist) as e:
                raise NotFound() from e
            project.catalog.prefetch_elements()
            if (Question.objects.filter_by_catalog(project.catalog).filter(optionsets=optionset) and (optionset.provider is not None)):
                options = [dict(**option, text_and_help=option.get('text_and_help', 'text')) for option in optionset.provider.get_options(project, search=request.GET.get('search'))]
                return Response(options)
        except OptionSet.DoesNotExist:
            pass
        raise NotFound()

    @action(detail=True, methods=['get', 'post'], permission_classes=((HasProjectProgressModelPermission | HasProjectProgressObjectPermission),))
    def progress(self, request, pk=None):
        """Return (GET) or recompute-and-return (POST) the project progress."""
        project = self.get_object()
        # Recompute on POST, or lazily on GET when either counter is unset.
        if ((request.method == 'POST') or (project.progress_count is None) or (project.progress_total is None)):
            project.catalog.prefetch_elements()
            (project.progress_count, project.progress_total) = compute_progress(project)
            project.save()
        try:
            ratio = (project.progress_count / project.progress_total)
        except ZeroDivisionError:
            # An empty catalog has no answerable questions; report 0 progress.
            ratio = 0
        return Response({'count': project.progress_count, 'total': project.progress_total, 'ratio': ratio})

    def perform_create(self, serializer):
        """Create the project, attach default tasks/views for the current
        site/catalog/user, and make the creator an owner."""
        project = serializer.save(site=get_current_site(self.request))
        tasks = Task.objects.filter_current_site().filter_catalog(project.catalog).filter_group(self.request.user).filter_availability(self.request.user)
        for task in tasks:
            project.tasks.add(task)
        # Only auto-attach views when the client did not specify any.
        if (self.request.data.get('views') is None):
            views = View.objects.filter_current_site().filter_catalog(project.catalog).filter_group(self.request.user).filter_availability(self.request.user)
            for view in views:
                project.views.add(view)
        membership = Membership(project=project, user=self.request.user, role='owner')
        membership.save()
class CredentialField(CharField):
    """A CharField that stores only hashed credentials, never raw strings.

    Values written to the database must be wrapped in a Credential (raw
    strings are rejected); values read back are rehydrated into Credential
    objects. The field may not be indexed and may not carry a default.
    """

    def __init__(self, *args, **kwargs):
        CharField.__init__(self, *args, **kwargs)
        # Defaults and indexes make no sense for hashed secrets.
        assert ('default' not in kwargs)
        assert (not self.index)

    def db_value(self, value):
        """Serialize a Credential to its stored (hashed) unicode form."""
        if value is None:
            return None
        if isinstance(value, str):
            raise Exception('A string cannot be given to a CredentialField; please wrap in a Credential')
        return Bytes.for_string_or_unicode(value.hashed).as_unicode()

    def python_value(self, value):
        """Deserialize a stored hash back into a Credential wrapper."""
        if value is None:
            return None
        return Credential(Bytes.for_string_or_unicode(value).as_encoded_str())
def train_lm(args, gpu_id, rank, loader, model, optimizer, scheduler):
    # Language-model training loop: runs exactly args.total_steps steps with
    # gradient accumulation, periodic console reporting, and periodic
    # checkpointing. In distributed mode only rank 0 reports/saves.
    model.train()
    start_time = time.time()
    total_loss = 0.0
    (total_correct, total_denominator) = (0.0, 0.0)
    steps = 1
    total_steps = args.total_steps
    # assumes `loader` yields at least total_steps batches — TODO confirm
    # (next() on an exhausted iterator would raise StopIteration here).
    loader_iter = iter(loader)
    while True:
        if (steps == (total_steps + 1)):
            break
        (src, tgt, seg) = next(loader_iter)
        if (gpu_id is not None):
            src = src.cuda(gpu_id)
            tgt = tgt.cuda(gpu_id)
            seg = seg.cuda(gpu_id)
        loss_info = model(src, tgt, seg)
        (loss, correct, denominator) = loss_info
        total_loss += loss.item()
        total_correct += correct.item()
        total_denominator += denominator.item()
        # Scale the loss so gradients sum to the full-batch gradient across
        # accumulation_steps micro-batches.
        loss = (loss / args.accumulation_steps)
        if args.fp16:
            # Apex-style mixed precision: scale the loss before backward.
            with args.amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        # Optimizer/scheduler step only every accumulation_steps micro-batches.
        if ((steps % args.accumulation_steps) == 0):
            optimizer.step()
            scheduler.step()
            model.zero_grad()
        if (((steps % args.report_steps) == 0) and ((not args.dist_train) or (args.dist_train and (rank == 0)))):
            loss = (total_loss / args.report_steps)
            elapsed = (time.time() - start_time)
            # Throughput counts tokens across all workers when distributed.
            done_tokens = ((((args.batch_size * src.size(1)) * args.report_steps) * args.world_size) if args.dist_train else ((args.batch_size * src.size(1)) * args.report_steps))
            print('| {:8d}/{:8d} steps| {:8.2f} tokens/s| loss {:7.2f}| acc: {:3.3f}'.format(steps, total_steps, (done_tokens / elapsed), loss, (total_correct / total_denominator)))
            total_loss = 0.0
            (total_correct, total_denominator) = (0.0, 0.0)
            start_time = time.time()
        if (((steps % args.save_checkpoint_steps) == 0) and ((not args.dist_train) or (args.dist_train and (rank == 0)))):
            save_model(model, ((args.output_model_path + '-') + str(steps)))
        steps += 1 |
def get_args(argv=None):
    """Parse command-line arguments for the phones.txt augmentation script.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads ``sys.argv[1:]`` — so existing zero-argument
            call sites keep working unchanged. Passing an explicit list makes
            the function usable (and testable) without touching sys.argv.

    Returns:
        argparse.Namespace with ``input_phones_txt``,
        ``nonterminal_symbols_list`` and ``output_phones_txt`` attributes.
    """
    parser = argparse.ArgumentParser(description='This script augments a phones.txt\n file (a phone-level symbol table) by adding certain special symbols\n relating to grammar support. See ../add_nonterminals.sh for context.')
    parser.add_argument('input_phones_txt', type=str, help='Filename of input phones.txt file, to be augmented')
    parser.add_argument('nonterminal_symbols_list', type=str, help='Filename of a file containing a list of nonterminal symbols, one per line. E.g. #nonterm:contact_list')
    parser.add_argument('output_phones_txt', type=str, help='Filename of output phones.txt file. May be the same as input-phones-txt.')
    args = parser.parse_args(argv)
    return args
class GPUSchema(Schema):
    """Marshmallow schema describing a GPU resource request.

    Loading a payload through this schema yields a ``GPU`` instance.
    """

    class Meta():
        # Reject unexpected keys instead of silently dropping them.
        unknown = RAISE

    gpus = fields.Int(required=True, description='Number of gpus for training. This affects the `world size` of PyTorch DDP.', exclusiveMinimum=0)
    vRam = fields.Int(required=True, description='Minimum VRam required for each gpu. Set it to `-1` to use all gpus.')
    wantsMore = fields.Bool(required=True, description='Set to `true` to use all visible gpus and all VRams and ignore `gpus` and `vRam`.')

    # Bug fix: the decorator had been garbled to a bare `_load` expression;
    # it must be marshmallow's @post_load so that Schema.load() returns a
    # GPU object instead of a plain dict. Confirm `post_load` is imported
    # from marshmallow at the top of the file.
    @post_load
    def _(self, data, **kwargs):
        return GPU(**data)
class GripperController():
    """PWM controller for an Interbotix-style parallel gripper over ROS.

    Tracks a desired finger position (metres, between ``lower_limit`` and
    ``upper_limit``), publishes ``JointSingleCommand`` PWM efforts from a
    50 Hz timer, and exposes a normalized [des_pos_min, des_pos_max]
    command interface via ``set_continuous_position``.

    NOTE(review): in the source dump ``des_pos`` had lost its ``@property``
    decorator and the setter line was garbled to ``_pos.setter``; both are
    restored here, which matches the ``self.des_pos = ...`` assignments used
    throughout the class.
    """

    def __init__(self, robot_name, create_node=False, upper_limit=0.035, lower_limit=0.01, des_pos_max=1, des_pos_min=0):
        if create_node:
            rospy.init_node('gripper_controller')
        assert (des_pos_max >= des_pos_min), 'gripper des_pos_max has to be >= des_pos_min'
        self.des_pos_max = des_pos_max
        self.des_pos_min = des_pos_min
        self._upper_limit = upper_limit
        self._lower_limit = lower_limit
        assert (self._upper_limit > self._lower_limit)
        if (not create_node):
            # When embedded in an existing node, drive the PWM update loop
            # from a 50 Hz timer.
            rospy.Timer(rospy.Duration(0.02), self.update_gripper_pwm)
        self._joint_lock = Lock()
        self._des_pos_lock = Lock()
        self._angles = {}
        self._velocities = {}
        self._pub_gripper_command = rospy.Publisher(f'/{robot_name}/commands/joint_single', JointSingleCommand, queue_size=3)
        rospy.Subscriber(f'/{robot_name}/joint_states', JointState, self._joint_callback)
        self._moving = False
        self._time_movement_started = None
        # Seconds of zero velocity required before the gripper counts as stopped.
        self._grace_period_until_can_be_marked_as_stopped = 0.1
        self._des_pos = None
        # Start fully open; goes through the property setter below.
        self.des_pos = self._upper_limit

    @property
    def des_pos(self):
        """Desired gripper position in metres."""
        return self._des_pos

    @des_pos.setter
    def des_pos(self, value):
        # Only a genuine change (re)starts a movement.
        if (value != self._des_pos):
            with self._des_pos_lock:
                self._moving = True
                self._time_movement_started = time.time()
                self._des_pos = value

    def get_gripper_pos(self):
        """Current left-finger joint angle from the latest joint_states message."""
        with self._joint_lock:
            return self._angles['left_finger']

    def _joint_callback(self, msg):
        # Cache joint positions and velocities by name under the joint lock.
        with self._joint_lock:
            for (name, position, velocity) in zip(msg.name, msg.position, msg.velocity):
                self._angles[name] = position
                self._velocities[name] = velocity

    def open(self):
        """Command the gripper fully open."""
        self.des_pos = self._upper_limit

    def close(self):
        """Command the gripper fully closed."""
        self.des_pos = self._lower_limit

    def set_continuous_position(self, target):
        """Command a normalized position in [des_pos_min, des_pos_max]."""
        target_clipped = np.clip(target, self.des_pos_min, self.des_pos_max)
        if (target != target_clipped):
            print('Warning target gripper pos outside of range', target)
        self.des_pos = self.denormalize(target_clipped)

    def get_continuous_position(self):
        """Current position mapped to the normalized command range."""
        gripper_pos = self.get_gripper_pos()
        return self.normalize(gripper_pos)

    def normalize(self, x):
        # Map metres [lower_limit, upper_limit] -> [des_pos_min, des_pos_max].
        return ((((self.des_pos_max - self.des_pos_min) * (x - self._lower_limit)) / (self._upper_limit - self._lower_limit)) + self.des_pos_min)

    def denormalize(self, x):
        # Map normalized [des_pos_min, des_pos_max] -> metres.
        return ((((x - self.des_pos_min) * (self._upper_limit - self._lower_limit)) / (self.des_pos_max - self.des_pos_min)) + self._lower_limit)

    def is_moving(self):
        """Whether a commanded movement is still in progress."""
        return self._moving

    def get_gripper_target_position(self):
        """Normalized desired position; asserts it lies in the command range."""
        des_pos_normed = self.normalize(self.des_pos)
        assert ((des_pos_normed <= self.des_pos_max) and (des_pos_normed >= self.des_pos_min))
        return des_pos_normed

    def update_gripper_pwm(self, event):
        """Timer callback: publish a proportional PWM command while moving."""
        with self._des_pos_lock:
            moving = self._moving
            des_pos = self.des_pos
        if moving:
            gripper_pos = self.get_gripper_pos()
            # Simple proportional control on position error (gain 300).
            ctrl = ((des_pos - gripper_pos) * 300)
            pwm = self.get_gripper_pwm(ctrl)
            gripper_command = JointSingleCommand('gripper', pwm)
            self._pub_gripper_command.publish(gripper_command)

    def get_gripper_pwm(self, pressure):
        """Convert a control effort in [-1, 1] to a signed PWM value, and mark
        the movement finished once the gripper has stopped moving."""
        pressure = np.clip(pressure, (- 1), 1)
        offset = 0
        if (pressure < 0):
            gripper_pwm = (- (offset + int(((- pressure) * 350))))
        if (pressure >= 0):
            gripper_pwm = (offset + int((pressure * 350)))
        time_since_movements_started = (time.time() - self._time_movement_started)
        # After a short grace period, zero measured velocity means we reached
        # the target (or stalled): stop commanding effort.
        if ((abs(self._velocities['gripper']) == 0.0) and (time_since_movements_started > self._grace_period_until_can_be_marked_as_stopped)):
            gripper_pwm = 0
            self._moving = False
            self._time_movement_started = None
        return gripper_pwm
# NOTE(review): this parametrize line is garbled in the dump — the leading
# `@pytest.mark` is missing and every URL argument has been stripped, leaving
# unbalanced quotes. The original cases cannot be reconstructed from here;
# restore them from the upstream pipx test suite before use.
.parametrize('package_spec_in,package_or_url_correct,valid_spec', [('pipx', 'pipx', True), ('PiPx_stylized.name', 'pipx-stylized-name', True), ('pipx==0.15.0', 'pipx==0.15.0', True), ('pipx>=0.15.0', 'pipx>=0.15.0', True), ('pipx<=0.15.0', 'pipx<=0.15.0', True), ('pipx;python_version>="3.6"', 'pipx', True), ('pipx==0.15.0;python_version>="3.6"', 'pipx==0.15.0', True), ('pipx[extra1]', 'pipx[extra1]', True), ('pipx[extra1, extra2]', 'pipx[extra1,extra2]', True), ('src/pipx', str(((_ROOT / 'src') / 'pipx').resolve()), True), ('git+ 'git+ True), ('+ ' git+ True), (' ' True), (' ' True), ('black ' True), ('black[extra] 'black[extra] True), ('my-project[cli] git+ssh:///my-company/myproject.git ; python_version<"3.8"', 'my-project[cli] git+ssh:///my-company/myproject.git', True), ('path/doesnt/exist', 'non-existent-path', False), (' 'URL-syntax-error-slash', False)])
def test_parse_specifier_for_metadata(package_spec_in, package_or_url_correct, valid_spec, monkeypatch, root):
    # Valid specs must normalize to the expected package-or-url string;
    # invalid specs must raise PipxError with the "Unable to parse" message.
    monkeypatch.chdir(root)
    if valid_spec:
        package_or_url = parse_specifier_for_metadata(package_spec_in)
        assert (package_or_url == package_or_url_correct)
    else:
        print(f'package_spec_in = {package_spec_in}')
        with pytest.raises(PipxError, match='^Unable to parse package spec'):
            package_or_url = parse_specifier_for_metadata(package_spec_in) |
def _get_version_tag(tag):
    # Translate a version-requirement tag like "qt>=5.15.2" or "python==3.10"
    # into a pytest.mark.skipif marker, or return None if the tag does not
    # match the expected <package><operator><version> pattern.
    version_re = re.compile('\n        (?P<package>qt|pyqt|pyqtwebengine|python)\n        (?P<operator>==|>=|!=|<)\n        (?P<version>\\d+\\.\\d+(\\.\\d+)?)\n    ', re.VERBOSE)
    match = version_re.fullmatch(tag)
    if (not match):
        return None
    package = match.group('package')
    version = match.group('version')
    if (package == 'qt'):
        op = match.group('operator')
        # Map each operator to "should this test be skipped?" under the
        # currently running (non-compiled) Qt version.
        do_skip = {'==': (not qtutils.version_check(version, exact=True, compiled=False)), '>=': (not qtutils.version_check(version, compiled=False)), '<': qtutils.version_check(version, compiled=False), '!=': qtutils.version_check(version, exact=True, compiled=False)}
        return pytest.mark.skipif(do_skip[op], reason=('Needs ' + tag))
    elif (package == 'pyqt'):
        # PyQt exposes its version as a hex int, hence as_hex=True.
        return pytest.mark.skipif((not _check_version(op_str=match.group('operator'), running_version=PYQT_VERSION, version_str=version, as_hex=True)), reason=('Needs ' + tag))
    elif (package == 'pyqtwebengine'):
        try:
            from qutebrowser.qt.webenginecore import PYQT_WEBENGINE_VERSION
        except ImportError:
            # Old PyQt didn't expose a separate WebEngine version; fall back
            # to the PyQt version itself.
            running_version = PYQT_VERSION
        else:
            running_version = PYQT_WEBENGINE_VERSION
        return pytest.mark.skipif((not _check_version(op_str=match.group('operator'), running_version=running_version, version_str=version, as_hex=True)), reason=('Needs ' + tag))
    elif (package == 'python'):
        running_version = sys.version_info
        return pytest.mark.skipif((not _check_version(op_str=match.group('operator'), running_version=running_version, version_str=version)), reason=('Needs ' + tag))
    else:
        # All regex alternatives are handled above; anything else is a bug.
        raise utils.Unreachable(package) |
def test_build_schema1():
    # A schema2 manifest with only local layers should convert into a signed
    # schema1 manifest via the builder, yielding the signed content type.
    manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
    assert (not manifest.has_remote_layer)
    # Retriever resolves the image config blob referenced by the manifest.
    retriever = ContentRetrieverForTesting({CONFIG_DIGEST: CONFIG_BYTES})
    builder = DockerSchema1ManifestBuilder('somenamespace', 'somename', 'sometag')
    manifest._populate_schema1_builder(builder, retriever)
    schema1 = builder.build(docker_v2_signing_key)
    assert (schema1.media_type == DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE) |
class DBAPICursor():
    """Abstract DB-API-style cursor interface.

    ``execute``/``executemany`` are intentional no-ops here; every other
    method must be provided by a concrete subclass and raises
    NotImplementedError until then.
    """

    def execute(self, statement, parameters):
        """Execute a single statement (no-op in the base class)."""
        pass

    def executemany(self, statement, parameters):
        """Execute a statement for each parameter set (no-op in the base class)."""
        pass

    def description(self):
        """Return column metadata for the last result set."""
        raise NotImplementedError

    async def prepare(self, context, clause=None):
        """Prepare a statement for later execution."""
        raise NotImplementedError

    async def async_execute(self, query, timeout, args, limit=0, many=False):
        """Execute a query asynchronously and return its rows."""
        raise NotImplementedError

    async def execute_baked(self, baked_query, timeout, args, one):
        """Execute a pre-compiled ("baked") query asynchronously."""
        raise NotImplementedError

    def get_statusmsg(self):
        """Return the status message of the last command."""
        raise NotImplementedError
class Channel():
    """One analog input channel of the oscilloscope; wraps per-channel
    ``:channelN:...`` SCPI commands via pymeasure Instrument.control
    descriptors."""
    # Mapping used by boolean controls (map_values=True).
    BOOLS = {True: 1, False: 0}
    bwlimit = Instrument.control('BWLimit?', 'BWLimit %d', ' A boolean parameter that toggles 25 MHz internal low-pass filter.', validator=strict_discrete_set, values=BOOLS, map_values=True)
    coupling = Instrument.control('COUPling?', 'COUPling %s', ' A string parameter that determines the coupling ("ac" or "dc").', validator=strict_discrete_set, values={'ac': 'AC', 'dc': 'DC'}, map_values=True)
    display = Instrument.control('DISPlay?', 'DISPlay %d', ' A boolean parameter that toggles the display.', validator=strict_discrete_set, values=BOOLS, map_values=True)
    invert = Instrument.control('INVert?', 'INVert %d', ' A boolean parameter that toggles the inversion of the input signal.', validator=strict_discrete_set, values=BOOLS, map_values=True)
    # get_process strips the surrounding quotes the instrument returns.
    label = Instrument.control('LABel?', 'LABel "%s"', ' A string to label the channel. Labels with more than 10 characters are truncated to 10\n characters. May contain commonly used ASCII characters. Lower case characters are converted\n to upper case.', get_process=(lambda v: str(v[1:(- 1)])))
    offset = Instrument.control('OFFSet?', 'OFFSet %f', ' A float parameter to set value that is represented at center of screen in\n Volts. The range of legal values varies depending on range and scale. If the specified\n value is outside of the legal range, the offset value is automatically set to the nearest\n legal value.\n ')
    probe_attenuation = Instrument.control('PROBe?', 'PROBe %f', ' A float parameter that specifies the probe attenuation. The probe attenuation\n may be from 0.1 to 10000.', validator=strict_range, values=[0.1, 10000])
    range = Instrument.control('RANGe?', 'RANGe %f', ' A float parameter that specifies the full-scale vertical axis in Volts.\n When using 1:1 probe attenuation, legal values for the range are from 8 mV to 40V.')
    scale = Instrument.control('SCALe?', 'SCALe %f', 'A float parameter that specifies the vertical scale, or units per division, in Volts.')

    def __init__(self, instrument, number):
        # Parent instrument handles the actual I/O; `number` selects the channel.
        self.instrument = instrument
        self.number = number

    def values(self, command, **kwargs):
        """Query values, prefixing the command with this channel's scope."""
        return self.instrument.values((':channel%d:%s' % (self.number, command)), **kwargs)

    def ask(self, command):
        # NOTE(review): the query's reply is discarded here (no return) —
        # confirm whether callers rely on that.
        self.instrument.ask((':channel%d:%s' % (self.number, command)))

    def write(self, command):
        """Write a command prefixed with this channel's scope."""
        self.instrument.write((':channel%d:%s' % (self.number, command)))

    def setup(self, bwlimit=None, coupling=None, display=None, invert=None, label=None, offset=None, probe_attenuation=None, vertical_range=None, scale=None):
        """Apply any subset of channel settings; None leaves a setting unchanged.
        If both vertical_range and scale are given, scale wins (set last)."""
        if ((vertical_range is not None) and (scale is not None)):
            log.warning('Both "vertical_range" and "scale" are specified. Specified "scale" has priority.')
        if (probe_attenuation is not None):
            self.probe_attenuation = probe_attenuation
        if (bwlimit is not None):
            self.bwlimit = bwlimit
        if (coupling is not None):
            self.coupling = coupling
        if (display is not None):
            self.display = display
        if (invert is not None):
            self.invert = invert
        if (label is not None):
            self.label = label
        if (offset is not None):
            self.offset = offset
        if (vertical_range is not None):
            self.range = vertical_range
        if (scale is not None):
            self.scale = scale

    def current_configuration(self):
        """Read back the full channel configuration as a typed dict."""
        ch_setup_raw = self.instrument.ask((':channel%d?' % self.number)).strip('\n')
        # Reply begins with a header; fields after position 7 are
        # ';'-separated "KEY value" pairs.
        ch_setup_splitted = ch_setup_raw[7:].split(';')
        ch_setup_dict = dict(map((lambda v: v.split(' ')), ch_setup_splitted))
        # Character 5 of the raw header carries the channel number —
        # presumably the reply is ':CHANn...'; TODO confirm against firmware.
        ch_setup_dict['CHAN'] = ch_setup_raw[5]
        # Convert each known key to its proper Python type.
        to_str = ['COUP', 'IMP', 'UNIT', 'STYP']
        to_bool = ['DISP', 'BWL', 'INV']
        to_float = ['OFFS', 'PROB', 'PROB:SKEW', 'RANG']
        to_int = ['CHAN']
        for key in ch_setup_dict:
            if (key in to_str):
                ch_setup_dict[key] = str(ch_setup_dict[key])
            elif (key in to_bool):
                ch_setup_dict[key] = (ch_setup_dict[key] == '1')
            elif (key in to_float):
                ch_setup_dict[key] = float(ch_setup_dict[key])
            elif (key in to_int):
                ch_setup_dict[key] = int(ch_setup_dict[key])
        return ch_setup_dict |
def test_dsl_async_cmd_run_has_list_input_save_dev_null():
    """Two async cmds with stdout/stderr routed to /dev/null save no cmdOut."""
    first = get_cmd('echo one', 'tests\\testfiles\\cmds\\echo.bat one')
    second = get_cmd('echo two three', 'tests\\testfiles\\cmds\\echo.bat "two three"')
    ctx = Context({'cmds': {'run': [first, second], 'stdout': '/dev/null', 'stderr': '/dev/null'}})
    AsyncCmdStep('blah', ctx).run_step()
    # Output was discarded, so nothing should have been written to context.
    assert ('cmdOut' not in ctx)
@require_torch
@require_vision
class ViltImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ViltImageProcessor over PIL, numpy and torch inputs.

    NOTE(review): the dump showed bare ``_torch`` / ``_vision`` lines before
    the class and no decorator on ``image_processor_dict``; these are restored
    as transformers' ``@require_torch`` / ``@require_vision`` skip decorators
    and a ``@property`` (the attribute is used as ``self.image_processor_dict``
    below). Confirm both names are imported from transformers.testing_utils.
    """
    image_processing_class = (ViltImageProcessor if is_vision_available() else None)

    def setUp(self):
        self.image_processor_tester = ViltImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose all configuration attributes.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'size_divisor'))

    def test_image_processor_from_dict_with_kwargs(self):
        # from_dict kwargs should override the serialized configuration.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Single image -> batch of 1; list of images -> full batch.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width))

    def test_call_numpy(self):
        # Same shape checks with numpy array inputs.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width))

    def test_call_pytorch(self):
        # Same shape checks with torch tensor inputs.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width))

    def test_equivalence_pad_and_create_pixel_mask(self):
        # pad_and_create_pixel_mask must match calling a processor configured
        # to only pad (resize/normalize/rescale disabled).
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors='pt')
        encoded_images = image_processing_2(image_inputs, return_tensors='pt')
        self.assertTrue(torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=0.0001))
        self.assertTrue(torch.allclose(encoded_images_with_method['pixel_mask'], encoded_images['pixel_mask'], atol=0.0001))
def test_order_vertex():
    """sort_vertex must return the canonical vertex order for every input
    permutation, and reject empty/mismatched coordinate lists."""
    xs = [20, 20, 120, 120]
    ys = [20, 40, 40, 20]
    want_x = [20, 120, 120, 20]
    want_y = [20, 20, 40, 40]
    # Empty coordinate lists are rejected up front.
    with pytest.raises(AssertionError):
        sort_vertex([], ys)
    with pytest.raises(AssertionError):
        sort_vertex(xs, [])
    # Every permutation of the four corners must sort to the same order.
    for order in set(permutations([0, 1, 2, 3])):
        shuffled_x = [xs[k] for k in order]
        shuffled_y = [ys[k] for k in order]
        got_x, got_y = sort_vertex(shuffled_x, shuffled_y)
        assert np.allclose(got_x, want_x)
        assert np.allclose(got_y, want_y)
def locate_cuda():
    """Locate the CUDA installation on this system.

    Honours ``$CUDAHOME`` if set; otherwise searches ``$PATH`` (plus the
    conventional ``/usr/local/cuda/bin``) for the ``nvcc`` binary.

    Returns:
        dict with keys ``'home'``, ``'nvcc'``, ``'include'``, ``'lib64'``.

    Raises:
        EnvironmentError: if nvcc cannot be found or any expected path
            does not exist.
    """
    if ('CUDAHOME' in os.environ):
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', ((os.environ['PATH'] + os.pathsep) + default_path))
        if (nvcc is None):
            raise EnvironmentError('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home': home, 'nvcc': nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, 'lib64')}
    # Cleanup: the original tried dict.iteritems() (Python 2) inside a
    # try/bare-except that fell back to .items(); on Python 3 the same
    # validation is simply one .items() loop, with no swallowed exceptions.
    for (k, v) in cudaconfig.items():
        if (not os.path.exists(v)):
            raise EnvironmentError(('The CUDA %s path could not be located in %s' % (k, v)))
    return cudaconfig
def get_stale_team(stale_timespan):
    # Pick one random TeamSync row whose last sync is older than
    # `stale_timespan` (or has never run), returning it joined with its Team;
    # returns None when nothing is stale.
    stale_at = (datetime.now() - stale_timespan)
    try:
        # Candidate ids: last_updated older than the cutoff, or NULL
        # (`>> None` is peewee's IS NULL). Capped at 500 to bound the
        # random-order step below.
        candidates = TeamSync.select(TeamSync.id).where(((TeamSync.last_updated <= stale_at) | (TeamSync.last_updated >> None))).limit(500).alias('candidates')
        # Randomize so concurrent workers don't all grab the same row.
        found = TeamSync.select(candidates.c.id).from_(candidates).order_by(db_random_func()).get()
        if (found is None):
            return
        # Re-fetch the chosen row with its Team eagerly joined.
        return TeamSync.select(TeamSync, Team).join(Team).where((TeamSync.id == found.id)).get()
    except TeamSync.DoesNotExist:
        return None |
def get_cadidate_embeddings(json_list, document_embeddings):
    # For each (document, embedding) pair, extract candidate keyphrases and
    # build a max-pooled embedding per candidate from its token embeddings.
    # NOTE(review): "cadidate" is a typo for "candidate", kept because the
    # name is part of the public interface.
    document_feats = []
    for (document, document_emb) in tqdm(zip(json_list, document_embeddings), total=len(json_list)):
        # The two inputs must be aligned document-by-document.
        assert (document['document_id'] == document_emb['document_id'])
        sentence = flat_list(document['tokens'])
        sentence_pos = flat_list(document['tokens_pos'])
        sentence_emb = document_emb['tokens']
        tokens_tagged = list(zip(sentence, sentence_pos))
        # Retag stopwords as 'IN' so the candidate extractor skips them.
        for (i, token) in enumerate(sentence):
            if (token.lower() in stopword_dict):
                tokens_tagged[i] = (token, 'IN')
        candidate_phrase = extract_candidates(tokens_tagged)
        tmp_embeddings = []
        tmp_candidate_phrase = []
        for (tmp, (i, j)) in candidate_phrase:
            # Skip empty/inverted spans.
            if (j <= i):
                continue
            # Token embeddings may be truncated (e.g. model max length);
            # candidates beyond the embedded prefix cannot be represented.
            if (j >= len(sentence_emb)):
                break
            # Candidate embedding = element-wise max over its token vectors.
            tmp_embeddings.append(np.max(np.array(sentence_emb[i:j]), axis=0))
            tmp_candidate_phrase.append(tmp)
        candidate_phrases_embeddings = tmp_embeddings
        candidate_phrases = tmp_candidate_phrase
        document_feats.append({'document_id': document['document_id'], 'tokens': document['tokens'], 'candidate_phrases': candidate_phrases, 'candidate_phrases_embeddings': candidate_phrases_embeddings, 'sentence_embeddings': document_emb['doc_cls'], 'keyphrases': document['keyphrases']})
    return document_feats |
class TrezorClientBase(HardwareClientBase, Logger):
    """Wrapper around trezorlib's TrezorClient used by the hardware-wallet plugin.

    Provides the UI callbacks trezorlib expects (button_request / get_pin /
    get_passphrase), a `run_flow(...)` context manager that maps trezorlib
    exceptions onto wallet-level ones, and last-use bookkeeping for session
    timeouts.

    NOTE(review): the `@_in_hwd_thread` decorators below were reconstructed
    from orphaned bare `_in_hwd_thread` statements in the original text; the
    name looks truncated (likely `runs_in_hwd_thread`) — confirm against the
    module that defines it.
    """

    def __init__(self, transport, handler, plugin):
        HardwareClientBase.__init__(self, plugin=plugin)
        if plugin.is_outdated_fw_ignored():
            # Disable trezorlib's outdated-firmware check for this session.
            TrezorClient.is_outdated = (lambda *args, **kwargs: False)
        self.client = TrezorClient(transport, ui=self)
        self.device = plugin.device
        self.handler = handler
        Logger.__init__(self)
        self.msg = None
        self.creating_wallet = False
        self.in_flow = False
        self.used()

    def run_flow(self, message=None, creating_wallet=False):
        """Begin a device-interaction flow; intended as `with self.run_flow(...)`."""
        if self.in_flow:
            raise RuntimeError('Overlapping call to run_flow')
        self.in_flow = True
        self.msg = message
        self.creating_wallet = creating_wallet
        self.prevent_timeouts()
        return self

    def end_flow(self):
        """Reset flow state and restart the idle-timeout clock."""
        self.in_flow = False
        self.msg = None
        self.creating_wallet = False
        self.handler.finished()
        self.used()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, e, traceback):
        # Translate trezorlib exceptions into wallet-level exceptions.
        self.end_flow()
        if (e is not None):
            if isinstance(e, Cancelled):
                raise UserCancelled() from e
            elif isinstance(e, TrezorFailure):
                raise RuntimeError(str(e)) from e
            elif isinstance(e, OutdatedFirmwareError):
                raise OutdatedHwFirmwareException(e) from e
            else:
                return False
        return True

    @property
    def features(self):
        # Restored as a property: the rest of this class reads
        # `self.features.label`, `self.features.device_id`, etc. as attributes,
        # which would fail on a plain bound method.
        return self.client.features

    def __str__(self):
        return ('%s/%s' % (self.label(), self.features.device_id))

    def label(self):
        return self.features.label

    def get_soft_device_id(self):
        return self.features.device_id

    def is_initialized(self):
        return self.features.initialized

    def is_pairable(self):
        return (not self.features.bootloader_mode)

    @_in_hwd_thread
    def has_usable_connection_with_device(self):
        if self.in_flow:
            # A flow is in progress, so the connection is known to be live.
            return True
        try:
            self.client.init_device()
        except BaseException:
            return False
        return True

    def used(self):
        self.last_operation = time.time()

    def prevent_timeouts(self):
        # Sentinel: "never times out" while a flow is active.
        self.last_operation = float('inf')

    @_in_hwd_thread
    def timeout(self, cutoff):
        """Clear the device session if the client has been idle past `cutoff`."""
        if (self.last_operation < cutoff):
            self.logger.info('timed out')
            self.clear_session()

    def i4b(self, x):
        # 4-byte big-endian encoding (fingerprints / child numbers).
        return pack('>I', x)

    @_in_hwd_thread
    def get_xpub(self, bip32_path, xtype, creating=False):
        address_n = parse_path(bip32_path)
        with self.run_flow(creating_wallet=creating):
            node = trezorlib.btc.get_public_node(self.client, address_n).node
            return BIP32Node(xtype=xtype, eckey=ecc.ECPubkey(node.public_key), chaincode=node.chain_code, depth=node.depth, fingerprint=self.i4b(node.fingerprint), child_number=self.i4b(node.child_num)).to_xpub()

    @_in_hwd_thread
    def toggle_passphrase(self):
        if self.features.passphrase_protection:
            msg = _('Confirm on your {} device to disable passphrases')
        else:
            msg = _('Confirm on your {} device to enable passphrases')
        enabled = (not self.features.passphrase_protection)
        with self.run_flow(msg):
            trezorlib.device.apply_settings(self.client, use_passphrase=enabled)

    @_in_hwd_thread
    def change_label(self, label):
        with self.run_flow(_('Confirm the new label on your {} device')):
            trezorlib.device.apply_settings(self.client, label=label)

    @_in_hwd_thread
    def change_homescreen(self, homescreen):
        with self.run_flow(_('Confirm on your {} device to change your home screen')):
            trezorlib.device.apply_settings(self.client, homescreen=homescreen)

    @_in_hwd_thread
    def set_safety_checks(self, safety_checks: SafetyCheckLevel):
        with self.run_flow(_('Confirm the new safety checks on your {} device')):
            trezorlib.device.apply_settings(self.client, safety_checks=safety_checks)

    @_in_hwd_thread
    def set_pin(self, remove):
        if remove:
            msg = _('Confirm on your {} device to disable PIN protection')
        elif self.features.pin_protection:
            msg = _('Confirm on your {} device to change your PIN')
        else:
            msg = _('Confirm on your {} device to set a PIN')
        with self.run_flow(msg):
            trezorlib.device.change_pin(self.client, remove)

    @_in_hwd_thread
    def clear_session(self):
        """Forget PIN/passphrase session state on the device (best-effort)."""
        self.logger.info(f'clear session: {self}')
        self.prevent_timeouts()
        try:
            self.client.clear_session()
        except BaseException as e:
            # Best-effort: device may already be unplugged.
            self.logger.info(f'clear_session: ignoring error {e}')

    @_in_hwd_thread
    def close(self):
        self.logger.info('closing client')
        self.clear_session()

    @_in_hwd_thread
    def is_uptodate(self):
        if self.client.is_outdated():
            return False
        return (self.client.version >= self.plugin.minimum_firmware)

    def get_trezor_model(self):
        return self.features.model

    def device_model_name(self):
        """Human-readable model name, or None for unknown models."""
        model = self.get_trezor_model()
        if (model == '1'):
            return 'Trezor One'
        elif (model == 'T'):
            return 'Trezor T'
        return None

    @_in_hwd_thread
    def show_address(self, address_str, script_type, multisig=None):
        coin_name = self.plugin.get_coin_name()
        address_n = parse_path(address_str)
        with self.run_flow():
            return trezorlib.btc.get_address(self.client, coin_name, address_n, show_display=True, script_type=script_type, multisig=multisig)

    @_in_hwd_thread
    def sign_message(self, address_str, message, *, script_type):
        coin_name = self.plugin.get_coin_name()
        address_n = parse_path(address_str)
        with self.run_flow():
            return trezorlib.btc.sign_message(self.client, coin_name, address_n, message, script_type=script_type, no_script_type=True)

    @_in_hwd_thread
    def recover_device(self, recovery_type, *args, **kwargs):
        input_callback = self.mnemonic_callback(recovery_type)
        with self.run_flow():
            return trezorlib.device.recover(self.client, *args, input_callback=input_callback, type=recovery_type, **kwargs)

    @_in_hwd_thread
    def sign_tx(self, *args, **kwargs):
        with self.run_flow():
            return trezorlib.btc.sign_tx(self.client, *args, **kwargs)

    @_in_hwd_thread
    def reset_device(self, *args, **kwargs):
        with self.run_flow():
            return trezorlib.device.reset(self.client, *args, **kwargs)

    @_in_hwd_thread
    def wipe_device(self, *args, **kwargs):
        with self.run_flow():
            return trezorlib.device.wipe(self.client, *args, **kwargs)

    def button_request(self, br):
        # trezorlib UI callback: show a non-blocking message with a cancel hook.
        message = (self.msg or MESSAGES.get(br.code) or MESSAGES['default'])
        self.handler.show_message(message.format(self.device), self.client.cancel)

    def get_pin(self, code=None):
        """trezorlib UI callback: prompt the user for a PIN (max 9 digits)."""
        show_strength = True
        if (code == 2):
            msg = _('Enter a new PIN for your {}:')
        elif (code == 3):
            msg = _('Re-enter the new PIN for your {}.\n\nNOTE: the positions of the numbers have changed!')
        else:
            msg = _('Enter your current {} PIN:')
            show_strength = False
        pin = self.handler.get_pin(msg.format(self.device), show_strength=show_strength)
        if (not pin):
            raise Cancelled
        if (len(pin) > 9):
            self.handler.show_error(_('The PIN cannot be longer than 9 characters.'))
            raise Cancelled
        return pin

    def get_passphrase(self, available_on_device):
        """trezorlib UI callback: prompt for a passphrase (max 50 chars)."""
        if self.creating_wallet:
            msg = _('Enter a passphrase to generate this wallet. Each time you use this wallet your {} will prompt you for the passphrase. If you forget the passphrase you cannot access the QTUMs in the wallet.').format(self.device)
        else:
            msg = _('Enter the passphrase to unlock this wallet:')
        self.handler.passphrase_on_device = available_on_device
        passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
        if (passphrase is PASSPHRASE_ON_DEVICE):
            return passphrase
        if (passphrase is None):
            raise Cancelled
        passphrase = bip39_normalize_passphrase(passphrase)
        length = len(passphrase)
        if (length > 50):
            self.handler.show_error(_('Too long passphrase ({} > 50 chars).').format(length))
            raise Cancelled
        return passphrase

    def _matrix_char(self, matrix_type):
        # Recovery matrix input; 'x' is the UI's cancel sentinel.
        num = (9 if (matrix_type == WordRequestType.Matrix9) else 6)
        char = self.handler.get_matrix(num)
        if (char == 'x'):
            raise Cancelled
        return char

    def mnemonic_callback(self, recovery_type):
        """Return the input callback trezorlib should use for seed recovery."""
        if (recovery_type is None):
            return None
        if (recovery_type == RecoveryDeviceType.Matrix):
            return self._matrix_char
        step = 0

        def word_callback(_ignored):
            nonlocal step
            step += 1
            msg = _('Step {}/24. Enter seed word as explained on your {}:').format(step, self.device)
            word = self.handler.get_word(msg)
            if (not word):
                raise Cancelled
            return word
        return word_callback
class BasicBlock(nn.Module):
    """Residual block with a squeeze-and-excitation channel gate."""

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the shape changes, then a 1x1 projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes),
            )
        # Squeeze-and-excitation: 16x channel reduction, implemented as 1x1 convs.
        self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze (global average pool) then excite (per-channel sigmoid gate).
        gate = F.avg_pool2d(out, out.size(2))
        gate = F.relu(self.fc1(gate))
        gate = F.sigmoid(self.fc2(gate))
        out = out * gate
        out = out + self.shortcut(x)
        return F.relu(out)
def test_upload_mixin_with_filepath(gl):
    """upload() with a filepath reads the file and POSTs it to the upload path."""

    class TestClass(UploadMixin, FakeObject):
        _upload_path = '/tests/{id}/uploads'
    # NOTE(review): this URL literal was truncated in the original file
    # (`url = '` — a syntax error); reconstructed from `_upload_path` and the
    # conventional localhost test server — confirm against the fixtures.
    url = 'http://localhost/api/v4/tests/42/uploads'
    responses.add(method=responses.POST, url=url, json={'id': 42, 'file_name': 'test.txt', 'file_content': 'testing contents'}, status=200, match=[responses.matchers.query_param_matcher({})])
    mgr = FakeManager(gl)
    obj = TestClass(mgr, {'id': 42})
    with patch('builtins.open', mock_open(read_data='raw\nfile\ndata')):
        res_only_path = obj.upload('test.txt', None, '/filepath')
    assert (obj._get_upload_path() == '/tests/42/uploads')
    assert isinstance(res_only_path, dict)
    assert (res_only_path['file_name'] == 'test.txt')
    assert (res_only_path['file_content'] == 'testing contents')
    assert (responses.assert_call_count(url, 1) is True)
class PresetAM2RHints(PresetTab, Ui_PresetAM2RHints):
    """Preset editor tab for AM2R hint settings (artifact and Ice Beam hints)."""

    def __init__(self, editor: PresetEditor, game_description: GameDescription, window_manager: WindowManager):
        super().__init__(editor, game_description, window_manager)
        self.setupUi(self)
        self.hint_layout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop)
        # Combo entries mirror the ItemHintMode enum, in declaration order.
        for (i, item_hint_mode) in enumerate(ItemHintMode):
            self.hint_artifact_combo.setItemData(i, item_hint_mode)
            self.ice_beam_hint_combo.setItemData(i, item_hint_mode)
        self.hint_artifact_combo.currentIndexChanged.connect(self._on_art_combo_changed)
        self.ice_beam_hint_combo.currentIndexChanged.connect(self._on_ibeam_combo_changed)

    @classmethod
    def tab_title(cls) -> str:
        # @classmethod restored: the method takes `cls`, not `self`.
        return 'Hints'

    @classmethod
    def uses_patches_tab(cls) -> bool:
        # @classmethod restored: the method takes `cls`, not `self`.
        return False

    def _on_art_combo_changed(self, new_index: int):
        """Persist the selected artifact hint mode into the preset."""
        with self._editor as editor:
            editor.set_configuration_field('hints', dataclasses.replace(editor.configuration.hints, artifacts=self.hint_artifact_combo.currentData()))

    def _on_ibeam_combo_changed(self, new_index: int):
        """Persist the selected Ice Beam hint mode into the preset."""
        with self._editor as editor:
            editor.set_configuration_field('hints', dataclasses.replace(editor.configuration.hints, ice_beam=self.ice_beam_hint_combo.currentData()))

    def on_preset_changed(self, preset: Preset):
        """Sync both combo boxes from the preset's hint configuration."""
        set_combo_with_value(self.hint_artifact_combo, preset.configuration.hints.artifacts)
        set_combo_with_value(self.ice_beam_hint_combo, preset.configuration.hints.ice_beam)
class TestRiskParityBoxesFactory(TestCase):
    """Tests RiskParityBoxesFactory against a mocked Bloomberg data provider."""

    @classmethod
    def setUpClass(cls):
        # @classmethod restored: unittest invokes setUpClass on the class, so
        # without the decorator the fixture fails before any test runs.
        cls.start_date = str_to_date('2022-10-01')
        cls.end_date = str_to_date('2022-11-01')
        cls.frequency = Frequency.DAILY
        datetime_index = pd.DatetimeIndex(['2022-10-02', '2022-10-03', '2022-10-04', '2022-10-05', '2022-10-06', '2022-10-09', '2022-10-10', '2022-10-11', '2022-10-12', '2022-10-13', '2022-10-16', '2022-10-17', '2022-10-18', '2022-10-19', '2022-10-20', '2022-10-23', '2022-10-24', '2022-10-25', '2022-10-26', '2022-10-27', '2022-10-30', '2022-10-31', '2022-11-01'])
        # Data provider is mocked so tests never hit Bloomberg.
        bbg_data_provider = Mock(spec=BloombergDataProvider)
        all_tickers_str = ['BCIT3T Index', 'IEF US Equity', 'LQD US Equity', 'MSBICBGU Index', 'MXUS Index', 'SPGSCITR Index', 'XAU Curncy']
        all_tickers = BloombergTicker.from_string(all_tickers_str)
        assets_prices_df = PricesDataFrame(index=datetime_index, columns=all_tickers, data=[[263.7628, 106.24, 121.02, 321.8249, 2409.48, 2295.6, 1271.13], [263.9803, 106.39, 121.29, 322.0949, 2414.41, 2294.91, 1271.66], [264.164, 106.36, 121.22, 322.3203, 2417.31, 2294.28, 1274.85], [264.0932, 106.25, 121.05, 322.4172, 2430.8, 2323.34, 1268.22], [263.9816, 106.12, 120.95, 322.1411, 2428.16, 2282.24, 1276.68], [263.9816, 106.24, 121.05, None, 2423.41, 2284.78, 1284.05], [264.4529, 106.28, 121.13, 322.3113, 2428.73, 2318.99, 1288.03], [264.5108, 106.4, 121.07, 322.3553, 2433.09, 2324.63, 1291.72], [264.8223, 106.5, 121.1, 322.7489, 2428.89, 2314.78, 1293.72], [264.9401, 106.86, 121.58, 322.872, 2430.63, 2342.19, 1303.82], [264.2089, 106.68, 121.41, 322.8467, 2434.66, 2353.2, 1295.79], [264.0592, 106.64, 121.39, 323.1079, 2436.35, 2345.04, 1285.12], [263.937, 106.37, 121.21, 323.2238, 2438.08, 2345.57, 1281.08], [264.0463, 106.48, 121.39, 323.5498, 2439.31, 2332.31, 1290.13], [263.8424, 106.04, 121.06, 322.9874, 2451.7, 2340.26, 1280.47], [263.8961, 106.14, 121.18, 322.7436, 2441.71, 2343.72, 1282.27], [263.7129, 105.82, 120.88, 322.3214, 2445.61, 2366.0, 1276.58], [263.3216, 105.65, 120.56, 322.4332, 2434.13, 2364.23, 1277.53], [263.3638, 105.51, 120.55, 322.1635, 2438.07, 2376.52, 1266.99], [263.8662, 105.85, 120.91, 322.3655, 2457.45, 2396.93, 1273.35], [264.4531, 106.23, 121.31, 322.971, 2449.2, 2407.43, 1276.29], [264.469, 106.16, 121.14, 323.0688, 2452.15, 2415.28, 1271.45], [264.4727, 106.06, 121.01, 323.1553, 2455.7, 2415.48, 1274.66]])
        bbg_data_provider.get_price.return_value = assets_prices_df
        cls.bbg_data_provider = bbg_data_provider

    def setUp(self):
        self.risk_parity_boxes_factory = RiskParityBoxesFactory(self.bbg_data_provider)

    def test_make_parity_boxes(self):
        """Each growth/inflation box series matches the precomputed returns."""
        abs_tolerance = 0.0005
        actual_boxes = self.risk_parity_boxes_factory.make_parity_boxes(self.start_date, self.end_date)
        # Returns start one day after the first price observation.
        datetime_index = pd.DatetimeIndex(['2022-10-03', '2022-10-04', '2022-10-05', '2022-10-06', '2022-10-09', '2022-10-10', '2022-10-11', '2022-10-12', '2022-10-13', '2022-10-16', '2022-10-17', '2022-10-18', '2022-10-19', '2022-10-20', '2022-10-23', '2022-10-24', '2022-10-25', '2022-10-26', '2022-10-27', '2022-10-30', '2022-10-31', '2022-11-01'])
        expected_series = SimpleReturnsSeries(index=datetime_index, data=[0., 0., 0., (- 0.), 0.000934, 0., 0., 0., 0., (- 0.), (- 0.), (- 0.), 0.0011223, (- 0.), (- 0.), (- 0.), 0., (- 0.), 0., 0., 7.02049e-05, 0.])
        actual_series = actual_boxes.get_series(growth=ChangeDirection.RISING, inflation=ChangeDirection.RISING)
        assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)
        expected_series = SimpleReturnsSeries(index=datetime_index, data=[0., 0., 0., (- 0.), 0.0, 0., 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., 0., (- 0.), (- 0.), (- 0.), 0., 0., 0., (- 0.), (- 0.)])
        actual_series = actual_boxes.get_series(growth=ChangeDirection.RISING, inflation=ChangeDirection.FALLING)
        assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)
        expected_series = SimpleReturnsSeries(index=datetime_index, data=[0., 0., (- 0.), 0., 0.00103, 0., 0., 0., 0., (- 0.), (- 0.), (- 0.), 0., (- 0.), 0., (- 0.), (- 0.), (- 0.), 0., 0., (- 0.), 0.])
        actual_series = actual_boxes.get_series(growth=ChangeDirection.FALLING, inflation=ChangeDirection.RISING)
        assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)
        expected_series = SimpleReturnsSeries(index=datetime_index, data=[0., 0., (- 0.), 0., 0.002442, 0., 0., 0., 0., (- 0.), (- 0.), (- 0.), 0., (- 0.), 0., (- 0.), (- 0.), (- 0.), 0., 0., (- 0.), 4.935567e-05])
        actual_series = actual_boxes.get_series(growth=ChangeDirection.FALLING, inflation=ChangeDirection.FALLING)
        assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)
@pytest.fixture(scope='function')
def simulation_evaluator() -> ClosedLoopEvaluator:
    """Build a fresh ClosedLoopEvaluator with displacement, reference-trajectory
    and collision metrics, each guarded by a RangeValidator.

    NOTE(review): the decorator was reconstructed from an orphaned
    `(scope='function')` line — confirm it was `@pytest.fixture`.
    """
    metrics = [DisplacementErrorL2Metric(), DistanceToRefTrajectoryMetric(), CollisionFrontMetric(), CollisionRearMetric(), CollisionSideMetric()]
    validators = [RangeValidator('displacement_error_l2_validator', DisplacementErrorL2Metric, max_value=30), RangeValidator('distance_ref_trajectory_validator', DistanceToRefTrajectoryMetric, max_value=4), RangeValidator('collision_front_validator', CollisionFrontMetric, max_value=0), RangeValidator('collision_rear_validator', CollisionRearMetric, max_value=0), RangeValidator('collision_side_validator', CollisionSideMetric, max_value=0)]
    intervention_validators = ['displacement_error_l2_validator', 'distance_ref_trajectory_validator', 'collision_front_validator', 'collision_rear_validator', 'collision_side_validator']
    return ClosedLoopEvaluator(EvaluationPlan(metrics=metrics, validators=validators, composite_metrics=[], intervention_validators=intervention_validators))
class TearDownConvenience(object):
    """Holds a SetupStack and remembers whether this instance created it.

    `tear_down()` is only legal when the stack was created (and is therefore
    owned) by this instance.
    """

    def __init__(self, setup_stack=None):
        # Ownership: we own the stack only if we had to create it ourselves.
        self._own_setup_stack = setup_stack is None
        self._setup_stack = SetupStack() if setup_stack is None else setup_stack

    def tear_down(self):
        # Refuse to tear down a stack somebody else owns.
        assert self._own_setup_stack
        self._setup_stack.tear_down()
class CAM(Net):
    """Class-activation-map head over Net's backbone.

    Runs the four backbone stages, scores features with the classifier
    weights as a convolution, and fuses the two batch entries (original and
    horizontally flipped views, flipped back before summing).
    """

    def __init__(self):
        super(CAM, self).__init__()

    def forward(self, x):
        for stage in (self.stage1, self.stage2, self.stage3, self.stage4):
            x = stage(x)
        cam = F.relu(F.conv2d(x, self.classifier.weight))
        # Entry 0 is the original view; entry 1 is mirrored, so un-flip it.
        return cam[0] + cam[1].flip(-1)
def _get_in_vals(binst: BloqInstance, reg: Register, soq_assign: Dict[Soquet, ClassicalValT]) -> ClassicalValT:
    """Collect the classical value(s) assigned to `reg` on `binst`.

    A scalar register returns its single assigned value; a shaped register is
    gathered into an unsigned-integer ndarray just wide enough for
    `reg.bitsize`.
    """
    if not reg.shape:
        return soq_assign[Soquet(binst, reg)]

    # Pick the smallest unsigned dtype that can hold reg.bitsize bits.
    for width, dtype in ((8, np.uint8), (16, np.uint16), (32, np.uint32), (64, np.uint64)):
        if reg.bitsize <= width:
            break
    else:
        raise NotImplementedError(
            'We currently only support up to 64-bit multi-dimensional registers in classical simulation.'
        )

    vals = np.empty(reg.shape, dtype=dtype)
    for idx in reg.all_idxs():
        vals[idx] = soq_assign[Soquet(binst, reg, idx=idx)]
    return vals
@pytest.mark.parametrize('metric', ['euclidean', 'minkowski', 'cityblock', 'chebyshev', 'haversine'])
def test_metric(metric):
    """Identity-kernel weights for every supported distance metric.

    NOTE(review): the decorator was reconstructed from an orphaned
    `.parametrize(...)` line — confirm it was `@pytest.mark.parametrize`.
    """
    # Haversine needs geographic (lon/lat) coordinates.
    data = (grocs.to_crs(4326) if (metric == 'haversine') else grocs)
    if ((not HAS_SKLEARN) and (metric in ['chebyshev', 'haversine'])):
        pytest.skip('metric not supported by scipy')
    (head, tail, weight) = _kernel(data, metric=metric, kernel='identity', p=1.5)
    # Fully connected graph minus self-loops: n * (n - 1) edges.
    assert (head.shape[0] == (len(data) * (len(data) - 1)))
    assert (tail.shape == head.shape)
    assert (weight.shape == head.shape)
    np.testing.assert_array_equal(pd.unique(head), data.index)
    if (metric == 'euclidean'):
        assert (weight.mean() == pytest.approx(39758.007362))
        assert (weight.max() == pytest.approx(127937.75272))
    elif (metric == 'minkowski'):
        assert (weight.mean() == pytest.approx(42288.642129))
        assert (weight.max() == pytest.approx(140674.095752))
    elif (metric == 'cityblock'):
        assert (weight.mean() == pytest.approx(49424.576155))
        assert (weight.max() == pytest.approx(173379.431622))
    elif (metric == 'chebyshev'):
        assert (weight.mean() == pytest.approx(36590.352895))
        assert (weight.max() == pytest.approx(123955.14249))
    else:
        # haversine (distances in radians-derived units, hence small values)
        assert (weight.mean() == pytest.approx(0.115835))
        assert (weight.max() == pytest.approx(0.371465))
class EmbedSend(discord.ui.Button):
    """Green button that posts the parent EmbedBuilder's embed to a fixed channel."""

    # Populated by discord.ui when the button is attached to its view.
    view: EmbedBuilder

    def __init__(self, channel: discord.TextChannel):
        # Target channel is captured at construction; the label shows its name.
        self.channel = channel
        super().__init__(label='Send to #{0}'.format(channel.name), style=discord.ButtonStyle.green)

    async def callback(self, interaction: discord.Interaction) -> T.Any:
        """Send the embed, report the outcome, then expire the parent view.

        NOTE(review): the permission check inspects the *clicking* user, and
        its refusal message is the only response here that is not ephemeral —
        confirm both are intended.
        """
        user = interaction.user
        if (not self.channel.permissions_for(user).send_messages):
            return (await interaction.response.send_message(f'You do not have the `send_messages` permission for the {self.channel.mention} channel.'))
        try:
            m: T.Optional[discord.Message] = (await self.channel.send(embed=self.view.embed))
        except Exception as e:
            (await interaction.response.send_message(f'An error occured: {e}', ephemeral=True))
        else:
            (await interaction.response.send_message(f'{emote.check} | Embed was sent to {self.channel.mention} ([Jump URL](<{m.jump_url}>))', ephemeral=True))
        # Runs after both the success and the failure branch: expires the view.
        (await self.view.on_timeout())
def initialise_colour_map(book):
    """Populate book.colour_map from the BIFF-version default palette.

    Indexes 0-7 are the fixed EXCEL default colours, followed by the
    version-specific palette offset by 8, then several special indexes mapped
    to None (meaning "no known RGB").
    """
    book.colour_map = {}
    book.colour_indexes_used = {}
    if not book.formatting_info:
        # Without formatting info there is nothing to map.
        return
    # Fixed first eight colours.
    for idx in xrange(8):
        book.colour_map[idx] = excel_default_palette_b8[idx]
    # Version-dependent default palette, shifted past the fixed eight.
    dpal = default_palette[book.biff_version]
    ndpal = len(dpal)
    for idx in xrange(ndpal):
        book.colour_map[idx + 8] = dpal[idx]
    # Special indexes with no defined RGB value.
    book.colour_map[ndpal + 8] = None
    book.colour_map[ndpal + 8 + 1] = None
    book.colour_map[81] = None
    book.colour_map[32767] = None
def test_environment_pass_references():
    """Variables listed in CIBW_ENVIRONMENT_PASS_LINUX must be visible when
    CIBW_ENVIRONMENT is expanded."""
    env = {
        'CIBW_ENVIRONMENT_PASS_LINUX': 'STARTER MAIN_COURSE',
        'STARTER': 'green eggs',
        'MAIN_COURSE': 'ham',
        'CIBW_ENVIRONMENT': 'MEAL="$STARTER and $MAIN_COURSE"',
    }
    options = Options(platform='linux',
                      command_line_arguments=CommandLineArguments.defaults(),
                      env=env)
    parsed_environment = options.build_options(identifier=None).environment
    assert parsed_environment.as_dictionary(prev_environment={}) == {
        'MEAL': 'green eggs and ham',
        'STARTER': 'green eggs',
        'MAIN_COURSE': 'ham',
    }
class Effect6796(BaseEffect):
    """Passive mode effect: scales small-hybrid-turret damage by the inverse
    of the module's post-division damage bonus attribute.

    NOTE(review): `handler` has no `self`/`cls` parameter; presumably the
    effect framework calls it as an unbound/static function — confirm against
    BaseEffect's calling convention.
    """

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # Multiply damage by 1 / modeDamageBonusPostDiv for all modules that
        # require the Small Hybrid Turret skill, stacking-penalized in the
        # 'postDiv' group.
        fit.modules.filteredItemMultiply((lambda mod: mod.item.requiresSkill('Small Hybrid Turret')), 'damageMultiplier', (1 / module.getModifiedItemAttr('modeDamageBonusPostDiv')), stackingPenalties=True, penaltyGroup='postDiv', **kwargs)
@COMPONENTS.register_module()
class FooLinearConv1d(BaseModule):
    """Test module chaining an optional linear and an optional conv1d component.

    NOTE(review): the registration decorator was reconstructed from an
    orphaned `_module()` line; since the children are built from the
    COMPONENTS registry, `@COMPONENTS.register_module()` is the likely
    original — confirm.
    """

    def __init__(self, linear=None, conv1d=None, init_cfg=None):
        super().__init__(init_cfg)
        # Children are built lazily from config dicts via the registry.
        if (linear is not None):
            self.linear = build_from_cfg(linear, COMPONENTS)
        if (conv1d is not None):
            self.conv1d = build_from_cfg(conv1d, COMPONENTS)

    def forward(self, x):
        x = self.linear(x)
        return self.conv1d(x)
def build_dataset_ccrop(cfg):
    """Instantiate the dataset described by `cfg`, injecting both a
    random-crop and a center-crop transform built from the config."""
    args = cfg.copy()
    ds_dict = args.ds_dict
    # The 'type' key names the dataset class; everything else is kwargs.
    ds_name = ds_dict.pop('type')
    ds_dict['transform_rcrop'] = build_transform(args.rcrop_dict)
    ds_dict['transform_ccrop'] = build_transform(args.ccrop_dict)
    return datasets.__dict__[ds_name](**ds_dict)
class Hook:
    """Base class for runner hooks.

    Subclasses override any subset of the stage callbacks; the generic
    before/after_epoch and before/after_iter methods fan out to both the
    train and val variants. The `every_n_*` / `is_last_*` helpers answer
    "should this hook fire now?" for a given runner.
    """

    stages = ('before_run', 'before_train_epoch', 'before_train_iter',
              'after_train_iter', 'after_train_epoch', 'before_val_epoch',
              'before_val_iter', 'after_val_iter', 'after_val_epoch',
              'after_run')

    # --- overridable stage callbacks (no-ops by default) -----------------
    def before_run(self, runner):
        pass

    def after_run(self, runner):
        pass

    def before_epoch(self, runner):
        pass

    def after_epoch(self, runner):
        pass

    def before_iter(self, runner):
        pass

    def after_iter(self, runner):
        pass

    # --- train/val variants delegate to the generic callbacks ------------
    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    # --- scheduling predicates -------------------------------------------
    def every_n_epochs(self, runner, n):
        return n > 0 and (runner.epoch + 1) % n == 0

    def every_n_inner_iters(self, runner, n):
        return n > 0 and (runner.inner_iter + 1) % n == 0

    def every_n_iters(self, runner, n):
        return n > 0 and (runner.iter + 1) % n == 0

    def end_of_epoch(self, runner):
        return runner.inner_iter + 1 == len(runner.data_loader)

    def is_last_epoch(self, runner):
        return runner.epoch + 1 == runner._max_epochs

    def is_last_iter(self, runner):
        return runner.iter + 1 == runner._max_iters

    def get_triggered_stages(self):
        """Return the stages this hook actually overrides, in `stages` order."""
        triggered = {stage for stage in Hook.stages
                     if is_method_overridden(stage, Hook, self)}
        # Overriding a generic callback implicitly triggers both variants.
        generic_to_stages = {
            'before_epoch': ['before_train_epoch', 'before_val_epoch'],
            'after_epoch': ['after_train_epoch', 'after_val_epoch'],
            'before_iter': ['before_train_iter', 'before_val_iter'],
            'after_iter': ['after_train_iter', 'after_val_iter'],
        }
        for generic, concrete in generic_to_stages.items():
            if is_method_overridden(generic, Hook, self):
                triggered.update(concrete)
        return [stage for stage in Hook.stages if stage in triggered]
def wrap_model(opt, modelG, modelD, flowNet):
    """Distribute the generator, discriminator and flow networks over GPUs.

    When the generator claims every GPU, all three are wrapped in myModel.
    Otherwise the generator gets the first n_gpus_gen devices (just the first
    one when batchSize == 1) and the discriminator/flow net share the first
    device plus the remaining ones.
    """
    if opt.n_gpus_gen == len(opt.gpu_ids):
        return (myModel(opt, modelG), myModel(opt, modelD), myModel(opt, flowNet))
    if opt.batchSize == 1:
        # Single-sample batches: generator stays on one device.
        gpu_split_id = opt.n_gpus_gen + 1
        modelG = nn.DataParallel(modelG, device_ids=opt.gpu_ids[0:1])
    else:
        gpu_split_id = opt.n_gpus_gen
        modelG = nn.DataParallel(modelG, device_ids=opt.gpu_ids[:gpu_split_id])
    shared_ids = [opt.gpu_ids[0]] + opt.gpu_ids[gpu_split_id:]
    modelD = nn.DataParallel(modelD, device_ids=shared_ids)
    flowNet = nn.DataParallel(flowNet, device_ids=shared_ids)
    return (modelG, modelD, flowNet)
def apply_definition(words, args, raw_expansion, def_name):
    """Expand a macro call: bind trailing words to `args`, substitute them
    into `raw_expansion`, and return the rewritten word list.

    Arguments are bound right-to-left; a word preceded by ':' chains with the
    word before it into a single argument. A word in the expansion that
    matches an argument name is replaced by its bound words; a word whose
    first character is a valid prefix and whose remainder matches an argument
    gets the prefix glued onto the argument's first word.
    """
    global valid_prefixes
    bindings = {}
    remaining = len(words)
    for name in reversed(args):
        start = remaining - 1
        # Absorb ':'-joined chains into this argument.
        while start > 1 and words[start - 1] == ':':
            start -= 2
        if start < 0:
            raise SyntaxError('Not enough arguments to %s' % def_name)
        bindings[name] = words[start:remaining]
        if ';' in bindings[name]:
            raise SyntaxError('Cannot use ; as argument to %s' % def_name)
        remaining = start
    expanded = words[:remaining]
    for token in raw_expansion:
        if token in bindings:
            expanded.extend(bindings[token])
        elif token and token[0] in valid_prefixes and token[1:] in bindings:
            # Prefixed argument reference: glue the prefix onto the first word.
            prefixed = copy.copy(bindings[token[1:]])
            prefixed[0] = token[0] + prefixed[0]
            expanded.extend(prefixed)
        else:
            expanded.append(token)
    return expanded
class MLPNet(nn.Module):
    """Two-layer MLP for flattened 28x28 images: 784 -> 256 -> 10 logits."""

    def __init__(self):
        super(MLPNet, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        # Flatten whatever spatial layout arrives into (batch, 784).
        flat = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
class TotalGraph(TimeGraph):
    """Annual-totals variant of TimeGraph: records are bucketed by year."""

    value_params = [
        (_('year'), _('Distance (km)'), _('Annual Distance'), 'y'),
        (_('year'), _('Time (hours)'), _('Annual Time'), 'b'),
        (_('year'), _('Average Heart Rate (bpm)'), _('Annual Average Heart Rate'), 'r'),
        (_('year'), _('Average Speed (km/h)'), _('Annual Average Speed'), 'g'),
        (_('year'), _('Calories'), _('Annual Calories'), 'b'),
    ]

    def __init__(self, sports, vbox=None, window=None, combovalue=None,
                 combovalue2=None, main=None):
        TimeGraph.__init__(self, sports, vbox=vbox, window=window, main=main)
        self.combovalue = combovalue
        self.combovalue2 = combovalue2
        # Bucket key: four-digit year.
        self.KEY_FORMAT = '%Y'
        self.SPORT_FIELD = 4

    def getYears(self, yvalues):
        """Every year (as str) spanned by the value series, inclusive."""
        years = set()
        for series in yvalues.values():
            lo, hi = int(min(series.keys())), int(max(series.keys()))
            years.update(str(year) for year in range(lo, hi + 1))
        return sorted(years)

    def drawgraph(self, values):
        TimeGraph.drawgraph(self, values, x_func=self.getYears)

    def getValue(self, record, value_selected):
        """Map the combo index to a record attribute; durations become hours."""
        attr = {0: 'distance', 1: 'duration', 2: 'beats',
                3: 'average', 4: 'calories'}[value_selected]
        value = self.getFloatValue(getattr(record, attr))
        return value / 3600 if attr == 'duration' else value
def test_wrap_long_word_max_data_lines():
    """Long words wrap inside 10-wide columns; rows that exceed a column's
    max_data_lines are truncated with an ellipsis."""
    columns = [
        Column('Col 1', width=10, max_data_lines=2),
        Column('Col 2', width=10, max_data_lines=2),
        Column('Col 3', width=10, max_data_lines=2),
        Column('Col 4', width=10, max_data_lines=1),
    ]
    tc = TableCreator(columns)
    row_data = [
        'LongerThan10FitsLast',                # wraps exactly into 2 lines
        'LongerThan10FitsLast\nMore lines',    # extra line gets cut
        'LongerThan10RunsOverLast',            # overflows the 2-line limit
        'A LongerThan10RunsOverLast',          # single-line column
    ]
    row = tc.generate_row(row_data=row_data, is_header=False)
    assert (row == 'LongerThan LongerThan LongerThan A LongerT...\n10FitsLast 10FitsLas... 10RunsOve... ')
@pytest.mark.issue(86)
@pytest.mark.parametrize('today', [False, True])
def test_git_dirty_notag(today: bool, wd: WorkDir, monkeypatch: pytest.MonkeyPatch) -> None:
    """A dirty repo without tags yields a .devN+g... version, date-stamped
    with today's date when SOURCE_DATE_EPOCH is unset.

    NOTE(review): the two marker decorators were reconstructed from orphaned
    `.issue(86)` / `.parametrize(...)` lines — confirm the marker names.
    """
    if today:
        monkeypatch.delenv('SOURCE_DATE_EPOCH', raising=False)
    wd.commit_testfile()
    wd.write('test.txt', 'test2')
    wd('git add test.txt')
    version = wd.get_version()
    if today:
        # Without SOURCE_DATE_EPOCH the dirty stamp is today's UTC date.
        tag = datetime.now(timezone.utc).date().strftime('.d%Y%m%d')
    else:
        tag = '.d'
    assert version.startswith('0.1.dev1+g')
    assert version.endswith(tag)
class NitroMessageLengthTest(TestCase):
    """DeletedMessage.content must accept 4000 characters and reject 4001."""

    def setUp(self):
        self.user = User.objects.create(id=50, name='bill', discriminator=5)
        self.context = MessageDeletionContext.objects.create(id=50, actor=self.user, creation=dt.now(UTC))

    def test_create(self):
        """A 4000-character message passes field validation."""
        message = DeletedMessage(id=46, author=self.user, channel_id=666, content=('w' * 4000), deletion_context=self.context, embeds=[])
        try:
            message.clean_fields()
        except Exception as e:
            # Report the real length; the old message hard-coded 3950 while
            # the content above is 4000 characters.
            self.fail(f'Creation of message of length {len(message.content)} failed with: {e}')

    def test_create_failure(self):
        """One character over the limit must raise a ValidationError on `content`."""
        message = DeletedMessage(id=47, author=self.user, channel_id=666, content=('w' * 4001), deletion_context=self.context, embeds=[])
        self.assertRaisesRegex(ValidationError, "content':", message.clean_fields)
class KeynoteSpeaker(TimeStampedModel, OrderedModel):
    """A speaker attached to a conference Keynote, ordered within that keynote."""

    # Owning keynote; deleting the keynote removes its speakers.
    keynote = models.ForeignKey('conferences.Keynote', on_delete=models.CASCADE, verbose_name=_('keynote'), related_name='speakers', null=False)
    # Optional link to a site user account.
    user = models.ForeignKey('users.User', on_delete=models.CASCADE, null=True, blank=True, verbose_name=_('user'), related_name='+')
    name = models.CharField(_('fullname'), max_length=512, blank=True)
    photo = models.ImageField(_('photo'), null=True, blank=False, upload_to='keynotes')
    # Translatable free-text fields.
    bio = I18nTextField(_('bio'), blank=False, null=True)
    pronouns = I18nCharField(_('pronouns'), max_length=512, null=True)
    highlight_color = models.CharField(choices=COLORS, max_length=15, blank=True, verbose_name=_('highlight color'))
    # Social / web presence (all optional).
    twitter_handle = models.CharField(_('twitter handle'), max_length=1024, default='', blank=True)
    instagram_handle = models.CharField(_('instagram handle'), max_length=1024, default='', blank=True)
    website = models.URLField(_('website'), blank=True, default='', max_length=2049)
    # OrderedModel: ordering is scoped per keynote.
    order_with_respect_to = 'keynote'

    class Meta(OrderedModel.Meta):
        verbose_name = _('Keynote Speaker')
        verbose_name_plural = _('Keynote Speakers')
class ListParameterItem(WidgetParameterItem):
    """ParameterItem that edits a ListParameter through a QComboBox.

    Displayed text is translated to/from parameter values via the
    forward/reverse mappings produced by ListParameter.mapping().
    """

    def __init__(self, param, depth):
        # Remembered so the selection can be restored after the limits
        # (and therefore the combo entries) change.
        self.targetValue = None
        WidgetParameterItem.__init__(self, param, depth)

    def makeWidget(self):
        """Build the combo box and adapt it to the sigChanged/value/setValue
        interface that WidgetParameterItem expects of its widget."""
        w = QtWidgets.QComboBox()
        w.setMaximumHeight(20)
        w.sigChanged = w.currentIndexChanged
        w.value = self.value
        w.setValue = self.setValue
        self.widget = w
        # Populate entries from the parameter's current limits.
        self.limitsChanged(self.param, self.param.opts['limits'])
        if (len(self.forward) > 0):
            self.setValue(self.param.value())
        return w

    def value(self):
        # Map the displayed text back to the parameter value (None if absent).
        key = self.widget.currentText()
        return self.forward.get(key, None)

    def setValue(self, val):
        self.targetValue = val
        # fn.eq handles values (e.g. arrays) where plain == is unreliable.
        match = [fn.eq(val, limVal) for limVal in self.reverse[0]]
        if (not any(match)):
            # Unknown value: fall back to the first entry.
            self.widget.setCurrentIndex(0)
        else:
            idx = match.index(True)
            key = self.reverse[1][idx]
            ind = self.widget.findText(key)
            self.widget.setCurrentIndex(ind)

    def limitsChanged(self, param, limits):
        """Rebuild the combo entries for new limits; signals are blocked so
        repopulating does not emit spurious value-changed notifications."""
        if (len(limits) == 0):
            # An empty entry keeps the combo usable with no limits.
            limits = ['']
        (self.forward, self.reverse) = ListParameter.mapping(limits)
        try:
            self.widget.blockSignals(True)
            val = self.targetValue
            self.widget.clear()
            for k in self.forward:
                self.widget.addItem(k)
                if (k == val):
                    # Re-select the previously targeted value.
                    self.widget.setCurrentIndex((self.widget.count() - 1))
                    self.updateDisplayLabel()
        finally:
            self.widget.blockSignals(False)

    def updateDisplayLabel(self, value=None):
        if (value is None):
            value = self.widget.currentText()
        super().updateDisplayLabel(value)
class read_file():
    def GeoTIFF(path_or_dataset, crs_key=None, data_crs=None, sel=None, isel=None, set_data=None, mask_and_scale=False, fill_values='mask'):
        """Read a GeoTIFF (path or xarray.Dataset) into data/x/y/crs arrays.

        Selects a single 2-D slice (defaulting to band 0), resolves the CRS
        from the dataset when not given explicitly, optionally masks
        _FillValue pixels, and either forwards the result to `set_data` or
        returns it as a dict.

        NOTE(review): `crs_key` is accepted but never used in this method —
        confirm whether it should participate in the CRS lookup.
        """
        (xar, rioxarray) = register_modules('xarray', 'rioxarray')
        # Default selection: first band only, so the result is 2-D.
        if ((isel is None) and (sel is None)):
            isel = {'band': 0}
        opened = False
        try:
            if isinstance(path_or_dataset, (str, Path)):
                ncfile = xar.open_dataset(path_or_dataset, mask_and_scale=mask_and_scale)
                opened = True
            elif isinstance(path_or_dataset, xar.Dataset):
                ncfile = path_or_dataset
            else:
                raise ValueError(('EOmaps: `m.read_file.GeoTIFF` accepts only a path ' + 'to a GeoTIFF file or an `xarray.Dataset` object!'))
            # Narrow to the requested slice (label-based `sel` wins over `isel`).
            if (sel is not None):
                usencfile = ncfile.sel(**sel)
            elif (isel is not None):
                usencfile = ncfile.isel(**isel)
            else:
                usencfile = ncfile
            ncdims = list(usencfile.dims)
            varnames = list(usencfile)
            # The slice must resolve to exactly one 2-D variable.
            if (len(varnames) > 1):
                raise AssertionError((('EOmaps: there is more than 1 variable name available! ' + "please select a specific dataset via the 'sel'- ") + f"or 'isel' kwargs. Available variable names: {varnames}"))
            else:
                usencfile = usencfile[varnames[0]]
            dims = list(usencfile.dims)
            if (len(dims) > 2):
                raise AssertionError((('EOmaps: there are more than 2 dimensions! ' + "please select a specific dataset via the 'sel'- ") + f"or 'isel' kwargs. Available dimensionss: {dims}"))
            # Resolve the CRS from conventional grid-mapping variables.
            if (data_crs is None):
                for crskey in ['spatial_ref', 'crs']:
                    if (crskey in ncfile):
                        crsattr = ncfile[crskey].attrs
                        for wktkey in ['crs_wkt', 'wkt']:
                            if (wktkey in crsattr):
                                data_crs = crsattr[wktkey]
                assert (data_crs is not None), ('EOmaps: No crs information found... please specify the crs ' + "via the 'data_crs' argument explicitly!")
            # Reorder axes so the data matches the pre-selection dim order.
            data = np.moveaxis(usencfile.values, *[dims.index(i) for i in ncdims])
            (x, y) = (getattr(usencfile, ncdims[0]).values, getattr(usencfile, ncdims[1]).values)
            if (mask_and_scale is False):
                encoding = usencfile.attrs
                fill_value = encoding.get('_FillValue', None)
                # Mask nodata pixels unless the caller opted out.
                if (fill_value and (fill_values == 'mask')):
                    data = np.ma.MaskedArray(data=data, mask=(data == fill_value), copy=False, fill_value=fill_value, hard_mask=True)
            else:
                encoding = None
            if (set_data is not None):
                set_data.set_data(data=data, x=x, y=y, crs=data_crs, encoding=encoding)
            else:
                return dict(data=data, x=x, y=y, crs=data_crs, encoding=encoding)
        finally:
            # Only close datasets this function opened itself.
            if opened:
                ncfile.close()
def NetCDF(path_or_dataset, parameter=None, coords=None, crs_key=None, data_crs=None, sel=None, isel=None, set_data=None, mask_and_scale=False, fill_values='mask'):
    """Extract a 2D parameter and its coordinates from a NetCDF source for EOmaps.

    Parameters
    ----------
    path_or_dataset : str, pathlib.Path or xarray.Dataset
        Path to a NetCDF file (opened and closed internally) or an already
        opened ``xarray.Dataset`` (left open for the caller to manage).
    parameter : str, optional
        Name of the variable to extract. Defaults to the first variable.
    coords : (str, str), optional
        Names of the (x, y) coordinates. Defaults to the data-variable's
        dimensions (which must then be exactly 2).
    crs_key : str, optional
        Name of the dataset-attribute holding the crs definition. If given,
        it is checked before the default attribute names.
    data_crs : any, optional
        Explicit crs specification (takes precedence over attribute lookup).
    sel, isel : dict, optional
        Keyword-arguments passed to ``Dataset.sel`` / ``Dataset.isel`` to
        subset the dataset prior to extraction.
    set_data : object, optional
        If given, ``set_data.set_data(...)`` is invoked with the results
        instead of returning them.
    mask_and_scale : bool, default False
        Passed to ``xarray.open_dataset``; when False the raw values and
        their encoding (scale_factor/add_offset/_FillValue) are returned.
    fill_values : str, default "mask"
        When "mask" (and ``mask_and_scale`` is False), values equal to
        ``_FillValue`` are masked via ``numpy.ma.MaskedArray``.

    Returns
    -------
    dict or None
        ``dict(data, x, y, crs, parameter, encoding)`` unless ``set_data``
        is provided (then ``None``).
    """
    (xar,) = register_modules('xarray')
    opened = False  # remember whether WE opened the dataset (only then close it)
    try:
        if isinstance(path_or_dataset, (str, Path)):
            ncfile = xar.open_dataset(path_or_dataset, mask_and_scale=mask_and_scale)
            opened = True
        elif isinstance(path_or_dataset, xar.Dataset):
            ncfile = path_or_dataset
        else:
            raise ValueError(('EOmaps: `m.read_file.NetCDF` accepts only a path ' + 'to a NetCDF file or an `xarray.Dataset` object!'))
        # Optional subsetting of the dataset before extraction.
        if (sel is not None):
            usencfile = ncfile.sel(**sel)
        elif (isel is not None):
            usencfile = ncfile.isel(**isel)
        else:
            usencfile = ncfile
        if (parameter is None):
            # Default to the first data-variable of the dataset.
            parameter = next(iter(ncfile))
            _log.info(f"EOmaps: Using NetCDF variable '{parameter}' as parameter.")
        else:
            # BUGFIX: added the missing separator space between the two sentences
            # of the error message ("...is not valid.Available parameters...").
            assert (parameter in ncfile), (f"EOmaps: The provided parameter-name '{parameter}' is not valid. " + f'Available parameters are {list(ncfile)}')
        data = usencfile[parameter]
        if (coords is None):
            coords = list(data.dims)
            if (len(coords) != 2):
                raise AssertionError((((('EOmaps: could not identify the coordinate-dimensions! ' + 'Please provide coordinate-names explicitly via the ') + "'coords' kwarg.\n") + f'''Available coordinates: {list(usencfile.coords)}
''') + f'Available variables: {list(ncfile)}'))
        else:
            _log.info(f'EOmaps: Using NetCDF coordinates: {coords}')
        if (data_crs is None):
            # BUGFIX: 'crs_key' was accepted (and advertised in the error message
            # below) but never used -- if provided, it now takes precedence over
            # the commonly used default attribute names.
            crs_candidates = ([crs_key] if (crs_key is not None) else ['spatial_ref', 'crs', 'crs_wkt'])
            for crskey in crs_candidates:
                if (crskey in usencfile.attrs):
                    data_crs = usencfile.attrs[crskey]
        # BUGFIX: added the missing separator space before "Available parameters".
        assert (data_crs is not None), (('EOmaps: No crs information found... please specify the crs ' + "via the 'data_crs' or 'crs_key' argument explicitly! ") + f'Available parameters are {list(ncfile)}, {list(ncfile.attrs)}')
        # x/y may live either in the coordinates or among the data-variables.
        if (coords[0] in usencfile.coords):
            x = usencfile.coords[coords[0]]
        elif (coords[0] in usencfile):
            x = usencfile[coords[0]]
        else:
            raise AssertionError(((f'''EOmaps: Coordinate '{coords[0]}' is not present in the NetCDF.
''' + f'''Available coordinates: {list(usencfile.coords)}
''') + f'Available variables: {list(ncfile)}'))
        if (coords[1] in usencfile.coords):
            y = usencfile.coords[coords[1]]
        elif (coords[1] in usencfile):
            y = usencfile[coords[1]]
        else:
            raise AssertionError(((f'''EOmaps: Coordinate '{coords[1]}' is not present in the NetCDF
''' + f'''Available coordinates: {list(usencfile.coords)}
''') + f'Available variables: {list(ncfile)}'))
        # Accept (x, y), (y, x) or fully-2D coordinate layouts.
        check_shapes = ((data.shape == (x.size, y.size)) or (data.shape == (y.size, x.size)) or ((data.shape == x.shape) and (data.shape == y.shape)))
        if (not check_shapes):
            dstr = str([f'{i}: {j}' for (i, j) in zip(data.dims, data.shape)])
            xstr = str([f'{i}: {j}' for (i, j) in zip(x.dims, x.shape)])
            ystr = str([f'{i}: {j}' for (i, j) in zip(y.dims, y.shape)])
            raise AssertionError(f'''EOmaps: Invalid dimensions of data and coordinates!
data: {dstr}
x   : {xstr}
y   : {ystr}
''')
        if ((data.shape == (y.size, x.size)) and (len(x.shape) == 1)):
            # Data stored as (y, x) with 1D coords -> transpose to (x, y).
            data = data.values.T
        else:
            data = data.values
        if (mask_and_scale is False):
            # Preserve the raw encoding so values can be decoded later on.
            encoding = dict(scale_factor=getattr(usencfile[parameter], 'scale_factor', 1), add_offset=getattr(usencfile[parameter], 'add_offset', 0), _FillValue=getattr(usencfile[parameter], '_FillValue', None))
            fill_value = encoding.get('_FillValue', None)
            if (fill_value and (fill_values == 'mask')):
                data = np.ma.MaskedArray(data=data, mask=(data == fill_value), copy=False, fill_value=fill_value, hard_mask=True)
        else:
            encoding = None
        if (set_data is not None):
            set_data.set_data(data=data, x=x.values, y=y.values, crs=data_crs, parameter=parameter, encoding=encoding)
        else:
            return dict(data=data, x=x.values, y=y.values, crs=data_crs, parameter=parameter, encoding=encoding)
    finally:
        # Close the dataset only if this function opened it.
        if opened:
            ncfile.close()
def CSV(path, parameter=None, x=None, y=None, crs=None, set_data=None, **kwargs):
    """Read a csv-file with pandas and prepare its columns for use in EOmaps.

    Only the ``parameter``, ``x`` and ``y`` columns are kept (de-duplicated,
    order preserved); extra keyword-arguments are forwarded to
    ``pandas.read_csv``. If ``set_data`` is given, ``set_data.set_data(...)``
    is called instead of returning the extracted values.
    """
    (pd,) = register_modules('pandas')
    frame = pd.read_csv(path, **kwargs)
    # Validate that every requested column actually exists in the file.
    for column in (parameter, x, y):
        assert (column in frame), (f'''EOmaps: the parameter-name {column} is not a column of the csv-file!
''' + f'Available columns are: {list(frame)}')
    # Keep each column only once, preserving the (parameter, x, y) order.
    selected = list(dict.fromkeys([parameter, x, y]))
    result = dict(data=frame[selected], x=x, y=y, crs=crs, parameter=parameter)
    if (set_data is None):
        return result
    set_data.set_data(**result)
# NOTE(review): the line below looks like a '@pytest.mark.parametrize'
# decorator that lost its '@pytest.mark' prefix during extraction -- as
# written it is not valid syntax; confirm against the original file.
.parametrize('\n start_datetime, end_datetime,\n repository_id, namespace_id,\n max_query_time, scroll_responses, expected_requests, expected_logs, throws\n ', [pytest.param(parse('2018-03-08'), parse('2018-04-02'), 1, 1, timedelta(seconds=10), SCROLL_RESPONSES, SCROLL_REQUESTS, SCROLL_LOGS, False, id='Scroll 3 pages with page size = 1')])
def test_yield_logs_for_export(start_datetime, end_datetime, repository_id, namespace_id, max_query_time, scroll_responses, expected_requests, expected_logs, throws, logs_model, mock_elasticsearch, mock_db_model, mock_max_result_window, app_config):
    """Verify that exporting logs issues the expected Elasticsearch scroll calls.

    The mocked ES client replays `scroll_responses`: the first element seeds
    the scroll-create call, the middle elements feed successive scroll_get
    calls, and the last element is the scroll_delete response.
    """
    mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
    mock_elasticsearch.search_scroll_create = Mock(return_value=scroll_responses[0])
    mock_elasticsearch.scroll_get = Mock(side_effect=scroll_responses[1:(- 1)])
    mock_elasticsearch.scroll_delete = Mock(return_value=scroll_responses[(- 1)])
    configure(app_config)
    if throws:
        # Failure path: the generator itself is expected to raise.
        with pytest.raises(Exception):
            logs_model.yield_logs_for_export(start_datetime, end_datetime, max_query_time=max_query_time)
    else:
        log_generator = logs_model.yield_logs_for_export(start_datetime, end_datetime, max_query_time=max_query_time)
        counter = 0
        # Walk the generator, checking that each page was requested with the
        # expected arguments and yielded the expected logs.
        for logs in log_generator:
            if (counter == 0):
                mock_elasticsearch.search_scroll_create.assert_called_with(*expected_requests[counter])
            else:
                mock_elasticsearch.scroll_get.assert_called_with(*expected_requests[counter])
            assert (expected_logs[counter] == logs)
            counter += 1
        # After exhaustion: the last page fetch and the scroll cleanup call.
        mock_elasticsearch.scroll_get.assert_called_with(*expected_requests[(- 2)])
        mock_elasticsearch.scroll_delete.assert_called_with(*expected_requests[(- 1)])
class AdaroundAcceptanceTests(unittest.TestCase):
    """Acceptance tests for AIMET AdaRound on ResNet-18 (CUDA required).

    NOTE(review): the bare '.cuda' lines below look like decorators (e.g.
    '@pytest.mark.cuda') whose prefix was lost during extraction -- as
    written they are not valid syntax; confirm against the original file.
    """
    .cuda
    def test_adaround_resnet18_only_weights(self):
        """AdaRound should alter model outputs and emit a weight-encodings file."""
        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        torch.cuda.empty_cache()
        seed_all(1000)  # fix all RNG seeds for reproducibility
        model = models.resnet18().eval()
        model = model.to(torch.device('cuda'))
        input_shape = (1, 3, 224, 224)
        dummy_input = create_rand_tensors_given_shapes(input_shape, torch.device('cuda'))
        orig_output = dummy_forward_pass(model, input_shape)
        data_loader = create_fake_data_loader(dataset_size=64, batch_size=16, image_size=input_shape[1:])
        params = AdaroundParameters(data_loader=data_loader, num_batches=4, default_num_iterations=5, default_reg_param=0.01, default_beta_range=(20, 2))
        adarounded_model = Adaround.apply_adaround(model, dummy_input, params, path='./', filename_prefix='resnet18', default_param_bw=4, default_quant_scheme=QuantScheme.post_training_tf_enhanced)
        ada_output = dummy_forward_pass(adarounded_model, input_shape)
        # Rounded weights must change at least some outputs.
        self.assertFalse(torch.all(torch.eq(orig_output, ada_output)))
        with open('./resnet18.encodings') as json_file:
            encoding_data = json.load(json_file)
            print(encoding_data)
        self.assertTrue(isinstance(encoding_data['conv1.weight'], list))
        # Clean up the encodings file written by apply_adaround.
        if os.path.exists('./resnet18.encodings'):
            os.remove('./resnet18.encodings')
    .cuda
    def test_adaround_resnet18_followed_by_quantsim(self):
        """QuantSim with frozen AdaRound encodings must reuse them unchanged."""
        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        torch.cuda.empty_cache()
        seed_all(1000)  # fix all RNG seeds for reproducibility
        model = models.resnet18().eval()
        model = model.to(torch.device('cuda'))
        input_shape = (1, 3, 224, 224)
        dummy_input = create_rand_tensors_given_shapes(input_shape, torch.device('cuda'))
        data_loader = create_fake_data_loader(dataset_size=64, batch_size=16, image_size=input_shape[1:])
        params = AdaroundParameters(data_loader=data_loader, num_batches=4, default_num_iterations=5, default_reg_param=0.01, default_beta_range=(20, 2))
        param_bw = 4
        output_bw = 8
        quant_scheme = QuantScheme.post_training_tf_enhanced
        adarounded_model = Adaround.apply_adaround(model, dummy_input, params, path='./', filename_prefix='resnet18', default_param_bw=param_bw, default_quant_scheme=quant_scheme)
        # Capture the encoding AdaRound wrote for conv1's weight...
        with open('./resnet18.encodings') as json_file:
            encoding_data = json.load(json_file)
        encoding = encoding_data['conv1.weight'][0]
        (before_min, before_max, before_delta, before_offset) = (encoding.get('min'), encoding.get('max'), encoding.get('scale'), encoding.get('offset'))
        sim = QuantizationSimModel(adarounded_model, quant_scheme=quant_scheme, default_param_bw=param_bw, default_output_bw=output_bw, dummy_input=dummy_input)
        sim.set_and_freeze_param_encodings(encoding_path='./resnet18.encodings')
        sim.compute_encodings(dummy_forward_pass, forward_pass_callback_args=input_shape)
        # ...and assert QuantSim kept it byte-for-byte after compute_encodings.
        encoding = sim.model.conv1.param_quantizers['weight'].encoding
        (after_min, after_max, after_delta, after_offset) = (encoding.min, encoding.max, encoding.delta, encoding.offset)
        self.assertEqual(before_min, after_min)
        self.assertEqual(before_max, after_max)
        self.assertEqual(before_delta, after_delta)
        self.assertEqual(before_offset, after_offset)
        if os.path.exists('./resnet18.encodings'):
            os.remove('./resnet18.encodings')
    def test_dummy(self):
        # Placeholder so the suite is never empty when CUDA tests are skipped.
        pass
class TopicEntryList(EntryCreateMixin, IntegratedFormMixin, ListView):
    """Paginated list of a topic's entries, filtered by a "view mode".

    The mode is chosen via the ``a`` GET parameter (one of ``modes``);
    some modes require authentication (``login_required_modes``). The view
    also serves single-entry permalinks (mode ``entry_permalink``) and
    plain-text topic searches via the ``q`` GET parameter.
    """
    context_object_name = 'entries'
    template_name = 'dictionary/list/entry_list.html'
    paginator_class = SafePaginator
    topic = None  # resolved in get_topic() during dispatch
    entry = None  # set only when serving an entry permalink
    view_mode = None  # one of `modes`, or 'entry_permalink'
    modes = ('regular', 'today', 'popular', 'history', 'nice', 'nicetoday', 'search', 'following', 'novices', 'recent', 'links', 'acquaintances', 'answered', 'images')
    login_required_modes = ('novices', 'following', 'recent', 'acquaintances')
    redirect = False  # when True, render_to_response redirects back to the topic
    def regular(self):
        """All entries of the topic, unfiltered."""
        return self.topic.entries.all()
    def today(self):
        """Entries created within the last 24 hours."""
        return self.topic.entries.filter(date_created__gte=time_threshold(hours=24))
    def popular(self):
        """Today's entries, or all of them for pinned topics."""
        return (self.regular() if self.topic.is_pinned else self.today())
    def history(self):
        """Entries created on this calendar day, `year` years ago (?year=YYYY)."""
        year = self.request.GET.get('year', '')
        if (not (year.isdigit() and (int(year) in settings.YEAR_RANGE))):
            # Invalid/missing year -> bounce back to the plain topic view.
            self.redirect = True
            return None
        now = timezone.now()
        diff = (now.year - int(year))
        delta = timezone.localtime((now - relativedelta(years=diff)))
        return self.topic.entries.filter(date_created__date=delta.date())
    def nice(self):
        """Entries ordered by vote rate, best first."""
        return self.topic.entries.order_by('-vote_rate')
    def nicetoday(self):
        """Today's entries ordered by vote rate, best first."""
        return self.today().order_by('-vote_rate')
    def search(self):
        """Entries whose content matches ?keywords=... (full-text on postgres)."""
        keywords = self.request.GET.get('keywords', '').strip()
        if (not keywords):
            self.redirect = True
            return None
        filters = Q(content__icontains=keywords)
        # NOTE(review): startswith('') is always True -- a mention-prefix
        # character (e.g. '@') appears to have been lost here; verify
        # against the original file.
        if (keywords.startswith('') and (username := keywords[1:])):
            with suppress(Author.DoesNotExist):
                author = Author.objects.get(username=username)
                filters |= Q(author=author)
        if (connection.vendor == 'postgresql'):
            filters |= Q(content__search=keywords)
        return self.topic.entries.filter(filters)
    def links(self):
        """Entries containing a web URL."""
        return self.topic.entries.filter(content__regex=RE_WEBURL)
    def acquaintances(self):
        """Entries by authors the user follows; ?recent limits to last 120h."""
        filters = {'author__in': self.request.user.following.all()}
        if (self.request.GET.get('recent') is not None):
            filters['date_created__gte'] = time_threshold(hours=120)
        return self.topic.entries.filter(**filters)
    def following(self):
        """Entries newer than the user's last read of this followed topic.

        Falls back to the full listing (with an informational notice) when
        the topic isn't followed or nothing new was written.
        """
        queryset = None
        following = TopicFollowing.objects.filter(author=self.request.user, topic=self.topic).first()
        if following:
            epoch = self.request.GET.get('d')
            try:
                # +1 second so the boundary entry itself is excluded.
                last_read = timezone.make_aware(datetime.datetime.utcfromtimestamp((int(epoch) + 1)), timezone.utc)
            except (ValueError, TypeError, OSError, OverflowError):
                last_read = None
            if (last_read and (last_read > following.date_created)):
                queryset = self._qs_filter(self.topic.entries.filter(date_created__gt=last_read).exclude(Q(author=self.request.user)))
        if ((queryset is not None) and queryset.exists()):
            # Mark the topic as read and refresh the unread counter.
            following.read_at = timezone.now()
            following.save()
            self.request.user.invalidate_unread_topic_count()
            return queryset
        notifications.info(self.request, _('honestly, there was nothing new. so i listed them all.'))
        self.redirect = True
        return None
    def novices(self):
        """Last-24h entries written by novice authors."""
        return self.topic.entries.filter(author__is_novice=True, date_created__gte=time_threshold(hours=24))
    def recent(self):
        """Entries since (and including) the user's own latest entry here."""
        with suppress(Entry.DoesNotExist):
            latest = self.topic.entries.filter(author=self.request.user).latest('date_created')
            return self.topic.entries.filter(date_created__gte=latest.date_created)
        return None
    def answered(self):
        """Entries that have at least one comment."""
        return self.topic.entries.filter(Exists(Comment.objects.filter(entry=OuterRef('pk'))))
    def images(self):
        """Entries containing an image reference."""
        return self.topic.entries.filter(content__regex=IMAGE_REGEX)
    def get_queryset(self):
        """Dispatch to the current view-mode method and apply common filtering."""
        queryset = None
        if (self.entry is not None):
            # Permalink: a single entry, drafts included via objects_all.
            return entry_prefetch(Entry.objects_all.filter(pk=self.entry.pk), self.request.user, comments=self.topic.is_ama)
        if self.topic.exists:
            queryset = getattr(self, self.view_mode)()
        if (queryset is not None):
            if (self.view_mode == 'following'):
                # 'following' already applied _qs_filter internally.
                return queryset
            return self._qs_filter(queryset)
        return self.model.objects.none()
    def get_context_data(self, *args, **kwargs):
        """Augment context with drafts, (see: ...) redirects and page offsets.

        The previous/subsequent counters let the template link to entries
        written before/after the currently displayed slice.
        """
        context = super().get_context_data(*args, **kwargs)
        context['topic'] = self.topic
        context['mode'] = self.view_mode
        context['entry_permalink'] = self.entry
        if (not self.topic.exists):
            return context
        entries = context.get('object_list')
        queryset_size = context.get('paginator').count
        if self.request.user.is_authenticated:
            context['drafts'] = Entry.objects_all.filter(is_draft=True, topic=self.topic, author=self.request.user)
        if (queryset_size > 0):
            first_entry = (entries[0] if (not self.entry) else self.entry)
            # A lone "(see: other topic)" entry redirects to the referenced topic
            # unless ?nr=true explicitly disables the redirect.
            if (all(((self.view_mode == 'regular'), (queryset_size == 1), (self.request.GET.get('nr') != 'true'))) and (reference := re.fullmatch(f'\({SEE_EXPR}: (?!<)({RE_TOPIC_CHARSET})\)', first_entry.content))):
                title = reference.group(1)
                with suppress(Topic.DoesNotExist):
                    topic = Topic.objects_published.get(title=title)
                    if (topic.entry_count > 1):
                        return {'referrer': self.topic, 'referent': topic}
            (previous_entries_count, previous_entries_page) = (0, 0)
            (subsequent_entries_count, subsequent_entries_page) = (0, 0)
            (show_subsequent, show_previous) = (False, False)
            # Which modes get "earlier entries" / "later entries" navigation.
            if (self.view_mode in ('popular', 'today', 'following', 'novices')):
                show_previous = True
            elif (self.view_mode in ('history', 'entry_permalink', 'search', 'nicetoday', 'recent', 'links', 'acquaintances', 'images')):
                show_previous = True
                show_subsequent = True
            if (show_subsequent or show_previous):
                first_entry_date = first_entry.date_created
                previous_entries_count = self._qs_filter(self.topic.entries.filter(date_created__lt=first_entry_date, author__is_novice=False), prefetch=False).count()
                if show_previous:
                    paginate_by = self.get_paginate_by()
                    previous_entries_page = math.ceil((previous_entries_count / paginate_by))
                if show_subsequent:
                    with suppress(IndexError):
                        last_entry_date = (entries[(queryset_size - 1)].date_created if (not self.entry) else self.entry.date_created)
                        subsequent_entries_count = self._qs_filter(self.topic.entries.filter(date_created__gt=last_entry_date, author__is_novice=False), prefetch=False).count()
                        if (subsequent_entries_count > 0):
                            subsequent_entries_page = self._find_subsequent_page(previous_entries_count)
            context['previous_entries_count'] = previous_entries_count
            context['previous_entries_page'] = previous_entries_page
            context['subsequent_entries_count'] = subsequent_entries_count
            context['subsequent_entries_page'] = subsequent_entries_page
            context['first_entry'] = first_entry
        else:
            # Empty result: fall back to regular mode and expose the total count.
            self.view_mode = 'regular'
            context['all_entries_count'] = self._qs_filter(self.regular(), prefetch=False).count()
        return context
    def dispatch(self, request, *args, **kwargs):
        """Resolve topic/entry, validate the view mode and gate auth-only modes."""
        response = self.get_topic()
        if (response and (self.request.method != 'POST')):
            # get_topic() produced a redirect (canonical URL, search hit, etc.).
            return response
        if (self.topic is None):
            return redirect(reverse('home'))
        if (not self.view_mode):
            requested = request.GET.get('a')
            self.view_mode = (requested if (requested in self.modes) else 'regular')
        if ((not request.user.is_authenticated) and (self.view_mode in self.login_required_modes)):
            notifications.info(request, _('actually, you may benefit from this feature by logging in.'))
            return redirect(reverse('login'))
        # NOTE(review): *args/**kwargs are not forwarded to super().dispatch();
        # verify this is intentional (URLconf kwargs would be dropped).
        return super().dispatch(request)
    def get_paginate_by(self, *args):
        """Per-user page size for authenticated users, site default otherwise."""
        return (self.request.user.entries_per_page if self.request.user.is_authenticated else settings.ENTRIES_PER_PAGE_DEFAULT)
    def render_to_response(self, context, **response_kwargs):
        """Handle (see: ...) redirects and self-redirects before rendering."""
        if ((referent := context.get('referent')) is not None):
            referrer = context['referrer']
            notifications.info(self.request, (_("you have been redirected by a reference, <a href='%(url)s'>click here</a> to return to the original topic.") % {'url': (referrer.get_absolute_url() + '?nr=true')}), extra_tags='persistent')
            return redirect(referent.get_absolute_url())
        return (super().render_to_response(context, **response_kwargs) if (not self.redirect) else self._redirect_to_self())
    def get_topic(self):
        """Resolve the topic (or entry) from slug, unicode string, id or ?q=.

        Returns a redirect response when a canonical topic URL should be
        used instead, False/None otherwise.
        """
        if self.kwargs.get('slug'):
            self.topic = Topic.objects.get_or_pseudo(slug=self.kwargs.get('slug'))
        elif self.kwargs.get('unicode_string'):
            self.topic = Topic.objects.get_or_pseudo(unicode_string=self.kwargs.get('unicode_string'))
            if self.topic.exists:
                return self._redirect_to_self()
        elif self.kwargs.get('entry_id'):
            with proceed_or_404(ValueError, OverflowError):
                # Hide entries from blocked authors for authenticated users.
                klass = (Entry.objects_published if (not self.request.user.is_authenticated) else Entry.objects_published.exclude(author__in=self.request.user.blocked.all()))
                self.entry = get_object_or_404(klass.select_related('topic'), pk=int(self.kwargs.get('entry_id')))
                self.topic = self.entry.topic
                self.view_mode = 'entry_permalink'
        elif self.request.GET.get('q'):
            query = self.request.GET.get('q').strip()
            if (not query):
                return False
            # NOTE(review): startswith('') is always True -- a prefix character
            # (e.g. '@' for author lookup) appears to have been lost; verify.
            if (query.startswith('') and slugify(query)):
                author = get_object_or_404(Author, username=query[1:])
                return redirect(author.get_absolute_url())
            if (query.startswith('#') and query[1:].isdigit()):
                return redirect('entry-permalink', entry_id=query[1:])
            self.topic = Topic.objects.get_or_pseudo(unicode_string=query)
            if self.topic.exists:
                return self._redirect_to_self()
        return False
    def _redirect_to_self(self):
        """Redirect to the topic's canonical URL."""
        return redirect(self.topic.get_absolute_url())
    def _find_subsequent_page(self, previous_object_count):
        """Return the page number holding the first entry after the current slice."""
        paginate_by = self.get_paginate_by()
        index = (previous_object_count + 1)
        page = ((index // paginate_by) or 1)
        if (((index % paginate_by) == 0) or (index > paginate_by)):
            page += 1
        return page
    def _qs_filter(self, queryset, prefetch=True):
        """Common exclusions: drafts, novices (mode-dependent), blocked authors.

        Novice entries stay visible in modes that explicitly target them and,
        conditionally, in 'recent' (novice viewer) and author-prefixed search.
        """
        novice_view_modes = ['novices', 'entry_permalink', 'acquaintances']
        if ((self.view_mode == 'recent') and self.request.user.is_novice):
            novice_view_modes.append('recent')
        # NOTE(review): startswith('') is always True here as well -- likely a
        # lost mention-prefix character; verify against the original file.
        elif ((self.view_mode == 'search') and self.request.GET.get('keywords', '').strip().startswith('')):
            novice_view_modes.append('search')
        qs = queryset.exclude(is_draft=True)
        if (self.view_mode not in novice_view_modes):
            qs = qs.exclude(author__is_novice=True)
        if self.request.user.is_authenticated:
            qs = qs.exclude(author__in=self.request.user.blocked.all())
        if prefetch:
            return entry_prefetch(qs, self.request.user, comments=self.topic.is_ama)
        return qs
def read_regression_classification(db_loc, fs, models_names, datasets, task):
    """Collect per-dataset benchmark results for several models from a database.

    Parameters
    ----------
    db_loc : database location passed to ``Database``.
    fs : metric field names to read (e.g. loss/accuracy columns).
    models_names : iterable of (model_key, display_name) pairs.
    datasets : dataset names; N/D metadata is looked up in ALL_DATATSETS.
    task : table name to query ('regression' or 'classification').

    Returns
    -------
    (results, fields) where ``results[f]['table']`` maps column-name to a
    list of formatted cell strings (one row per dataset, plus summary rows)
    and ``results[f]['vals']`` holds the raw per-dataset means.
    """
    fields = (['dataset', 'N', 'D'] + [m[1] for m in models_names])
    results = {}
    # Note: the comprehension variable 'f' below shadows the loop variable
    # (harmless in Py3 -- comprehensions have their own scope).
    for f in fs:
        results[f] = {'table': {f: [] for f in fields}, 'vals': []}
    with Database(db_loc) as db:
        for dataset in datasets:
            for f in fs:
                results[f]['table']['dataset'].append(dataset[:10])
                results[f]['table']['N'].append(ALL_DATATSETS[dataset].N)
                results[f]['table']['D'].append(ALL_DATATSETS[dataset].D)
            row = {f: [] for f in fs}
            for (model, name) in models_names:
                res = db.read(task, fs, {'model': model, 'dataset': dataset})
                if (len(res) == 0):
                    # No runs recorded: blank cell + NaN placeholder.
                    for f in fs:
                        results[f]['table'][name].append('')
                        row[f].append(np.nan)
                else:
                    print('{} {} {}'.format(model, dataset, len(res)))
                    for (i, f) in enumerate(fs):
                        # NaN-tolerant mean/std over all recorded runs.
                        L = [(np.nan if (l[i] is None) else float(l[i])) for l in res]
                        m = np.nanmean(L)
                        std = (np.nanstd(L) if (len(L) > 1) else np.nan)
                        if ((m < 1000) and (m > (- 1000))):
                            r = '{:.3f}({:.3f})'.format(m, std)
                            row[f].append(m)
                        else:
                            # Diverged/absurd values are collapsed to 'nan'.
                            r = 'nan'
                            row[f].append(np.nan)
                        results[f]['table'][name].append(r)
            for f in fs:
                results[f]['vals'].append(row[f])
    # Append summary rows (average, median, average rank) per metric.
    for f in fs:
        if ('unnormalized' not in f):
            vals = np.array(results[f]['vals'])
            avgs = np.nanmean(vals, 0)
            meds = np.nanmedian(vals, 0)
            rks = np.nanmean(rankarray(vals), 0)
            for (s, n) in [[avgs, 'avg'], [meds, 'median'], [rks, 'avg rank']]:
                results[f]['table']['dataset'].append(n)
                results[f]['table']['N'].append('')
                results[f]['table']['D'].append('')
                if (task == 'classification'):
                    # NOTE(review): 'K' is not in `fields`, so this would raise
                    # KeyError for classification -- presumably 'K' is added to
                    # the table elsewhere for classification datasets; verify.
                    results[f]['table']['K'].append('')
                for (ss, name) in zip(s, [m[1] for m in models_names]):
                    results[f]['table'][name].append('{:.3f}'.format(ss))
    return (results, fields)
# NOTE(review): the line below looks like a decorator that lost its '@'
# prefix during extraction ('@_dtype_float_test(...)') -- as written it is
# a bare call; confirm against the original file.
_dtype_float_test(only64=True, onlycpu=True)
def test_equil_mem(dtype, device):
    """Check that differentiating through `equilibrium` does not leak memory.

    Builds a large batched fixed-point problem, backprops through the solver
    with create_graph=True, and lets `assert_no_memleak` run the closure
    while watching memory usage.
    """
    def _test_equil():
        clss = DummyModule
        # Fixed seeds so the solver workload is identical on every run.
        torch.manual_seed(100)
        random.seed(100)
        nbatch = 2000
        fwd_options = {'method': 'broyden1', 'f_tol': 1e-09, 'alpha': (- 0.5)}
        a = (torch.ones((nbatch,), dtype=dtype) + 0.5).requires_grad_()
        y0 = torch.ones((nbatch,), dtype=dtype)
        def getloss(a):
            model = clss(a)
            # Solve y = model.forward(y) starting from y0.
            y = equilibrium(model.forward, y0, **fwd_options)
            return y
        loss = (getloss(a) ** 2).sum()
        # create_graph=True exercises the double-backward code path.
        grads = torch.autograd.grad(loss, (a,), create_graph=True)
    assert_no_memleak(_test_equil)
def main():
    """Extract per-token BERT hidden states for the sentences in --input_file.

    Loads a pre-trained BertModel from --init_checkpoint, runs every example
    through it (optionally multi-GPU or distributed) and writes one JSON line
    per example to --output_file with the activations of the layers selected
    via --layers.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_file', default=None, type=str, required=True)
    parser.add_argument('--vocab_file', default=None, type=str, required=True, help='The vocabulary file that the BERT model was trained on.')
    parser.add_argument('--output_file', default=None, type=str, required=True)
    parser.add_argument('--bert_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained BERT model. This specifies the model architecture.')
    parser.add_argument('--init_checkpoint', default=None, type=str, required=True, help='Initial checkpoint (usually from a pre-trained BERT model).')
    parser.add_argument('--layers', default='-1,-2,-3,-4', type=str)
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
    parser.add_argument('--do_lower_case', default=True, action='store_true', help='Whether to lower case the input text. Should be True for uncased models and False for cased models.')
    parser.add_argument('--batch_size', default=32, type=int, help='Batch size for predictions.')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    # BUGFIX: args.no_cuda is read below but the flag was never declared,
    # which made every invocation crash with AttributeError.
    parser.add_argument('--no_cuda', default=False, action='store_true', help='Whether not to use CUDA when available')
    args = parser.parse_args()
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        n_gpu = torch.cuda.device_count()
    else:
        # Distributed mode: one process per GPU, NCCL backend.
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    # BUGFIX: logger.info was called with several positional arguments but no
    # %-placeholders in the message; use lazy %-style formatting instead.
    logger.info('device: %s n_gpu: %d distributed training: %r', device, n_gpu, bool((args.local_rank != (- 1))))
    layer_indexes = [int(x) for x in args.layers.split(',')]
    bert_config = BertConfig.from_json_file(args.bert_config_file)
    tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
    examples = read_examples(args.input_file)
    features = convert_examples_to_features(examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer)
    unique_id_to_feature = {}
    for feature in features:
        unique_id_to_feature[feature.unique_id] = feature
    model = BertModel(bert_config)
    if (args.init_checkpoint is not None):
        # Load to CPU first; model.to(device) moves it afterwards.
        model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'))
    model.to(device)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
    elif (n_gpu > 1):
        model = torch.nn.DataParallel(model)
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
    if (args.local_rank == (- 1)):
        eval_sampler = SequentialSampler(eval_data)
    else:
        eval_sampler = DistributedSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
    model.eval()
    with open(args.output_file, 'w', encoding='utf-8') as writer:
        for (input_ids, input_mask, example_indices) in eval_dataloader:
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            (all_encoder_layers, _) = model(input_ids, token_type_ids=None, attention_mask=input_mask)
            # (removed a no-op self-assignment of all_encoder_layers here)
            for (b, example_index) in enumerate(example_indices):
                feature = features[example_index.item()]
                unique_id = int(feature.unique_id)
                output_json = collections.OrderedDict()
                output_json['linex_index'] = unique_id
                all_out_features = []
                for (i, token) in enumerate(feature.tokens):
                    all_layers = []
                    for (j, layer_index) in enumerate(layer_indexes):
                        # Move the requested layer to CPU as a numpy array,
                        # then slice out this batch-item's activations.
                        layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
                        layer_output = layer_output[b]
                        layers = collections.OrderedDict()
                        layers['index'] = layer_index
                        layers['values'] = [round(x.item(), 6) for x in layer_output[i]]
                        all_layers.append(layers)
                    out_features = collections.OrderedDict()
                    out_features['token'] = token
                    out_features['layers'] = all_layers
                    all_out_features.append(out_features)
                output_json['features'] = all_out_features
                writer.write((json.dumps(output_json) + '\n'))
class BidirectionalLSTM(nn.Module):
    """A bidirectional LSTM followed by a linear projection.

    Maps a (T, b, nIn) sequence to (T, b, nOut); both the LSTM and the
    projection are run through utils.data_parallel across `ngpu` GPUs.
    """
    def __init__(self, nIn, nHidden, nOut, ngpu):
        super(BidirectionalLSTM, self).__init__()
        self.ngpu = ngpu
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Bidirectional output concatenates both directions -> 2 * nHidden.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        recurrent, _ = utils.data_parallel(self.rnn, input, self.ngpu)
        seq_len, batch, hidden = recurrent.size()
        # Flatten time and batch so the linear layer sees a 2D tensor...
        flattened = recurrent.view(seq_len * batch, hidden)
        projected = utils.data_parallel(self.embedding, flattened, self.ngpu)
        # ...then restore the (T, b, nOut) sequence layout.
        return projected.view(seq_len, batch, -1)
class WebsocketClient():
    """Kite websocket client that streams ticks into an option-chain store.

    Subscribes to the contract tokens for one symbol/expiry and forwards
    every tick to InstrumentMaster; a separate process repeatedly builds
    the full option chain and pushes it onto a queue.
    """
    def __init__(self, symbol, expiry, api_key, acess_token, underlying):
        self.kws = KiteTicker(api_key, acess_token, debug=True)
        self.symbol = symbol
        self.expiry = expiry
        self.underlying = underlying
        self.instrumentClass = InstrumentMaster(api_key)
        # Instrument tokens for all contracts of this symbol/expiry.
        self.token_list = self.instrumentClass.fetch_contract(self.symbol, str(self.expiry), self.underlying)
        self.q = Queue()
    def form_option_chain(self, q):
        """Continuously rebuild the option chain and publish it on `q`.

        Runs forever; intended to be the target of a separate Process.
        """
        while 1:
            complete_option_data = self.instrumentClass.generate_optionChain(self.token_list)
            q.put(complete_option_data)
    def on_ticks(self, ws, ticks):
        """Store every incoming tick, keyed by its contract symbol/token."""
        for tick in ticks:
            contract_detail = self.instrumentClass.fetch_token_detail(tick['instrument_token'])
            if (contract_detail['type'] == 'EQ'):
                # Equity ticks carry no volume/open-interest fields.
                optionData = {'token': tick['instrument_token'], 'symbol': contract_detail['symbol'], 'last_price': tick['last_price'], 'change': tick['change']}
            else:
                optionData = {'token': tick['instrument_token'], 'symbol': contract_detail['symbol'], 'last_price': tick['last_price'], 'volume': tick['volume'], 'change': tick['change'], 'oi': tick['oi']}
            self.instrumentClass.store_option_data(contract_detail['symbol'], tick['instrument_token'], optionData)
    def on_connect(self, ws, response):
        """Subscribe to all tracked tokens in full (quote + depth) mode."""
        ws.subscribe(self.token_list)
        ws.set_mode(ws.MODE_FULL, self.token_list)
    def on_close(self, ws, code, reason):
        logging.error('closed connection on close: {} {}'.format(code, reason))
    def on_error(self, ws, code, reason):
        logging.error('closed connection on error: {} {}'.format(code, reason))
    def on_noreconnect(self, ws):
        logging.error('Reconnecting the websocket failed')
    def on_reconnect(self, ws, attempt_count):
        logging.debug('Reconnecting the websocket: {}'.format(attempt_count))
    def assign_callBacks(self):
        """Wire all callbacks into the ticker and start it (blocking connect)."""
        self.kws.on_ticks = self.on_ticks
        self.kws.on_connect = self.on_connect
        self.kws.on_close = self.on_close
        self.kws.on_error = self.on_error
        self.kws.on_noreconnect = self.on_noreconnect
        self.kws.on_reconnect = self.on_reconnect
        self.kws.connect()
    def queue_callBacks(self):
        """Run the ticker and the option-chain builder in separate processes."""
        Process(target=self.assign_callBacks).start()
        # Give the websocket a moment to connect before building the chain.
        time.sleep(2)
        Process(target=self.form_option_chain, args=(self.q,)).start()
def passive_grab_device(self, deviceid, time, detail, grab_type, grab_mode, paired_device_mode, owner_events, event_mask, modifiers):
    """Establish an XInput2 passive grab for `deviceid` on this window.

    Thin wrapper around the XIPassiveGrabDevice request: `self` is used as
    the grab window and the cursor is left unchanged (X.NONE); all other
    arguments are forwarded to the request as-is.
    NOTE(review): `extname` is not defined in this method -- presumably a
    module-level constant naming the XInput extension; verify.
    """
    return XIPassiveGrabDevice(display=self.display, opcode=self.display.get_extension_major(extname), deviceid=deviceid, grab_window=self, time=time, cursor=X.NONE, detail=detail, grab_type=grab_type, grab_mode=grab_mode, paired_device_mode=paired_device_mode, owner_events=owner_events, mask=event_mask, modifiers=modifiers)
def parse_args():
parser = argparse.ArgumentParser(description='Finetune a transformers model on a multiple choice task')
parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
parser.add_argument('--max_seq_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_lengh` is passed.')
parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
parser.add_argument('--debug', action='store_true', help='Activate debug mode and run training only with a subset of data.')
parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
parser.add_argument('--with_tracking', action='store_true', help='Whether to enable experiment trackers for logging.')
parser.add_argument('--report_to', type=str, default='all', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"`, `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.Only applicable when `--with_tracking` is passed.')
model_names = bertmodels.bert_versions.keys()
model_configs = bertmodels.bert_configs.keys()
parser.add_argument('--arch', '-a', metavar='ARCH', default='bertForSequence', choices=model_names, help=(('model architecture: ' + ' | '.join(model_names)) + ' (default: bertForSequence)'))
parser.add_argument('--model-config', '-c', metavar='CONF', default='classic', choices=model_configs, help=(('model configs: ' + ' | '.join(model_configs)) + '(default: classic)'))
parser.add_argument('--choice', nargs='+', type=str, default='classic', help='Choose a linear layer to quantize')
def str2bool(v):
if isinstance(v, bool):
return v
if (v.lower() in ('yes', 'true', 't', 'y', '1')):
return True
elif (v.lower() in ('no', 'false', 'f', 'n', '0')):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser.add_argument('--qa', type=str2bool, default=True, help='quantize activation')
parser.add_argument('--qw', type=str2bool, default=True, help='quantize weights')
parser.add_argument('--qg', type=str2bool, default=True, help='quantize gradients')
parser.add_argument('--biased', type=str2bool, default=False, help='biased quantization')
parser.add_argument('--abits', type=int, default=8, help='activation number of bits')
parser.add_argument('--wbits', type=int, default=8, help='weight number of bits')
parser.add_argument('--biasbits', type=int, default=16, help='bias number of bits')
parser.add_argument('--bbits', type=int, default=8, help='backward number of bits')
parser.add_argument('--bwbits', type=int, default=8, help='backward weight number of bits')
parser.add_argument('--hadamard', type=str2bool, default=False, help='apply Hadamard transformation on gradients')
parser.add_argument('--dynamic', type=str2bool, default=True, help='whether apply dynamic Hadamard transformation on gradients')
parser.add_argument('--bmm', type=str2bool, default=True, help='whether apply bmm Hadamard transformation on gradients')
parser.add_argument('--biprecision', type=str2bool, default=True, help='Gradient bifurcation')
parser.add_argument('--twolayers_gradweight', '--2gw', type=str2bool, default=False, help='use two 4 bit to simulate a 8 bit')
parser.add_argument('--twolayers_gradinputt', '--2gi', type=str2bool, default=False, help='use two 4 bit to simulate a 8 bit')
parser.add_argument('--luq', type=str2bool, default=False, help='use luq for backward')
parser.add_argument('--weight_quant_method', '--wfq', default='ptq', type=str, metavar='strategy', choices=['uniform', 'lsq', 'ptq'])
parser.add_argument('--input_quant_method', '--ifq', default='ptq', type=str, metavar='strategy', choices=['uniform', 'lsq', 'ptq'])
parser.add_argument('--learnable_step_size', type=str2bool, default=True, help='Debug to draw the variance and leverage score')
parser.add_argument('--learnable_hadamard', type=str2bool, default=True, help='Debug to draw the variance and leverage score')
parser.add_argument('--lsq_layerwise_input', type=str, default='layer', help='Debug to draw the variance and leverage score', choices=['layer', 'row', 'column'])
parser.add_argument('--lsq_layerwise_weight', type=str, default='layer', help='Debug to draw the variance and leverage score', choices=['layer', 'row', 'column'])
parser.add_argument('--retain_large_value', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
parser.add_argument('--quantize_large_value', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
parser.add_argument('--draw_value', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
parser.add_argument('--clip-value', type=float, default=100, help='Choose a linear layer to quantize')
parser.add_argument('--track_step_size', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
parser.add_argument('--fp16', type=str2bool, default=False, help='whether use torch amp')
args = parser.parse_args()
return args |
class TestAssertLess(TestCase):
    """Fixture covering the argument shapes accepted by ``assertLess``.

    NOTE(review): names such as ``abc``, ``xxx``, ``aaa`` are intentionally
    undefined -- this class appears to be static-analysis/lint-rule input,
    not an executable test suite.
    """
    def test_you(self):
        # plain name vs. string literal
        self.assertLess(abc, 'xxx')
    def test_me(self):
        # literal vs. arithmetic, boolean, and conditional expressions
        self.assertLess(123, (xxx + y))
        self.assertLess(456, (aaa and bbb))
        self.assertLess(789, (ccc or ddd))
        self.assertLess(123, (True if You else False))
    def test_everybody(self):
        # two literals
        self.assertLess('abc', 'def')
    def test_message(self):
        # keyword ``msg`` and legacy positional message variants
        self.assertLess((123 + z), (xxx + z), msg='This is wrong!')
        self.assertLess(123, (xxx + z), 'This is wrong!')
def train(args, model, device, train_loader, optimizer, scheduler, epoch):
    """Run one training epoch.

    For every batch: move tensors to `device`, forward, backprop, then step
    both the optimizer and the LR scheduler. Progress is printed every
    `args.log_interval` batches; `args.dry_run` stops after the first report.
    """
    model.train()
    dataset_size = len(train_loader.dataset)
    num_batches = len(train_loader)
    for step, batch in enumerate(train_loader):
        input_ids, attention_mask, token_type_ids, cate = (t.to(device) for t in batch)
        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask, token_type_ids, cate)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        scheduler.step()  # per-batch LR schedule
        if step % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(input_ids), dataset_size,
                100.0 * step / num_batches, loss.item()))
            if args.dry_run:
                # Smoke-test mode: bail out after the first logged batch.
                break
class ViewProviderAsmRelation(ViewProviderAsmBase):
    """View provider for AsmRelation objects.

    Relations are read-only drop targets; their tree children are the
    members of the underlying object's Group.
    """

    def canDropObjects(self):
        # Relations never accept dropped objects.
        return False

    def canDelete(self, _obj):
        return True

    def claimChildren(self):
        return self.ViewObject.Object.Group

    def getDetailPath(self, subname, path, append):
        view = self.ViewObject
        dot = subname.find('.')
        if dot > 0:
            owner = view.Object
            resolved = owner.getSubObject(subname[:dot + 1], retType=1)
            if resolved != owner:
                # Rewrite the leading component: AsmRelation children are
                # addressed by their Index; anything else is discarded.
                if isTypeOf(resolved, AsmRelation):
                    subname = str(resolved.Index) + subname[dot:]
                else:
                    subname = ''
        return view.getDetailPath(subname, path, append)
def coco_score(refs, pred, scorer):
    """Estimate the marginal benefit of scoring against all references.

    Rotates through the per-audio reference captions in a leave-one-out
    fashion: each round pops one caption out of every reference list, scores
    `pred` against the reduced references, and accumulates the result. The
    returned value is `score(all refs) - mean(score(refs minus one))`.
    Bleu scorers yield a length-4 array (one entry per n-gram order);
    other scorers yield a scalar.

    NOTE(review): assumes every key in `refs` has the same number of
    captions (`num_cap_per_audio`) -- confirm with the caller.
    """
    # Bleu reports 4 n-gram scores, so accumulate a vector in that case.
    if (scorer.method() == 'Bleu'):
        scores = np.array([0.0 for n in range(4)])
    else:
        scores = 0
    num_cap_per_audio = len(refs[list(refs.keys())[0]])
    for i in range(num_cap_per_audio):
        # Re-insert the caption popped in the previous round at the front
        # (`res` exists only once i > 0), then pop the last caption out for
        # this round -- together this rotates which caption is held out.
        if (i > 0):
            for key in refs:
                refs[key].insert(0, res[key][0])
        res = {key: [refs[key].pop()] for key in refs}
        (score, _) = scorer.compute_score(refs, pred)
        if (scorer.method() == 'Bleu'):
            scores += np.array(score)
        else:
            scores += score
    # Average of the leave-one-out scores.
    score = (scores / num_cap_per_audio)
    # Restore the final popped caption so `refs` is complete again
    # (NOTE(review): the list order is permuted relative to the input).
    for key in refs:
        refs[key].insert(0, res[key][0])
    (score_allref, _) = scorer.compute_score(refs, pred)
    diff = (score_allref - score)
    return diff
class EditableModule(object):
    """Base class for objects whose tensor "parameters" can be listed, read
    and written by attribute name, keyed by the method that uses them.

    Subclasses override ``getparamnames`` to declare which attributes a given
    method reads. The remaining machinery caches those names, deduplicates
    shared tensors, and provides ``assertparams`` -- a debugging check that
    the declared names match the tensors a method actually differentiates
    through.
    """
    def getparams(self, methodname: str) -> Sequence[torch.Tensor]:
        """Return the tensors (by declared name) that `methodname` uses."""
        paramnames = self.cached_getparamnames(methodname)
        return [get_attr(self, name) for name in paramnames]
    def setparams(self, methodname: str, *params) -> int:
        """Assign `params` back onto the declared attribute names.

        Returns the number of parameters passed in (even if fewer names
        were declared -- extra params are silently ignored by ``zip``).
        """
        paramnames = self.cached_getparamnames(methodname)
        for (name, val) in zip(paramnames, params):
            try:
                set_attr(self, name, val)
            except TypeError as e:
                # set_attr can fail on an existing incompatible target;
                # remove it first and retry -- presumably handles
                # nn.Parameter vs plain-tensor mismatches (TODO confirm).
                del_attr(self, name)
                set_attr(self, name, val)
        return len(params)
    def cached_getparamnames(self, methodname: str, refresh: bool=False) -> List[str]:
        """Memoized ``getparamnames``; `refresh` is currently unused."""
        if (not hasattr(self, '_paramnames_')):
            self._paramnames_: Dict[(str, List[str])] = {}
        if (methodname not in self._paramnames_):
            self._paramnames_[methodname] = self.getparamnames(methodname)
        return self._paramnames_[methodname]
    def getparamnames(self, methodname: str, prefix: str='') -> List[str]:
        """Return attribute names `methodname` depends on; subclasses must
        override (NOTE(review): the base returns None, not an empty list)."""
        pass
    def getuniqueparams(self, methodname: str, onlyleaves: bool=False) -> List[torch.Tensor]:
        """Return the declared tensors with duplicates (same object) removed;
        `onlyleaves` further restricts to leaf tensors."""
        allparams = self.getparams(methodname)
        idxs = self._get_unique_params_idxs(methodname, allparams)
        if onlyleaves:
            return [allparams[i] for i in idxs if allparams[i].is_leaf]
        else:
            return [allparams[i] for i in idxs]
    def setuniqueparams(self, methodname: str, *uniqueparams) -> int:
        """Inverse of ``getuniqueparams``: fan each unique tensor back out to
        every position that shared it, then assign via ``setparams``."""
        nparams = self._number_of_params[methodname]
        allparams = [None for _ in range(nparams)]
        maps = self._unique_params_maps[methodname]
        for j in range(len(uniqueparams)):
            jmap = maps[j]
            p = uniqueparams[j]
            for i in jmap:
                allparams[i] = p
        return self.setparams(methodname, *allparams)
    def _get_unique_params_idxs(self, methodname: str, allparams: Union[(Sequence[torch.Tensor], None)]=None) -> Sequence[int]:
        """Compute (and cache) the indices of the first occurrence of each
        distinct tensor object in the declared parameter list."""
        if (not hasattr(self, '_unique_params_idxs')):
            self._unique_params_idxs = {}
            self._unique_params_maps = {}
            self._number_of_params = {}
        if (methodname in self._unique_params_idxs):
            return self._unique_params_idxs[methodname]
        if (allparams is None):
            allparams = self.getparams(methodname)
        # Deduplicate by object identity: idxs holds first occurrences,
        # idx_map records every position each unique tensor appears at.
        ids = []
        idxs = []
        idx_map = []
        for i in range(len(allparams)):
            param = allparams[i]
            id_param = id(param)
            try:
                jfound = ids.index(id_param)
                idx_map[jfound].append(i)
                continue
            except ValueError:
                pass
            ids.append(id_param)
            idxs.append(i)
            idx_map.append([i])
        self._number_of_params[methodname] = len(allparams)
        self._unique_params_idxs[methodname] = idxs
        self._unique_params_maps[methodname] = idx_map
        return idxs
    def assertparams(self, method, *args, **kwargs):
        """Debug check that `method`'s declared parameter names are correct.

        Verifies (1) calling the method leaves the object's float tensors
        untouched and (2) the declared names match the tensors the method
        actually receives gradients through. Raises GetSetParamsError or
        warns on mismatch.
        """
        if (not inspect.ismethod(method)):
            raise TypeError('The input method must be a method')
        methodself = method.__self__
        if (methodself is not self):
            raise RuntimeError('The method does not belong to the same instance')
        methodname = method.__name__
        self.__assert_method_preserve(method, *args, **kwargs)
        self.__assert_get_correct_params(method, *args, **kwargs)
        print(('"%s" method check done' % methodname))
    def __assert_method_preserve(self, method, *args, **kwargs):
        """Check the method does not mutate any of the object's float tensors
        (count, shapes, and values must be identical before/after)."""
        (all_params0, names0) = _get_tensors(self)
        # Clone first so the comparison survives in-place mutation.
        all_params0 = [p.clone() for p in all_params0]
        method(*args, **kwargs)
        (all_params1, names1) = _get_tensors(self)
        clsname = method.__self__.__class__.__name__
        methodname = method.__name__
        msg = ("The method %s.%s does not preserve the object's float tensors: \n" % (clsname, methodname))
        if (len(all_params0) != len(all_params1)):
            msg += 'The number of parameters changed:\n'
            msg += ("* number of object's parameters before: %d\n" % len(all_params0))
            msg += ("* number of object's parameters after : %d\n" % len(all_params1))
            raise GetSetParamsError(msg)
        for (pname, p0, p1) in zip(names0, all_params0, all_params1):
            if (p0.shape != p1.shape):
                msg += ('The shape of %s changed\n' % pname)
                msg += ('* (before) %s.shape: %s\n' % (pname, p0.shape))
                msg += ('* (after ) %s.shape: %s\n' % (pname, p1.shape))
                raise GetSetParamsError(msg)
            if (not torch.allclose(p0, p1)):
                msg += ('The value of %s changed\n' % pname)
                msg += ('* (before) %s: %s\n' % (pname, p0))
                msg += ('* (after ) %s: %s\n' % (pname, p1))
                raise GetSetParamsError(msg)
    def __assert_get_correct_params(self, method, *args, **kwargs):
        """Cross-check declared names vs. the tensors the method actually
        uses: warn on missing names (used but undeclared) and on excess
        names (declared but unused)."""
        methodname = method.__name__
        clsname = method.__self__.__class__.__name__
        (all_params, all_names) = _get_tensors(self)
        def _get_tensor_name(param):
            # Reverse lookup: tensor object -> attribute name.
            for i in range(len(all_params)):
                if (id(all_params[i]) == id(param)):
                    return all_names[i]
            return None
        (oper_names, oper_params) = self.__list_operating_params(method, *args, **kwargs)
        user_names = self.getparamnames(method.__name__)
        user_params = [get_attr(self, name) for name in user_names]
        user_params_id = [id(p) for p in user_params]
        oper_params_id = [id(p) for p in oper_params]
        user_params_id_set = set(user_params_id)
        oper_params_id_set = set(oper_params_id)
        for i in range(len(user_params)):
            param = user_params[i]
            if ((not isinstance(param, torch.Tensor)) or (isinstance(param, torch.Tensor) and (param.dtype not in torch_float_type))):
                msg = ('Parameter %s is a non-floating point tensor' % user_names[i])
                raise GetSetParamsError(msg)
        missing_names = []
        for i in range(len(oper_names)):
            if (oper_params_id[i] not in user_params_id_set):
                missing_names.append(oper_names[i])
        if (len(missing_names) > 0):
            msg = ('getparams for %s.%s does not include: %s' % (clsname, methodname, ', '.join(missing_names)))
            warnings.warn(msg, stacklevel=3)
        excess_names = []
        for i in range(len(user_names)):
            if (user_params_id[i] not in oper_params_id_set):
                excess_names.append(user_names[i])
        if (len(excess_names) > 0):
            msg = ('getparams for %s.%s has excess parameters: %s' % (clsname, methodname, ', '.join(excess_names)))
            warnings.warn(msg, stacklevel=3)
    def __list_operating_params(self, method, *args, **kwargs):
        """Discover which of the object's float tensors the method actually
        differentiates through.

        Temporarily swaps every tensor for a grad-requiring clone, runs the
        method, backprops through the summed output, and keeps the tensors
        that received a gradient. The original tensors are restored before
        returning.
        """
        (all_tensors, all_names) = _get_tensors(self)
        copy_tensors0 = [tensor.clone().detach().requires_grad_() for tensor in all_tensors]
        copy_tensors = copy.copy(copy_tensors0)
        _set_tensors(self, copy_tensors)
        output = method(*args, **kwargs)
        if (not isinstance(output, torch.Tensor)):
            raise RuntimeError('The method to be asserted must have a tensor output')
        output = output.sum()
        grad_tensors = torch.autograd.grad(output, copy_tensors0, retain_graph=True, allow_unused=True)
        # Restore the original tensors before inspecting the gradients.
        all_tensors_copy = copy.copy(all_tensors)
        _set_tensors(self, all_tensors_copy)
        names = []
        params = []
        for (i, grad) in enumerate(grad_tensors):
            if (grad is None):
                # No gradient flowed through this tensor -> not operating.
                continue
            names.append(all_names[i])
            params.append(all_tensors[i])
        return (names, params)
class Migration(migrations.Migration):
    """Remove the obsolete ``VoteRange`` model and tidy up ``Vote``:
    human-readable verbose names plus a DB-level uniqueness constraint of
    one vote per (user, submission) pair."""
    dependencies = [('conferences', '0004_remove_conference_vote_range'), ('submissions', '0002_auto__0954'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('voting', '0004_auto__1733')]
    # Order matters: the model must be deleted before the options/constraint
    # changes are applied to the surviving Vote model.
    operations = [migrations.DeleteModel(name='VoteRange'), migrations.AlterModelOptions(name='vote', options={'verbose_name': 'Vote', 'verbose_name_plural': 'Votes'}), migrations.AlterUniqueTogether(name='vote', unique_together={('user', 'submission')})]
def _adjoint_final_soqs(cbloq: 'CompositeBloq', new_signature: Signature) -> Dict[(str, 'SoquetT')]:
    """Map the adjoint signature's RIGHT registers to their soquets.

    In the adjoint, the original LeftDangle connections become the final
    (right-side) soquets. Returns an empty dict when the bloq has no
    LeftDangle node at all.
    """
    if LeftDangle not in cbloq._binst_graph:
        return {}
    _, successors = _binst_to_cxns(LeftDangle, binst_graph=cbloq._binst_graph)
    return _cxn_to_soq_dict(
        new_signature.rights(),
        successors,
        get_me=lambda cxn: cxn.left,
        get_assign=lambda cxn: cxn.right,
    )
class webvision_dataloader():
    """Factory for WebVision / ImageNet DataLoaders.

    ``run(mode, ...)`` returns loaders per phase:
      - 'warmup' / 'warmup_sta': full training set ('_sta' doubles batch size)
      - 'train': (labeled, unlabeled) loader pair selected via `pred`/`prob`
      - 'test' / 'eval_train': evaluation loaders over WebVision data
      - 'imagenet' / 'train_imagenet': ImageNet validation / training loaders
    """
    def __init__(self, batch_size, num_batches, num_class, num_workers, root_dir, root_imagenet_dir, log):
        self.batch_size = batch_size
        self.num_class = num_class
        # Optional cap on samples per epoch: num_batches batches of batch_size.
        self.num_samples = (None if (num_batches is None) else (self.batch_size * num_batches))
        self.num_workers = num_workers
        self.root_dir = root_dir
        # BUG FIX: previously assigned `root_dir` here, silently discarding
        # the caller-supplied `root_imagenet_dir` argument.
        self.root_imagenet_dir = root_imagenet_dir
        self.log = log
        self.transform_train = transforms.Compose([transforms.CenterCrop(227), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        self.transform_test = transforms.Compose([transforms.CenterCrop(227), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        self.train_imagenet = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(227), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        self.transform_imagenet = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(227), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    def run(self, mode, pred=None, prob=None, paths=None):
        """Return the loader(s) for `mode`; see the class docstring.

        `pred`/`prob`/`paths` default to empty lists (replacing the previous
        mutable-default-argument hazard; behavior for callers is unchanged).
        """
        pred = [] if pred is None else pred
        prob = [] if prob is None else prob
        paths = [] if paths is None else paths
        if (mode == 'warmup'):
            all_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='all', num_class=self.num_class, num_samples=self.num_samples)
            trainloader = DataLoader(dataset=all_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
            return trainloader
        elif (mode == 'warmup_sta'):
            # Same data as 'warmup' but with a doubled batch size.
            all_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='all', num_class=self.num_class, num_samples=self.num_samples)
            trainloader = DataLoader(dataset=all_dataset, batch_size=(self.batch_size * 2), shuffle=True, num_workers=self.num_workers, pin_memory=True)
            return trainloader
        elif (mode == 'train'):
            labeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='labeled', num_class=self.num_class, pred=pred, probability=prob, paths=paths, log=self.log)
            labeled_trainloader = DataLoader(dataset=labeled_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
            unlabeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='unlabeled', num_class=self.num_class, pred=pred, paths=paths, log=self.log)
            unlabeled_trainloader = DataLoader(dataset=unlabeled_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
            return (labeled_trainloader, unlabeled_trainloader)
        elif (mode == 'test'):
            test_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='test', num_class=self.num_class)
            test_loader = DataLoader(dataset=test_dataset, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
            return test_loader
        elif (mode == 'eval_train'):
            # Evaluation over the full training set with the test transform.
            eval_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='all', num_class=self.num_class, num_samples=self.num_samples)
            eval_loader = DataLoader(dataset=eval_dataset, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
            return eval_loader
        elif (mode == 'imagenet'):
            imagenet_val = imagenet_dataset(root_dir=self.root_imagenet_dir, transform=self.transform_imagenet, num_class=self.num_class)
            imagenet_loader = DataLoader(dataset=imagenet_val, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
            return imagenet_loader
        elif (mode == 'train_imagenet'):
            imagenet_train = train_imagenet_dataset(root_dir=self.root_imagenet_dir, transform=self.train_imagenet, num_class=self.num_class)
            train_imagenet_loader = DataLoader(dataset=imagenet_train, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
            return train_imagenet_loader
class AccountSupportView(LoginRequiredMixin, FormView):
    """Display and handle the account support/contact form."""
    form_class = SupportForm
    template_name = 'adserver/accounts/support.html'
    success_url = reverse_lazy('dashboard-home')
    message_success = _('Thanks, we got your message and we will get back to you as soon as we can.')
    message_error = _('There was a problem sending your message.')

    def form_valid(self, form):
        # Deliver the message, then confirm to the user before redirecting.
        form.save()
        messages.success(self.request, self.message_success)
        return super().form_valid(form)

    def get(self, request, *args, **kwargs):
        # External submission handlers may bounce back with ?success=/?error=
        # flags; surface them as flash messages and strip the query string.
        flag = request.GET.get
        if flag('success') == 'true':
            messages.success(self.request, self.message_success)
        elif flag('error') == 'true':
            messages.error(self.request, self.message_error)
            log.warning('Error submitting support request form: %s', request.GET)
        else:
            return super().get(request, *args, **kwargs)
        return redirect(reverse('support'))

    def get_form_kwargs(self):
        # The form receives the request -- presumably to identify the
        # logged-in user (TODO confirm against SupportForm).
        form_kwargs = super().get_form_kwargs()
        form_kwargs['request'] = self.request
        return form_kwargs
def get_parser():
    """Build the CLI parser for trimming a spk2utt file to a subset of utts."""
    parser = argparse.ArgumentParser(
        description='Revise the spk2utt file: it only contans a subset of the utts',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # All three options are plain string paths.
    for flag, description in (
        ('--in-spk2utt', 'original spk2utt file'),
        ('--out-spk2utt', 'revised spk2utt file'),
        ('--subset-list', 'list of utt subset'),
    ):
        parser.add_argument(flag, type=str, help=description)
    return parser
class SQL2Text(datasets.GeneratorBasedBuilder):
    """Dataset builder pairing natural-language questions with SQL queries."""

    def _info(self):
        # FIX: the original line had an unterminated string literal
        # (`homepage=' citation=_CITATION)`), a SyntaxError. The homepage URL
        # itself was lost; restored valid syntax with an empty placeholder.
        # TODO(review): fill in the real homepage URL.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({'question': datasets.Value('string'), 'query': datasets.Value('string')}),
            supervised_keys=None,
            homepage='',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the raw files and wire up the train/dev/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train']}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['dev']}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files['test']})]

    def _generate_examples(self, filepath):
        """Yield (index, {'question', 'query'}) pairs from a JSON list file."""
        with open(filepath, encoding='utf-8') as f:
            reader = json.load(f)
            for (example_idx, example) in enumerate(reader):
                (yield (example_idx, {'question': example['question'], 'query': example['query']}))
def main(args):
    """Enhance every noisy wav in ``args.noisy_path`` with a MANNER model.

    Loads the checkpoint ``./weights/<args.model_name>``, resamples inputs to
    16 kHz when needed, and writes ``<name>_enhanced.wav`` next to the inputs.
    """
    seed_init()
    # Choose the architecture from the checkpoint name; base and large share
    # the MANNER_BASE class and differ only in `hidden`.
    if ('base' in args.model_name):
        model = MANNER_BASE(in_channels=1, out_channels=1, hidden=60, depth=4, kernel_size=8, stride=4, growth=2, head=1, segment_len=64).to(args.device)
    elif ('large' in args.model_name):
        model = MANNER_BASE(in_channels=1, out_channels=1, hidden=120, depth=4, kernel_size=8, stride=4, growth=2, head=1, segment_len=64).to(args.device)
    elif ('small' in args.model_name):
        model = MANNER_SMALL(in_channels=1, out_channels=1, hidden=60, depth=4, kernel_size=8, stride=4, growth=2, head=1, segment_len=64).to(args.device)
    # NOTE(review): if model_name contains none of 'base'/'large'/'small',
    # `model` is unbound and the load below raises NameError.
    checkpoint = torch.load(f'./weights/{args.model_name}')
    model.load_state_dict(checkpoint['state_dict'])
    print(f'--- Load {args.model_name} weights ---')
    model.eval()
    with torch.no_grad():
        # NOTE(review): output is written into the *noisy* input directory --
        # confirm this is intended rather than a separate output folder.
        output_path = args.noisy_path
        noisy_list = os.listdir(args.noisy_path)
        for n_file in tqdm(noisy_list):
            (noisy, sr) = torchaudio.load(f'{args.noisy_path}/{n_file}')
            # The model expects 16 kHz audio; resample anything else.
            if (sr != 16000):
                tf = torchaudio.transforms.Resample(sr, 16000)
                noisy = tf(noisy)
            # Add a batch dimension before inference.
            noisy = noisy.unsqueeze(0).to(args.device)
            enhanced = model(noisy)
            enhanced = enhanced.squeeze(0).detach().cpu()
            save_name = (n_file.split('.')[0] + '_enhanced.wav')
            torchaudio.save(f'{output_path}/{save_name}', enhanced, 16000)
class RegNet(nn.Module):
    """RegNet backbone: a stem conv followed by stages derived from the
    quantized-linear width parameterization in `cfg`, ending in a
    classifier head.

    NOTE(review): relies on project helpers (ConvBnAct, RegStage,
    ClassifierHead, generate_regnet, adjust_widths_groups_comp) whose exact
    contracts are not visible here.
    """
    def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, drop_path_rate=0.0, zero_init_last_bn=True):
        super().__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        # Only these overall downsampling factors are supported.
        assert (output_stride in (8, 16, 32))
        stem_width = cfg['stem_width']
        self.stem = ConvBnAct(in_chans, stem_width, 3, stride=2)
        self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')]
        prev_width = stem_width
        curr_stride = 2
        stage_params = self._get_stage_params(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate)
        se_ratio = cfg['se_ratio']
        # Build stages s1..sN and record feature metadata for each.
        for (i, stage_args) in enumerate(stage_params):
            stage_name = 's{}'.format((i + 1))
            self.add_module(stage_name, RegStage(prev_width, **stage_args, se_ratio=se_ratio))
            prev_width = stage_args['out_chs']
            curr_stride *= stage_args['stride']
            self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)]
        self.num_features = prev_width
        self.head = ClassifierHead(in_chs=prev_width, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate)
        # Standard conv-net weight init.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, mean=0.0, std=0.01)
                nn.init.zeros_(m.bias)
        # Optionally zero the last BN gamma in each residual block (modules
        # opt in by exposing a zero_init_last_bn() hook).
        if zero_init_last_bn:
            for m in self.modules():
                if hasattr(m, 'zero_init_last_bn'):
                    m.zero_init_last_bn()
    def _get_stage_params(self, cfg, default_stride=2, output_stride=32, drop_path_rate=0.0):
        """Derive per-stage kwargs (widths, strides, dilations, depths,
        drop-path rates) from the RegNet cfg.

        Once the cumulative stride reaches `output_stride`, further stages
        switch to stride 1 with increasing dilation to keep the spatial
        resolution fixed.
        """
        (w_a, w_0, w_m, d) = (cfg['wa'], cfg['w0'], cfg['wm'], cfg['depth'])
        (widths, num_stages, _, _) = generate_regnet(w_a, w_0, w_m, d)
        # Collapse per-block widths to per-stage (width, depth) pairs.
        (stage_widths, stage_depths) = np.unique(widths, return_counts=True)
        stage_groups = [cfg['group_w'] for _ in range(num_stages)]
        stage_bottle_ratios = [cfg['bottle_ratio'] for _ in range(num_stages)]
        stage_strides = []
        stage_dilations = []
        net_stride = 2
        dilation = 1
        for _ in range(num_stages):
            if (net_stride >= output_stride):
                # Trade stride for dilation beyond the requested output stride.
                dilation *= default_stride
                stride = 1
            else:
                stride = default_stride
            net_stride *= stride
            stage_strides.append(stride)
            stage_dilations.append(dilation)
        # Split the linear drop-path ramp into per-stage chunks.
        stage_dpr = np.split(np.linspace(0, drop_path_rate, d), np.cumsum(stage_depths[:(- 1)]))
        (stage_widths, stage_groups) = adjust_widths_groups_comp(stage_widths, stage_bottle_ratios, stage_groups)
        param_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_width', 'drop_path_rates']
        stage_params = [dict(zip(param_names, params)) for params in zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_bottle_ratios, stage_groups, stage_dpr)]
        return stage_params
    def get_classifier(self):
        """Return the final classification layer."""
        return self.head.fc
    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the head for a new number of classes / pooling type."""
        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
    def forward_features(self, x):
        # Run every child module except the last one (the classifier head).
        for block in list(self.children())[:(- 1)]:
            x = block(x)
        return x
    def forward(self, x):
        for block in self.children():
            x = block(x)
        return x
# FIX: the decorator line was garbled to a bare `(frozen=True)`, which is a
# SyntaxError. Restored as `@dataclass(frozen=True)`.
# NOTE(review): confirm against upstream that the original decorator was
# dataclass (and not e.g. attrs' `@define`). The import is placed here
# because the file's import block is outside this chunk.
from dataclasses import dataclass


@dataclass(frozen=True)
class GenericParamRC(LocatedRequestChecker):
    """Request checker that accepts only the generic parameter at `pos`."""
    LOCATION = GenericParamLoc
    pos: int

    def _check_location(self, mediator: DirectMediator, loc: GenericParamLoc) -> None:
        # Silently accept a matching position; any other position is refused.
        if loc.generic_pos == self.pos:
            return
        raise CannotProvide(f'Generic param position {loc.generic_pos} must be equal to {self.pos}')
def test_flops_counter():
    """End-to-end checks for get_model_complexity_info."""
    # Invalid input_res values must be rejected: a 4-element shape (batch
    # dim included) and an empty tuple both trigger the assertion guard.
    with pytest.raises(AssertionError):
        get_model_complexity_info(nn.Conv2d(3, 8, 3), [1, 3, 16, 16])
    with pytest.raises(AssertionError):
        get_model_complexity_info(nn.Conv2d(3, 8, 3), tuple())
    # Every ground-truth entry must reproduce its recorded flops/params.
    for case in gt_results:
        flops, params = get_model_complexity_info(
            case['model'], case['input'], as_strings=False, print_per_layer_stat=False)
        assert flops == case['flops'] and params == case['params']
    # Custom input_constructor path.
    flops, params = get_model_complexity_info(
        ExampleModel(), (3, 16, 16), as_strings=False,
        print_per_layer_stat=False, input_constructor=input_constructor)
    assert flops == 43904.0 and params == 224.0
    # String-formatted output (default as_strings=True).
    flops, params = get_model_complexity_info(
        nn.Conv3d(3, 8, 3), (3, 3, 512, 512), print_per_layer_stat=False)
    assert flops == '0.17 GFLOPs' and params == str(656)
    # Per-layer report written to a custom output stream.
    report = StringIO()
    get_model_complexity_info(nn.Conv1d(3, 8, 3), (3, 16), ost=report)
    assert report.getvalue() == 'Conv1d(0.0 M, 100.000% Params, 0.0 GFLOPs, 100.000% FLOPs, 3, 8, kernel_size=(3,), stride=(1,))\n'
    # Composite model with per-layer stats enabled.
    composite = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Flatten(), nn.Linear(1568, 2))
    flops, params = get_model_complexity_info(
        composite, (3, 16, 16), as_strings=False, print_per_layer_stat=True)
    assert flops == 47040.0 and params == 3362
class NERModel(nn.Module):
    """Token classifier: pretrained encoder + dropout + linear head.

    ``forward`` returns ``(logits,)`` -- flattened to (tokens, num_class) --
    or ``(loss, logits)`` when labels are provided.
    """
    def __init__(self, args):
        super().__init__()
        self.args = args
        # Config is loaded separately to obtain hidden_size for the head.
        config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=args.num_class)
        self.model = AutoModel.from_pretrained(args.model_name_or_path)
        self.dropout = nn.Dropout(args.dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, args.num_class)
        # Positions labeled -1 (padding/special tokens) are excluded from loss.
        self.loss_fnt = nn.CrossEntropyLoss(ignore_index=(- 1))

    def forward(self, input_ids, attention_mask, labels=None):
        hidden, *_ = self.model(input_ids, attention_mask, return_dict=False)
        logits = self.classifier(self.dropout(hidden))
        logits = logits.view((- 1), self.args.num_class)
        outputs = (logits,)
        if labels is not None:
            loss = self.loss_fnt(logits, labels.view((- 1)))
            outputs = ((loss,) + outputs)
        return outputs
def test_switch_case_all_call_inputs():
    """`switch` must resolve every templated field of the winning case's
    call spec from the context and raise a Call carrying those values.

    Here case1 is False and case2 is True, so the second case's dict-style
    call (groups/success/failure all templated) should win; the default
    branch must not be taken.
    """
    with pytest.raises(Call) as err:
        switch(context=Context({'list': 'sg1', 'def': 'sg3', 'sg2_input': 'sg2', 'case1': False, 'case2': True, 'sg': 'sgv', 'fg': 'fgv', 'switch': [{'case': '{case1}', 'call': '{list}'}, {'case': '{case2}', 'call': {'groups': '{sg2_input}', 'success': '{sg}', 'failure': '{fg}'}}, {'default': {'groups': ['{def}', 'sg4'], 'success': '{sg}d', 'failure': '{fg}d'}}]}), name='blah')
    cof = err.value
    assert isinstance(cof, Call)
    # Templated fields resolved from the context: {sg2_input}/{sg}/{fg}.
    assert (cof.groups == ['sg2'])
    assert (cof.success_group == 'sgv')
    assert (cof.failure_group == 'fgv')
    # The raised Call must also preserve the raw, unresolved switch config.
    assert (cof.original_config == ('switch', [{'case': '{case1}', 'call': '{list}'}, {'case': '{case2}', 'call': {'groups': '{sg2_input}', 'success': '{sg}', 'failure': '{fg}'}}, {'default': {'groups': ['{def}', 'sg4'], 'success': '{sg}d', 'failure': '{fg}d'}}]))
def find_similar_names(name: str, names: list[str]) -> list[str]:
    """Return entries of `names` resembling `name`, best matches first.

    A candidate qualifies if its Levenshtein distance is at most len(name)/3
    or if `name` occurs in it as a substring. Results are ordered by
    (edit distance, substring position); pure fuzzy matches sort after
    substring matches of equal distance via an infinite position.
    """
    threshold = 1000.0
    ranking: dict[str, tuple] = {}
    for candidate in names:
        distance = Levenshtein.distance(name, candidate)
        close_enough = distance <= len(name) / 3
        position = candidate.find(name)
        if close_enough or position != -1:
            ranking[candidate] = (distance, position if position != -1 else float('inf'))
    # NOTE(review): with threshold = 1000.0 this keeps everything with
    # distance < 2000 -- effectively a no-op filter; looks like leftover
    # tuning code, kept for behavioral parity.
    ranking = {key: rank for key, rank in ranking.items() if rank[0] < 2 * threshold}
    return sorted(ranking, key=ranking.get)
# FIX: both decorator lines were garbled -- they began with `.unit()` and
# `.parametrize(...)`, missing the `@pytest.mark` prefix (a SyntaxError).
# Restored as pytest marks. NOTE(review): confirm `unit` is a registered
# custom marker in this project's pytest configuration.
@pytest.mark.unit()
@pytest.mark.parametrize(('path_1', 'path_2', 'expectation', 'expected'), [pytest.param(PurePosixPath('relative_1'), PurePosixPath('/home/relative_2'), pytest.raises(ValueError, match="Can't mix absolute and relative paths"), None, id='test path 1 is relative'), pytest.param(PureWindowsPath('C:/home/relative_1'), PureWindowsPath('relative_2'), pytest.raises(ValueError, match="Can't mix absolute and relative paths"), None, id='test path 2 is relative', marks=pytest.mark.skipif((sys.platform != 'win32'), reason='fails on UNIX.')), pytest.param(PurePosixPath('/home/user/folder_a'), PurePosixPath('/home/user/folder_b/sub_folder'), does_not_raise(), Path('/home/user'), id='normal behavior with UNIX paths'), pytest.param(PureWindowsPath('C:\\home\\user\\folder_a'), PureWindowsPath('C:\\home\\user\\folder_b\\sub_folder'), does_not_raise(), PureWindowsPath('C:\\home\\user'), id='normal behavior with Windows paths', marks=pytest.mark.skipif((sys.platform != 'win32'), reason='fails on UNIX.')), pytest.param(PureWindowsPath('C:\\home\\user\\folder_a'), PureWindowsPath('D:\\home\\user\\folder_b\\sub_folder'), pytest.raises(ValueError, match="Paths don't have the same drive"), None, id='no common ancestor', marks=pytest.mark.skipif((sys.platform != 'win32'), reason='fails on UNIX.'))])
def test_find_common_ancestor(path_1, path_2, expectation, expected):
    """find_common_ancestor: mixing absolute/relative paths or different
    drives raises ValueError; otherwise returns the deepest shared dir."""
    with expectation:
        result = find_common_ancestor(path_1, path_2)
        assert (result == expected)
class AudioFile(dict, ImageContainer, HasKey):
fill_metadata = False
fill_length = False
multisong = False
streamsong = False
can_add = True
is_file = True
format = 'Unknown Audio File'
supports_rating_and_play_count_in_file = False
mimes: list[str] = []
_property
def _date_format(self) -> str:
return config.gettext('settings', 'datecolumn_timestamp_format')
def __init__(self, default=(), **kwargs):
for (key, value) in dict(default).items():
self[key] = value
for (key, value) in kwargs.items():
self[key] = value
def __song_key(self):
return (self('~#disc', 0), self('~#track', 0), human(self('artistsort')), self.get('musicbrainz_artistid', ''), human(self.get('title', '')), self.get('~filename'))
_property
def album_key(self) -> AlbumKey:
id_val: str = (self.get('album_grouping_key') or self.get('labelid') or self.get('musicbrainz_albumid', ''))
return (id_val, human(self('albumsort', '')), human(self('albumartistsort', '')))
_property
def sort_key(self):
return [self.album_key, self.__song_key()]
def sort_by_func(tag):
def artist_sort(song):
return song.sort_key[1][2]
if callable(tag):
return (lambda song: human(tag(song)))
elif (tag == 'artistsort'):
return artist_sort
elif (tag in FILESYSTEM_TAGS):
return (lambda song: fsn2text(song(tag)))
elif (tag.startswith('~#') and ('~' not in tag[2:])):
return (lambda song: song(tag, 0))
return (lambda song: human(song(tag)))
def __getstate__(self):
pass
def __setstate__(self, state):
pass
def __setitem__(self, key, value):
if (not isinstance(key, str)):
raise TypeError('key has to be str')
if key.startswith('~#'):
if (not isinstance(value, (int | float))):
raise TypeError
elif (key in FILESYSTEM_TAGS):
if (not isinstance(value, fsnative)):
value = path2fsn(value)
else:
value = str(value)
dict.__setitem__(self, key, value)
pop = self.__dict__.pop
pop('album_key', None)
pop('sort_key', None)
def __delitem__(self, key):
dict.__delitem__(self, key)
pop = self.__dict__.pop
pop('album_key', None)
pop('sort_key', None)
def key(self) -> K:
return self['~filename']
def mountpoint(self):
return self['~mountpoint']
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return (self is other)
def __lt__(self, other):
return (self.sort_key < other.sort_key)
def __ne__(self, other):
return (self is not other)
def has_rating_and_playcount_in_file(self, email):
return False
def reload(self):
backup = dict(self)
fn = self['~filename']
saved = {}
persisted = config.getboolean('editing', 'save_to_songs')
persisted_keys = ({'~#rating', '~#playcount'} if (self.supports_rating_and_play_count_in_file and persisted) else set())
for key in self:
if (key in (MIGRATE - persisted_keys)):
saved[key] = self[key]
self.clear()
self['~filename'] = fn
try:
self.__init__(fn)
except AudioFileError:
self.update(backup)
raise
else:
self.update(saved)
def realkeys(self):
return [s for s in self.keys() if (s[:1] != '~')]
def prefixkeys(self, prefix):
l = []
for k in self:
if k.startswith(prefix):
if ((k == prefix) or k.startswith((prefix + ':'))):
l.append(k)
return l
def _prefixvalue(self, tag):
return '\n'.join(self.list_unique(sorted(self.prefixkeys(tag))))
def iterrealitems(self):
    """Return an iterator over (key, value) pairs of real (non-"~") tags."""
    return (item for item in self.items() if item[0][:1] != '~')
def __call__(self, key, default: Any='', connector=' - ', joiner=', '):
    """Look up *key*, synthesizing internal ("~"-prefixed) tags.

    Args:
        key: tag name. "~" marks internal/derived tags, "~#" numeric
            ones, and "~a~b" a tied tag whose parts are joined.
        default: returned when the tag is missing or not computable.
        connector: joins the parts of a tied tag.
        joiner: joins multiple values of one part of a tied tag.

    Returns:
        str for text tags; int/float for numeric ("#") tags.
    """
    real_key = key
    if (key[:1] == '~'):
        key = key[1:]
        if ('~' in key):
            # tied tag, e.g. "~artist~title"
            values = []
            sub_tags = util.tagsplit(real_key)
            # a single sub-tag keeps its values newline-separated
            j = (joiner if (len(sub_tags) > 1) else '\n')
            for t in sub_tags:
                vs = [decode_value(real_key, v) for v in self.list(t)]
                v = j.join(vs)
                if v:
                    values.append(v)
            return (connector.join(values) or default)
        elif (key == '#track'):
            try:
                # "3/12" -> 3
                return int(self['tracknumber'].split('/')[0])
            except (ValueError, TypeError, KeyError):
                return default
        elif (key == '#disc'):
            try:
                return int(self['discnumber'].split('/')[0])
            except (ValueError, TypeError, KeyError):
                return default
        elif (key == 'length'):
            length = self.get('~#length')
            if (length is None):
                return default
            else:
                return util.format_time_display(length)
        elif (key == '#rating'):
            return dict.get(self, real_key, config.RATINGS.default)
        elif (key == 'rating'):
            return util.format_rating(self('~#rating'))
        elif (key == 'people'):
            return ('\n'.join(self.list_unique(PEOPLE)) or default)
        elif (key == 'people:real'):
            # drop "Various Artists"-style placeholders if real names exist
            unique = self.list_unique(PEOPLE)
            for val in VARIOUS_ARTISTS_VALUES:
                if ((len(unique) > 1) and (val in unique)):
                    unique.remove(val)
            return ('\n'.join(unique) or default)
        elif (key == 'people:roles'):
            return (self._role_call('performer', PEOPLE) or default)
        elif (key == 'peoplesort'):
            return ('\n'.join(self.list_unique(PEOPLE_SORT)) or self('~people', default, connector))
        elif (key == 'peoplesort:roles'):
            return (self._role_call('performersort', PEOPLE_SORT) or self('~peoplesort', default, connector))
        elif (key in ('performers', 'performer')):
            return (self._prefixvalue('performer') or default)
        elif (key in ('performerssort', 'performersort')):
            # Bug fix: fall back to the non-sort tag by *stripping* the
            # trailing "sort" (real_key[:-4] -> "~performers"/"~performer").
            # The previous real_key[-4:] passed the literal string "sort",
            # which is not a valid tag (cf. the ":roles" branch below,
            # which removes "sort" via replace()).
            return (self._prefixvalue('performersort') or self(real_key[:(- 4)], default, connector))
        elif (key in ('performers:roles', 'performer:roles')):
            return (self._role_call('performer') or default)
        elif (key in ('performerssort:roles', 'performersort:roles')):
            return (self._role_call('performersort') or self(real_key.replace('sort', ''), default, connector))
        elif (key == 'basename'):
            return (os.path.basename(self['~filename']) or self['~filename'])
        elif (key == 'dirname'):
            return (os.path.dirname(self['~filename']) or self['~filename'])
        elif (key == 'uri'):
            try:
                return self['~uri']
            except KeyError:
                return fsn2uri(self['~filename'])
        elif (key == 'format'):
            return self.get('~format', str(self.format))
        elif (key == 'codec'):
            codec = self.get('~codec')
            if (codec is None):
                return self('~format')
            return codec
        elif (key == 'encoding'):
            encoding = '\n'.join((part for part in [self.get('~encoding'), self.get('encodedby')] if part))
            return (encoding or default)
        elif (key == 'language'):
            codes = self.list('language')
            if (not codes):
                return default
            # translate ISO codes to readable names where possible
            return '\n'.join(((iso639.translate(c) or c) for c in codes))
        elif (key == 'bitrate'):
            return util.format_bitrate(self('~#bitrate'))
        elif (key == '#date'):
            date = self.get('date')
            if (date is None):
                return default
            return util.date_key(date)
        elif (key == 'year'):
            return self.get('date', default)[:4]
        elif (key == '#year'):
            try:
                return int(self.get('date', default)[:4])
            except (ValueError, TypeError, KeyError):
                return default
        elif (key == 'originalyear'):
            return self.get('originaldate', default)[:4]
        elif (key == '#originalyear'):
            try:
                return int(self.get('originaldate', default)[:4])
            except (ValueError, TypeError, KeyError):
                return default
        elif (key == '#tracks'):
            try:
                # "3/12" -> 12
                return int(self['tracknumber'].split('/')[1])
            except (ValueError, IndexError, TypeError, KeyError):
                return default
        elif (key == '#discs'):
            try:
                return int(self['discnumber'].split('/')[1])
            except (ValueError, IndexError, TypeError, KeyError):
                return default
        elif (key == 'lyrics'):
            # embedded lyrics win; otherwise read the external lyrics file
            try:
                return self['lyrics']
            except KeyError:
                pass
            try:
                return self['unsyncedlyrics']
            except KeyError:
                pass
            lyric_filename = self.lyric_filename
            if (not lyric_filename):
                return default
            try:
                with open(lyric_filename, 'rb') as fileobj:
                    print_d(f'Reading lyrics from {lyric_filename!r}')
                    text = fileobj.read().decode('utf-8', 'replace')
                    # NUL bytes indicate a binary/bogus file
                    if ('\x00' in text):
                        return default
                    return text
            except (OSError, UnicodeDecodeError):
                return default
        elif (key == 'filesize'):
            return util.format_size(self('~#filesize', 0))
        elif (key == 'playlists'):
            from quodlibet import app
            lib = app.library
            if (not lib):
                return ''
            playlists = lib.playlists.playlists_featuring(self)
            return ('\n'.join((s.name for s in playlists)) or default)
        elif key.startswith('#replaygain_'):
            try:
                # e.g. "-6.20 dB" -> -6.2
                val = self.get(key[1:], default)
                return round(float(val.split(' ')[0]), 2)
            except (ValueError, TypeError, AttributeError):
                return default
        elif (real_key in HUMAN_TO_NUMERIC_TIME_TAGS):
            # human-readable variant of a numeric timestamp tag
            time_value = float(self.get(HUMAN_TO_NUMERIC_TIME_TAGS[real_key], 0))
            return format_date(time_value, self._date_format)
        elif (key[:1] == '#'):
            # generic numeric tag: stored value, implicit zero default,
            # or coercion of the same-named text tag
            if (real_key in self):
                return self[real_key]
            elif (real_key in NUMERIC_ZERO_DEFAULT):
                return 0
            else:
                try:
                    val = self[real_key[2:]]
                except KeyError:
                    return default
                try:
                    return int(val)
                except ValueError:
                    try:
                        return float(val)
                    except ValueError:
                        return default
        elif (key == 'json'):
            return json.dumps(self, sort_keys=True)
        else:
            return dict.get(self, real_key, default)
    elif (key == 'title'):
        title = dict.get(self, 'title')
        if (title is None):
            # render the configured fallback pattern, else the basename
            unknown_track_template = _(config.gettext('browsers', 'missing_title_template'))
            from quodlibet.pattern import Pattern
            try:
                pattern = Pattern(unknown_track_template)
            except ValueError:
                title = decode_value('~basename', self('~basename'))
            else:
                title = (pattern % self)
        return title
    elif (key in SORT_TO_TAG):
        # a sort tag falls back to its display counterpart
        try:
            return self[key]
        except KeyError:
            key = SORT_TO_TAG[key]
    return dict.get(self, key, default)
def _role_call(self, role_tag, sub_keys=None):
    """Build a newline-joined "Name (Role1, Role2)" people listing.

    Args:
        role_tag: prefixed tag carrying roles, e.g. "performer"
            (sub-tags such as "performer:vocals" name the role).
        sub_keys: optional sequence of related people tags whose values
            are included as well; when None only *role_tag* and its
            sub-tags contribute names.

    Returns:
        One description per unique name, roles sorted and capitalized.
    """
    role_tag_keys = self.prefixkeys(role_tag)
    role_map = {}
    for key in role_tag_keys:
        if (key == role_tag):
            if (sub_keys is None):
                # the bare tag carries no role information in this mode
                continue
            else:
                role = TAG_ROLES.get(role_tag, role_tag)
        else:
            # "performer:vocals" -> role "vocals"
            role = key.split(':', 1)[(- 1)]
        for name in self.list(key):
            role_map.setdefault(name, []).append(role)
    if (sub_keys is None):
        names = self.list_unique(role_tag_keys)
    else:
        names = self.list_unique(sub_keys)
        # Bug fix: this loop must only run when sub_keys is given.
        # Previously it sat outside the else branch and iterated None,
        # raising TypeError for calls like _role_call('performer').
        for tag in sub_keys:
            if (tag in TAG_ROLES):
                for name in self.list(tag):
                    role_map.setdefault(name, []).append(TAG_ROLES[tag])
    descs = []
    for name in names:
        roles = role_map.get(name, [])
        if (not roles):
            descs.append(name)
        else:
            roles = sorted(map(capitalize, roles))
            descs.append(f"{name} ({', '.join(roles)})")
    return '\n'.join(descs)
def lyric_filename(self) -> (str | None):
    """Find this song's lyrics file, or a sensible default path.

    Tries every configured root path x filename pattern (with pattern
    expansion), then the same candidates with alternative extensions;
    if nothing exists, returns the first candidate (useful as a save
    target for new lyrics).

    NOTE(review): read as an attribute (``self.lyric_filename``) in
    __call__'s lyrics branch, so upstream this is presumably decorated
    as a (cached) property — decorator not visible here.
    """
    from quodlibet.pattern import ArbitraryExtensionFileFromPattern
    # matches an unescaped <...> pattern section without path separators
    rx_params = re.compile((('[^\\\\]<[^' + re.escape(os.sep)) + ']*[^\\\\]>'))
    def expand_pathfile(rpf):
        """Expand user dirs and <...> patterns; on Windows also try an
        escaped variant of the path end."""
        expanded = []
        root = os.path.expanduser(rpf.root)
        pathfile = os.path.expanduser(rpf.pathfile)
        if rx_params.search(pathfile):
            root = ArbitraryExtensionFileFromPattern(root).format(self)
            pathfile = ArbitraryExtensionFileFromPattern(pathfile).format(self)
        rpf = RootPathFile(root, pathfile)
        expanded.append(rpf)
        if ((not os.path.exists(pathfile)) and is_windows()):
            pathfile = os.path.sep.join([rpf.root, rpf.end_escaped])
            rpf = RootPathFile(rpf.root, pathfile)
            expanded.insert((len(expanded) - 1), rpf)
        return expanded
    def sanitise(sep, parts):
        # strip path separators and cap length so each part stays a
        # single path component
        return sep.join((part.replace(os.path.sep, '')[:128] for part in parts))
    # candidate roots: configured paths, ~/.lyrics, the song's own dir
    lyric_paths = config.getstringlist('memory', 'lyric_rootpaths', [])
    lyric_paths.append(os.path.join(get_home_dir(), '.lyrics'))
    lyric_paths.append(os.path.join(os.path.dirname(self.comma('~filename'))))
    # candidate names: configured patterns plus "<artist>/<title>.lyric"
    # and "<artist> - <title>.lyric"
    lyric_filenames = config.getstringlist('memory', 'lyric_filenames', [])
    lyric_filenames.append((sanitise(os.sep, [(self.comma('lyricist') or self.comma('artist')), self.comma('title')]) + '.lyric'))
    lyric_filenames.append((sanitise(' - ', [(self.comma('lyricist') or self.comma('artist')), self.comma('title')]) + '.lyric'))
    # ordered, de-duplicated cross product of roots x names
    pathfiles = OrderedDict()
    for r in lyric_paths:
        for f in lyric_filenames:
            pathfile = os.path.join(r, os.path.dirname(f), fsnative(os.path.basename(f)))
            rpf = RootPathFile(r, pathfile)
            if (pathfile not in pathfiles):
                pathfiles[pathfile] = rpf
    match_ = None
    pathfiles_expanded = OrderedDict()
    for (_pf, rpf) in pathfiles.items():
        for rpf in expand_pathfile(rpf):
            pathfile = rpf.pathfile
            pathfiles_expanded[pathfile] = rpf
            if os.path.exists(pathfile):
                match_ = pathfile
                break
        if match_:
            break
    if (not match_):
        # second pass: same candidates with swapped extensions
        lyric_extensions = ['lyric', 'lyrics', '', 'txt']
        def generate_mod_ext_paths(pathfile):
            # replace the current extension with each alternative one
            ext = os.path.splitext(pathfile)[1][1:]
            path = (pathfile[:((- 1) * len(ext))].strip('.') if ext else pathfile)
            extra_extensions = [x for x in lyric_extensions if (x != ext)]
            return [('.'.join([path, ext]) if ext else path) for ext in extra_extensions]
        for pathfile in pathfiles_expanded.keys():
            paths_mod_ext = generate_mod_ext_paths(pathfile)
            for path_ext in paths_mod_ext:
                if os.path.exists(path_ext):
                    match_ = path_ext
                    break
            if match_:
                break
    if (not match_):
        # nothing exists: fall back to the first candidate path
        match_ = list(pathfiles_expanded.keys())[0]
    return match_
def has_rating(self):
    """Whether an explicit '~#rating' value is stored."""
    return self.get('~#rating', None) is not None
def remove_rating(self):
    """Drop any stored '~#rating'; no-op when absent."""
    self.pop('~#rating', None)
def comma(self, key):
    """Return the value(s) of *key* as one comma-separated string.

    Numeric values are returned unchanged; multi-value (newline
    separated) strings are collapsed with ", ".
    """
    if '~' in key or key == 'title':
        # derived/internal tags go through __call__ so they get computed
        if key in FILESYSTEM_TAGS:
            value = fsn2text(self(key, fsnative()))
        else:
            value = self(key, '')
    else:
        value = self.get(key, '')
    if isinstance(value, (int, float)):
        return value
    return re.sub('\n+', ', ', value.strip())
def list(self, key):
    """Return every value of *key* as a list (empty when unset)."""
    if '~' in key or key == 'title':
        # derived tags are computed via __call__ and may be non-strings
        value = self(key)
        if value == '':
            return []
        parts = value.split('\n') if isinstance(value, str) else [value]
        return [part for part in parts if part]
    value = self.get(key, '')
    return [part for part in value.split('\n') if part]
def list_sort(self, key):
    """Return (display, sort) pairs for *key*.

    The sort half falls back to the display value when no sort tag (or
    no corresponding entry) exists.
    """
    display_values = decode_value(key, self(key))
    display_values = display_values.split('\n') if display_values else []
    sort_values = []
    if key in TAG_TO_SORT:
        sort_tag = TAG_TO_SORT[key]
        raw_sort = decode_value(sort_tag, self(sort_tag))
        sort_values = raw_sort.split('\n') if raw_sort else []
    pairs = []
    for display, sort in zip_longest(display_values, sort_values):
        if display:
            pairs.append((display, sort if sort else display))
    return pairs
def list_separate(self, key):
    """Like list_sort(), but a tied tag ("~a~b") yields the flattened
    (display, sort) pairs of each component tag."""
    if key.startswith('~') and '~' in key[1:]:
        pairs = []
        for tag in util.tagsplit(key):
            pairs.extend(self.list_sort(tag))
        return pairs
    return self.list_sort(key)
def list_unique(self, keys):
    """Collect the values of several tags, de-duplicated, keeping the
    first-seen order."""
    seen = set()
    unique = []
    for tag in keys:
        for value in self.list(tag):
            if value not in seen:
                seen.add(value)
                unique.append(value)
    return unique
def as_lowercased(self):
    """Return a copy with all tag names lower-cased.

    Text tags whose names collide after lower-casing get their values
    merged; numeric ("~#") tags are copied as-is (last one wins).
    """
    merged = AudioFile()
    text_values = {}
    for tag, value in self.items():
        lowered = tag.lower()
        if tag.startswith('~#'):
            merged[lowered] = value
        else:
            text_values.setdefault(lowered, []).extend(value.split('\n'))
    for tag, values in text_values.items():
        merged[tag] = '\n'.join(values)
    return merged
def exists(self):
    """Whether the underlying file (still) exists on disk."""
    path = self['~filename']
    return os.path.exists(path)
def valid(self):
    """Whether the stored '~#mtime' matches the file's current mtime
    (mtime() is the project helper; 0/missing means never stamped)."""
    recorded = self.get('~#mtime', 0)
    if not recorded:
        return False
    return recorded == mtime(self['~filename'])
def mounted(self):
    """Whether the volume recorded in '~mountpoint' is mounted
    (defaults to '/', which is always a mount point)."""
    mount_point = self.get('~mountpoint', '/')
    return ismount(mount_point)
def can_multiple_values(self, key=None):
    """Whether *key* (or any tag, if None) may hold multiple values.

    Base implementation: always; format subclasses may restrict this.
    """
    return True
def can_change(self, k=None):
    """Whether tag *k* is editable (any tag at all when k is None).

    Non-ASCII names, empty names and names containing '=' or '~' are
    rejected.
    """
    if k is None:
        return True
    if not isascii(k):
        return False
    return bool(k) and '=' not in k and '~' not in k
def is_writable(self):
    """Whether the current process may write the underlying file."""
    path = self['~filename']
    return os.access(path, os.W_OK)
def rename(self, newname):
    """Move the file to *newname* and re-sanitize tags accordingly.

    Relative names are resolved against the song's directory.

    Raises:
        ValueError: if a different file already exists at the target.
    """
    if os.path.isabs(newname):
        mkdir(os.path.dirname(newname))
    else:
        newname = os.path.join(self('~dirname'), newname)
    if (not os.path.exists(newname)):
        shutil.move(self['~filename'], newname)
    elif (normalize_path(newname, canonicalise=True) != self['~filename']):
        # target exists and is not just another spelling of ourselves
        raise ValueError
    self.sanitize(newname)
def sanitize(self, filename=None):
    """Normalise tag values and refresh filesystem-derived tags.

    Args:
        filename: when given, stored as the new '~filename'.

    Raises:
        ValueError: if no filename is known at all.
    """
    for (key, val) in list(self.items()):
        # reassign through __setitem__ so values are re-normalised
        self[key] = val
        if (isinstance(val, str) and ('\x00' in val)):
            # embedded NULs would corrupt downstream consumers
            self[key] = '\n'.join(filter((lambda s: s), val.split('\x00')))
        if ((key in NUMERIC_ZERO_DEFAULT) and (val == 0)):
            # zero is the implicit default; storing it is redundant
            del self[key]
    if filename:
        self['~filename'] = filename
    elif ('~filename' not in self):
        raise ValueError('Unknown filename!')
    assert isinstance(self['~filename'], fsnative)
    if self.is_file:
        self['~filename'] = normalize_path(self['~filename'], canonicalise=True)
        # walk up the path until a mount point is found
        head = self['~filename']
        while ('~mountpoint' not in self):
            (head, tail) = os.path.split(head)
            head = (head or fsnative('/'))
            if ismount(head):
                self['~mountpoint'] = head
    else:
        self['~mountpoint'] = fsnative('/')
    self.setdefault('~#added', int(time.time()))
    try:
        stat = os.stat(self['~filename'])
        self['~#mtime'] = stat.st_mtime
        self['~#filesize'] = stat.st_size
        if ('~#bitrate' not in self):
            try:
                # rough estimate: bytes / (seconds * 1000/8) -> kbps
                self['~#bitrate'] = int((stat.st_size / (self['~#length'] * (1000 / 8))))
            except (KeyError, ZeroDivisionError):
                pass
    except OSError:
        self['~#mtime'] = 0
def to_dump(self):
    """Serialise all tags to the bytes format used by the library cache.

    One b"key=value" line per value; numeric zero-defaults, a rating and
    the format marker are always appended. Returns the joined blob.
    """
    def _as_bytes(key):
        # keys are normally str, but tolerate pre-encoded bytes
        return encode(key) if isinstance(key, str) else key
    lines = []
    for key in self.keys():
        raw_key = _as_bytes(key)
        assert isinstance(raw_key, bytes)
        value = self[key]
        if isinstance(value, int):
            lines.append(raw_key + encode('=%d' % value))
        elif isinstance(value, float):
            lines.append(raw_key + encode('=%f' % value))
        else:
            for item in self.list(key):
                if not isinstance(item, bytes):
                    item = encode(item)
                lines.append(raw_key + b'=' + item)
    # persist numeric tags that default to zero even when unset
    for key in NUMERIC_ZERO_DEFAULT - set(self.keys()):
        lines.append(_as_bytes(key) + encode('=%d' % self.get(key, 0)))
    if '~#rating' not in self:
        lines.append(encode('~#rating=%f' % self('~#rating')))
    lines.append(encode('~format=%s' % self.format))
    lines.append(b'')
    return b'\n'.join(lines)
def from_dump(self, text):
    """Restore tags from a bytes blob produced by to_dump()."""
    for line in text.split(b'\n'):
        if not line:
            continue
        raw_key, _sep, raw_val = line.partition(b'=')
        key = decode(raw_key)
        if key == '~format':
            # the format is implied by the concrete subclass; skip it
            continue
        if key in FILESYSTEM_TAGS:
            self.add(key, bytes2fsn(raw_val, 'utf-8'))
        elif key.startswith('~#'):
            # numeric tags: prefer int, fall back to float, else drop
            try:
                self.add(key, int(raw_val))
            except ValueError:
                try:
                    self.add(key, float(raw_val))
                except ValueError:
                    pass
        else:
            self.add(key, decode(raw_val))
def change(self, key, old_value, new_value):
    """Replace *old_value* with *new_value* in *key*.

    If the tag is missing, or old_value is not among its values, the
    tag is simply set to new_value.
    """
    try:
        values = self.list(key)
        try:
            values[values.index(old_value)] = new_value
        except ValueError:
            # old_value not present: overwrite wholesale
            self[key] = new_value
        else:
            self[key] = '\n'.join(values)
    except KeyError:
        self[key] = new_value
def add(self, key, value):
    """Append *value* to *key*, newline-separating multiple values."""
    if key in self:
        self[key] = self[key] + '\n' + value
    else:
        self[key] = value
def remove(self, key, value=None):
    """Remove *value* from *key*; delete the tag entirely when value is
    None or is its only value. Unknown keys/values are ignored."""
    if key not in self:
        return
    if value is None or self[key] == value:
        del self[key]
        return
    try:
        values = self.list(key)
        values.remove(value)
        self[key] = '\n'.join(values)
    except ValueError:
        # value was not among the tag's values
        pass
def replay_gain(self, profiles, pre_amp_gain=0, fallback_gain=0):
    """Return the volume scale factor for the first usable replaygain
    profile in *profiles* ('none' means no adjustment).

    The scale is clipped so peak * scale never exceeds 1, and the
    result is capped at 15. When no profile has usable tags, the
    fallback gain (capped at a scale of 1.0) is used instead.
    """
    for profile in profiles:
        if profile == 'none':
            return 1.0
        try:
            gain_db = float(self['replaygain_%s_gain' % profile].split()[0])
            peak = float(self.get('replaygain_%s_peak' % profile, 1))
        except (KeyError, ValueError, IndexError):
            # missing or malformed tags: try the next profile
            continue
        gain_db += pre_amp_gain
        try:
            scale = 10.0 ** (gain_db / 20)
        except OverflowError:
            scale = 1.0 / peak
        else:
            if scale * peak > 1:
                # avoid clipping
                scale = 1.0 / peak
        return min(15, scale)
    # no profile matched: use the fallback gain
    try:
        scale = 10.0 ** ((fallback_gain + pre_amp_gain) / 20)
    except OverflowError:
        scale = 1.0
    else:
        scale = min(scale, 1.0)
    return min(15, scale)
def write(self):
    """Persist the current tags to the underlying file.

    Subclass responsibility; the base implementation always raises.
    """
    raise NotImplementedError
def bookmarks(self):
    """Parse '~bookmark' into (seconds, name) pairs.

    Valid marks are sorted by time; unparsable lines are appended at
    the end as (-1, line).

    NOTE(review): this and the same-named method below look like a
    @property getter/setter pair whose decorators were lost in
    extraction — confirm before calling positionally.
    """
    marks = []
    invalid = []
    for line in self.list('~bookmark'):
        try:
            (time, mark) = line.split(' ', 1)
        except Exception:
            invalid.append(((- 1), line))
        else:
            try:
                time = util.parse_time(time, False)
            except Exception:
                invalid.append(((- 1), line))
            else:
                if (time >= 0):
                    marks.append((time, mark))
                else:
                    invalid.append(((- 1), line))
    marks.sort()
    marks.extend(invalid)
    return marks
def bookmarks(self, marks):
    """Serialise (seconds, name) pairs into the '~bookmark' tag.

    An empty list removes the tag.

    Raises:
        ValueError: for negative times.

    NOTE(review): presumably the @bookmarks.setter counterpart of the
    method above (decorator lost in extraction).
    """
    result = []
    for (time_, mark) in marks:
        if (time_ < 0):
            raise ValueError('mark times must be positive')
        result.append(f'{util.format_time(time_)} {mark}')
    result = '\n'.join(result)
    if result:
        self['~bookmark'] = result
    elif ('~bookmark' in self):
        del self['~bookmark']
def multiple_input_model():
    """Build a small two-input Keras graph (conv branches, add, dense).

    Returns the final output tensor, not a tf.keras.Model — callers
    presumably assemble the model from inputs/outputs; confirm at the
    call sites.
    """
    input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))
    input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))
    x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)
    # 3x3 valid conv reduces the 12x12 branch to 10x10 so the add matches
    x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)
    x = tf.keras.layers.add([x1, x2])
    x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)
    x = tf.keras.layers.Flatten()(x)
    outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name='multiple_input_model')(x)
    return outputs
class Migration(migrations.Migration):
    """Add the IssueResource model: a URL attached to an Issue through a
    specific Integration (both foreign keys cascade on delete)."""
    dependencies = [('projects', '0039_integrationoption_secret')]
    operations = [migrations.CreateModel(name='IssueResource', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('url', models.URLField(help_text='The URL of this issue resource.', verbose_name='URL')), ('integration', models.ForeignKey(help_text='The integration for this issue resource.', on_delete=django.db.models.deletion.CASCADE, related_name='resources', to='projects.Integration', verbose_name='Integration')), ('issue', models.ForeignKey(help_text='The issue for this issue resource.', on_delete=django.db.models.deletion.CASCADE, related_name='resources', to='projects.Issue', verbose_name='Issue'))], options={'verbose_name': 'Issue resource', 'verbose_name_plural': 'Issue resources', 'ordering': ('issue__project__title', 'issue')})]
class CurrentTests(BaseOutputTest):
    """Checks on battery-model current densities: interfacial-current
    averages, charge conservation and electrode boundary conditions."""
    def __init__(self, model, param, disc, solution, operating_condition):
        super().__init__(model, param, disc, solution, operating_condition)
        # volumetric interfacial current densities (+ X-averages)
        self.a_j_n = solution['Negative electrode volumetric interfacial current density [A.m-3]']
        self.a_j_p = solution['Positive electrode volumetric interfacial current density [A.m-3]']
        self.a_j_n_av = solution['X-averaged negative electrode volumetric interfacial current density [A.m-3]']
        self.a_j_p_av = solution['X-averaged positive electrode volumetric interfacial current density [A.m-3]']
        # SEI and lithium-plating side-reaction currents (negative electrode)
        self.a_j_n_sei = solution['Negative electrode SEI volumetric interfacial current density [A.m-3]']
        self.a_j_n_sei_av = solution['X-averaged negative electrode SEI volumetric interfacial current density [A.m-3]']
        self.a_j_n_pl = solution['Negative electrode lithium plating volumetric interfacial current density [A.m-3]']
        self.a_j_n_pl_av = solution['X-averaged negative electrode lithium plating volumetric interfacial current density [A.m-3]']
        # electrode and electrolyte current densities
        self.i_s_n = solution['Negative electrode current density [A.m-2]']
        self.i_s_p = solution['Positive electrode current density [A.m-2]']
        self.i_s = solution['Electrode current density [A.m-2]']
        self.i_e = solution['Electrolyte current density [A.m-2]']
    def test_interfacial_current_average(self):
        """Total interfacial current (incl. side reactions) averages to
        +-i_cell / electrode thickness in each electrode."""
        np.testing.assert_allclose(np.mean(((self.a_j_n(self.t, self.x_n) + self.a_j_n_sei(self.t, self.x_n)) + self.a_j_n_pl(self.t, self.x_n)), axis=0), (self.i_cell / self.L_n), rtol=0.001, atol=0.0001)
        np.testing.assert_allclose(np.mean(self.a_j_p(self.t, self.x_p), axis=0), ((- self.i_cell) / self.L_p), rtol=0.001, atol=0.0001)
    def test_conservation(self):
        """Electrode + electrolyte current sums to the applied current
        everywhere, and i_s matches the per-electrode values."""
        (t, x_n, x_s, x_p) = (self.t, self.x_n, self.x_s, self.x_p)
        current_param = self.model.param.current_density_with_time
        i_cell = self.param.process_symbol(current_param).evaluate(t=t)
        for x in [x_n, x_s, x_p]:
            np.testing.assert_allclose((self.i_s(t, x) + self.i_e(t, x)), i_cell, rtol=0.01, atol=1e-08)
        np.testing.assert_allclose(self.i_s(t, x_n), self.i_s_n(t, x_n), rtol=0.001, atol=1e-09)
        np.testing.assert_allclose(self.i_s(t, x_p), self.i_s_p(t, x_p), rtol=0.001, atol=1e-09)
    def test_current_density_boundaries(self):
        """Solid current equals i_cell at the current collectors and
        vanishes at the separator interfaces."""
        (t, x_n, x_p) = (self.t, self.x_n_edge, self.x_p_edge)
        current_param = self.model.param.current_density_with_time
        i_cell = self.param.process_symbol(current_param).evaluate(t=t)
        np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[0]), i_cell, decimal=2)
        np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[(- 1)]), 0, decimal=4)
        np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[(- 1)]), i_cell, decimal=3)
        np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[0]), 0, decimal=4)
    def test_all(self):
        """Run every check (the average test is skipped for the
        'differential' surface form)."""
        self.test_conservation()
        self.test_current_density_boundaries()
        if (self.model.options['surface form'] != 'differential'):
            self.test_interfacial_current_average()
@_dataframe_method
def separate_rows(df, column_name: str, sep: str=''):
    """Split *column_name* on *sep* and explode each part onto its own row.

    All other columns are repeated for every part; the original column
    order is preserved and the caller's frame is left unmodified.
    (The bare `_dataframe_method` line was a decorator whose '@' was lost.)
    """
    columns_original = list(df.columns)
    # work on a copy so the caller's frame does not grow an 'id' column
    df = df.copy()
    df['id'] = df.index
    wdf = pd.DataFrame(df[column_name].str.split(sep).tolist()).stack().reset_index()
    # level_0 is the source row index; column 0 holds the split values.
    # Bug fix: the value column must be renamed to *column_name* — it was
    # hard-coded to 'revenue_items', which broke the final column
    # selection for any other column.
    wdf.rename(columns={'level_0': 'id', 0: column_name}, inplace=True)
    wdf.drop(columns=['level_1'], inplace=True)
    return pd.merge(df, wdf, on='id', suffixes=('_drop', '')).drop(columns=['id', (column_name + '_drop')])[columns_original]
def _transform_electronic_energy(hamiltonian: ElectronicEnergy, density_total: ElectronicIntegrals, density_active: ElectronicIntegrals, density_active: ElectronicIntegrals, active_basis: BasisTransformer, offset_name: str, *, reference_inactive_fock: (ElectronicIntegrals | None)=None, reference_inactive_energy: (float | None)=None) -> ElectronicEnergy:
    """Project an ElectronicEnergy onto an active space.

    Computes the inactive Fock operator and inactive energy from the
    total/active densities, transforms the remaining operator with
    *active_basis*, and stores the inactive energy as the constant
    *offset_name* on the returned Hamiltonian.

    NOTE(review): scalar energies are read from the '' entry of the
    alpha/beta/beta_alpha einsum results — assumes that convention holds.
    """
    if (reference_inactive_fock is None):
        reference_inactive_fock = hamiltonian.fock(density_total)
    # two-body Fock contribution of the active density alone
    active_fock_operator = (hamiltonian.fock(density_active) - hamiltonian.electronic_integrals.one_body)
    inactive_fock_operator = (reference_inactive_fock - active_fock_operator)
    if (reference_inactive_energy is None):
        # E_inactive_ref = 1/2 Tr[(F_ref + h) * D_total]
        reference_inactive_energy = cast(ElectronicIntegrals, (0.5 * ElectronicIntegrals.einsum({'ij,ji': ('+-', '+-', '')}, (reference_inactive_fock + hamiltonian.electronic_integrals.one_body), density_total)))
        reference_inactive_energy = ((reference_inactive_energy.alpha.get('', 0.0) + reference_inactive_energy.beta.get('', 0.0)) + reference_inactive_energy.beta_alpha.get('', 0.0))
    # subtract the active density's share of the inactive energy
    e_inactive = cast(ElectronicIntegrals, ((- 1.0) * ElectronicIntegrals.einsum({'ij,ji': ('+-', '+-', '')}, reference_inactive_fock, density_active)))
    e_inactive += cast(ElectronicIntegrals, (0.5 * ElectronicIntegrals.einsum({'ij,ji': ('+-', '+-', '')}, active_fock_operator, density_active)))
    e_inactive_sum = (((reference_inactive_energy + e_inactive.alpha.get('', 0.0)) + e_inactive.beta.get('', 0.0)) + e_inactive.beta_alpha.get('', 0.0))
    new_hamil = ElectronicEnergy(active_basis.transform_electronic_integrals((inactive_fock_operator + hamiltonian.electronic_integrals.two_body)))
    new_hamil.constants = deepcopy(hamiltonian.constants)
    new_hamil.constants[offset_name] = e_inactive_sum
    return new_hamil
class PointLineDistance(PointOnLine):
    """Assembly constraint: fixes the distance between a point and a
    linear edge (2D when a projection plane element is given, else 3D)."""
    _id = 8
    _props = ['Distance']
    _iconName = 'Assembly_ConstraintPointLineDistance.svg'
    _tooltip = QT_TRANSLATE_NOOP('asm3', 'Add a "{}" to constrain the distance between a point and a linear edge in 2D or 3D')
    # NOTE(review): takes cls, so upstream this is presumably a
    # @classmethod whose decorator was lost in extraction — confirm.
    def init(cls, obj):
        """Initialise obj.Distance from the current point/edge geometry."""
        infos = obj.Proxy.getElementsInfo()
        # point position in global coordinates
        p1 = infos[0].Placement.multVec(utils.getElementPos(infos[0].Shape))
        (p2, p3) = utils.getElementLinePoints(infos[1].Shape)
        p2 = infos[1].Placement.multVec(p2)
        p3 = infos[1].Placement.multVec(p3)
        if (len(infos) == 3):
            # a third element defines a plane: measure in its 2D projection
            rot = infos[2].Placement.Rotation.multiply(utils.getElementRotation(infos[2].Shape))
            (p1, p2, p3) = utils.project2D(rot, p1, p2, p3)
        obj.Distance = p1.distanceToLine(p2, (p3 - p2))
class Solution(object):
    """Odd-even linked list (LeetCode 328)."""

    def oddEvenList(self, head):
        """Rearrange the list so nodes at odd positions come first,
        followed by nodes at even positions, preserving relative order.

        Runs in O(n) time and O(1) extra space; returns the new head.
        """
        if head is None:
            return None
        if head.next is None:
            return head
        odd_tail = head
        even_head = head.next
        even_tail = even_head
        # weave alternate nodes onto the odd and even sublists
        while odd_tail.next is not None and even_tail.next is not None:
            odd_tail.next = even_tail.next
            odd_tail = odd_tail.next
            even_tail.next = odd_tail.next
            even_tail = even_tail.next
        # append the even sublist after the odd one
        odd_tail.next = even_head
        return head
def test_execute_should_show_operation_as_cancelled_on_subprocess_keyboard_interrupt(config: Config, pool: RepositoryPool, mocker: MockerFixture, io: BufferedIO, env: MockEnv) -> None:
    """Executor reports the operation as 'Cancelled' and exits with 1
    when an install worker returns -2 (the sentinel used for a
    keyboard interrupt in a subprocess, per the test name)."""
    executor = Executor(env, pool, config, io)
    executor.verbose()
    # force the installer to report the interruption sentinel
    mocker.patch.object(executor, '_install', return_value=(- 2))
    assert (executor.execute([Install(Package('clikit', '0.2.3'))]) == 1)
    expected = '\nPackage operations: 1 install, 0 updates, 0 removals\n\n  - Installing clikit (0.2.3)\n  - Installing clikit (0.2.3): Cancelled\n'
    assert (io.fetch_output() == expected)
class SpconvModel(nn.Module):
    """Minimal sparse-convolution model: a single 1x1 SparseConv2d."""
    def __init__(self, in_channels=1, out_channels=10):
        super().__init__()
        self.conv = spconv.SparseConv2d(in_channels, out_channels, 1)
    def forward(self, x):
        """Run the conv on a dense channels-first tensor; returns dense."""
        # move channels last (NCHW -> NHWC) for SparseConvTensor ingestion
        nhwc_x = x.permute(0, *[i for i in range(2, len(x.shape))], 1)
        spconv_input = spconv.SparseConvTensor.from_dense(nhwc_x)
        output = self.conv(spconv_input)
        return output.dense()
class ScrimsClaim(discord.ui.Button):
    """Button on the public slot-manager view letting users claim an
    available scrims slot."""
    view: ScrimsSlotmPublicView
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    async def callback(self, interaction: discord.Interaction) -> T.Any:
        """Offer a slot-selection prompt, or report that nothing is free."""
        if (not self.view.claimable):
            (await interaction.response.send_message('No slot available right now.', ephemeral=True))
            # refresh so the public message reflects the empty state
            return (await self.view.record.refresh_public_message())
        # defer before building the selector to avoid interaction timeout
        (await interaction.response.defer(thinking=True, ephemeral=True))
        claim_view = BaseSelector(interaction.user.id, ClaimSlotSelector, scrims=self.view.claimable, multiple_slots=self.view.record.multiple_slots)
        (await interaction.followup.send('Select the slot you want to claim:', view=claim_view, ephemeral=True))
class AttrVI_ATTR_RSRC_SPEC_VERSION(RangeAttribute):
    """VISA attribute: the resource's VISA specification version.

    Read-only, locally cached, available on every session type.
    """
    resources = AllSessionTypes
    py_name = 'spec_version'
    visa_name = 'VI_ATTR_RSRC_SPEC_VERSION'
    visa_type = 'ViVersion'
    # 3145728 == 0x00300000, i.e. VISA specification 3.0
    default = 3145728
    (read, write, local) = (True, False, True)
    # Bug fix: the original line "(0, , None)" was a syntax error (empty
    # tuple slot). ViVersion is an unsigned 32-bit value, so the full
    # range applies — TODO confirm against the upstream pyvisa
    # attributes table.
    (min_value, max_value, values) = (0, 4294967295, None)
def annotation_string(word_annotation):
    """Render a word annotation as syllable-separated lines, one line
    per stress variant, with stress markers before stressed syllables.

    NOTE(review): the primary-stress marker is a plain space here, which
    looks odd next to the backtick for secondary stress — preserved
    as-is, confirm upstream.
    """
    lines = []
    for stress_row in word_annotation.stresses:
        line = SYLLABLE_SEPARATOR
        for idx, syllable in enumerate(word_annotation.syllables):
            if stress_row[idx] == Stress.primary:
                line += ' '
            elif stress_row[idx] == Stress.secondary:
                line += '`'
            line += syllable + SYLLABLE_SEPARATOR
        lines.append(line)
    return '\n'.join(lines)
class DotOutputFormatter(object):
    """Radish "dots" formatter: one symbol per scenario plus a final
    failure summary."""
    # only load when the user asked for the dots formatter
    LOAD_IF = staticmethod((lambda config: (config.formatter == 'dots')))
    LOAD_PRIORITY = 30
    # step/scenario state -> printed symbol
    STATE_SYMBOLS = {Step.State.PASSED: '.', Step.State.PENDING: 'P', Step.State.UNTESTED: 'U', Step.State.SKIPPED: 'S', Step.State.FAILED: 'F'}
    def __init__(self):
        # hook into radish's lifecycle events
        before.each_feature(self.dot_formatter_before_each_feature)
        after.each_feature((lambda *args, **kwargs: sys.stdout.write('\n')))
        after.each_scenario(self.dot_formatter_after_each_scenario)
        after.each_step(self.dot_formatter_after_each_step)
        after.all(self.dot_formatter_failure_summary)
        self._failed_steps = []
    def dot_formatter_before_each_feature(self, feature):
        """Print the feature path as a heading."""
        output = (cf.bold_black(feature.path) + ': ')
        sys.stdout.write(str(output))
    def dot_formatter_after_each_scenario(self, scenario):
        """Print one state symbol per concrete scenario (outline/loop
        containers get no symbol of their own)."""
        if isinstance(scenario, (ScenarioOutline, ScenarioLoop)):
            return
        sys.stdout.write(str(self.STATE_SYMBOLS[scenario.state]))
    def dot_formatter_after_each_step(self, step):
        """Remember failed steps for the end-of-run summary."""
        if (step.state == Step.State.FAILED):
            self._failed_steps.append(step)
    def dot_formatter_failure_summary(self, features, marker):
        """Print path/sentence (and optional traceback) of each failure."""
        if (not self._failed_steps):
            return
        output = (('\n' + cf.bold_red('Failures:')) + '\n')
        for step in self._failed_steps:
            output += '{}: {}\n    {}\n'.format(step.path, step.parent.sentence, cf.red(step.sentence))
            if world.config.with_traceback:
                output += '      {}\n'.format('\n      '.join([str(cf.red(l)) for l in step.failure.traceback.split('\n')[:(- 2)]]))
            output += '      {}: {}\n\n'.format(cf.bold_red(step.failure.name), cf.red(step.failure.reason))
        sys.stdout.write(str((output + '\n')))
class AutoPurgeEvents(Cog):
    """Event handlers for auto-purge channels and message sniping.

    NOTE(review): the bare "()" statements below are almost certainly
    stripped decorators (likely @Cog.listener()) — confirm upstream.
    """
    def __init__(self, bot: Quotient):
        self.bot = bot
        # prune old snipe records once at startup
        self.bot.loop.create_task(self.delete_older_snipes())
    async def delete_older_snipes(self):
        """Delete snipe records older than 10 days."""
        (await self.bot.wait_until_ready())
        (await Snipe.filter(delete_time__lte=(datetime.now(tz=IST) - timedelta(days=10))).delete())
    # NOTE(review): stripped decorator (likely @Cog.listener())
    ()
    async def on_message_delete(self, message: discord.Message):
        """Record deleted guild messages for the snipe command."""
        if ((not message.guild) or message.author.bot):
            return
        channel = message.channel
        content = (message.content if message.content else '*[Content Unavailable]*')
        # only text channels and threads are snipeable
        if (not (channel.type in (discord.ChannelType.text, discord.ChannelType.private_thread, discord.ChannelType.public_thread))):
            return
        (await Snipe.update_or_create(channel_id=channel.id, defaults={'author_id': message.author.id, 'content': content, 'nsfw': channel.is_nsfw()}))
    # NOTE(review): stripped decorator (likely @Cog.listener())
    ()
    async def on_message(self, message: discord.Message):
        """Schedule deletion of messages in auto-purge channels."""
        if ((not message.guild) or (not (message.channel.id in self.bot.cache.autopurge_channels))):
            return
        record = (await AutoPurge.get_or_none(channel_id=message.channel.id))
        if (not record):
            # stale cache entry: the channel is no longer auto-purged
            return self.bot.cache.autopurge_channels.discard(message.channel.id)
        (await self.bot.reminders.create_timer((datetime.now(tz=IST) + timedelta(seconds=record.delete_after)), 'autopurge', message_id=message.id, channel_id=message.channel.id))
    # NOTE(review): stripped decorator (likely @Cog.listener())
    ()
    async def on_autopurge_timer_complete(self, timer: Timer):
        """Delete the scheduled message unless it was pinned meanwhile."""
        (message_id, channel_id) = (timer.kwargs['message_id'], timer.kwargs['channel_id'])
        check = (await AutoPurge.get_or_none(channel_id=channel_id))
        if (not check):
            return
        channel = check.channel
        if (not channel):
            return
        message = channel.get_partial_message(message_id)
        with suppress(discord.NotFound, discord.Forbidden, discord.HTTPException):
            msg = (await message.fetch())
            if (not msg.pinned):
                (await msg.delete())
    # NOTE(review): stripped decorator (likely @Cog.listener())
    ()
    async def on_guild_channel_delete(self, channel: discord.TextChannel):
        """Clean up auto-purge config when its channel is deleted."""
        if (channel.id in self.bot.cache.autopurge_channels):
            (await AutoPurge.filter(channel_id=channel.id).delete())
            self.bot.cache.autopurge_channels.discard(channel.id)
class TestTensorboardPlotHook(HookTestBase):
    """Unit tests for TensorboardPlotHook: construction, writer calls
    and learning-rate logging."""
    def setUp(self) -> None:
        # scratch dir for SummaryWriter event files
        self.base_dir = tempfile.mkdtemp()
    def tearDown(self) -> None:
        shutil.rmtree(self.base_dir)
    def test_constructors(self) -> None:
        """Hook builds from config; a non-int log_period is rejected."""
        config = {'summary_writer': {}, 'log_period': 5}
        invalid_config = copy.deepcopy(config)
        invalid_config['log_period'] = 'this is not an int'
        self.constructor_test_helper(config=config, hook_type=TensorboardPlotHook, hook_registry_name='tensorboard_plot', hook_kwargs={'tb_writer': SummaryWriter(), 'log_period': 5}, invalid_configs=[invalid_config])
    # NOTE(review): bare string below is almost certainly a stripped
    # @mock.patch('classy_vision.hooks.tensorboard_plot_hook.is_primary')
    # decorator — the mock_is_primary_func parameter supports this.
    ('classy_vision.hooks.tensorboard_plot_hook.is_primary')
    def test_writer(self, mock_is_primary_func: mock.MagicMock) -> None:
        """Scalars are written only on the primary replica; train phases
        additionally log the learning rate and sample-fetch times."""
        for (phase_idx, master) in product([0, 1, 2], [True, False]):
            # even phases are train, odd ones test
            (train, phase_type) = ((True, 'train') if ((phase_idx % 2) == 0) else (False, 'test'))
            mock_is_primary_func.return_value = master
            config = get_test_task_config()
            config['dataset']['train']['batchsize_per_replica'] = 2
            config['dataset']['test']['batchsize_per_replica'] = 5
            task = build_task(config)
            task.prepare()
            task.advance_phase()
            task.phase_idx = phase_idx
            task.train = train
            losses = [1.23, 4.45, 12.3, 3.4]
            sample_fetch_times = [1.1, 2.2, 3.3, 2.2]
            summary_writer = SummaryWriter(self.base_dir)
            # wrap so add_scalar calls can be asserted on
            summary_writer = mock.MagicMock(wraps=summary_writer)
            tensorboard_plot_hook = TensorboardPlotHook(summary_writer)
            tensorboard_plot_hook.on_phase_start(task)
            # phase 0 exercises the no-step-data path
            disable_sample_fetch_times = (phase_idx == 0)
            for (loss, sample_fetch_time) in zip(losses, sample_fetch_times):
                task.losses.append(loss)
                step_data = ({} if disable_sample_fetch_times else {'sample_fetch_time': sample_fetch_time})
                task.last_batch = LastBatchInfo(None, None, None, None, step_data)
                tensorboard_plot_hook.on_step(task)
            tensorboard_plot_hook.on_phase_end(task)
            if master:
                if train:
                    learning_rate_key = f'Learning Rate/{phase_type}'
                    summary_writer.add_scalar.assert_any_call(learning_rate_key, mock.ANY, global_step=mock.ANY, walltime=mock.ANY)
                avg_loss_key = f'Losses/{phase_type}'
                summary_writer.add_scalar.assert_any_call(avg_loss_key, mock.ANY, global_step=mock.ANY)
                for meter in task.meters:
                    for name in meter.value:
                        meter_key = f'Meters/{phase_type}/{meter.name}/{name}'
                        summary_writer.add_scalar.assert_any_call(meter_key, mock.ANY, global_step=mock.ANY)
                if step_data:
                    summary_writer.add_scalar.assert_any_call(f'Speed/{phase_type}/cumulative_sample_fetch_time', mock.ANY, global_step=mock.ANY, walltime=mock.ANY)
            else:
                # non-primary replicas must not write anything
                summary_writer.add_scalar.assert_not_called()
            summary_writer.add_scalar.reset_mock()
    def test_logged_lr(self):
        """The logged LR follows the scheduler (here: lr == progress)."""
        class SchedulerMock(ClassyParamScheduler):
            def __call__(self, where):
                return where
        mock_lr_scheduler = SchedulerMock(UpdateInterval.STEP)
        # minimal writer stub capturing scalar logs per key
        class DummySummaryWriter(object):
            def __init__(self):
                self.scalar_logs = {}
            def add_scalar(self, key, value, global_step=None, walltime=None) -> None:
                self.scalar_logs[key] = (self.scalar_logs.get(key, []) + [value])
            def add_histogram(self, key, value, global_step=None, walltime=None) -> None:
                return
            def add_text(self, *args, **kwargs):
                pass
            def flush(self):
                return
        config = get_test_mlp_task_config()
        config['num_epochs'] = 3
        config['dataset']['train']['batchsize_per_replica'] = 10
        config['dataset']['test']['batchsize_per_replica'] = 5
        task = build_task(config)
        writer = DummySummaryWriter()
        hook = TensorboardPlotHook(writer)
        hook.log_period = 1
        task.set_hooks([hook])
        task.set_optimizer_schedulers({'lr': mock_lr_scheduler})
        trainer = LocalTrainer()
        trainer.train(task)
        # 3 epochs x 2 steps -> lr at fractions 0/6 .. 5/6
        self.assertEqual(writer.scalar_logs['Learning Rate/train'], [0, (1 / 6), (2 / 6), (3 / 6), (4 / 6), (5 / 6)])
class Actor(torch.nn.Module):
    """Policy network that scores a (state, action) pair with a single logit."""

    def __init__(self, device):
        super().__init__()
        self.device = device
        # Two hidden layers (128 -> 128 -> 64 -> 1); the single output is
        # the unnormalized score for the given action.
        self.mlp_policy = mlp(input_size=128, layer_sizes=[128, 64], output_size=1)

    def forward(self, x, action):
        # Join features and action along the feature axis, move the joint
        # tensor to the configured device, then score it with the policy MLP.
        joint = torch.cat([x, action], dim=1).to(self.device)
        return self.mlp_policy(joint)
def test_one_pass():
    """The console plugin should survive one full start/tick/stop event cycle."""
    dispatcher = EventDispatcher()
    plugin = ConsoleOutputPlugin()
    plugin.register(dispatcher)
    context = MagicMock(spec=GraphExecutionContext(bonobo.Graph()))
    # Drive a complete execution lifecycle through the dispatcher.
    for event_name in (events.START, events.TICK, events.STOPPED):
        dispatcher.dispatch(event_name, events.ExecutionEvent(context))
    plugin.unregister(dispatcher)
class Effect2795(BaseEffect):
    """Passive shield resistance bonus.

    Boosts the ship's shield damage resonance for each damage type
    (kinetic/thermal/explosive/em) by the module's matching
    ``<type>DamageResistanceBonus`` attribute. The boost is
    stacking-penalized, and a missing module attribute counts as 0.
    """

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # NOTE: no `self` — handlers in this effect framework are plain
        # functions invoked through the class (original convention kept).
        # Loop variable renamed from `type`, which shadowed the builtin
        # (and read confusingly against the `type` class attribute above).
        for dmg_type in ('kinetic', 'thermal', 'explosive', 'em'):
            bonus = module.getModifiedItemAttr(dmg_type + 'DamageResistanceBonus') or 0
            fit.ship.boostItemAttr(
                'shield' + dmg_type.capitalize() + 'DamageResonance',
                bonus,
                stackingPenalties=True,
                **kwargs,
            )
def load_code_detector(bytecode):
    """Split EVM bytecode into loader (deployment) code and runtime code + auxdata.

    Searches for the Solidity-style prologue ``60xx604052`` (which looks like
    PUSH1 <value> PUSH1 0x40 MSTORE — TODO confirm against the compiler docs).
    If it occurs more than once, everything before the second occurrence is
    treated as the loader; otherwise the loader part is empty.

    :param bytecode: contract code as a hex string or raw ``bytes``
    :return: ``(load_bytecode, rtcode_auxdata)`` tuple of hex strings
    """
    hex_code = bytecode.hex() if isinstance(bytecode, bytes) else bytecode
    prologues = list(re.finditer('60.{2}604052', hex_code))
    if len(prologues) <= 1:
        # Single (or no) prologue: the whole blob is runtime code + auxdata.
        return '', hex_code
    split_at = prologues[1].start()
    return hex_code[:split_at], hex_code[split_at:]
.parametrize(['width', 'height', 'affine_transform', 'expected_bounds'], [pytest.param(2, 2, Affine.identity(), (0.0, 2.0, 2.0, 0.0), id='Identity transform'), pytest.param(2, 2, Affine.scale(1, (- 1)), (0.0, (- 2.0), 2.0, 0.0), id='North-up transform'), pytest.param(2, 2, (Affine.translation(2, 2) * Affine.scale(1, (- 1))), (2.0, 0.0, 4.0, 2.0), id='Translated transform'), pytest.param(2, 2, (Affine.scale(4) * Affine.scale(1, (- 1))), (0.0, (- 8.0), 8.0, 0.0), id='Scaled transform'), pytest.param(2, 2, (Affine.rotation(90) * Affine.scale(1, (- 1))), (0.0, 0.0, 2.0, 2.0), id='90 degree rotated transform'), pytest.param(2, 2, (Affine.rotation(45) * Affine.scale(1, (- 1))), (0.0, (- math.sqrt(2)), (2 * math.sqrt(2)), math.sqrt(2)), id='45 degree rotated transform'), pytest.param(2, 2, (Affine.scale(4, 1) * Affine.scale(1, (- 1))), (0, (- 2.0), 8.0, 0.0), id='Rectangular pixel transform'), pytest.param(6, 2, Affine.scale(1, (- 1)), (0, (- 2.0), 6.0, 0.0), id='Differing width and height')])
def test_array_bounds_from_transforms(width, height, affine_transform, expected_bounds):
    """Bounds computed from a raster's shape and affine must match expectations."""
    # Note the (height, width) argument order expected by array_bounds.
    assert_bounding_box_equal(expected_bounds, transform.array_bounds(height, width, affine_transform))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.