code
stringlengths
281
23.7M
def map_dict_keys(inputs, keys_map, logger_print=None):
    """Rename (or drop) dictionary keys according to a regex mapping.

    Args:
        inputs: source dict whose keys are matched against the patterns.
        keys_map: dict of ``{in_pattern: out_pattern}``; the first pattern
            matching a key wins and produces its new name.
        logger_print: optional callable used to report deletions/renames.

    Returns:
        A new dict with remapped keys; entries whose new key is ``None``
        (or the literal string ``'None'``) are dropped.
    """
    # BUG FIX: dropped the unused ``is_regex`` and ``re`` imports.
    from .string_utils import regex_replace, regex_match

    outputs = {}
    for key, value in inputs.items():
        new_key = key
        for in_pattern, out_pattern in keys_map.items():
            if regex_match(key, in_pattern):
                new_key = regex_replace(key, in_pattern, out_pattern)
                break  # first matching pattern wins
        # mapping a key to None / 'None' deletes the entry
        if new_key == 'None' or new_key is None:
            if logger_print is not None:
                logger_print(f'Delete {key}!')
            continue
        if new_key != key and logger_print is not None:
            logger_print(f'Change {key} to {new_key}.')
        outputs[new_key] = value
    return outputs
class Adam(torch.optim.Optimizer):
    """Adam optimizer (optionally AMSGrad) with fp16/bf16-safe state.

    Matches the classic Adam update, but performs all arithmetic in
    float32 even when parameters or gradients are stored in half
    precision, copying the result back at the end of each step.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def supports_memory_efficient_fp16(self):
        return True

    def supports_flat_params(self):
        return True

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable re-evaluating the loss; its return
                value is passed through.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad.data
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                use_amsgrad = group['amsgrad']

                # Work on an fp32 view of the parameter; copy back later.
                param_fp32 = param.data
                if param.data.dtype in {torch.float16, torch.bfloat16}:
                    param_fp32 = param_fp32.float()

                state = self.state[param]
                if not state:
                    # lazy state init on first step for this parameter
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(param_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(param_fp32)
                    if use_amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(param_fp32)
                else:
                    # make sure state lives on the same device/dtype
                    state['exp_avg'] = state['exp_avg'].to(param_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].to(param_fp32)
                    if use_amsgrad:
                        state['max_exp_avg_sq'] = state['max_exp_avg_sq'].to(param_fp32)

                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1

                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if use_amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                    # track the running maximum of the second moment
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    # L2 decay applied directly to the weights (scaled by lr)
                    param_fp32.add_(param_fp32, alpha=-group['weight_decay'] * group['lr'])
                param_fp32.addcdiv_(exp_avg, denom, value=-step_size)

                if param.data.dtype in {torch.float16, torch.bfloat16}:
                    param.data.copy_(param_fp32)
        return loss
class CmdTrade(Command):
    """Initiate or respond to a trade with another party.

    Usage:
      trade <other party> [accept|decline] [:emote]
    """
    key = 'trade'
    aliases = ['barter']
    locks = 'cmd:all()'
    help_category = 'General'

    def func(self):
        if not self.args:
            # BUG FIX: originally read ``self.caller.ndb.tradeevent``, an
            # attribute that is never set anywhere (the handler lives on
            # ``ndb.tradehandler``), so this branch crashed instead of
            # reporting the ongoing trade.
            if self.caller.ndb.tradehandler and self.caller.ndb.tradehandler.trade_started:
                self.caller.msg("You are already in a trade. Use 'end trade' to abort it.")
            else:
                self.caller.msg('Usage: trade <other party> [accept|decline] [:emote]')
            return
        self.args = self.args.strip()
        selfemote = ''
        theiremote = ''
        if ':' in self.args:
            # split off a trailing ":emote" and prepare both perspectives
            self.args, emote = [part.strip() for part in self.args.rsplit(':', 1)]
            selfemote = 'You say, "%s"\n ' % emote
            if self.caller.has_account:
                theiremote = '|c%s|n says, "%s"\n ' % (self.caller.key, emote)
            else:
                theiremote = '%s says, "%s"\n ' % (self.caller.key, emote)
        part_a = self.caller
        accept = 'accept' in self.args
        decline = 'decline' in self.args
        if accept:
            # BUG FIX: ``str.rstrip('accept')`` strips a *character set* and
            # could eat trailing a/c/e/p/t letters from the target's name;
            # remove the literal keyword suffix instead.
            part_b = self._strip_keyword(self.args, 'accept')
        elif decline:
            part_b = self._strip_keyword(self.args, 'decline')
        else:
            part_b = self.args
        part_b = self.caller.search(part_b)
        if not part_b:
            return
        if part_a == part_b:
            part_a.msg('You play trader with yourself.')
            return
        str_init_a = 'You ask to trade with %s. They need to accept within %s secs.'
        str_init_b = '%s wants to trade with you. Use |wtrade %s accept/decline [:emote]|n to answer (within %s secs).'
        str_noinit_a = '%s declines the trade'
        str_noinit_b = 'You decline trade with %s.'
        str_start_a = '%s starts to trade with you. See |wtrade help|n for aid.'
        str_start_b = 'You start to trade with %s. See |wtrade help|n for aid.'
        if not (accept or decline):
            # plain "trade <target>": report an existing trade, join the
            # target's counter-offer, or open a new proposal.
            if self.caller.ndb.tradehandler:
                if self.caller.ndb.tradehandler.trade_started:
                    string = 'You are already in trade with %s. You need to end trade first.'
                else:
                    string = 'You are already trying to initiate trade with %s. You need to decline that trade first.'
                self.caller.msg(string % part_b.key)
            elif part_b.ndb.tradehandler and part_b.ndb.tradehandler.part_b == part_a:
                # the target already proposed a trade to us — join it directly
                part_b.ndb.tradehandler.join(part_a)
                part_b.msg(theiremote + (str_start_a % part_a.key))
                part_a.msg(selfemote + (str_start_b % part_b.key))
            else:
                TradeHandler(part_a, part_b)
                part_a.msg(selfemote + (str_init_a % (part_b.key, TRADE_TIMEOUT)))
                part_b.msg(theiremote + (str_init_b % (part_a.key, part_a.key, TRADE_TIMEOUT)))
                part_a.scripts.add(TradeTimeout)
            return
        elif accept:
            if part_a.ndb.tradehandler:
                part_a.msg('You are already in trade with %s. You need to end that first.' % part_b.key)
                return
            if part_b.ndb.tradehandler.join(part_a):
                part_b.msg(theiremote + (str_start_a % part_a.key))
                part_a.msg(selfemote + (str_start_b % part_b.key))
            else:
                part_a.msg('No trade proposal to accept.')
            return
        else:
            # decline
            if part_a.ndb.tradehandler and part_a.ndb.tradehandler.part_b == part_b:
                # BUG FIX: was ``... .part_b == part_a`` — always False since
                # a self-trade returns earlier, so aborting one's own proposal
                # never triggered.  TODO confirm against upstream contrib.
                part_a.ndb.tradehandler.finish(force=True)
                part_b.msg(theiremote + ('%s aborted trade attempt with you.' % part_a))
                part_a.msg(selfemote + ('You aborted the trade attempt with %s.' % part_b))
            elif part_b.ndb.tradehandler and part_b.ndb.tradehandler.unjoin(part_a):
                part_b.msg(theiremote + (str_noinit_a % part_a.key))
                part_a.msg(selfemote + (str_noinit_b % part_b.key))
            else:
                part_a.msg('No trade proposal to decline.')
            return

    @staticmethod
    def _strip_keyword(text, keyword):
        # remove a literal trailing keyword (not a character set like rstrip)
        if text.endswith(keyword):
            text = text[:-len(keyword)]
        return text.strip()
def resize_images_bilinear(X, height_factor=1, width_factor=1, target_height=None, target_width=None, data_format='default'):
    """Bilinearly resize a 4D batch of images.

    Either scales the spatial dims by ``(height_factor, width_factor)`` or,
    when both ``target_height`` and ``target_width`` are given, resizes to
    that exact size.  Supports 'channels_first' and 'channels_last'.

    Raises:
        ValueError: on an unknown data_format.  (BUG FIX: was a bare
        ``Exception``; ``ValueError`` subclasses it, so existing callers
        still catch the error.)
    """
    if data_format == 'default':
        data_format = K.image_data_format()
    if data_format == 'channels_first':
        original_shape = K.int_shape(X)
        if target_height and target_width:
            new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))
        else:
            new_shape = tf.shape(X)[2:]
            new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        # resize_bilinear expects NHWC: transpose, resize, transpose back
        X = permute_dimensions(X, [0, 2, 3, 1])
        X = tf.image.resize_bilinear(X, new_shape)
        X = permute_dimensions(X, [0, 3, 1, 2])
        if target_height and target_width:
            X.set_shape((None, None, target_height, target_width))
        else:
            X.set_shape((None, None, (original_shape[2] * height_factor), (original_shape[3] * width_factor)))
        return X
    elif data_format == 'channels_last':
        original_shape = K.int_shape(X)
        if target_height and target_width:
            new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))
        else:
            new_shape = tf.shape(X)[1:3]
            new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        X = tf.image.resize_bilinear(X, new_shape)
        if target_height and target_width:
            X.set_shape((None, target_height, target_width, None))
        else:
            X.set_shape((None, (original_shape[1] * height_factor), (original_shape[2] * width_factor), None))
        return X
    else:
        raise ValueError('Invalid data_format: ' + data_format)
def test_collect_symlink_out_of_tree(pytester: Pytester) -> None:
    """Collecting through a symlink that points outside the rootdir should
    produce node ids relative to the symlink target's directory."""
    sub = pytester.mkdir('sub')
    real = sub.joinpath('test_real.py')
    # NOTE(review): the embedded test source lost its indentation when this
    # file was flattened; dedent() leaves the string unchanged either way.
    real.write_text(textwrap.dedent('\n def test_nodeid(request):\n # Should not contain sub/ prefix.\n assert request.node.nodeid == "test_real.py::test_nodeid"\n '), encoding='utf-8')
    out_of_tree = pytester.mkdir('out_of_tree')
    symlink_to_sub = out_of_tree.joinpath('symlink_to_sub')
    symlink_or_skip(sub, symlink_to_sub)  # skips when the OS lacks symlink support
    os.chdir(sub)
    result = pytester.runpytest('-vs', ('--rootdir=%s' % sub), symlink_to_sub)
    result.stdout.fnmatch_lines(['test_real.py::test_nodeid PASSED'])
    assert (result.ret == 0)
# NOTE(review): the bare fragment ``(tryfirst=True)`` was a decorator whose
# name was stripped in flattening; restored as the standard pytest hook marker.
import pytest  # harmless if already imported at the file top

@pytest.hookimpl(tryfirst=True)
def pytest_runtest_call(item: Item) -> None:
    """Evaluate deferred factoryboy fixtures just before the test body runs."""
    try:
        request = item._request
    except AttributeError:
        # not a function item (no fixture request) — nothing to evaluate
        return
    factoryboy_request = request.getfixturevalue('factoryboy_request')
    factoryboy_request.evaluate(request)
    assert not factoryboy_request.deferred
    request.config.hook.pytest_factoryboy_done(request=request)
def series_filter(values, kernel_size=3):
    """Smooth *values* with a trailing moving average of width *kernel_size*.

    The first ``kernel_size`` entries use a growing window (mean of all
    points seen so far); afterwards a full sliding window applies.

    Returns a float ndarray of the same length as *values*.
    """
    smoothed = np.cumsum(values, dtype=float)
    # full windows: difference of cumulative sums is a rolling sum
    smoothed[kernel_size:] = smoothed[kernel_size:] - smoothed[:-kernel_size]
    smoothed[kernel_size:] /= kernel_size
    # warm-up region: divide the running sum by the window size so far
    for idx in range(1, kernel_size):
        smoothed[idx] /= idx + 1
    return smoothed
class Effect6574(BaseEffect):
    """Passive skill effect: Capital Railgun Specialization damage bonus.

    Presumably auto-generated from EVE dogma data — confirm before editing.
    """
    type = 'passive'

    # NOTE: effect handlers in this codebase are called unbound (no ``self``).
    def handler(fit, src, context, projectionRange, **kwargs):
        # Boost damageMultiplier on all modules requiring the specialization
        # skill, scaled by the source skill's level.
        lvl = src.level
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Capital Railgun Specialization')), 'damageMultiplier', (src.getModifiedItemAttr('damageMultiplierBonus') * lvl), **kwargs)
def prepare_parser():
    """Build the ArgumentParser for the ImageNet HDF5 conversion scripts.

    Returns:
        argparse.ArgumentParser with dataset/data/batching/compression options.
    """
    usage = 'Parser for ImageNet HDF5 scripts.'
    parser = ArgumentParser(description=usage)
    parser.add_argument('--dataset', type=str, default='I128', help='Which Dataset to train on, out of I128, I256, C10, C100;Append "_hdf5" to use the hdf5 version for ISLVRC (default: %(default)s)')
    parser.add_argument('--data_root', type=str, default='data', help='Default location where data is stored (default: %(default)s)')
    parser.add_argument('--batch_size', type=int, default=256, help='Default overall batchsize (default: %(default)s)')
    parser.add_argument('--num_workers', type=int, default=16, help='Number of dataloader workers (default: %(default)s)')
    # BUG FIX: help text was a copy-paste of --batch_size's description.
    parser.add_argument('--chunk_size', type=int, default=500, help='Chunk size for the output HDF5 dataset (default: %(default)s)')
    parser.add_argument('--compression', action='store_true', default=False, help='Use LZF compression? (default: %(default)s)')
    return parser
def nodes_to_html(nodes):
    """Serialize a tree of text/element nodes into an HTML string.

    Nodes are either plain strings (HTML-escaped) or dicts with a 'tag'
    plus optional 'attrs' and 'children'.  Uses an explicit stack instead
    of recursion so arbitrarily deep trees cannot overflow.
    """
    out = []
    append = out.append  # local alias: fastest lookup in the hot loop
    stack = []  # frames of (parent_child_list, index_of_open_element)
    curr = nodes
    i = (- 1)
    while True:
        i += 1
        if (i >= len(curr)):
            # finished this child list: close the parent tag, pop a frame
            if (not stack):
                break
            (curr, i) = stack.pop()
            append(f"</{curr[i]['tag']}>")
            continue
        node = curr[i]
        if isinstance(node, str):
            append(escape(node))
            continue
        append(f"<{node['tag']}")
        if node.get('attrs'):
            for (attr, value) in node['attrs'].items():
                append(f' {attr}="{escape(value)}"')
        if node.get('children'):
            # descend: remember where to resume in the parent list
            append('>')
            stack.append((curr, i))
            (curr, i) = (node['children'], (- 1))
            continue
        # childless element: self-close void elements, else emit empty pair
        if (node['tag'] in VOID_ELEMENTS):
            append('/>')
        else:
            append(f"></{node['tag']}>")
    return ''.join(out)
# NOTE(review): the two bare fragments originally carried ``@patch(...)``
# decorators whose names were stripped; restored with unittest.mock.patch.
# Decorators apply bottom-up, matching the parameter order below.
from unittest.mock import patch  # harmless if already imported at file top

@patch('randovania.cli._run_args', autospec=True)
@patch('randovania.cli._create_parser', autospec=True)
def test_run_cli(mock_create_parser: MagicMock, mock_run_args: MagicMock):
    """run_cli should parse argv[1:] and exit with _run_args' return code."""
    argv = [MagicMock(), MagicMock(), MagicMock()]
    mock_run_args.return_value = 1234
    with pytest.raises(SystemExit) as p:
        cli.run_cli(argv)
    mock_create_parser.return_value.parse_args.assert_called_once_with(argv[1:])
    mock_run_args.assert_called_once_with(mock_create_parser.return_value, mock_create_parser.return_value.parse_args.return_value)
    assert (p.value.code == 1234)
def simclr_train(train_loader, model, criterion, optimizer, epoch):
    """Train *model* for one epoch with a SimCLR-style contrastive objective.

    Each batch supplies an image and an augmented view; both views are
    stacked so the model embeds two views per sample.
    """
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_loader), [losses], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    for (i, batch) in enumerate(train_loader):
        images = batch['image']
        images_augmented = batch['image_augmented']
        (b, c, h, w) = images.size()
        # interleave the two views: (b, 2, c, h, w) -> (2b, c, h, w)
        input_ = torch.cat([images.unsqueeze(1), images_augmented.unsqueeze(1)], dim=1)
        input_ = input_.view((- 1), c, h, w)
        input_ = input_.cuda(non_blocking=True)
        # NOTE(review): targets are moved to GPU but unused below — the
        # criterion is self-supervised; confirm whether this is dead code.
        targets = batch['target'].cuda(non_blocking=True)
        output = model(input_).view(b, 2, (- 1))  # (batch, views, embedding)
        loss = criterion(output)
        losses.update(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if ((i % 25) == 0):
            progress.display(i)
# NOTE(review): this bare string was a decorator/registration call whose
# callable name was stripped during flattening (likely an XLA translation-rule
# registration keyed by this name) — confirm against the original file.
('iM_product_vect_jvp_vjp_translation')
def _iM_product_vect_jvp_vjp_translation(c, cotan, q, vect, q_tan, vect_tan):
    """Emit the XLA custom call for the iM-product JVP/VJP kernel.

    Selects the f32 or f64 wrapper based on the checked input dtype and
    returns a tuple-shaped CustomCall with explicit operand layouts.
    """
    (type_in, size_xla, dims_spec) = check_dim_imputs((cotan, q, vect, q_tan, vect_tan), c)
    # pick the wrapper symbol matching the operand precision
    op_name = (b'iM_prod_vect_jvp_vjp_wrapper_f32' if (type_in == np.float32) else b'iM_prod_vect_jvp_vjp_wrapper_f64')
    return xops.CustomCallWithLayout(c, op_name, operands=(size_xla, cotan, q, vect, q_tan, vect_tan), operand_shapes_with_layout=dims_spec, shape_with_layout=xla_client.Shape.tuple_shape((dims_spec[2], dims_spec[3], dims_spec[4], dims_spec[5])))
def run_locking_test(ctx):
    """Exercise the directory lock from this process and a subprocess.

    Verifies the lock is visible cross-process, re-entrant within this
    process, and fully released when the outer context exits.
    """
    with tempfile.TemporaryDirectory() as dir_name:
        assert (get_subprocess_lock_state(ctx, dir_name) == 'unlocked')
        with lock_ctx(dir_name):
            # the in-process registry keys locks by "<dir>-<pid>"
            dir_key = f'{dir_name}-{os.getpid()}'
            assert (dir_key in local_mem._locks)
            assert local_mem._locks[dir_key]
            assert (get_subprocess_lock_state(ctx, dir_name) == 'locked')
            with lock_ctx(dir_name, timeout=0.1):
                # re-entrant: a second acquisition here must not time out
                assert (get_subprocess_lock_state(ctx, dir_name) == 'locked')
            assert (get_subprocess_lock_state(ctx, dir_name) == 'locked')
        assert (get_subprocess_lock_state(ctx, dir_name) == 'unlocked')
def log(header, data, level=None):
    """Append *header* and *data* to the module-level logfile, if open.

    Non-string data is pretty-printed.  A non-empty header is prefixed
    with a newline plus the current timestamp; a separating newline is
    emitted only when both header and stripped data are non-empty.
    """
    if logfile is None:
        return  # logging disabled
    if level is not None:
        log_level_set(level)
    text = data if isinstance(data, str) else pp.pformat(data)
    if header:
        logfile.write('\n' + log_get_sec() + ' ')
    logfile.write(header)
    if header and text.strip():
        logfile.write('\n')
    logfile.write(text)
def subparser_call(self, parser, namespace, values, option_string=None):
    """Dispatch to a named subparser (mirrors argparse's subparsers action).

    Records the chosen parser name on *namespace* (unless dest is
    SUPPRESS), parses the remaining strings with the chosen subparser,
    and stashes anything unrecognized for the parent parser to report.
    """
    from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
    parser_name, *arg_strings = values
    if self.dest is not SUPPRESS:
        setattr(namespace, self.dest, parser_name)
    try:
        chosen = self._name_parser_map[parser_name]
    except KeyError:
        choices = ', '.join(self._name_parser_map)
        raise ArgumentError(self, _('unknown parser {!r} (choices: {})').format(parser_name, choices))
    namespace, arg_strings = chosen.parse_known_args(arg_strings, namespace)
    if arg_strings:
        # accumulate unrecognized args instead of erroring immediately
        vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
        getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# NOTE(review): ``_rewriter([IfElse])`` is the remnant of a stripped decorator
# — likely ``@node_rewriter([IfElse])`` — confirm against the original file.
_rewriter([IfElse])
def find_measurable_ifelse_mixture(fgraph, node):
    """Rewrite an IfElse over measurable branch RVs into MeasurableIfElse.

    Returns None (no rewrite) unless every branch variable is potentially
    measurable and resolves to a measurable IR output.
    """
    rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, 'preserve_rv_mappings', None)
    if (rv_map_feature is None):
        return None  # graph has no value-variable mapping to rewrite against
    op = node.op
    (if_var, *base_rvs) = node.inputs
    valued_rvs = rv_map_feature.rv_values.keys()
    if (not all((check_potential_measurability([base_var], valued_rvs) for base_var in base_rvs))):
        return None
    base_rvs = assume_measured_ir_outputs(valued_rvs, base_rvs)
    # IfElse yields n_outs outputs per branch => expect 2 * n_outs inputs
    if (len(base_rvs) != (op.n_outs * 2)):
        return None
    if (not all(((var.owner and isinstance(var.owner.op, MeasurableVariable)) for var in base_rvs))):
        return None
    return MeasurableIfElse(n_outs=op.n_outs).make_node(if_var, *base_rvs).outputs
def gen_char_embedding(pretrained_char_embedding_file=None, gram_dict=None, embedding_dim=300, output_file=None):
    """Build (or load a cached) embedding matrix for the chars in *gram_dict*.

    Args:
        pretrained_char_embedding_file: word2vec-format text file of vectors.
        gram_dict: mapping of char/gram -> row index in the output matrix.
        embedding_dim: width of each embedding row.
        output_file: .npy cache path; reused when it already exists.

    Returns:
        ndarray of shape (len(gram_dict), embedding_dim); rows stay zero
        for grams missing from the pretrained vectors.
    """
    if os.path.exists(output_file):
        return np.load(output_file, allow_pickle=True)
    word2vec = gensim.models.KeyedVectors.load_word2vec_format(pretrained_char_embedding_file, binary=False, unicode_errors='ignore')
    text_wordvec = np.zeros((len(gram_dict), embedding_dim))
    print('gen_word2vec.....')
    count = 0
    for (word, word_index) in gram_dict.items():
        count += 1
        if count % 500 == 0:
            print('count:{}.......'.format(count))
        try:
            text_wordvec[word_index] = word2vec[word]
        except KeyError:
            # BUG FIX: was a bare ``except`` that also hid IndexError etc.;
            # only out-of-vocabulary grams should be skipped here.
            print('exception:{}'.format(word))
            continue
    np.save(output_file, text_wordvec)
    return text_wordvec
class mixed_pdf(PDF):
    """Probability density that mixes two PDFs with a fixed weight.

    ``value`` is the convex combination of the component densities;
    ``generate`` samples each ray from pdf1 with probability
    ``pdf1_weight`` and from pdf2 otherwise.
    """

    def __init__(self, shape, pdf1, pdf2, pdf1_weight=0.5):
        self.pdf1_weight = pdf1_weight
        self.pdf2_weight = (1.0 - pdf1_weight)  # weights sum to 1
        self.shape = shape  # number of rays sampled per generate() call
        self.pdf1 = pdf1
        self.pdf2 = pdf2

    def value(self, ray_dir):
        # weighted sum of the two component densities
        return ((self.pdf1.value(ray_dir) * self.pdf1_weight) + (self.pdf2.value(ray_dir) * self.pdf2_weight))

    def generate(self):
        # per-ray Bernoulli choice between the two generators
        mask = np.random.rand(self.shape)
        return vec3.where((mask < self.pdf1_weight), self.pdf1.generate(), self.pdf2.generate())
def _get_asyncio_mode(config: Config) -> Mode:
    """Resolve the effective asyncio mode from the CLI option or ini setting.

    Raises:
        pytest.UsageError: when the configured value is not a valid Mode.
    """
    raw = config.getoption('asyncio_mode')
    if raw is None:
        # CLI option takes precedence; fall back to the ini file
        raw = config.getini('asyncio_mode')
    try:
        return Mode(raw)
    except ValueError:
        modes = ', '.join(m.value for m in Mode)
        raise pytest.UsageError(f'{raw!r} is not a valid asyncio_mode. Valid modes: {modes}.')
def main():
    """Summarize debug recordings for the active media-player test session.

    Returns:
        Process exit code: 0 on success, 1 on any expected error condition.
    """
    try:
        pathserv = fs.get_path_info_for_active_session()
    except mpexceptions.ExceptionUndefinedSamplesDir:
        print("The env var 'pyglet_mp_samples_dir' is not defined.")
        return 1
    except mpexceptions.ExceptionNoSessionIsActive:
        print('*** Error, no session active.')
        return 1
    count_bads = ins.CountBads()
    try:
        summarize(pathserv, count_bads)
    # (the exception name's "Brek" typo comes from the mpexceptions module)
    except mpexceptions.ExceptionAttemptToBrekReportsProtection:
        print('*** Error, attempt to overwrite reports when protect_reports is True.')
        return 1
    except mpexceptions.ExceptionNoDbgFilesPresent:
        print('*** Error, no dbg files present; maybe playmany should be run?')
        return 1
    return 0
# NOTE(review): the leading ``.parametrize(...)`` fragment lost its
# ``@pytest.mark`` prefix when this file was flattened; restored below.
import pytest  # harmless if already imported at the file top

@pytest.mark.parametrize('test_input, expected', [('1', '1st'), ('2', '2nd'), ('3', '3rd'), ('4', '4th'), ('11', '11th'), ('12', '12th'), ('13', '13th'), ('101', '101st'), ('102', '102nd'), ('103', '103rd'), ('111', '111th'), ('something else', 'something else'), (None, 'None'), (math.nan, 'NaN'), (math.inf, '+Inf'), ((- math.inf), '-Inf'), ('nan', 'NaN'), ('-inf', '-Inf')])
def test_ordinal(test_input: str, expected: str) -> None:
    """humanize.ordinal should append the correct English ordinal suffix
    and pass through non-numeric / non-finite inputs unchanged."""
    assert (humanize.ordinal(test_input) == expected)
# NOTE(review): the bare ``(ScheduleItem)`` fragment was a stripped decorator;
# restored as the standard Django admin registration (``admin`` is already in
# scope — it is used as the base class below).
@admin.register(ScheduleItem)
class ScheduleItemAdmin(admin.ModelAdmin):
    """Admin for conference schedule items: invitations, bookings, exports."""
    list_display = ('title', 'conference', 'status', 'language', 'slot', 'type', 'submission')
    list_filter = ('conference', 'status', 'type')
    ordering = ('conference', 'slot')
    form = ScheduleItemAdminForm
    fieldsets = (
        (_('Event'), {'fields': ('conference', 'type', 'status', 'language', 'title', 'slug', 'image', 'highlight_color', 'audience_level', 'description', 'submission', 'keynote', 'slido_url')}),
        (_('Schedule'), {'fields': ('slot', 'new_slot', 'notify_new_time_slot', 'duration', 'rooms')}),
        (_('Invitation'), {'fields': ('speaker_invitation_notes', 'speaker_invitation_sent_at')}),
        (_('Booking'), {'fields': ('attendees_total_capacity', 'spaces_left')}),
        (_('Voucher'), {'fields': ('exclude_from_voucher_generation',)}),
        (_('YouTube'), {'fields': ('youtube_video_id', 'video_uploaded_path')}),
    )
    autocomplete_fields = ('submission',)
    prepopulated_fields = {'slug': ('title',)}
    filter_horizontal = ('rooms',)
    search_fields = ('title', 'submission__title', 'submission__speaker__full_name', 'submission__speaker__email')
    inlines = [ScheduleItemAdditionalSpeakerInline, ScheduleItemAttendeeInline]
    actions = [send_schedule_invitation_to_all, send_schedule_invitation_to_uninvited, send_schedule_invitation_reminder_to_waiting, mark_speakers_to_receive_vouchers, upload_videos_to_youtube]
    readonly_fields = ('spaces_left',)

    def get_urls(self):
        """Prepend the custom email/export endpoints to the default URLs."""
        return ([path('email-speakers/', self.admin_site.admin_view(self.email_speakers), name='schedule-email-speakers'), path('<int:object_id>/export-attendees/', self.admin_site.admin_view(self.export_attendees), name='schedule-export-attendees')] + super().get_urls())

    def export_attendees(self, request, object_id: int):
        """Stream the schedule item's attendees as a CSV download."""
        schedule_item = ScheduleItem.objects.get(id=object_id)
        resource = ScheduleItemAttendeeResource()
        data = resource.export(schedule_item.attendees.all())
        csv_format = CSV()
        export_data = csv_format.export_data(data)
        date_str = timezone.now().strftime('%Y-%m-%d')
        response = HttpResponse(export_data, content_type=csv_format.get_content_type())
        response['Content-Disposition'] = f'attachment; filename="{schedule_item.slug}-attendees-{date_str}.csv"'
        return response

    def email_speakers(self, request):
        """Send a bulk email to every distinct speaker of a conference."""
        form = EmailSpeakersForm(request.POST or None)
        context = dict(self.admin_site.each_context(request), form=form)
        if request.method == 'POST' and form.is_valid():
            conference = form.cleaned_data['conference']
            subject = form.cleaned_data['subject']
            body = form.cleaned_data['body']
            only_speakers_without_ticket = form.cleaned_data['only_speakers_without_ticket']
            schedule_items = conference.schedule_items.filter(Q(submission__isnull=False) | Q(additional_speakers__isnull=False))
            notified_ids = set()  # dedupe: each speaker gets at most one email
            for schedule_item in schedule_items.all():
                if schedule_item.submission_id and (schedule_item.submission.speaker_id not in notified_ids):
                    send_speaker_communication_email(user_id=schedule_item.submission.speaker_id, subject=subject, body=body, only_speakers_without_ticket=only_speakers_without_ticket, conference=conference)
                    notified_ids.add(schedule_item.submission.speaker_id)
                for additional_speaker in schedule_item.additional_speakers.all():
                    if additional_speaker.user_id in notified_ids:
                        continue
                    notified_ids.add(additional_speaker.user_id)
                    send_speaker_communication_email(user_id=additional_speaker.user_id, subject=subject, body=body, only_speakers_without_ticket=only_speakers_without_ticket, conference=conference)
            messages.success(request, f'Scheduled {len(notified_ids)} emails.')
        return TemplateResponse(request, 'email-speakers.html', context)

    def spaces_left(self, obj):
        """Remaining bookable spaces, or None when capacity is unlimited."""
        if obj.attendees_total_capacity is None:
            return None
        return obj.attendees_total_capacity - obj.attendees.count()

    def save_form(self, request, form, change):
        """Apply an optional new time slot and notify speakers if requested."""
        if form.cleaned_data['new_slot']:
            form.instance.slot = form.cleaned_data['new_slot']
        return_value = super().save_form(request, form, change)
        if form.cleaned_data['notify_new_time_slot']:
            send_new_submission_time_slot(form.instance)
        return return_value
class UpdateSponsorInfoViewTests(TestCase):
    """Tests for the 'users:edit_sponsor_info' sponsor self-service view."""

    def setUp(self):
        # an applied sponsorship owned by the logged-in user, with one contact
        self.user = baker.make(settings.AUTH_USER_MODEL)
        self.client.force_login(self.user)
        # NOTE: 'submited_by' (sic) is the actual model field name.
        self.sponsorship = baker.make(Sponsorship, submited_by=self.user, status=Sponsorship.APPLIED, _fill_optional=True)
        self.sponsor = self.sponsorship.sponsor
        self.contact = baker.make('sponsors.SponsorContact', sponsor=self.sponsor)
        self.url = reverse('users:edit_sponsor_info', args=[self.sponsor.pk])
        # valid POST payload: sponsor fields + one-entry contact formset + logos
        self.data = {'description': 'desc', 'name': 'CompanyX', 'primary_phone': '+', 'mailing_address_line_1': '4th street', 'city': 'New York', 'postal_code': '10212', 'country': 'US', 'contact-0-id': self.contact.pk, 'contact-0-name': 'John', 'contact-0-email': '', 'contact-0-phone': '+', 'contact-0-primary': True, 'contact-TOTAL_FORMS': 1, 'contact-MAX_NUM_FORMS': 5, 'contact-MIN_NUM_FORMS': 1, 'contact-INITIAL_FORMS': 1, 'web_logo': get_static_image_file_as_upload('psf-logo.png', 'logo.png'), 'print_logo': get_static_image_file_as_upload('psf-logo_print.png', 'logo_print.png')}

    def test_display_template_with_sponsor_info(self):
        # GET renders the update template with the sponsor and its form
        response = self.client.get(self.url)
        context = response.context
        self.assertTemplateUsed(response, 'users/sponsor_info_update.html')
        self.assertEqual(context['sponsor'], self.sponsor)
        self.assertIsInstance(context['form'], SponsorUpdateForm)

    def test_404_if_sponsor_does_not_exist(self):
        self.sponsor.delete()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 404)

    def test_404_if_sponsor_from_sponsorship_from_another_user(self):
        # users must not edit sponsors from other users' sponsorships
        sponsorship = baker.make(Sponsorship, _fill_optional=True)
        self.url = reverse('users:edit_sponsor_info', args=[sponsorship.sponsor.pk])
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 404)

    def test_render_form_with_errors(self):
        # an empty POST re-renders the page (200) with form errors
        self.data = {}
        response = self.client.post(self.url, data=self.data)
        form = response.context['form']
        self.assertEqual(200, response.status_code)
        self.assertTrue(form.errors)

    def test_update_sponsor_and_contact(self):
        # a valid POST updates both sponsor and contact, then redirects
        response = self.client.post(self.url, data=self.data)
        self.sponsor.refresh_from_db()
        self.contact.refresh_from_db()
        self.assertEqual(302, response.status_code)
        self.assertEqual('desc', self.sponsor.description)
        self.assertEqual('John', self.contact.name)
# NOTE(review): the fragment ``_fixtures(WebFixture)`` was a stripped
# decorator — restored as reahl-tofu's ``@with_fixtures``; confirm that the
# original file imports ``with_fixtures``.
@with_fixtures(WebFixture)
def test_html5_page(web_fixture):
    """HTML5Page renders a full doctype'd page with title and default slots."""
    fixture = web_fixture
    widget = HTML5Page(fixture.view, title='It: $current_title')
    widget.add_default_slot('slot1', P.factory())
    tester = WidgetTester(widget)
    rendered_html = tester.render_html()
    # the $current_title template variable is substituted from the view title
    head = ('<head><title>It: %s</title></head>' % fixture.view.title)
    expected_regex = ('^<!DOCTYPE html><html.*class="no-js"><script>.*</script>%s<body><div id="_reahl_out_of_bound_container"></div></body></html>$' % head)
    assert re.match(expected_regex, rendered_html.replace('\n', ''))
    assert (list(widget.default_slot_definitions.keys()) == ['slot1'])
def _make_xunit_fixture(obj: type, setup_name: str, teardown_name: str, cleanup_name: Optional[str], scope: Scope, pass_self: bool):
    """Wrap unittest-style setup/teardown methods of *obj* into an autouse fixture.

    Returns None when the class defines neither hook; otherwise a pytest
    fixture that runs setup before each use and teardown/cleanup after.
    """
    setup = getattr(obj, setup_name, None)
    teardown = getattr(obj, teardown_name, None)
    if setup is None and teardown is None:
        return None
    if cleanup_name:
        cleanup = getattr(obj, cleanup_name, lambda *args: None)
    else:
        def cleanup(*args):
            pass

    # NOTE(review): the bare ``(scope=..., autouse=True, name=...)`` fragment
    # was a stripped ``@pytest.fixture`` decorator; restored below.
    @pytest.fixture(scope=scope.value, autouse=True, name=f'_unittest_{setup_name}_fixture_{obj.__qualname__}')
    def fixture(self, request: FixtureRequest) -> Generator[(None, None, None)]:
        if _is_skipped(self):
            reason = self.__unittest_skip_why__
            raise pytest.skip.Exception(reason, _use_item_location=True)
        if setup is not None:
            try:
                if pass_self:
                    setup(self, request.function)
                else:
                    setup()
            except Exception:
                # mirror unittest semantics: run cleanups even if setup fails
                if pass_self:
                    cleanup(self)
                else:
                    cleanup()
                raise
        yield
        try:
            if teardown is not None:
                if pass_self:
                    teardown(self, request.function)
                else:
                    teardown()
        finally:
            if pass_self:
                cleanup(self)
            else:
                cleanup()

    return fixture
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
    """Upload each failed test's report to the paste service (--pastebin=failed)."""
    if (terminalreporter.config.option.pastebin != 'failed'):
        return
    if ('failed' in terminalreporter.stats):
        terminalreporter.write_sep('=', 'Sending information to Paste Service')
        for rep in terminalreporter.stats['failed']:
            try:
                # prefer the file/line location of the last traceback entry
                msg = rep.longrepr.reprtraceback.reprentries[(- 1)].reprfileloc
            except AttributeError:
                # non-standard longrepr (e.g. collect error): fall back to headline
                msg = terminalreporter._getfailureheadline(rep)
            file = StringIO()
            tw = create_terminal_writer(terminalreporter.config, file)
            rep.toterminal(tw)
            s = file.getvalue()
            assert len(s)
            pastebinurl = create_new_paste(s)
            terminalreporter.write_line(f'{msg} --> {pastebinurl}')
# NOTE(review): the three bare ``.parametrize(...)`` fragments lost their
# ``@pytest.mark`` prefixes when this file was flattened; restored below.
import pytest  # harmless if already imported at the file top

@pytest.mark.parametrize('state_index', [0, 10, 40])
@pytest.mark.parametrize('history_steps', [0, 5, 10])
@pytest.mark.parametrize('future_steps', [0, 5, 10])
def test_get_agent_context(zarr_dataset: ChunkedDataset, state_index: int, history_steps: int, future_steps: int) -> None:
    """get_agent_context must match hand-sliced frames/agents/tl-faces,
    with the history outputs in reverse order (most recent first)."""
    scene = zarr_dataset.scenes[0]
    frames = zarr_dataset.frames[get_frames_slice_from_scenes(scene)]
    agents = zarr_dataset.agents[get_agents_slice_from_frames(*frames[[0, (- 1)]])]
    tls = zarr_dataset.tl_faces[get_tl_faces_slice_from_frames(*frames[[0, (- 1)]])]
    (frames_his_f, frames_fut_f, agents_his_f, agents_fut_f, tls_his_f, tls_fut_f) = get_agent_context(state_index, frames, agents, tls, history_steps, future_steps)
    # --- future: frames (state_index, state_index + future_steps] ---
    first_idx = (state_index + 1)
    last_idx = ((state_index + 1) + future_steps)
    frames_fut = frames[first_idx:last_idx]
    agents_fut = filter_agents_by_frames(frames_fut, zarr_dataset.agents)
    tls_fut = filter_tl_faces_by_frames(frames_fut, zarr_dataset.tl_faces)
    assert np.all((frames_fut_f['timestamp'] == frames_fut['timestamp']))
    assert (len(agents_fut) == len(agents_fut_f))
    for idx in range(len(agents_fut)):
        assert np.all((agents_fut_f[idx] == agents_fut[idx]))
    assert (len(tls_fut) == len(tls_fut_f))
    for idx in range(len(tls_fut)):
        assert np.all((tls_fut_f[idx] == tls_fut[idx]))
    # --- history: frames [state_index - history_steps, state_index], reversed ---
    first_idx = max((state_index - history_steps), 0)
    last_idx = (state_index + 1)
    frames_his = frames[first_idx:last_idx]
    agents_his = filter_agents_by_frames(frames_his, zarr_dataset.agents)
    tls_his = filter_tl_faces_by_frames(frames_his, zarr_dataset.tl_faces)
    assert np.all((frames_his_f['timestamp'] == frames_his['timestamp'][::(- 1)]))
    assert (len(agents_his) == len(agents_his_f))
    for idx in range(len(agents_his)):
        assert np.all((agents_his_f[idx] == agents_his[((len(agents_his) - idx) - 1)]))
    assert (len(tls_his) == len(tls_his_f))
    for idx in range(len(tls_his)):
        assert np.all((tls_his_f[idx] == tls_his[((len(tls_his) - idx) - 1)]))
class DataProvider(BaseDataProvider):
    """Data provider with thread/process-parallel batch preparation on top
    of BaseDataProvider."""

    def __init__(self, dataset: typing.Union[(str, list, pd.DataFrame)], data_preprocessors: typing.List[typing.Callable]=None, batch_size: int=4, shuffle: bool=True, initial_epoch: int=1, augmentors: typing.List[Augmentor]=None, transformers: typing.List[Transformer]=None, batch_postprocessors: typing.List[typing.Callable]=None, skip_validation: bool=True, limit: int=None, use_cache: bool=False, workers: int=os.cpu_count(), use_multiprocessing: bool=False, max_queue_size: int=5):
        """Forward the shared arguments to BaseDataProvider and store the
        executor configuration (workers, multiprocessing flag, queue size)."""
        super(DataProvider, self).__init__(dataset=dataset, data_preprocessors=data_preprocessors, batch_size=batch_size, shuffle=shuffle, initial_epoch=initial_epoch, augmentors=augmentors, transformers=transformers, batch_postprocessors=batch_postprocessors, skip_validation=skip_validation, limit=limit, use_cache=use_cache)
        self.workers = workers
        self.use_multiprocessing = use_multiprocessing
        self.max_queue_size = max_queue_size

    def start_executor(self) -> None:
        """Lazily create the worker executor and the prefetching handler."""
        if not hasattr(self, '_executor'):
            if self.use_multiprocessing:
                try:
                    self._executor = ProcessExecutor(self.process_data, self.workers)
                except Exception:
                    # BUG FIX: was a bare ``except`` (would even swallow
                    # KeyboardInterrupt/SystemExit); fall back to threads.
                    self.use_multiprocessing = False
                    self.logger.error('Failed to start multiprocessing, switching to multithreading')
                    self._executor = ThreadExecutor(self.process_data, self.workers)
            else:
                self._executor = ThreadExecutor(self.process_data, self.workers)
        if not hasattr(self, '_sequenceHandler'):
            self._sequenceHandler = SequenceHandler(self.__getitem__, len(self), self.max_queue_size, self._shuffle)

    def __iter__(self):
        """Yield prepared batches for one epoch, then tear down the executor."""
        self.start_executor()
        for index in range(len(self)):
            results = self._sequenceHandler(index)
            yield results
        self.__exit__()

    def __exit__(self):
        # release executor and handler so the next epoch rebuilds them fresh
        self._executor.__exit__()
        del self._executor
        self._sequenceHandler.__exit__()
        del self._sequenceHandler
def run_tests(tests, xserver=True):
    """Run each test script in a subprocess and classify PASSED/SKIPPED/FAILED.

    Switches to a virtual terminal first (VT7 with X, VT1 without).

    Returns:
        Number of failed tests, or len(tests) if the VT switch itself fails.
    """
    if (not xserver):
        vt = 1
    else:
        vt = 7
    if (os.system(f'sudo chvt {vt}') != 0):
        print('FAILED to switch VT')
        return len(tests)
    time.sleep(3)  # give the console a moment to settle after chvt
    num_failed = 0
    for test in tests:
        clean_directory()
        print('Running ', test, '... ', sep='', end='', flush=True)
        try:
            output = subprocess.check_output(['python3', test], timeout=90, stderr=subprocess.STDOUT)
            output = output.decode('utf-8')
            output = output.split('\n')
            test_passed = True
            test_skipped = False
            for line in output:
                line = line.lower()
                # the first four patterns are known-noisy lines, not errors
                if ('test pattern modes' in line):
                    pass
                elif ('qxcbconnection' in line):
                    pass
                elif ('xdg_runtime_dir' in line):
                    pass
                elif ('unable to set controls' in line):
                    pass
                elif ('skipped' in line):
                    test_skipped = True
                elif ('error' in line):
                    print('\tERROR')
                    print('\t', line)
                    test_passed = False
                    num_failed = (num_failed + 1)
                    break  # one error is enough to fail the test
            if test_passed:
                print(('\tSKIPPED' if test_skipped else '\tPASSED'))
        except subprocess.CalledProcessError as e:
            # non-zero exit status from the test script
            print('\tFAILED')
            print_subprocess_output(e)
            num_failed = (num_failed + 1)
        except subprocess.TimeoutExpired as e:
            print('\tTIMED OUT')
            print_subprocess_output(e)
            num_failed = (num_failed + 1)
    return num_failed
def train(start_epoch):
    """Main training loop: run epochs, periodically evaluate and checkpoint.

    Relies on module-level globals (MAX_EPOCH, optimizer, net, bnm_scheduler,
    LOG_DIR, log_string, ...) set up by the surrounding script.
    """
    global EPOCH_CNT
    # BUG FIX: removed the unused ``min_loss`` local.
    loss = 0
    for epoch in range(start_epoch, MAX_EPOCH):
        EPOCH_CNT = epoch
        log_string(('**** EPOCH %03d ****' % epoch))
        log_string(('Current learning rate: %f' % get_current_lr(epoch)))
        log_string(('Current BN decay momentum: %f' % bnm_scheduler.lmbd(bnm_scheduler.last_epoch)))
        log_string(str(datetime.now()))
        np.random.seed()  # reseed so augmentations differ across epochs/workers
        train_one_epoch()
        # evaluate on the first epoch and every 10th epoch thereafter
        if EPOCH_CNT == 0 or EPOCH_CNT % 10 == 9:
            loss = evaluate_one_epoch()
        save_dict = {'epoch': (epoch + 1), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss}
        try:
            # BUG FIX: was a bare ``except`` — only the missing ``.module``
            # attribute (non-DataParallel nets) should be tolerated here.
            save_dict['model_state_dict'] = net.module.state_dict()
        except AttributeError:
            save_dict['model_state_dict'] = net.state_dict()
        torch.save(save_dict, os.path.join(LOG_DIR, 'checkpoint.tar'))
# NOTE(review): the bare ``.unit()`` fragment lost its ``@pytest.mark`` prefix
# when this file was flattened; restored below — confirm the marker name.
import pytest  # harmless if already imported at the file top

@pytest.mark.unit()
def test_module_name_from_path(tmp_path: Path) -> None:
    """_module_name_from_path turns a file path into a dotted module name,
    relative to the rootdir when the path is inside it."""
    result = _module_name_from_path((tmp_path / 'src/project/task_foo.py'), tmp_path)
    assert (result == 'src.project.task_foo')
    # outside the rootdir: the whole path (minus drive/anchor) is used
    result = _module_name_from_path(Path('/home/foo/task_foo.py'), Path('/bar'))
    assert (result == 'home.foo.task_foo')
    # __init__.py maps to the package name, not 'pkg.__init__'
    result = _module_name_from_path((tmp_path / 'src/app/__init__.py'), tmp_path)
    assert (result == 'src.app')
# NOTE(review): ``_module()`` below is the remnant of a stripped decorator —
# in mmdetection this head is registered via a registry decorator such as
# ``@HEADS.register_module()``; confirm against the original file.
_module()
class NASFCOSHead(FCOSHead):
    """FCOS head variant whose conv towers follow a NAS-searched layer spec."""

    def _init_layers(self):
        """Build the cls/reg towers from the searched arch config, then the
        prediction convs and per-level scales."""
        dconv3x3_config = dict(type='DCNv2', kernel_size=3, use_bias=True, deform_groups=2, padding=1)
        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1_config = dict(type='Conv', kernel_size=1)
        self.arch_config = [dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for (i, op_) in enumerate(self.arch_config):
            op = copy.deepcopy(op_)  # the pops below mutate op; keep arch_config intact
            chn = (self.in_channels if (i == 0) else self.feat_channels)
            assert isinstance(op, dict)
            use_bias = op.pop('use_bias', False)
            padding = op.pop('padding', 0)
            kernel_size = op.pop('kernel_size')
            # remaining keys of ``op`` (e.g. type/deform_groups) act as conv_cfg
            module = ConvModule(chn, self.feat_channels, kernel_size, stride=1, padding=padding, norm_cfg=self.norm_cfg, bias=use_bias, conv_cfg=op)
            # both towers get independent copies of the same searched layer
            self.cls_convs.append(copy.deepcopy(module))
            self.reg_convs.append(copy.deepcopy(module))
        self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

    def init_weights(self):
        """Normal-init the prediction convs (cls bias from prior prob) and
        Xavier-init the tower convs."""
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_reg, std=0.01)
        normal_init(self.conv_centerness, std=0.01)
        normal_init(self.conv_cls, std=0.01, bias=bias_cls)
        for branch in [self.cls_convs, self.reg_convs]:
            for module in branch.modules():
                if (isinstance(module, ConvModule) and isinstance(module.conv, nn.Conv2d)):
                    caffe2_xavier_init(module.conv)
# NOTE(review): the bare ``_test`` fragment was a stripped decorator — Keras'
# legacy-interface tests use ``@keras_test``; confirm the original file
# imports it before relying on this restoration.
@keras_test
def test_global_maxpooling2d_legacy_interface():
    """Legacy ``dim_ordering`` kwargs must yield configs identical to the
    modern ``data_format`` equivalents."""
    old_layer = keras.layers.GlobalMaxPooling2D(dim_ordering='tf', name='global_maxpool2d')
    new_layer = keras.layers.GlobalMaxPool2D(data_format='channels_last', name='global_maxpool2d')
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
    old_layer = keras.layers.GlobalMaxPooling2D(dim_ordering='th', name='global_maxpool2d')
    new_layer = keras.layers.GlobalMaxPool2D(data_format='channels_first', name='global_maxpool2d')
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
    old_layer = keras.layers.GlobalMaxPooling2D(dim_ordering='default', name='global_maxpool2d')
    new_layer = keras.layers.GlobalMaxPool2D(name='global_maxpool2d')
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
class Class(PyCollector):
    """Collector for test methods (and xunit-style setup/teardown) defined
    on a Python class."""

    def from_parent(cls, parent, *, name, obj=None, **kw):
        # NOTE(review): `cls` first parameter suggests a stripped @classmethod decorator — confirm.
        """Alternate constructor: public, parent-aware Node construction."""
        return super().from_parent(name=name, parent=parent, **kw)

    def newinstance(self):
        # Fresh instance of the user's class; used for fixture parsing.
        return self.obj()

    def collect(self) -> Iterable[Union[(nodes.Item, nodes.Collector)]]:
        """Collect items from the class, refusing classes that define their
        own __init__ or __new__ (they cannot be instantiated blindly)."""
        if (not safe_getattr(self.obj, '__test__', True)):
            return []
        if hasinit(self.obj):
            assert (self.parent is not None)
            self.warn(PytestCollectionWarning(('cannot collect test class %r because it has a __init__ constructor (from: %s)' % (self.obj.__name__, self.parent.nodeid))))
            return []
        elif hasnew(self.obj):
            assert (self.parent is not None)
            self.warn(PytestCollectionWarning(('cannot collect test class %r because it has a __new__ constructor (from: %s)' % (self.obj.__name__, self.parent.nodeid))))
            return []
        # Make the class-level xunit hooks visible as autouse fixtures.
        self._inject_setup_class_fixture()
        self._inject_setup_method_fixture()
        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
        return super().collect()

    def _inject_setup_class_fixture(self) -> None:
        """Inject an autouse class-scoped fixture wrapping
        setup_class/teardown_class, if either is defined."""
        setup_class = _get_first_non_fixture_func(self.obj, ('setup_class',))
        teardown_class = _get_first_non_fixture_func(self.obj, ('teardown_class',))
        if ((setup_class is None) and (teardown_class is None)):
            return

        # NOTE(review): truncated decorator — presumably `@fixtures.fixture(...)`; confirm against VCS.
        (autouse=True, scope='class', name=f'_xunit_setup_class_fixture_{self.obj.__qualname__}')
        def xunit_setup_class_fixture(cls) -> Generator[(None, None, None)]:
            if (setup_class is not None):
                func = getimfunc(setup_class)
                _call_with_optional_argument(func, self.obj)
            (yield)
            if (teardown_class is not None):
                func = getimfunc(teardown_class)
                _call_with_optional_argument(func, self.obj)

        self.obj.__pytest_setup_class = xunit_setup_class_fixture

    def _inject_setup_method_fixture(self) -> None:
        """Inject an autouse function-scoped fixture wrapping
        setup_method/teardown_method (falling back to nose-style
        setup/teardown when the nose plugin is active)."""
        has_nose = self.config.pluginmanager.has_plugin('nose')
        setup_name = 'setup_method'
        setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
        emit_nose_setup_warning = False
        if ((setup_method is None) and has_nose):
            setup_name = 'setup'
            emit_nose_setup_warning = True
            setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
        teardown_name = 'teardown_method'
        teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
        emit_nose_teardown_warning = False
        if ((teardown_method is None) and has_nose):
            teardown_name = 'teardown'
            emit_nose_teardown_warning = True
            teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
        if ((setup_method is None) and (teardown_method is None)):
            return

        # NOTE(review): truncated decorator — presumably `@fixtures.fixture(...)`; confirm against VCS.
        (autouse=True, scope='function', name=f'_xunit_setup_method_fixture_{self.obj.__qualname__}')
        def xunit_setup_method_fixture(self, request) -> Generator[(None, None, None)]:
            method = request.function
            if (setup_method is not None):
                func = getattr(self, setup_name)
                _call_with_optional_argument(func, method)
                if emit_nose_setup_warning:
                    warnings.warn(NOSE_SUPPORT_METHOD.format(nodeid=request.node.nodeid, method='setup'), stacklevel=2)
            (yield)
            if (teardown_method is not None):
                func = getattr(self, teardown_name)
                _call_with_optional_argument(func, method)
                if emit_nose_teardown_warning:
                    warnings.warn(NOSE_SUPPORT_METHOD.format(nodeid=request.node.nodeid, method='teardown'), stacklevel=2)

        self.obj.__pytest_setup_method = xunit_setup_method_fixture
class SigmoidFocalLoss(nn.Module):
    """Sigmoid focal loss, summed over all elements.

    Thin module wrapper around the CUDA-only `sigmoid_focal_loss` kernel.
    """

    def __init__(self, gamma, alpha):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, logits, targets):
        # The underlying kernel only has a CUDA implementation.
        assert logits.is_cuda
        per_element = sigmoid_focal_loss(logits, targets, self.gamma, self.alpha)
        return per_element.sum()

    def __repr__(self):
        return '{}(gamma={}, alpha={})'.format(self.__class__.__name__, str(self.gamma), str(self.alpha))
(frozen=True, slots=True)  # NOTE(review): truncated decorator — presumably `@dataclasses.dataclass(frozen=True, slots=True)`; confirm against VCS.
class ConfigurableNode(Node):
    """A node whose traversal requirement is supplied by the game patches
    rather than being fixed at build time."""

    def __repr__(self) -> str:
        return f'ConfigurableNode({self.name!r})'

    def requirement_to_leave(self, context: NodeContext) -> Requirement:
        # Look up this node's patched requirement via its provider identifier.
        return context.patches.configurable_nodes[context.node_provider.identifier_for_node(self)]
class BosonOperatorTest(unittest.TestCase):
    """Tests for BosonOperator.is_normal_ordered / is_boson_preserving."""

    def test_is_normal_ordered_empty(self):
        op = (BosonOperator() * 2)
        self.assertTrue(op.is_normal_ordered())

    def test_is_normal_ordered_number(self):
        op = (BosonOperator('2^ 2') * (- 1j))
        self.assertTrue(op.is_normal_ordered())

    def test_is_normal_ordered_reversed(self):
        # Annihilator before creator on the same mode is NOT normal ordered.
        self.assertFalse(BosonOperator('2 2^').is_normal_ordered())

    def test_is_normal_ordered_create(self):
        self.assertTrue(BosonOperator('11^').is_normal_ordered())

    def test_is_normal_ordered_annihilate(self):
        self.assertTrue(BosonOperator('0').is_normal_ordered())

    def test_is_normal_ordered_long_not(self):
        self.assertTrue(BosonOperator('0 5^ 3^ 2^ 1^').is_normal_ordered())

    def test_is_normal_ordered_outoforder(self):
        # NOTE(review): for bosons, mode ordering among like operators does not
        # affect normal ordering (operators on distinct modes commute).
        self.assertTrue(BosonOperator('0 1').is_normal_ordered())

    def test_is_normal_ordered_long_descending(self):
        self.assertTrue(BosonOperator('5^ 3^ 2^ 1^ 0').is_normal_ordered())

    def test_is_normal_ordered_multi(self):
        op = (BosonOperator('4 3 2^ 2') + BosonOperator('1 2'))
        self.assertTrue(op.is_normal_ordered())

    def test_is_normal_ordered_multiorder(self):
        op = (BosonOperator('4 3 2 1') + BosonOperator('3 2'))
        self.assertTrue(op.is_normal_ordered())

    def test_is_boson_preserving_BosonOperator(self):
        op = BosonOperator()
        self.assertTrue(op.is_boson_preserving())

    def test_is_boson_preserving_number(self):
        op = number_operator(n_modes=5, mode=3, parity=1)
        self.assertTrue(op.is_boson_preserving())

    def test_is_boson_preserving_three(self):
        # Two creators vs one annihilator: particle number changes.
        op = BosonOperator(((0, 1), (2, 1), (4, 0)))
        self.assertFalse(op.is_boson_preserving())

    def test_is_boson_preserving_out_of_order(self):
        op = BosonOperator(((0, 1), (2, 0), (1, 1), (3, 0)))
        self.assertTrue(op.is_boson_preserving())
('/rename_subnet', methods=['POST'])  # NOTE(review): truncated route decorator — presumably `@app.route(...)` or similar; confirm against VCS.
_params([dict(name='old_region', type=str, required=True, nullable=False), dict(name='new_region', type=str, required=True, nullable=False)], need_username=True)
_wrapper_json
_web_opration_log('rename_subnet', get_op_info=rename_subnet_log)
def rename_subnet(old_region, new_region, username):
    """Validate the new region name, then rename the subnet region,
    recording `username` for auditing."""
    _is_valide_region(new_region)
    return SubnetIpDal.rename_subnet(old_region, new_region, username)
class PyttiLocalConfigSearchPathPlugin(SearchPathPlugin):
    """Hydra plugin that appends `<cwd>/config/` to the config search path."""

    def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
        # Resolve relative to wherever the process was launched from.
        cfg_dir = f'{os.getcwd()}/config/'
        logger.debug(cfg_dir)
        search_path.append(provider='pytti_hydra_pathplugin', path=f'file://{cfg_dir}')
def _conv_flop_jit(inputs: Tuple[Any], outputs: Tuple[torch.Tensor]) -> Number:
    """Count FLOPs for a traced convolution op.

    ``inputs`` follows the aten::convolution argument order: input tensor,
    weight tensor, ..., with the `transposed` flag at position 6.
    """
    x, w = inputs[0], inputs[1]
    transposed: bool = inputs[6]
    return _conv_flop_count(list(x.shape), list(w.shape), list(outputs[0].shape), transposed=transposed)
def parse_time(string: str, locale: ((Locale | str) | None)=LC_TIME, format: _PredefinedTimeFormat='medium') -> datetime.time:
    """Parse a time from a string, guessing the field order (hour/minute/
    second) from the locale's time pattern.

    :param string: the string containing the time, e.g. ``"15:30:00"``
    :param locale: a `Locale` object or locale identifier (defaults to LC_TIME)
    :param format: the predefined format width whose pattern drives parsing
    :raises ParseError: if no digits are found in the input
    :return: a naive `datetime.time`
    """
    numbers = re.findall('(\\d+)', string)
    if (not numbers):
        raise ParseError('No numbers were found in input')
    format_str = get_time_format(format=format, locale=locale).pattern.lower()
    # BUG FIX: str.index() raises ValueError instead of returning -1, so the
    # 'k' (alternate hour symbol) fallback below was unreachable and locales
    # using 'k' crashed.  str.find() returns -1 on a miss, as intended.
    hour_idx = format_str.find('h')
    if (hour_idx < 0):
        hour_idx = format_str.find('k')
    min_idx = format_str.index('m')
    sec_idx = format_str.index('s')
    # Map each field symbol to its ordinal position in the pattern.
    indexes = sorted([(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')])
    indexes = {item[1]: idx for (idx, item) in enumerate(indexes)}
    # 12-hour patterns: bump the hour by 12 when the input says "pm".
    hour_offset = 0
    if (('a' in format_str) and ('pm' in string.lower())):
        hour_offset = 12
    minute = second = 0
    hour = (int(numbers[indexes['H']]) + hour_offset)
    if (len(numbers) > 1):
        minute = int(numbers[indexes['M']])
    if (len(numbers) > 2):
        second = int(numbers[indexes['S']])
    return datetime.time(hour, minute, second)
class AsmCmdShowElementCS(AsmCmdCheckable):
    """Checkable GUI command that toggles display of element coordinate
    systems across all open documents."""
    _id = 28
    _menuText = QT_TRANSLATE_NOOP('asm3', 'Show element coordinate system')
    _iconName = 'Assembly_ShowElementCS.svg'
    _toolbarName = None
    _menuGroupName = None
    _contextMenuName = None
    _saveParam = True
    _defaultValue = False

    def IsActive(cls):
        # NOTE(review): `cls` parameter suggests a stripped @classmethod decorator — confirm against VCS.
        return True

    def Activated(cls, checked):
        super(AsmCmdShowElementCS, cls).Activated(checked)
        # Imported lazily to avoid a circular import at module load.
        from .assembly import isTypeOf, AsmElement
        # Refresh the axis display of every AsmElement in every open document.
        for doc in FreeCAD.listDocuments().values():
            for obj in doc.Objects:
                if isTypeOf(obj, AsmElement):
                    obj.ViewObject.Proxy.setupAxis()
class TestContextManagerModel():
    """Generators must expose context-manager __enter__/__exit__ bound
    methods with the documented signatures."""

    def test_model(self) -> None:
        ast_nodes = builder.extract_node('\n def test():\n "a"\n yield\n\n gen = test()\n gen.__enter__ #\n gen.__exit__ #\n ')
        assert isinstance(ast_nodes, list)
        # __enter__: bound to a generator, takes only `self`.
        enter = next(ast_nodes[0].infer())
        assert isinstance(enter, astroid.BoundMethod)
        assert isinstance(enter.bound, bases.Generator)
        assert (enter.bound._proxied.qname() == 'builtins.generator')
        assert isinstance(enter._proxied._proxied, nodes.FunctionDef)
        assert (len(enter._proxied._proxied.args.args) == 1)
        assert (enter._proxied._proxied.args.args[0].name == 'self')
        # __exit__: (self, exc_type, exc_value, traceback).
        exit_node = next(ast_nodes[1].infer())
        assert isinstance(exit_node, astroid.BoundMethod)
        assert isinstance(exit_node._proxied._proxied, nodes.FunctionDef)
        assert (len(exit_node._proxied._proxied.args.args) == 4)
        assert (exit_node._proxied._proxied.args.args[0].name == 'self')
        assert (exit_node._proxied._proxied.args.args[1].name == 'exc_type')
        assert (exit_node._proxied._proxied.args.args[2].name == 'exc_value')
        assert (exit_node._proxied._proxied.args.args[3].name == 'traceback')
class Updater():
    """Per-round trainer: loads replay samples from disk for each
    intersection's agent, feeds them to the agents, and saves the updated
    networks."""

    def __init__(self, cnt_round, dic_agent_conf, dic_exp_conf, dic_traffic_env_conf, dic_path, best_round=None, bar_round=None):
        # best_round / bar_round are accepted but unused here — presumably
        # kept for interface compatibility with callers; confirm.
        self.cnt_round = cnt_round
        self.dic_path = dic_path
        self.dic_exp_conf = dic_exp_conf
        self.dic_traffic_env_conf = dic_traffic_env_conf
        self.dic_agent_conf = dic_agent_conf
        self.agents = []
        self.sample_set_list = []
        self.sample_indexes = None
        self.path_to_log = os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY'], 'train_round', 'round_0', 'generator_0')
        # A throwaway environment, only needed so CoLight_Signal can read the
        # intersection list.
        env_tmp = DIC_ENVS[dic_traffic_env_conf['SIMULATOR_TYPE']](path_to_log=self.path_to_log, path_to_work_directory=self.dic_path['PATH_TO_WORK_DIRECTORY'], dic_traffic_env_conf=self.dic_traffic_env_conf)
        env_tmp.reset()
        for i in range(dic_traffic_env_conf['NUM_AGENTS']):
            agent_name = self.dic_exp_conf['MODEL_NAME']
            if (agent_name == 'CoLight_Signal'):
                agent = DIC_AGENTS[agent_name](self.dic_agent_conf, self.dic_traffic_env_conf, self.dic_path, self.cnt_round, inter_info=env_tmp.list_intersection, intersection_id=str(i))
            else:
                agent = DIC_AGENTS[agent_name](self.dic_agent_conf, self.dic_traffic_env_conf, self.dic_path, self.cnt_round, intersection_id=str(i))
            self.agents.append(agent)

    def load_sample(self, i):
        """Load all pickled sample batches for intersection `i` (or the
        pretrain/aggregate sample file), concatenated into one list.
        Failures are logged to an errors directory and an empty list is
        returned."""
        sample_set = []
        try:
            if self.dic_exp_conf['PRETRAIN']:
                sample_file = open(os.path.join(self.dic_path['PATH_TO_PRETRAIN_WORK_DIRECTORY'], 'train_round', ('total_samples' + '.pkl')), 'rb')
            elif self.dic_exp_conf['AGGREGATE']:
                sample_file = open(os.path.join(self.dic_path['PATH_TO_AGGREGATE_SAMPLES'], 'aggregate_samples.pkl'), 'rb')
            else:
                sample_file = open(os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY'], 'train_round', ('total_samples_inter_{0}'.format(i) + '.pkl')), 'rb')
            # The pickle file holds multiple appended dumps; read until EOF.
            try:
                while True:
                    sample_set += pickle.load(sample_file)
            except EOFError:
                sample_file.close()
                pass
        except Exception as e:
            # Best-effort error logging; never crash the training round.
            error_dir = os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY']).replace('records', 'errors')
            if (not os.path.exists(error_dir)):
                os.makedirs(error_dir)
            f = open(os.path.join(error_dir, 'error_info_inter_{0}.txt'.format(i)), 'a')
            f.write('Fail to load samples for inter {0}\n'.format(i))
            f.write(('traceback.format_exc():\n%s\n' % traceback.format_exc()))
            f.close()
            print(('traceback.format_exc():\n%s' % traceback.format_exc()))
            pass
        if ((i % 100) == 0):
            print('load_sample for inter {0}'.format(i))
        return sample_set

    def load_hidden_states_with_forget(self):
        """Load hidden states, keeping only the most recent MAX_MEMORY_LEN
        entries, sub-sampled with the indexes chosen in
        load_sample_with_forget."""
        hidden_states_set = []
        try:
            hidden_state_file = open(os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY'], 'train_round', 'total_hidden_states.pkl'), 'rb')
            try:
                while True:
                    hidden_states_set.append(pickle.load(hidden_state_file))
                    hidden_states_set = np.vstack(hidden_states_set)
                    ind_end = len(hidden_states_set)
                    print('hidden_state_set shape: ', hidden_states_set.shape)
                    if (self.dic_exp_conf['PRETRAIN'] or self.dic_exp_conf['AGGREGATE']):
                        pass
                    else:
                        # Forget everything older than MAX_MEMORY_LEN, then
                        # take the same random indexes used for samples.
                        ind_sta = max(0, (ind_end - self.dic_agent_conf['MAX_MEMORY_LEN']))
                        hidden_states_after_forget = hidden_states_set[ind_sta:ind_end]
                        hidden_states_set = [np.array([hidden_states_after_forget[k] for k in self.sample_indexes])]
            except EOFError:
                hidden_state_file.close()
                pass
        except Exception as e:
            error_dir = os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY']).replace('records', 'errors')
            if (not os.path.exists(error_dir)):
                os.makedirs(error_dir)
            f = open(os.path.join(error_dir, 'error_info.txt'), 'a')
            f.write('Fail to load hidden_states for inter\n')
            f.write(('traceback.format_exc():\n%s\n' % traceback.format_exc()))
            f.close()
            print(('traceback.format_exc():\n%s' % traceback.format_exc()))
            pass
        return hidden_states_set

    def load_hidden_states(self):
        """Load every pickled hidden-state dump, unmodified."""
        hidden_states_set = []
        try:
            hidden_state_file = open(os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY'], 'train_round', 'total_hidden_states.pkl'), 'rb')
            # NOTE(review): hidden_state_file is never closed on the EOFError
            # path here (unlike load_hidden_states_with_forget) — file-handle leak.
            try:
                while True:
                    hidden_states_set.append(pickle.load(hidden_state_file))
            except EOFError:
                pass
        except Exception as e:
            error_dir = os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY']).replace('records', 'errors')
            if (not os.path.exists(error_dir)):
                os.makedirs(error_dir)
            f = open(os.path.join(error_dir, 'error_info.txt'), 'a')
            f.write('Fail to load hidden_states for inter\n')
            f.write(('traceback.format_exc():\n%s\n' % traceback.format_exc()))
            f.close()
            print(('traceback.format_exc():\n%s' % traceback.format_exc()))
            pass
        return hidden_states_set

    def load_sample_with_forget(self, i):
        """Like load_sample, but keeps only the most recent MAX_MEMORY_LEN
        entries of each dump and sub-samples SAMPLE_SIZE of them.  The random
        indexes are chosen once and cached in self.sample_indexes so all
        intersections use the same sub-sample."""
        sample_set = []
        try:
            if self.dic_exp_conf['PRETRAIN']:
                sample_file = open(os.path.join(self.dic_path['PATH_TO_PRETRAIN_WORK_DIRECTORY'], 'train_round', ('total_samples' + '.pkl')), 'rb')
            elif self.dic_exp_conf['AGGREGATE']:
                sample_file = open(os.path.join(self.dic_path['PATH_TO_AGGREGATE_SAMPLES'], 'aggregate_samples.pkl'), 'rb')
            else:
                sample_file = open(os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY'], 'train_round', ('total_samples_inter_{0}'.format(i) + '.pkl')), 'rb')
            # NOTE(review): sample_file is never closed on the EOFError path — file-handle leak.
            try:
                while True:
                    cur_round_sample_set = pickle.load(sample_file)
                    ind_end = len(cur_round_sample_set)
                    if (self.dic_exp_conf['PRETRAIN'] or self.dic_exp_conf['AGGREGATE']):
                        pass
                    else:
                        ind_sta = max(0, (ind_end - self.dic_agent_conf['MAX_MEMORY_LEN']))
                        memory_after_forget = cur_round_sample_set[ind_sta:ind_end]
                        sample_size = min(self.dic_agent_conf['SAMPLE_SIZE'], len(memory_after_forget))
                        if (self.sample_indexes is None):
                            self.sample_indexes = random.sample(range(len(memory_after_forget)), sample_size)
                        sample_set = [memory_after_forget[k] for k in self.sample_indexes]
                    sample_set += cur_round_sample_set
            except EOFError:
                pass
        except Exception as e:
            error_dir = os.path.join(self.dic_path['PATH_TO_WORK_DIRECTORY']).replace('records', 'errors')
            if (not os.path.exists(error_dir)):
                os.makedirs(error_dir)
            f = open(os.path.join(error_dir, 'error_info_inter_{0}.txt'.format(i)), 'a')
            f.write('Fail to load samples for inter {0}\n'.format(i))
            f.write(('traceback.format_exc():\n%s\n' % traceback.format_exc()))
            f.close()
            print(('traceback.format_exc():\n%s' % traceback.format_exc()))
            pass
        if ((i % 100) == 0):
            print('load_sample for inter {0}'.format(i))
        return sample_set

    def load_sample_for_agents(self):
        """Dispatch sample loading to the agents.

        Non-graph models get flat sample lists (one shared model or one per
        intersection); GCN/CoLight get a per-timestep DataFrame of all
        intersections' inputs.  The `if False:` branches are disabled legacy
        code paths kept for reference."""
        start_time = time.time()
        print('Start load samples at', start_time)
        if (self.dic_exp_conf['MODEL_NAME'] not in ['GCN', 'CoLight']):
            if (self.dic_traffic_env_conf['ONE_MODEL'] or (self.dic_exp_conf['MODEL_NAME'] in ['SimpleDQNOne'])):
                # Single shared model: pool every intersection's samples.
                sample_set_all = []
                for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
                    sample_set = self.load_sample_with_forget(i)
                    sample_set_all.extend(sample_set)
                self.agents[0].prepare_Xs_Y(sample_set_all, self.dic_exp_conf)
            else:
                # One model per intersection.
                for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
                    sample_set = self.load_sample(i)
                    self.agents[i].prepare_Xs_Y(sample_set, self.dic_exp_conf)
        else:
            samples_gcn_df = None
            if False:  # disabled: legacy merge-on-(generator,time) approach
                for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
                    sample_set = self.load_sample(i)
                    if (len(sample_set) == 0):
                        continue
                    samples_set_df = pd.DataFrame.from_records(sample_set, columns=['state', 'action', 'next_state', 'inst_reward', 'reward', 'time', 'generator'])
                    samples_set_df['input'] = samples_set_df[['state', 'action', 'next_state', 'inst_reward', 'reward']].values.tolist()
                    samples_set_df.drop(['state', 'action', 'next_state', 'inst_reward', 'reward'], axis=1, inplace=True)
                    if (samples_gcn_df is None):
                        samples_gcn_df = samples_set_df
                    else:
                        samples_gcn_df = pd.merge(samples_gcn_df, samples_set_df, how='inner', on=['generator', 'time'], suffixes=('', '_{0}'.format(i)))
                intersection_input_columns = (['input'] + ['input_{0}'.format((i + 1)) for i in range((self.dic_traffic_env_conf['NUM_INTERSECTIONS'] - 1))])
                for i in range(self.dic_traffic_env_conf['NUM_AGENTS']):
                    sample_set_list = samples_gcn_df[intersection_input_columns].values.tolist()
                    self.agents[i].prepare_Xs_Y(sample_set_list, self.dic_exp_conf)
            elif False:  # disabled: identical to the live branch below
                samples_gcn_df = []
                print('start get samples')
                get_samples_start_time = time.time()
                for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
                    sample_set = self.load_sample(i)
                    samples_set_df = pd.DataFrame.from_records(sample_set, columns=['state', 'action', 'next_state', 'inst_reward', 'reward', 'time', 'generator'])
                    samples_set_df['input'] = samples_set_df[['state', 'action', 'next_state', 'inst_reward', 'reward']].values.tolist()
                    samples_set_df.drop(['state', 'action', 'next_state', 'inst_reward', 'reward', 'time', 'generator'], axis=1, inplace=True)
                    samples_gcn_df.append(samples_set_df['input'])
                    if ((i % 100) == 0):
                        print('inter {0} samples_set_df.shape: '.format(i), samples_set_df.shape)
                samples_gcn_df = pd.concat(samples_gcn_df, axis=1)
                print('samples_gcn_df.shape :', samples_gcn_df.shape)
                print('Getting samples time: ', (time.time() - get_samples_start_time))
                for i in range(self.dic_traffic_env_conf['NUM_AGENTS']):
                    sample_set_list = samples_gcn_df.values.tolist()
                    self.agents[i].prepare_Xs_Y(sample_set_list, self.dic_exp_conf)
            else:
                # Live path: one 'input' column per intersection, concatenated
                # side by side so each row carries all intersections' inputs.
                samples_gcn_df = []
                print('start get samples')
                get_samples_start_time = time.time()
                for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
                    sample_set = self.load_sample(i)
                    samples_set_df = pd.DataFrame.from_records(sample_set, columns=['state', 'action', 'next_state', 'inst_reward', 'reward', 'time', 'generator'])
                    samples_set_df['input'] = samples_set_df[['state', 'action', 'next_state', 'inst_reward', 'reward']].values.tolist()
                    samples_set_df.drop(['state', 'action', 'next_state', 'inst_reward', 'reward', 'time', 'generator'], axis=1, inplace=True)
                    samples_gcn_df.append(samples_set_df['input'])
                    if ((i % 100) == 0):
                        print('inter {0} samples_set_df.shape: '.format(i), samples_set_df.shape)
                samples_gcn_df = pd.concat(samples_gcn_df, axis=1)
                print('samples_gcn_df.shape :', samples_gcn_df.shape)
                print('Getting samples time: ', (time.time() - get_samples_start_time))
                for i in range(self.dic_traffic_env_conf['NUM_AGENTS']):
                    sample_set_list = samples_gcn_df.values.tolist()
                    self.agents[i].prepare_Xs_Y(sample_set_list, self.dic_exp_conf)
        print('Load samples time: ', (time.time() - start_time))

    def sample_set_to_sample_gcn_df(self, sample_set):
        """Convert one intersection's samples to an 'input'-column DataFrame
        indexed by (time, generator) and append it to self.sample_set_list."""
        print('make results')
        samples_set_df = pd.DataFrame.from_records(sample_set, columns=['state', 'action', 'next_state', 'inst_reward', 'reward', 'time', 'generator'])
        samples_set_df = samples_set_df.set_index(['time', 'generator'])
        samples_set_df['input'] = samples_set_df[['state', 'action', 'next_state', 'inst_reward', 'reward']].values.tolist()
        samples_set_df.drop(['state', 'action', 'next_state', 'inst_reward', 'reward'], axis=1, inplace=True)
        self.sample_set_list.append(samples_set_df)

    def update_network(self, i):
        """Train agent `i`'s network and persist it to the path dictated by
        the ONE_MODEL / PRETRAIN / AGGREGATE configuration flags."""
        print(('update agent %d' % i))
        self.agents[i].train_network(self.dic_exp_conf)
        if self.dic_traffic_env_conf['ONE_MODEL']:
            if self.dic_exp_conf['PRETRAIN']:
                self.agents[i].q_network.save(os.path.join(self.dic_path['PATH_TO_PRETRAIN_MODEL'], '{0}.h5'.format(self.dic_exp_conf['TRAFFIC_FILE'][0])))
                shutil.copy(os.path.join(self.dic_path['PATH_TO_PRETRAIN_MODEL'], '{0}.h5'.format(self.dic_exp_conf['TRAFFIC_FILE'][0])), os.path.join(self.dic_path['PATH_TO_MODEL'], 'round_0.h5'))
            elif self.dic_exp_conf['AGGREGATE']:
                self.agents[i].q_network.save('model/initial', 'aggregate.h5')
                shutil.copy('model/initial/aggregate.h5', os.path.join(self.dic_path['PATH_TO_MODEL'], 'round_0.h5'))
            else:
                self.agents[i].save_network('round_{0}'.format(self.cnt_round))
        elif self.dic_exp_conf['PRETRAIN']:
            self.agents[i].q_network.save(os.path.join(self.dic_path['PATH_TO_PRETRAIN_MODEL'], '{0}_inter_{1}.h5'.format(self.dic_exp_conf['TRAFFIC_FILE'][0], self.agents[i].intersection_id)))
            shutil.copy(os.path.join(self.dic_path['PATH_TO_PRETRAIN_MODEL'], '{0}_inter_{1}.h5'.format(self.dic_exp_conf['TRAFFIC_FILE'][0], self.agents[i].intersection_id)), os.path.join(self.dic_path['PATH_TO_MODEL'], 'round_0.h5'))
        elif self.dic_exp_conf['AGGREGATE']:
            # NOTE(review): saves 'aggregate_inter_{i}.h5' but copies
            # 'aggregate.h5' — looks like a copy/paste mismatch; confirm.
            self.agents[i].q_network.save('model/initial', 'aggregate_inter_{0}.h5'.format(self.agents[i].intersection_id))
            shutil.copy('model/initial/aggregate.h5', os.path.join(self.dic_path['PATH_TO_MODEL'], 'round_0_inter_{0}.h5'.format(self.agents[i].intersection_id)))
        else:
            self.agents[i].save_network('round_{0}_inter_{1}'.format(self.cnt_round, self.agents[i].intersection_id))

    def update_network_for_agents(self):
        """Train/save either the single shared model or every agent's model."""
        if self.dic_traffic_env_conf['ONE_MODEL']:
            self.update_network(0)
        else:
            print('update_network_for_agents', self.dic_traffic_env_conf['NUM_AGENTS'])
            for i in range(self.dic_traffic_env_conf['NUM_AGENTS']):
                self.update_network(i)
def compute_dense_reward(self, action, obs):
    """Dense reward: penalize end-effector distance to the handle, handle
    distance to the goal, and action magnitude (all negatively weighted)."""
    # Distance terms.
    ee_to_handle = np.linalg.norm(self.robot.ee_position - self.obj1.position)
    handle_to_goal = np.linalg.norm(self.obj1.position - self.goal_position)
    # L2 penalty on the commanded action.
    effort = np.sum(np.square(action))
    # Weighted sum; every weight is a penalty.
    return (-1.0) * ee_to_handle + (-1.0) * handle_to_goal + (-0.1) * effort
def fetch_versions(build_type, timeout=5.0):
    """Download and parse the update feed for `build_type`.

    Returns ``(sorted_versions, feed_link)``; every failure mode is wrapped
    in UpdateError."""
    try:
        # NOTE(review): the feed URL string literal appears to have been
        # stripped/garbled during extraction — presumably a full URL with
        # `%s` interpolated from `build_type`; restore it from VCS.
        content = urlopen((' % build_type), timeout=timeout).read()
    except Exception as error:
        raise UpdateError(error) from error
    d = feedparser.parse(content)
    if d.bozo:
        # feedparser flags malformed feeds via `bozo`.
        raise UpdateError(d.bozo_exception)
    try:
        link = d.feed.link
        enclosures = [e for entry in d.entries for e in entry.enclosures]
    except AttributeError as error:
        raise UpdateError(error) from error
    try:
        versions = [parse_version(en.version) for en in enclosures]
    except ValueError as error:
        raise UpdateError(error) from error
    return (sorted(versions), link)
.parametrize('text', ('<a=b&b=a>', '<a=b|b=a>', '<a=b]b=a>'))  # NOTE(review): truncated decorator — presumably `@pytest.mark.parametrize`; confirm against VCS.
def test_compound_positive_matches(lexer, text):
    """The formula lexer must consume the whole compound expression and tag
    its delimiters and connective correctly."""
    # Whole input consumed.
    assert (lexer.formula(0, text) == len(text))
    # Opening delimiter, connective operator, closing delimiter.
    assert (lexer.cur[0] == (0, Punctuation, '<'))
    assert (lexer.cur[4][1] == Operator)
    assert (lexer.cur[(- 1)] == ((len(text) - 1), Punctuation, '>'))
def signature_test():
    """Build geometric algebras of several metric signatures and print each
    algebra's metric tensor, pseudoscalar I, and I**2."""
    Print_Function()
    # Euclidean 3D, unit and scaled metrics.
    e3d = Ga('e1 e2 e3', g=[1, 1, 1])
    print('e3d.g =', e3d.g)
    print('Signature = (3,0) I =', e3d.I(), ' I**2 =', (e3d.I() * e3d.I()))
    e3d = Ga('e1 e2 e3', g=[2, 2, 2])
    print('e3d.g =', e3d.g)
    print('Signature = (3,0) I =', e3d.I(), ' I**2 =', (e3d.I() * e3d.I()))
    # Spacetime (1,3), unit and scaled metrics.
    sp4d = Ga('e1 e2 e3 e4', g=[1, (- 1), (- 1), (- 1)])
    # BUG FIX: label previously said 'e3d.g' but the value printed is sp4d.g.
    print('sp4d.g =', sp4d.g)
    print('Signature = (1,3) I =', sp4d.I(), ' I**2 =', (sp4d.I() * sp4d.I()))
    sp4d = Ga('e1 e2 e3 e4', g=[2, (- 2), (- 2), (- 2)])
    print('sp4d.g =', sp4d.g)
    print('Signature = (1,3) I =', sp4d.I(), ' I**2 =', (sp4d.I() * sp4d.I()))
    # Euclidean 4D.
    e4d = Ga('e1 e2 e3 e4', g=[1, 1, 1, 1])
    print('e4d.g =', e4d.g)
    print('Signature = (4,0) I =', e4d.I(), ' I**2 =', (e4d.I() * e4d.I()))
    # Conformal (4,1), unit and scaled metrics.
    cf3d = Ga('e1 e2 e3 e4 e5', g=[1, 1, 1, 1, (- 1)])
    # BUG FIX: label previously said 'cf4d.g' but the variable is cf3d.
    print('cf3d.g =', cf3d.g)
    print('Signature = (4,1) I =', cf3d.I(), ' I**2 =', (cf3d.I() * cf3d.I()))
    cf3d = Ga('e1 e2 e3 e4 e5', g=[2, 2, 2, 2, (- 2)])
    print('cf3d.g =', cf3d.g)
    print('Signature = (4,1) I =', cf3d.I(), ' I**2 =', (cf3d.I() * cf3d.I()))
    return
def data_dir() -> Path:
    """Return Poetry's data directory.

    A ``POETRY_HOME`` environment variable overrides everything and is used
    verbatim (user-expanded, not resolved).  Otherwise the platform-specific
    application-data directory is used, resolved, with ``pypoetry`` appended.
    """
    # Read the override once instead of calling os.getenv twice (the original
    # re-read the variable for the truthiness test and again for the value).
    poetry_home = os.getenv('POETRY_HOME')
    if poetry_home:
        return Path(poetry_home).expanduser()
    if WINDOWS:
        base_dir = Path(_get_win_folder('CSIDL_APPDATA'))
    elif MACOS:
        base_dir = Path('~/Library/Application Support').expanduser()
    else:
        # XDG base-directory spec, with the conventional fallback.
        base_dir = Path(os.getenv('XDG_DATA_HOME', '~/.local/share')).expanduser()
    base_dir = base_dir.resolve()
    return (base_dir / 'pypoetry')
def test_legal_port_connect():
    """Elaboration must accept connecting a wrapped OutPort to a wrapped
    InPort at the top level.

    NOTE(review): the inner `up_*` functions look like update blocks whose
    decorators (e.g. `@update`) were stripped during extraction — confirm
    against VCS."""

    # Leaf component driving an output port.
    class A(ComponentLevel3):

        def construct(s):
            s.out = OutPort(32)

            def up_A_write():
                s.out = 123

    # Leaf component reading an input port.
    class B(ComponentLevel3):

        def construct(s):
            s.in_ = InPort(32)

            def up_B_read():
                print(s.in_)

    # Wrapper exposing A's output.
    class OutWrap(ComponentLevel3):

        def construct(s):
            s.out = OutPort(32)
            s.a = A()
            s.a.out //= s.out

            def up_out_read():
                print(s.out)

    # Wrapper exposing B's input.
    class InWrap(ComponentLevel3):

        def construct(s):
            s.in_ = InPort(32)
            s.b = B()
            s.b.in_ //= s.in_

            def up_in_read():
                print(s.in_)

    # Top connects the two wrappers output-to-input.
    class Top(ComponentLevel3):

        def construct(s):
            s.i = InWrap()
            s.o = OutWrap()
            connect(s.o.out, s.i.in_)

    _test_model(Top)
class SDIO_STA(IntEnum):
    """Bit masks for an SDIO status (STA) register.

    NOTE(review): the member names match the STM32-family SDIO_STA register
    layout (CRC/timeout/FIFO/activity flags) — confirm the exact target
    peripheral against the datasheet.
    """
    CCRCFAIL = (1 << 0)
    DCRCFAIL = (1 << 1)
    CTIMEOUT = (1 << 2)
    DTIMEOUT = (1 << 3)
    TXUNDERR = (1 << 4)
    RXOVERR = (1 << 5)
    CMDREND = (1 << 6)
    CMDSENT = (1 << 7)
    DATAEND = (1 << 8)
    STBITERR = (1 << 9)
    DBCKEND = (1 << 10)
    CMDACT = (1 << 11)
    TXACT = (1 << 12)
    RXACT = (1 << 13)
    TXFIFOHE = (1 << 14)
    RXFIFOHF = (1 << 15)
    TXFIFOF = (1 << 16)
    RXFIFOF = (1 << 17)
    TXFIFOE = (1 << 18)
    RXFIFOE = (1 << 19)
    TXDAVL = (1 << 20)
    RXDAVL = (1 << 21)
    SDIOIT = (1 << 22)
    CEATAEND = (1 << 23)
class LastKnownValueEraser(TypeTranslator):
    """Type translator that strips `last_known_value` from Instance types
    (recursively through type arguments) and merges duplicate bare Instances
    inside unions."""

    def visit_instance(self, t: Instance) -> Type:
        # Fast path: nothing to erase and no type arguments to recurse into.
        if ((not t.last_known_value) and (not t.args)):
            return t
        return t.copy_modified(args=[a.accept(self) for a in t.args], last_known_value=None)

    def visit_type_alias_type(self, t: TypeAliasType) -> Type:
        # Aliases are left untouched (avoids infinite recursion on recursive aliases).
        return t

    def visit_union_type(self, t: UnionType) -> Type:
        new = cast(UnionType, super().visit_union_type(t))
        # After erasing last-known values, the union may contain several
        # now-identical bare Instances of the same class; merge them.
        instances = [item for item in new.items if isinstance(get_proper_type(item), Instance)]
        if (len(instances) > 1):
            # Group argument-less Instances by fully qualified class name.
            instances_by_name: dict[(str, list[Instance])] = {}
            p_new_items = get_proper_types(new.items)
            for p_item in p_new_items:
                if (isinstance(p_item, Instance) and (not p_item.args)):
                    instances_by_name.setdefault(p_item.type.fullname, []).append(p_item)
            merged: list[Type] = []
            for item in new.items:
                orig_item = item
                item = get_proper_type(item)
                if (isinstance(item, Instance) and (not item.args)):
                    types = instances_by_name.get(item.type.fullname)
                    if (types is not None):
                        if (len(types) == 1):
                            merged.append(item)
                        else:
                            # Local import to avoid a module-level import cycle.
                            from mypy.typeops import make_simplified_union
                            merged.append(make_simplified_union(types))
                        # Drop the entry so later duplicates are skipped.
                        del instances_by_name[item.type.fullname]
                else:
                    merged.append(orig_item)
            return UnionType.make_union(merged)
        return new
def smiles2differentiable_graph(smiles):
    """Convert a SMILES string into a differentiable graph representation.

    Returns ``(node_mask, node_indicator, adjacency_mask, adjacency_weight)``
    over the N real substructure nodes plus M "expansion" slots (one per leaf),
    or ``None`` when the SMILES cannot be parsed or is invalid.

    NOTE(review): node_indicator rows for expansion slots are random — output
    is nondeterministic by design (learnable logits); confirm callers expect this.
    """
    mol = smiles2mol(smiles)
    if (mol is None):
        return None
    if (not is_valid(smiles)):
        return None
    (idx_lst, node_mat, substructure_lst, atomidx_2substridx, adjacency_matrix, leaf_extend_idx_pair) = smiles2graph(smiles)
    N = len(idx_lst)
    d = len(vocabulary)
    # M = number of leaf nodes (degree 1); each leaf gets one expansion slot.
    M = int(np.sum((np.sum(adjacency_matrix, 1) == 1)))
    # BUG FIX: `np.bool` (deprecated alias of builtin bool) was removed in
    # NumPy 1.24 and raises AttributeError; the builtin `bool` is equivalent
    # on all NumPy versions.
    is_nonleaf = (np.sum(adjacency_matrix, 1) > 1)
    is_nonleaf = np.concatenate([is_nonleaf, np.zeros(M, dtype=bool)])
    is_leaf = (np.sum(adjacency_matrix, 1) == 1)
    is_leaf = np.concatenate([is_leaf, np.zeros(M, dtype=bool)])
    leaf_idx_lst = list(np.where((is_leaf == True))[0])
    # Pair each leaf with its (unique) non-leaf neighbour.
    leaf_nonleaf_lst = []
    for leaf_idx in leaf_idx_lst:
        for i in range(N):
            if (adjacency_matrix[(i, leaf_idx)] == 1):
                leaf_nonleaf_lst.append((leaf_idx, i))
                break
    # Real nodes (leaf or non-leaf) are unmasked; expansion slots stay masked.
    node_mask = np.zeros((N + M), dtype=bool)
    node_mask[is_nonleaf] = True
    node_mask[is_leaf] = True
    # Existing nodes get +/-inf logits (fixed identity); expansion slots get
    # random logits (to be optimized).
    node_indicator_1 = np.zeros((N, d))
    node_indicator_1[(node_mat == 1)] = inf_value
    node_indicator_1[(node_mat == 0)] = (- inf_value)
    node_indicator_2 = np.random.random((M, d))
    node_indicator = np.concatenate([node_indicator_1, node_indicator_2], 0)
    # Only leaf<->expansion edges are free (mask False); everything else fixed.
    adjacency_mask = np.ones(((N + M), (N + M)), dtype=bool)
    for (leaf_idx, extend_idx) in leaf_extend_idx_pair:
        adjacency_mask[(leaf_idx, extend_idx)] = False
        adjacency_mask[(extend_idx, leaf_idx)] = False
    # Edge logits: +inf for existing edges, 0 (learnable) for leaf-expansion
    # pairs, -inf elsewhere.
    adjacency_weight = np.zeros(((N + M), (N + M)))
    adjacency_weight.fill((- inf_value))
    for i in range(N):
        for j in range(N):
            if (adjacency_matrix[(i, j)] == 1):
                adjacency_weight[(i, j)] = inf_value
    for (leaf_idx, extend_idx) in leaf_extend_idx_pair:
        adjacency_weight[(leaf_idx, extend_idx)] = 0
        adjacency_weight[(extend_idx, leaf_idx)] = 0
    return (node_mask, node_indicator, adjacency_mask, adjacency_weight)
.parametrize('string, separator, expected', [('a', '!', ['a']), ('ab', '!', ['ab']), ('ab!cd', '!', ['ab', 'cd']), ('ab!cd!ef', '!', ['ab', 'cd', 'ef']), ('a"b!c"d!ef', '!', ['a"b!c"d', 'ef']), ('a', '\\', ['a']), ('ab', '\\', ['ab']), ('ab\\cd', '\\', ['ab', 'cd']), ('ab\\cd\\ef', '\\', ['ab', 'cd', 'ef']), ('a"b\\c"d\\ef', '\\', ['a"b\\c"d', 'ef']), (f'str.upper ! {SYMBOL} + "z"', '!', ['str.upper', f' {SYMBOL} + "z"'])])  # NOTE(review): truncated decorator — presumably `@pytest.mark.parametrize`; confirm against VCS.
def test_split_string_on_separator(string, separator, expected):
    """Splitting must honour the separator while leaving separators inside
    double-quoted segments untouched."""
    result = list(interpret.split_pipestring(string, separator))
    assert (result == expected)
class TransformerEncoderUnit(nn.Module):
    """One transformer encoder block over 2D feature maps: multi-head
    self-attention plus a 1x1-conv feed-forward, both with residual
    connections and post BatchNorm."""

    def __init__(self, feat_dim, n_head=8, pos_en_flag=True, attn_type='softmax', P=None):
        super(TransformerEncoderUnit, self).__init__()
        self.feat_dim = feat_dim
        self.attn_type = attn_type
        self.pos_en_flag = pos_en_flag
        self.P = P
        # Sinusoidal positional encoding (half the channels per spatial axis).
        self.pos_en = PosEnSine(self.feat_dim // 2)
        self.attn = OurMultiheadAttention(feat_dim, n_head)
        # Position-wise feed-forward implemented as two 1x1 convolutions.
        self.linear1 = nn.Conv2d(self.feat_dim, self.feat_dim, 1)
        self.linear2 = nn.Conv2d(self.feat_dim, self.feat_dim, 1)
        self.activation = nn.ReLU(inplace=True)
        self.norm1 = nn.BatchNorm2d(self.feat_dim)
        self.norm2 = nn.BatchNorm2d(self.feat_dim)

    def forward(self, src):
        # Positional encoding is added to queries and keys only, never values.
        pos_embed = self.pos_en(src) if self.pos_en_flag else 0
        # Self-attention sub-layer: residual then norm (post-norm layout).
        attn_out = self.attn(q=(src + pos_embed), k=(src + pos_embed), v=src, attn_type=self.attn_type, P=self.P)[0]
        src = self.norm1(src + attn_out)
        # Feed-forward sub-layer: residual then norm.
        ffn_out = self.linear2(self.activation(self.linear1(src)))
        return self.norm2(src + ffn_out)
def get_hash():
    """Best-effort short version hash.

    Returns the 7-char git SHA when running from a repo, else the
    local-version suffix recorded in the generated version file, else
    ``'unknown'``.

    :raises ImportError: if the version file exists but cannot be imported.
    """
    if os.path.exists('.git'):
        sha = get_git_hash()[:7]
    elif os.path.exists(version_file):
        try:
            from basicsr.version import __version__
            sha = __version__.split('+')[(- 1)]
        except ImportError as err:
            # Explicitly chain the original failure so its cause stays visible.
            raise ImportError('Unable to get git version') from err
    else:
        sha = 'unknown'
    return sha
class TestAttributes(unittest.TestCase):
    """Parsing checks for AttributeType and ObjectClass schema elements,
    covering both minimal definitions (all defaults) and fully specified
    ones loaded from a bundled OpenLDAP sub-schema file."""

    def get_schema(self):
        # Load the sub-schema from the first bundled test file.
        openldap_uri = 'file://{}'.format(TEST_SUBSCHEMA_FILES[0])
        (dn, schema) = ldap.schema.urlfetch(openldap_uri)
        return schema

    def test_empty_attributetype_attrs(self):
        # A minimal '( 2.999 )' definition yields all-default attributes.
        attr = AttributeType('( 2.999 )')
        self.assertEqual(attr.oid, '2.999')
        self.assertEqual(attr.names, ())
        self.assertEqual(attr.desc, None)
        self.assertEqual(attr.obsolete, False)
        self.assertEqual(attr.single_value, False)
        self.assertEqual(attr.syntax, None)
        self.assertEqual(attr.no_user_mod, False)
        self.assertEqual(attr.equality, None)
        self.assertEqual(attr.substr, None)
        self.assertEqual(attr.ordering, None)
        self.assertEqual(attr.usage, 0)
        self.assertEqual(attr.sup, ())
        self.assertEqual(attr.x_origin, ())

    def test_empty_objectclass_attrs(self):
        # A minimal object class still defaults its superclass to 'top'.
        cls = ObjectClass('( 2.999 )')
        self.assertEqual(cls.oid, '2.999')
        self.assertEqual(cls.names, ())
        self.assertEqual(cls.desc, None)
        self.assertEqual(cls.obsolete, False)
        self.assertEqual(cls.must, ())
        self.assertEqual(cls.may, ())
        self.assertEqual(cls.kind, 0)
        self.assertEqual(cls.sup, ('top',))
        self.assertEqual(cls.x_origin, ())

    def test_attributetype_attrs(self):
        # Fully specified attribute type (searchTimeLimit) from the schema file.
        schema = self.get_schema()
        attr = schema.get_obj(AttributeType, '1.3.6.1.4.1.11.1.3.1.1.3')
        expected_desc = 'Maximum time an agent or service allows for a search to complete'
        self.assertEqual(attr.oid, '1.3.6.1.4.1.11.1.3.1.1.3')
        self.assertEqual(attr.names, ('searchTimeLimit',))
        self.assertEqual(attr.desc, expected_desc)
        self.assertEqual(attr.obsolete, False)
        self.assertEqual(attr.single_value, True)
        self.assertEqual(attr.syntax, '1.3.6.1.4.1.1466.115.121.1.27')
        self.assertEqual(attr.no_user_mod, False)
        self.assertEqual(attr.equality, 'integerMatch')
        self.assertEqual(attr.ordering, 'integerOrderingMatch')
        self.assertEqual(attr.sup, ())
        self.assertEqual(attr.x_origin, ('RFC4876', 'user defined'))

    def test_objectclass_attrs(self):
        # Fully specified object class (groupOfNames) from the schema file.
        schema = self.get_schema()
        cls = schema.get_obj(ObjectClass, '2.5.6.9')
        expected_may = ('member', 'businessCategory', 'seeAlso', 'owner', 'ou', 'o', 'description')
        self.assertEqual(cls.oid, '2.5.6.9')
        self.assertEqual(cls.names, ('groupOfNames',))
        self.assertEqual(cls.desc, None)
        self.assertEqual(cls.obsolete, False)
        self.assertEqual(cls.must, ('cn',))
        self.assertEqual(cls.may, expected_may)
        self.assertEqual(cls.kind, 0)
        self.assertEqual(cls.sup, ('top',))
        self.assertEqual(cls.x_origin, ('RFC 4519',))
class FromImport(ImportInfo):
    """Model of a ``from <module> import <names>`` statement."""

    def __init__(self, module_name, level, names_and_aliases):
        self.module_name = module_name
        # Number of leading dots for relative imports (0 == absolute).
        self.level = level
        self.names_and_aliases = names_and_aliases

    def get_imported_primaries(self, context):
        # Star imports expose every public name of the target module.
        if self.names_and_aliases[0][0] == '*':
            module = self.get_imported_module(context)
            return [name for name in module if not name.startswith('_')]
        return [alias if alias else name
                for (name, alias) in self.names_and_aliases]

    def get_imported_resource(self, context):
        if self.level == 0:
            return context.project.find_module(self.module_name, folder=context.folder)
        return context.project.find_relative_module(self.module_name, context.folder, self.level)

    def get_imported_module(self, context):
        if self.level == 0:
            return context.project.get_module(self.module_name, context.folder)
        return context.project.get_relative_module(self.module_name, context.folder, self.level)

    def get_import_statement(self):
        pieces = ['%s as %s' % (name, alias) if alias else name
                  for (name, alias) in self.names_and_aliases]
        stmt = 'from ' + ('.' * self.level) + self.module_name + ' import ' + ', '.join(pieces)
        # Preserve legacy behaviour for the empty-names case, where the
        # original implementation chopped two characters off 'import '.
        return stmt if pieces else stmt[:-2]

    def is_empty(self):
        return not self.names_and_aliases

    def is_star_import(self):
        return bool(self.names_and_aliases) and self.names_and_aliases[0][0] == '*'
class UserView(ModelView):
    """Flask-Admin view for user records: list/delete only, login required."""
    list_template = 'list.html'
    # Users are created elsewhere; the admin may only list and delete them.
    can_create = False
    can_delete = True
    can_edit = False

    def is_accessible(self):
        # Only authenticated users may open this view.
        return current_user.is_authenticated

    def inaccessible_callback(self, name, **kwargs):
        # Send anonymous visitors to the admin login page, preserving `next`.
        return redirect(url_for('admin.login_view', next=request.url))

    # NOTE(review): the bare "('/')" below looks like the tail of an
    # @expose('/') decorator truncated by extraction -- confirm.
    ('/')
    def index_view(self):
        self._template_args['card_title'] = 'Current users'
        return super(UserView, self).index_view()
class SemAnalTypeInfoSuite(DataSuite):
    """Data-driven suite comparing the TypeInfos produced by semantic
    analysis against the expected output section of each test case."""
    required_out_section = True
    files = ['semanal-typeinfo.test']

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        try:
            src = '\n'.join(testcase.input)
            result = build.build(
                sources=[BuildSource('main', None, src)],
                options=get_semanal_options(src, testcase),
                alt_lib_path=test_temp_dir,
            )
            if result.errors:
                raise CompileError(result.errors)
            # Collect TypeInfos declared inside the test modules only.
            typeinfos = TypeInfoMap()
            for module, file in result.files.items():
                if module not in testcase.test_modules:
                    continue
                for sym in file.names.values():
                    if not isinstance(sym.node, TypeInfo):
                        continue
                    assert sym.fullname
                    if any(sym.fullname.startswith(m + '.') for m in testcase.test_modules):
                        typeinfos[sym.fullname] = sym.node
            actual = str(typeinfos).split('\n')
        except CompileError as e:
            actual = e.messages
        assert_string_arrays_equal(testcase.output, actual, f'Invalid semantic analyzer output ({testcase.file}, line {testcase.line})')
class Task():
    """A coroutine task bound to a CoroutineBasedSession.

    Drives the user coroutine one step at a time, hooking yielded futures to
    `_wakeup` callbacks, and cancels any pending futures on close().

    NOTE(review): `session_context` is used below as a context manager, so a
    @contextmanager decorator is presumably applied outside this chunk;
    likewise `gen_coro_id` is called as ``self.gen_coro_id(self.coro)`` but
    declares only ``(coro=None)``, so it is presumably a @staticmethod.
    Confirm both against the original file.
    """

    def session_context(self):
        # Install this task's session and id into the thread-local context
        # for the duration of one step, then always clear them again.
        _context.current_session = self.session
        _context.current_task_id = self.coro_id
        try:
            (yield)
        finally:
            _context.current_session = None
            _context.current_task_id = None

    def gen_coro_id(coro=None):
        # Readable unique id: coroutine function name + random suffix.
        name = 'coro'
        if hasattr(coro, '__name__'):
            name = coro.__name__
        return ('%s-%s' % (name, random_str(10)))

    def __init__(self, coro, session: CoroutineBasedSession, on_coro_stop=None):
        self.session = session
        self.coro = coro
        self.result = None           # return value of the coroutine, if any
        self.task_closed = False
        self.on_coro_stop = (on_coro_stop or (lambda _: None))
        self.coro_id = self.gen_coro_id(self.coro)
        # Futures the coroutine currently awaits, keyed by id() so they can
        # be cancelled in close().
        self.pending_futures = {}
        logger.debug('Task[%s] created ', self.coro_id)

    def step(self, result=None, throw_exp=False):
        """Advance the coroutine one send()/throw() and handle what it yields."""
        coro_yield = None
        with self.session_context():
            try:
                if throw_exp:
                    coro_yield = self.coro.throw(result)
                else:
                    coro_yield = self.coro.send(result)
            except StopIteration as e:
                # Coroutine finished; capture its return value, if present.
                if (len(e.args) == 1):
                    self.result = e.args[0]
                self.close()
                logger.debug('Task[%s] finished', self.coro_id)
            except Exception as e:
                # Session-level exceptions are already handled by the session.
                if (not isinstance(e, SessionException)):
                    self.session.on_task_exception()
                self.close()
        if (coro_yield is None):
            return
        future = None
        if isinstance(coro_yield, WebIOFuture):
            if coro_yield.coro:
                # Schedule the wrapped coroutine on the event loop.
                future = asyncio.run_coroutine_threadsafe(coro_yield.coro, asyncio.get_event_loop())
        else:
            # Anything else yielded is assumed to already be a future-like.
            future = coro_yield
        if ((not self.session.closed()) and hasattr(future, 'add_done_callback')):
            future.add_done_callback(self._wakeup)
            self.pending_futures[id(future)] = future

    def _wakeup(self, future):
        # Resume the coroutine with the future's result (unless cancelled).
        if (not future.cancelled()):
            del self.pending_futures[id(future)]
            self.step(future.result())

    def close(self):
        """Close the coroutine, cancel pending futures, break reference cycles."""
        if self.task_closed:
            return
        self.task_closed = True
        self.coro.close()
        while self.pending_futures:
            (_, f) = self.pending_futures.popitem()
            f.cancel()
        self.on_coro_stop(self)
        # Drop references so the session/task can be garbage collected.
        self.on_coro_stop = None
        self.session = None
        logger.debug('Task[%s] closed', self.coro_id)

    def __del__(self):
        if (not self.task_closed):
            logger.warning('Task[%s] was destroyed but it is pending!', self.coro_id)

    def task_handle(self):
        # Lightweight handle exposing close/closed without leaking the task.
        handle = TaskHandler(close=self.close, closed=(lambda : self.task_closed))
        return handle
# NOTE(review): '.wrap' looks like the tail of a decorator truncated by
# extraction -- confirm against the original file.
.wrap
def get_block_sizes_runtime_device(block_sizes: List[int], runtime_device: torch.device, tensor_cache: Dict[(str, Tuple[(torch.Tensor, List[torch.Tensor])])], embedding_shard_metadata: Optional[List[List[int]]]=None, dtype: torch.dtype=torch.int32) -> Tuple[(torch.Tensor, List[torch.Tensor])]:
    """Return (block-sizes tensor, per-table shard-offset tensors) on the
    runtime device, memoised in ``tensor_cache`` under a fixed key.

    NOTE(review): the cache key is constant, so the cache assumes
    block_sizes/embedding_shard_metadata never change for a given cache
    dict -- confirm with callers.
    """
    cache_key: str = '__block_sizes'
    if (cache_key not in tensor_cache):
        tensor_cache[cache_key] = (torch.tensor(block_sizes, device=runtime_device, dtype=dtype), ([] if (embedding_shard_metadata is None) else [torch.tensor(row_pos, device=runtime_device, dtype=dtype) for row_pos in embedding_shard_metadata]))
    return tensor_cache[cache_key]
# NOTE(review): '_env(...)' looks like the tail of a @register_env-style
# decorator truncated by extraction -- confirm against the original file.
_env('PickCube-Light-v0', max_episode_steps=100, override=True)
class PickCubeLightEnv(PickCubeEnv):
    """PickCube variant with a richer lighting rig: two directional lights
    plus red/green/blue point lights."""

    def _setup_lighting(self):
        shadow = self.enable_shadow
        self._scene.set_ambient_light([0.3, 0.3, 0.3])
        # Key light with a high-resolution shadow map.
        self._scene.add_directional_light([1, 1, (- 1)], [1, 1, 1], shadow=shadow, scale=5, shadow_map_size=2048)
        # Fill light, no shadow.
        self._scene.add_directional_light([(- 1), (- 1), (- 1)], [1, 1, 1])
        # Coloured accent point lights (R, G, B).
        self._scene.add_point_light([1, 0, 1], [1, 0, 0], shadow=shadow)
        self._scene.add_point_light([(- 0.5), 1, 1], [0, 1, 0], shadow=shadow)
        self._scene.add_point_light([(- 0.5), (- 1), 1], [0, 0, 1], shadow=shadow)
class TestCommand():
    """Tests for distutils Command helpers: option coercion, make_file,
    option dumping and debug printing. `cmd` is a fixture supplying a
    Command instance (named 'MyCmd')."""

    def test_ensure_string_list(self, cmd):
        cmd.not_string_list = ['one', 2, 'three']
        cmd.yes_string_list = ['one', 'two', 'three']
        cmd.not_string_list2 = object()
        cmd.yes_string_list2 = 'ok'
        cmd.ensure_string_list('yes_string_list')
        cmd.ensure_string_list('yes_string_list2')
        # Mixed-type lists and non-list objects must be rejected.
        with pytest.raises(DistutilsOptionError):
            cmd.ensure_string_list('not_string_list')
        with pytest.raises(DistutilsOptionError):
            cmd.ensure_string_list('not_string_list2')
        # Comma-separated strings are split into lists.
        cmd.option1 = 'ok,dok'
        cmd.ensure_string_list('option1')
        assert (cmd.option1 == ['ok', 'dok'])
        cmd.option2 = ['xxx', 'www']
        cmd.ensure_string_list('option2')
        cmd.option3 = ['ok', 2]
        with pytest.raises(DistutilsOptionError):
            cmd.ensure_string_list('option3')

    def test_make_file(self, cmd):
        # infiles must be a string or a list of strings.
        with pytest.raises(TypeError):
            cmd.make_file(infiles=1, outfile='', func='func', args=())
        def _execute(func, args, exec_msg, level):
            # make_file should derive its message from the in/out names.
            assert (exec_msg == 'generating out from in')
        cmd.force = True
        cmd.execute = _execute
        cmd.make_file(infiles='in', outfile='out', func='func', args=())

    def test_dump_options(self, cmd):
        msgs = []
        def _announce(msg, level):
            msgs.append(msg)
        cmd.announce = _announce
        cmd.option1 = 1
        cmd.option2 = 1
        cmd.user_options = [('option1', '', ''), ('option2', '', '')]
        cmd.dump_options()
        wanted = ["command options for 'MyCmd':", ' option1 = 1', ' option2 = 1']
        assert (msgs == wanted)

    def test_ensure_string(self, cmd):
        cmd.option1 = 'ok'
        cmd.ensure_string('option1')
        # A default value is installed when the attribute is None.
        cmd.option2 = None
        cmd.ensure_string('option2', 'xxx')
        assert hasattr(cmd, 'option2')
        cmd.option3 = 1
        with pytest.raises(DistutilsOptionError):
            cmd.ensure_string('option3')

    def test_ensure_filename(self, cmd):
        cmd.option1 = __file__
        cmd.ensure_filename('option1')
        cmd.option2 = 'xxx'
        with pytest.raises(DistutilsOptionError):
            cmd.ensure_filename('option2')

    def test_ensure_dirname(self, cmd):
        cmd.option1 = (os.path.dirname(__file__) or os.curdir)
        cmd.ensure_dirname('option1')
        cmd.option2 = 'xxx'
        with pytest.raises(DistutilsOptionError):
            cmd.ensure_dirname('option2')

    def test_debug_print(self, cmd, capsys, monkeypatch):
        # debug_print is silent unless distutils debug mode is enabled.
        cmd.debug_print('xxx')
        assert (capsys.readouterr().out == '')
        monkeypatch.setattr(debug, 'DEBUG', True)
        cmd.debug_print('xxx')
        assert (capsys.readouterr().out == 'xxx\n')
# NOTE(review): '.parametrize' looks like the tail of @pytest.mark.parametrize,
# truncated by extraction -- confirm against the original file.
.parametrize('shape,tile_shape,tile_start', [((2,), (2,), (1,)), ((4,), (2,), (0,)), ((4, 2), (2, 2), (1, 2)), ((2, 4), (2, 2), (2, 1))])
def test_read_write_tiles(tmp_path, shape, tile_shape, tile_start):
    """Round-trip: write an array as tiles on disk, read it back, compare."""
    a = num.arange(math.prod(shape)).reshape(shape)
    write_tiles(ary=a, dirpath=tmp_path, tile_shape=tile_shape, tile_start=tile_start)
    # Make sure asynchronous writes are flushed before reading back.
    fence(block=True)
    b = num.empty_like(a)
    read_tiles(ary=b, dirpath=tmp_path, tile_shape=tile_shape, tile_start=tile_start)
    assert (a == b).all()
class Connection_Combination(nn.Module):
    """Mix three candidate inputs with softmax-normalised weights.

    Each softmax weight is scaled by 3 so that, with uniform weights, the
    mixture equals the plain sum of the three inputs; ReLU is applied last.
    """

    def __init__(self):
        super(Connection_Combination, self).__init__()

    def forward(self, prev_parallel, prev_above, prev_below, betas):
        weights = F.softmax(betas, dim=-1)
        weighted_sum = ((3 * weights[0]) * prev_parallel
                        + (3 * weights[1]) * prev_above
                        + (3 * weights[2]) * prev_below)
        return F.relu(weighted_sum)
class InlineQueryResultCachedDocument(InlineQueryResult):
    """Inline query result referring to a document already stored on the
    Telegram servers (identified by ``document_file_id``)."""
    __slots__ = ('reply_markup', 'caption_entities', 'document_file_id', 'caption', 'title', 'description', 'parse_mode', 'input_message_content')

    def __init__(self, id: str, title: str, document_file_id: str, description: Optional[str]=None, caption: Optional[str]=None, reply_markup: Optional[InlineKeyboardMarkup]=None, input_message_content: Optional['InputMessageContent']=None, parse_mode: ODVInput[str]=DEFAULT_NONE, caption_entities: Optional[Sequence[MessageEntity]]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(InlineQueryResultType.DOCUMENT, id, api_kwargs=api_kwargs)
        # The base class freezes instances; temporarily unfreeze to assign.
        with self._unfrozen():
            self.title: str = title
            self.document_file_id: str = document_file_id
            self.description: Optional[str] = description
            self.caption: Optional[str] = caption
            self.parse_mode: ODVInput[str] = parse_mode
            # Entities are normalised to an immutable tuple.
            self.caption_entities: Tuple[(MessageEntity, ...)] = parse_sequence_arg(caption_entities)
            self.reply_markup: Optional[InlineKeyboardMarkup] = reply_markup
            self.input_message_content: Optional[InputMessageContent] = input_message_content
def init_chain_adapters(*, backend: mcb.Backend, chains: int, initial_point: Mapping[(str, np.ndarray)], step: Union[(CompoundStep, BlockedStep)], model: Model) -> Tuple[(mcb.Run, List[ChainRecordAdapter])]:
    """Create a backend run and one ChainRecordAdapter per chain.

    The run metadata and point function are derived from the initial point,
    the step method and the model; all adapters share one stats bijection.
    """
    meta, point_fn = make_runmeta_and_point_fn(initial_point=initial_point, step=step, model=model)
    run = backend.init_run(meta)
    statsbj = StatsBijection(step.stats_dtypes)
    adapters = []
    for chain_number in range(chains):
        chain = run.init_chain(chain_number=chain_number)
        adapters.append(ChainRecordAdapter(chain=chain, point_fn=point_fn, stats_bijection=statsbj))
    return run, adapters
def processor_class_from_name(class_name: str):
    """Resolve a processor class by name.

    Search order: the per-model-type mapping (importing the model's module),
    user-registered extra processors, then the top-level transformers
    namespace.  Returns None when nothing matches.
    """
    for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
        if class_name not in processors:
            continue
        module_name = model_type_to_module_name(module_name)
        module = importlib.import_module(f'.{module_name}', 'transformers.models')
        try:
            return getattr(module, class_name)
        except AttributeError:
            # Declared in the mapping but absent from the module; keep looking.
            continue
    # Processors registered dynamically via PROCESSOR_MAPPING.register().
    for processor in PROCESSOR_MAPPING._extra_content.values():
        if getattr(processor, '__name__', None) == class_name:
            return processor
    # Fall back to the main namespace (covers re-exported classes).
    main_module = importlib.import_module('transformers')
    return getattr(main_module, class_name, None)
class DataTrainingArguments():
    """Arguments controlling which dataset is used and how inputs are
    tokenized/padded for training and evaluation.

    NOTE(review): a @dataclass decorator is presumably applied outside this
    chunk (field() is meaningless otherwise) -- confirm.
    """
    task_name: Optional[str] = field(default='ncc', metadata={'help': 'The name of the task to train on: ncc'})
    dataset_name: Optional[str] = field(default='indic_glue', metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default='sna.bn', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})

    def __post_init__(self):
        # FIX: task_name is Optional -- calling .lower() on None raised
        # AttributeError; only normalise when a value was supplied.
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
def _coalesce_add_and_mm_nodes(ir_nodes_list: List[IrNode]): del_node_indices = [] for (i, ir_node) in enumerate(ir_nodes_list): if ((ir_node.node_type == 'add') and (len(ir_node.inputs) == 1)): producer_ir_node = ir_nodes_list[(i - 1)] if ((producer_ir_node.node_type == 'mm') and (len(producer_ir_node.outputs) == 1) and (producer_ir_node.outputs[0] == ir_node.inputs[0])): producer_ir_node.outputs = ir_node.outputs producer_ir_node.node_type = 'addmm' del_node_indices.insert(0, i) for index in del_node_indices: del ir_nodes_list[index]
class Selector(object):
    """Immutable query builder over a list of experiment records.

    Filters are (key, value) equality tests against each experiment's
    flat_params (missing keys pass); custom filters are arbitrary
    predicates.  `where`/`custom_filter` return new Selectors.
    """

    def __init__(self, exps_data, filters=None, custom_filters=None):
        self._exps_data = exps_data
        self._filters = tuple() if filters is None else tuple(filters)
        self._custom_filters = [] if custom_filters is None else custom_filters

    def where(self, k, v):
        # New Selector with one more (k, v) equality filter appended.
        return Selector(self._exps_data, self._filters + ((k, v),), self._custom_filters)

    def custom_filter(self, filter):
        return Selector(self._exps_data, self._filters, self._custom_filters + [filter])

    def _check_exp(self, exp):
        # Fail only when a filtered key is present AND its (stringified)
        # value differs; then require every custom predicate to hold.
        for k, v in self._filters:
            if k in exp.flat_params and str(exp.flat_params.get(k, None)) != str(v):
                return False
        return all(predicate(exp) for predicate in self._custom_filters)

    def extract(self):
        return [exp for exp in self._exps_data if self._check_exp(exp)]

    def iextract(self):
        return filter(self._check_exp, self._exps_data)
def get_tiny_config_from_class(configuration_class):
    """Build a tiny model config for ``configuration_class`` by locating and
    instantiating the matching *ModelTester class from the tests package.

    Returns None when the model is skipped, the tester cannot be found, or
    the tester exposes no config builder.
    """
    if 'OpenAIGPT' in configuration_class.__name__:
        # Skipped: handled by a bespoke testing scheme elsewhere.
        return
    model_type = configuration_class.model_type
    camel_case_model_name = configuration_class.__name__.split('Config')[0]
    try:
        model_slug = model_type.replace('-', '_')
        module = importlib.import_module(f'.test_modeling_{model_slug}', package=f'tests.{model_slug}')
        model_tester_class = getattr(module, f'{camel_case_model_name}ModelTester', None)
    except (ImportError, AttributeError):
        logger.error(f'No model tester class for {configuration_class.__name__}')
        return
    if model_tester_class is None:
        logger.warning(f'No model tester class for {configuration_class.__name__}')
        return
    model_tester = model_tester_class(parent=None)
    # Prefer the pipeline-specific config when the tester provides one.
    if hasattr(model_tester, 'get_pipeline_config'):
        return model_tester.get_pipeline_config()
    if hasattr(model_tester, 'get_config'):
        return model_tester.get_config()
    logger.warning(f'Model tester {model_tester_class.__name__} has no `get_config()`.')
    return None
def check_config_attributes():
    """Scan every registered config class for attributes that the matching
    modeling files never use, and raise ValueError listing offenders."""
    configs_with_unused_attributes = {}
    for config_class in list(CONFIG_MAPPING.values()):
        unused = check_config_attributes_being_used(config_class)
        if unused:
            configs_with_unused_attributes[config_class.__name__] = unused
    if not configs_with_unused_attributes:
        return
    error = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
    for name, attributes in configs_with_unused_attributes.items():
        error += f'''{name}: {attributes}
'''
    raise ValueError(error)
def LoadMat(path, project):
    """Parse a .mat material file into a Material.

    The file must start with a 'Material' header line followed by
    '<tag>key: value' lines (the first four characters of each key are a
    tag prefix and are stripped).  Texture references are resolved through
    the project's UUID map; 'None' means no texture.
    """
    if not Path(path).is_file():
        raise PyUnityException(f'The specified file does not exist: {path}')
    with open(path) as f:
        contents = f.read().rstrip().splitlines()
    if contents.pop(0) != 'Material':
        raise ProjectParseException('Expected "Material" as line 1')
    parts = {}
    for line in contents:
        pieces = line.split(': ')
        # Strip the 4-character tag prefix off every key.
        parts[pieces[0][4:]] = pieces[1]
    color_str = parts['color']
    if not (color_str.startswith('RGB') or color_str.startswith('HSV')):
        raise ProjectParseException('Color value does not start with RGB or HSV')
    color = Color.fromString(color_str)
    if parts['texture'] != 'None' and parts['texture'] not in project._idMap:
        raise ProjectParseException(f"Project file UUID not found: {parts['texture']}")
    texture = None if parts['texture'] == 'None' else project._idMap[parts['texture']]
    return Material(color, texture)
def test_pip(host):
    """Smoke-test the testinfra pip module against the prepared test image.

    Assumes the image ships system pip 23.x and a virtualenv at /v with
    requests 2.30.0 installed -- pinned by the image build; confirm there.
    """
    assert host.pip.get_packages()['pip']['version'].startswith('23.')
    # Packages inside the virtualenv are reached via pip_path.
    pkg = host.pip.get_packages(pip_path='/v/bin/pip')['requests']
    assert (pkg['version'] == '2.30.0')
    outdated = host.pip.get_outdated_packages(pip_path='/v/bin/pip')['requests']
    assert (outdated['current'] == pkg['version'])
    # `pip check` should report a consistent environment.
    assert host.pip.check().succeeded
    assert host.pip('pip').is_installed
    assert (not host.pip('does_not_exist').is_installed)
    pkg = host.pip('requests', pip_path='/v/bin/pip')
    assert pkg.is_installed
    assert host.pip('pip').version.startswith('23.')
    assert (pkg.version == '2.30.0')
    # Unknown packages report an empty version rather than raising.
    assert (host.pip('does_not_exist').version == '')
def get_us_midlatitude_cyclone_abi(base_dir=None, method=None, force=False):
    """Download GOES-16 ABI L1b demo files (2019 day-073 US mid-latitude
    cyclone case) into ``base_dir`` and return the local filenames.

    Only the 'gcsfs' download method is implemented; exactly 16 files are
    expected, anything else raises RuntimeError.
    """
    base_dir = base_dir or config.get('demo_data_dir', '.')
    if method is None:
        method = 'gcsfs'
    if method not in ['gcsfs']:
        raise NotImplementedError("Demo data download method '{}' not implemented yet.".format(method))
    from ._google_cloud_platform import get_bucket_files
    patterns = ['gs://gcp-public-data-goes-16/ABI-L1b-RadC/2019/073/00/*s*.nc']
    subdir = os.path.join(base_dir, 'abi_l1b', '_us_midlatitude_cyclone')
    os.makedirs(subdir, exist_ok=True)
    filenames = get_bucket_files(patterns, subdir, force=force)
    # One full CONUS scan consists of 16 ABI channels.
    if len(filenames) != 16:
        raise RuntimeError('Not all files could be downloaded')
    return filenames
def main() -> None:
    """CLI entry point: print OS distribution info (optionally as JSON,
    optionally inspecting an alternate root filesystem)."""
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    parser = argparse.ArgumentParser(description='OS distro info tool')
    parser.add_argument('--json', '-j', help='Output in machine readable format', action='store_true')
    parser.add_argument('--root-dir', '-r', type=str, dest='root_dir', help='Path to the root filesystem directory (defaults to /)')
    args = parser.parse_args()

    if args.root_dir:
        # Inspect a chroot-style tree instead of the running system; skip
        # probes that only make sense on the live host.
        dist = LinuxDistribution(include_lsb=False, include_uname=False, include_oslevel=False, root_dir=args.root_dir)
    else:
        dist = _distro

    if args.json:
        logger.info(json.dumps(dist.info(), indent=4, sort_keys=True))
        return
    logger.info('Name: %s', dist.name(pretty=True))
    logger.info('Version: %s', dist.version(pretty=True))
    logger.info('Codename: %s', dist.codename())
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4),
    with a residual connection (optionally projected by ``downsample``)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        out_planes = planes * self.expansion
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity path, projected when channel/stride shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
def build_field_transform_default_imagenet(config: Optional[List[Dict[(str, Any)]]], default_transform: Optional[Callable]=None, split: Optional[bool]=None, key: Union[(int, str)]='input', key_map_transform: Optional[Callable]=DEFAULT_KEY_MAP) -> Callable:
    """Build a transform applied to one key of each sample.

    Precedence: explicit ``config`` > ``default_transform`` > ImageNet
    defaults chosen by ``split`` ('train' => augmenting).  ``default_transform``
    and ``split`` are mutually exclusive.  ``key_map_transform`` (when not
    None) is composed in front of the keyed transform.
    """
    assert default_transform is None or split is None, 'Can only specify one of default_transform and split'
    if config is not None:
        transform = build_transforms(config)
    elif default_transform is not None:
        transform = default_transform
    elif split is not None:
        transform = ImagenetAugmentTransform() if split == 'train' else ImagenetNoAugmentTransform()
    else:
        raise ValueError('No transform config provided with no defaults')
    transform = ApplyTransformToKey(transform, key=key)
    if key_map_transform is None:
        return transform
    return transforms.Compose([key_map_transform, transform])
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
    """Group items by ``keyfunc``, map each through ``valuefunc``, and
    optionally collapse each group with ``reducefunc``.

    Returns a dict-like mapping (a defaultdict with its factory disabled,
    so missing keys raise KeyError).
    """
    if valuefunc is None:
        valuefunc = lambda x: x
    groups = defaultdict(list)
    for element in iterable:
        groups[keyfunc(element)].append(valuefunc(element))
    if reducefunc is not None:
        for key in groups:
            groups[key] = reducefunc(groups[key])
    # Disable default construction so the result behaves like a plain dict.
    groups.default_factory = None
    return groups
def _get_vendored_config():
    """Locate the active qiime2 config file, or None.

    Precedence: $QIIME2_CONFIG, the user config dir, the site config dir,
    then (inside a conda env) the vendored file -- which is written from
    VENDORED_CONFIG on first use.
    """
    config_fp = os.environ.get('QIIME2_CONFIG')
    if config_fp is not None:
        return config_fp
    candidates = (
        os.path.join(appdirs.user_config_dir('qiime2'), 'qiime2_config.toml'),
        os.path.join(appdirs.site_config_dir('qiime2'), 'qiime2_config.toml'),
    )
    for fp_ in candidates:
        if os.path.exists(fp_):
            return fp_
    if CONDA_PREFIX != '':
        if not os.path.exists(VENDORED_FP):
            # First use in this env: materialise the vendored defaults.
            with open(VENDORED_FP, 'w') as fh:
                tomlkit.dump(VENDORED_CONFIG, fh)
        return VENDORED_FP
    return config_fp
class Seq2SeqTSModelOutput(ModelOutput):
    """Output container for seq2seq time-series models: the usual
    encoder/decoder outputs plus the loc/scale normalisation statistics
    and static covariates used by the model.

    NOTE(review): a @dataclass decorator is presumably applied outside this
    chunk, as is conventional for ModelOutput subclasses -- confirm.
    """
    # Decoder-side outputs.
    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Encoder-side outputs.
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Input normalisation statistics and static covariates.
    loc: Optional[torch.FloatTensor] = None
    scale: Optional[torch.FloatTensor] = None
    static_features: Optional[torch.FloatTensor] = None
def _set_filepicker_kwargs(fileDlg, **kwargs):
    """Forward keyword arguments onto a file dialog via its setters.

    Each kwarg name is capitalised and matched against both an enum class
    and a ``set<Name>`` method on the dialog; kwargs that match neither are
    silently ignored.  'Options' is special-cased: its value may be a flag
    name or a list of flag names, OR-ed together.
    NOTE(review): presumably a QFileDialog (fileDlg.Option suggests Qt6
    bindings) -- confirm against callers.
    """
    NO_MATCH = object()  # sentinel: distinguishes "attribute absent" from None
    for (kk, vv) in kwargs.items():
        formattedName = (kk[0].upper() + kk[1:])
        if (formattedName == 'Options'):
            enumCls = fileDlg.Option
        else:
            enumCls = getattr(fileDlg, formattedName, NO_MATCH)
        setFunc = getattr(fileDlg, f'set{formattedName}', NO_MATCH)
        if ((enumCls is NO_MATCH) or (setFunc is NO_MATCH)):
            continue  # unsupported kwarg: skip rather than raise
        if (enumCls is fileDlg.Option):
            # OR all requested option flags together into one enum value.
            builder = fileDlg.Option(0)
            if isinstance(vv, str):
                vv = [vv]
            for flag in vv:
                curVal = getattr(enumCls, flag)
                builder |= curVal
            outEnum = enumCls(builder)
        else:
            # Non-option values are looked up by name on the enum class.
            outEnum = getattr(enumCls, vv)
        setFunc(outEnum)
class TimeSolveLossActiveMaterial(SolveModel):
    """ASV benchmark timing the solve of Li-ion models under the
    loss-of-active-material submodel options."""
    # Benchmark parameter grid (asv convention): model class x LAM option x solver.
    param_names = ['model', 'model option', 'solver class']
    params = ([pybamm.lithium_ion.SPM, pybamm.lithium_ion.DFN], ['none', 'stress-driven', 'reaction-driven', 'stress and reaction-driven'], [pybamm.CasadiSolver, pybamm.IDAKLUSolver])

    def setup(self, model, params, solver_class):
        # Reset RNG state so repeated runs build identical problems.
        set_random_seed()
        SolveModel.solve_setup(self, 'Ai2020', model, 'loss of active material', params, solver_class)

    def time_solve_model(self, _model, _params, _solver_class):
        # Parameters are consumed in setup(); asv still passes them here.
        self.solver.solve(self.model, t_eval=self.t_eval)
class EngineConfiguration(object):
    """Abstract interface for an engine configuration.

    NOTE(review): ``new``/``from_file``/``from_str`` declare no ``self`` --
    presumably intended as static factory methods whose decorators are not
    visible in this chunk; confirm before calling on instances.
    """
    def new() -> 'EngineConfiguration':
        # Factory: create a configuration with default settings.
        raise NotImplementedError

    def from_file(filepath: Path) -> 'EngineConfiguration':
        # Factory: load a configuration from a file on disk.
        raise NotImplementedError

    def from_str(s: str) -> 'EngineConfiguration':
        # Factory: parse a configuration from its string form.
        raise NotImplementedError

    def to_str(self) -> str:
        # Serialise this configuration back to its string form.
        raise NotImplementedError

    def get_coverage_mode(self) -> CoverageMode:
        raise NotImplementedError

    def set_target(self, target: int) -> None:
        # Optional hook: the default implementation ignores the target.
        pass
def hybrid_training(threshold, use_threshold, stage_nums, core_nums, train_step_nums, batch_size_nums, learning_rate_nums, keep_ratio_nums, train_data_x, train_data_y, test_data_x, test_data_y):
    """Train a recursive model index (RMI).

    Stage 0 is a router whose predictions distribute keys across the stage-1
    models; each trained model is snapshotted into a lightweight AbstractNN,
    and any last-stage model whose mean absolute error exceeds its threshold
    is replaced by an exact B-tree.  Returns the 2-D ``index`` grid of
    trained models (AbstractNN or BTree, None where no data was routed).

    NOTE(review): the per-stage hyperparameter lists (core_nums,
    train_step_nums, ...) are assumed to be indexed by stage -- confirm
    against callers.
    """
    stage_length = len(stage_nums)
    col_num = stage_nums[1]  # width of the (widest) second stage
    # tmp_inputs/tmp_labels[i][j]: training data routed to model j of stage i.
    tmp_inputs = [[[] for i in range(col_num)] for i in range(stage_length)]
    tmp_labels = [[[] for i in range(col_num)] for i in range(stage_length)]
    index = [[None for i in range(col_num)] for i in range(stage_length)]
    tmp_inputs[0][0] = train_data_x
    tmp_labels[0][0] = train_data_y
    test_inputs = test_data_x
    for i in range(0, stage_length):
        for j in range(0, stage_nums[i]):
            if (len(tmp_labels[i][j]) == 0):
                continue  # no keys routed to this cell; leave it as None
            inputs = tmp_inputs[i][j]
            labels = []
            test_labels = []
            if (i == 0):
                # Stage 0 predicts which stage-1 model owns each key:
                # rescale block positions into [0, stage_nums[1]).
                divisor = ((stage_nums[(i + 1)] * 1.0) / (STORE_NUMBER / BLOCK_SIZE))
                for k in tmp_labels[i][j]:
                    labels.append(int((k * divisor)))
                for k in test_data_y:
                    test_labels.append(int((k * divisor)))
            else:
                labels = tmp_labels[i][j]
                test_labels = test_data_y
            # Train the NN for this cell, then keep only a lightweight
            # AbstractNN snapshot of its weights and error.
            tmp_index = TrainedNN(threshold[i], use_threshold[i], core_nums[i], train_step_nums[i], batch_size_nums[i], learning_rate_nums[i], keep_ratio_nums[i], inputs, labels, test_inputs, test_labels)
            tmp_index.train()
            index[i][j] = AbstractNN(tmp_index.get_weights(), tmp_index.get_bias(), core_nums[i], tmp_index.cal_err())
            del tmp_index
            gc.collect()  # release the training graph before the next cell
            if (i < (stage_length - 1)):
                # Route each training example to the next-stage model chosen
                # by the freshly trained predictor (clamped to a valid index).
                for ind in range(len(tmp_inputs[i][j])):
                    p = index[i][j].predict(tmp_inputs[i][j][ind])
                    if (p > (stage_nums[(i + 1)] - 1)):
                        p = (stage_nums[(i + 1)] - 1)
                    tmp_inputs[(i + 1)][p].append(tmp_inputs[i][j][ind])
                    tmp_labels[(i + 1)][p].append(tmp_labels[i][j][ind])
    # Replace inaccurate last-stage models with exact B-trees.
    for i in range(stage_nums[(stage_length - 1)]):
        if (index[(stage_length - 1)][i] is None):
            continue
        mean_abs_err = index[(stage_length - 1)][i].mean_err
        if (mean_abs_err > threshold[(stage_length - 1)]):
            print('Using BTree')
            index[(stage_length - 1)][i] = BTree(2)
            index[(stage_length - 1)][i].build(tmp_inputs[(stage_length - 1)][i], tmp_labels[(stage_length - 1)][i])
    return index
def build_sdist(sdist_directory, config_settings):
    """PEP 517 hook: build a minimal sdist tarball for pkg2 and return its
    filename."""
    target = 'pkg2-0.5.tar.gz'
    with tarfile.open(pjoin(sdist_directory, target), 'w:gz', format=tarfile.PAX_FORMAT) as tf:
        def _add(relpath):
            # Place every file under the conventional <name>-<version>/ root.
            tf.add(relpath, arcname='pkg2-0.5/' + relpath)
        _add('pyproject.toml')
        for path in glob('*.py') + glob('*.dist-info'):
            _add(path)
    return target
class BaseCascade(BaseMulti):
    """Constraint applied pairwise along a chain ('cascade') of elements,
    skipping consecutive elements belonging to the same part.

    NOTE(review): ``prepare(cls, obj, solver)`` takes ``cls`` -- presumably
    a @classmethod whose decorator lies outside this chunk; confirm.
    """
    def prepare(cls, obj, solver):
        # Cascading can be switched off per-object via the 'Cascade' property.
        if (not getattr(obj, 'Cascade', True)):
            return super(BaseCascade, cls).prepare(obj, solver)
        func = cls.constraintFunc(obj, solver)
        if (not func):
            return
        props = cls.getPropertyValues(obj)
        prev = None
        ret = []
        for e in obj.Proxy.getElements():
            info = e.Proxy.getInfo()
            # Only constrain across part boundaries.
            if ((not prev) or (prev.Part == info.Part)):
                prev = info
                continue
            prevInfo = solver.getPartInfo(prev)
            e1 = cls._entityDef[0](solver, prevInfo, prev.Subname, prev.Shape)
            partInfo = solver.getPartInfo(info)
            e2 = cls._entityDef[0](solver, partInfo, info.Subname, info.Shape)
            prev = info
            # Put the movable part's entity first so the solver adjusts it.
            if solver.isFixedPart(info.Part):
                params = (props + [e1, e2])
            else:
                params = (props + [e2, e1])
            # NOTE(review): prev was reassigned to info just above, so
            # prev.SubnameRef here equals info.SubnameRef -- this looks
            # suspicious (probably meant the *previous* element's ref);
            # confirm intent before changing.
            solver.system.checkRedundancy(obj, prevInfo, partInfo, prev.SubnameRef, info.SubnameRef)
            h = func(*params, group=solver.group)
            if isinstance(h, (list, tuple)):
                ret += list(h)
            else:
                ret.append(h)
        if (not ret):
            logger.warn(translate('asm3', '{} has no effective constraint'), cstrName(obj))
        return ret
def resolve(request):
    """Test secret resolver: return {'secret': <preimage>} when the
    request's secrethash matches the known test secret, else None."""
    if 'secrethash' not in request:
        return None
    # Fixed test secret and its sha256-based hash.
    x_secret = '0x2ff886d47b156de00d4cad5d8cb5b572adfe35e6d2f65ee'
    x_secret_hash = to_hex(sha256(to_bytes(hexstr=x_secret)).digest())
    if request['secrethash'] == x_secret_hash:
        return {'secret': x_secret}
    return None
def sicpovm_preparation_matrix(label: str) -> np.ndarray:
    """Return the preparation density matrix for SIC-POVM state ``label``.

    Labels 'S0'..'S3' map to the four qubit SIC-POVM states (each a rank-1,
    trace-1 Hermitian matrix); any other label yields an empty array,
    preserving the original behaviour.

    Note: the return annotation was ``np.array`` (a function, not a type);
    fixed to ``np.ndarray``.
    """
    # Primitive cube root of unity and its conjugate, used for S2/S3 phases.
    omega = np.exp((np.pi * 2j) / 3)
    omega_c = np.exp(((- np.pi) * 2j) / 3)
    matrices = {
        'S0': np.array([[1, 0], [0, 0]], dtype=complex),
        'S1': np.array([[1, np.sqrt(2)], [np.sqrt(2), 2]], dtype=complex) / 3,
        'S2': np.array([[1, omega * np.sqrt(2)], [omega_c * np.sqrt(2), 2]]) / 3,
        'S3': np.array([[1, omega_c * np.sqrt(2)], [omega * np.sqrt(2), 2]]) / 3,
    }
    return matrices.get(label, np.array([]))
def test_temporary_directory_python_3_10_or_newer(mocker: MockerFixture) -> None:
    """On Python >= 3.10, temporary_directory() must delegate to
    tempfile.TemporaryDirectory(ignore_cleanup_errors=True) rather than the
    mkdtemp/rmtree fallback used on older interpreters."""
    mocked_rmtree = mocker.patch('shutil.rmtree')
    mocked_temp_dir = mocker.patch('tempfile.TemporaryDirectory')
    mocked_mkdtemp = mocker.patch('tempfile.mkdtemp')
    # Pretend we are running on CPython 3.10.
    mocker.patch.object(sys, 'version_info', (3, 10))
    with temporary_directory() as tmp:
        assert tmp
    # The legacy code path must not have been touched.
    assert (not mocked_rmtree.called)
    assert (not mocked_mkdtemp.called)
    mocked_temp_dir.assert_called_with(ignore_cleanup_errors=True)
def to_pickle(data):
    """Recursively convert ``data`` into a picklable structure.

    Builtin containers and their _Saver* tracking variants are rebuilt as
    plain builtins; unknown iterables are rebuilt as their own class when
    possible (list otherwise); session-like objects are packed via
    pack_session; everything else goes through pack_dbobj.
    """
    def process_item(item):
        # Dispatch on exact type -- isinstance would misroute the _Saver*
        # subclasses, which must be downgraded to plain builtins.
        dtype = type(item)
        if (dtype in (str, int, float, bool, bytes, SafeString, SafeBytes)):
            return item  # scalars pass through untouched
        elif (dtype == tuple):
            return tuple((process_item(val) for val in item))
        elif (dtype in (list, _SaverList)):
            return [process_item(val) for val in item]
        elif (dtype in (dict, _SaverDict)):
            return dict(((process_item(key), process_item(val)) for (key, val) in item.items()))
        elif (dtype in (set, _SaverSet)):
            return set((process_item(val) for val in item))
        elif (dtype in (OrderedDict, _SaverOrderedDict)):
            return OrderedDict(((process_item(key), process_item(val)) for (key, val) in item.items()))
        elif (dtype in (deque, _SaverDeque)):
            return deque((process_item(val) for val in item))
        elif hasattr(item, '__iter__'):
            # Unknown iterable: try to rebuild the same class, fall back to list.
            try:
                return item.__class__([process_item(val) for val in item])
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        elif (hasattr(item, 'sessid') and hasattr(item, 'conn_time')):
            # Duck-typed session detection -- presumably sufficient to
            # identify session objects; confirm the attribute contract.
            return pack_session(item)
        return pack_dbobj(item)
    return process_item(data)
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a T5 TensorFlow checkpoint into a PyTorch state dict.

    Builds a fresh T5Model from ``config_file``, copies the TF weights into
    it, and saves the resulting state dict to ``pytorch_dump_path``.
    """
    config = T5Config.from_json_file(config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = T5Model(config)
    # Copy the TF checkpoint weights into the freshly initialised model.
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
class ParaphraseMiningEvaluator(SentenceEvaluator):
    """Evaluate a model on paraphrase mining.

    Candidate duplicate pairs are mined from the sentence corpus and
    compared against gold duplicate annotations. The primary metric
    (returned by __call__) is the average precision over the score-ranked
    candidate list; the optimal-threshold precision / recall / F1 are
    logged and written to CSV as well.
    """

    def __init__(self, sentences_map: Dict[str, str], duplicates_list: List[Tuple[str, str]] = None,
                 duplicates_dict: Dict[str, Dict[str, bool]] = None, add_transitive_closure: bool = False,
                 query_chunk_size: int = 5000, corpus_chunk_size: int = 100000, max_pairs: int = 500000,
                 top_k: int = 100, show_progress_bar: bool = False, batch_size: int = 16, name: str = ''):
        """
        :param sentences_map: Mapping sentence-id -> sentence text.
        :param duplicates_list: (id1, id2) pairs that are duplicates.
        :param duplicates_dict: Nested dict where duplicates_dict[id1][id2] is
            True for duplicate pairs; alternative to duplicates_list.
        :param add_transitive_closure: If True, extend the duplicate relation
            with its transitive closure (a~b and b~c implies a~c).
        :param query_chunk_size: Forwarded to paraphrase_mining.
        :param corpus_chunk_size: Forwarded to paraphrase_mining.
        :param max_pairs: Maximal number of mined candidate pairs.
        :param top_k: Candidates kept per query sentence.
        :param show_progress_bar: Show a progress bar while encoding.
        :param batch_size: Encoding batch size.
        :param name: Suffix used in log messages and the CSV filename.
        """
        self.sentences = []
        self.ids = []
        for id, sentence in sentences_map.items():
            self.sentences.append(sentence)
            self.ids.append(id)
        self.name = name
        self.show_progress_bar = show_progress_bar
        self.batch_size = batch_size
        self.query_chunk_size = query_chunk_size
        self.corpus_chunk_size = corpus_chunk_size
        self.max_pairs = max_pairs
        self.top_k = top_k
        self.duplicates = duplicates_dict if duplicates_dict is not None else defaultdict(lambda: defaultdict(bool))
        if duplicates_list is not None:
            for id1, id2 in duplicates_list:
                if id1 in sentences_map and id2 in sentences_map:
                    self.duplicates[id1][id2] = True
                    self.duplicates[id2][id1] = True
        if add_transitive_closure:
            self.duplicates = self.add_transitive_closure(self.duplicates)
        # Count each unordered gold duplicate pair exactly once.
        positive_key_pairs = set()
        for key1 in self.duplicates:
            for key2 in self.duplicates[key1]:
                if (key1 in sentences_map and key2 in sentences_map
                        and (self.duplicates[key1][key2] or self.duplicates[key2][key1])):
                    positive_key_pairs.add(tuple(sorted([key1, key2])))
        self.total_num_duplicates = len(positive_key_pairs)
        if name:
            name = '_' + name
        self.csv_file: str = 'paraphrase_mining_evaluation' + name + '_results.csv'
        self.csv_headers = ['epoch', 'steps', 'precision', 'recall', 'f1', 'threshold', 'average_precision']

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Mine paraphrase candidates with *model* and return the average precision."""
        if epoch != -1:
            out_txt = f' after epoch {epoch}:' if steps == -1 else f' in epoch {epoch} after {steps} steps:'
        else:
            out_txt = ':'
        logging.info('Paraphrase Mining Evaluation on ' + self.name + ' dataset' + out_txt)
        pairs_list = paraphrase_mining(model, self.sentences, self.show_progress_bar, self.batch_size,
                                       self.query_chunk_size, self.corpus_chunk_size, self.max_pairs, self.top_k)
        logging.info('Number of candidate pairs: ' + str(len(pairs_list)))
        # Walk the score-ranked candidates: each prefix defines a threshold;
        # track the F1-optimal prefix and accumulate average precision.
        n_extract = n_correct = 0
        threshold = 0
        best_f1 = best_recall = best_precision = 0
        average_precision = 0
        for idx in range(len(pairs_list)):
            score, i, j = pairs_list[idx]
            id1 = self.ids[i]
            id2 = self.ids[j]
            n_extract += 1
            if self.duplicates[id1][id2] or self.duplicates[id2][id1]:
                n_correct += 1
                precision = n_correct / n_extract
                recall = n_correct / self.total_num_duplicates
                f1 = 2 * precision * recall / (precision + recall)
                average_precision += precision
                if f1 > best_f1:
                    best_f1 = f1
                    best_precision = precision
                    best_recall = recall
                    # Threshold halfway between this candidate's score and the next.
                    threshold = (pairs_list[idx][0] + pairs_list[min(idx + 1, len(pairs_list) - 1)][0]) / 2
        average_precision = average_precision / self.total_num_duplicates
        logging.info('Average Precision: {:.2f}'.format(average_precision * 100))
        logging.info('Optimal threshold: {:.4f}'.format(threshold))
        logging.info('Precision: {:.2f}'.format(best_precision * 100))
        logging.info('Recall: {:.2f}'.format(best_recall * 100))
        logging.info('F1: {:.2f}\n'.format(best_f1 * 100))
        if output_path is not None:
            csv_path = os.path.join(output_path, self.csv_file)
            # Single write path: create with header on first use, else append.
            write_header = not os.path.isfile(csv_path)
            with open(csv_path, mode='w' if write_header else 'a', encoding='utf-8') as f:
                writer = csv.writer(f)
                if write_header:
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, best_precision, best_recall, best_f1, threshold, average_precision])
        return average_precision

    @staticmethod
    def add_transitive_closure(graph):
        """Extend *graph* (a nested-dict adjacency map) with its transitive closure.

        Every pair of nodes in the same connected component is marked as
        duplicates of each other. Mutates and returns *graph*.

        Fix: this was an instance method taking a single ``graph`` parameter
        but called as ``self.add_transitive_closure(self.duplicates)``, which
        raised TypeError (two positional arguments). It must be a staticmethod.
        """
        nodes_visited = set()
        for a in list(graph.keys()):
            if a not in nodes_visited:
                # BFS to collect the connected component containing `a`.
                connected_subgraph_nodes = set()
                connected_subgraph_nodes.add(a)
                neighbor_nodes_queue = list(graph[a])
                while len(neighbor_nodes_queue) > 0:
                    node = neighbor_nodes_queue.pop(0)
                    if node not in connected_subgraph_nodes:
                        connected_subgraph_nodes.add(node)
                        neighbor_nodes_queue.extend(graph[node])
                # Mark every node pair inside the component as duplicates.
                connected_subgraph_nodes = list(connected_subgraph_nodes)
                for i in range(len(connected_subgraph_nodes) - 1):
                    for j in range(i + 1, len(connected_subgraph_nodes)):
                        graph[connected_subgraph_nodes[i]][connected_subgraph_nodes[j]] = True
                        graph[connected_subgraph_nodes[j]][connected_subgraph_nodes[i]] = True
                        nodes_visited.add(connected_subgraph_nodes[i])
                        nodes_visited.add(connected_subgraph_nodes[j])
        return graph
def get_args():
    """Parse command-line arguments for the speed-perturbation data-copy script.

    Returns the parsed namespace with ``speed_perturb`` already coerced from
    the 'true'/'false' string choice to a real bool.
    """
    arg_parser = argparse.ArgumentParser(description="This script copies the 'srcdir'\n data directory to output data directory 'dir'\n while modifying the utterances so that there are\n 3 copies of each utterance: one with the same\n speed, one with a higher speed (not more than\n factor% faster) and one with a lower speed\n (not more than factor% slower)")
    # Positional arguments (order matters for the CLI).
    arg_parser.add_argument('factor', type=float, default=12, help='Spacing (in percentage) between allowed lengths.')
    arg_parser.add_argument('srcdir', type=str, help='path to source data dir')
    arg_parser.add_argument('dir', type=str, help='output dir')
    # Optional tuning knobs.
    arg_parser.add_argument('--coverage-factor', type=float, default=0.05, help='Percentage of durations not covered from each\n side of duration histogram.')
    arg_parser.add_argument('--frame-shift', type=int, default=10, help='Frame shift in milliseconds.')
    arg_parser.add_argument('--frame-length', type=int, default=25, help='Frame length in milliseconds.')
    arg_parser.add_argument('--frame-subsampling-factor', type=int, default=3, help='Chain frame subsampling factor.\n See steps/nnet3/chain/train.py')
    arg_parser.add_argument('--speed-perturb', type=str, choices=['true', 'false'], default='true', help='If false, no speed perturbation will occur, i.e.\n only 1 copy of each utterance will be\n saved, which is modified to have an allowed length\n by using extend-wav-with-silence.')
    parsed = arg_parser.parse_args()
    # Coerce the string choice into a proper bool for downstream code.
    parsed.speed_perturb = parsed.speed_perturb == 'true'
    return parsed
def get_layer_path_for_storage(storage_uuid, cas_path, content_checksum):
    """Resolve the storage path for an image layer.

    Content-addressed layers (cas_path truthy) live at the blob path keyed
    by their checksum; layers without a CAS path fall back to the legacy
    v1 layout keyed by storage UUID.
    """
    store = config.store
    if cas_path:
        return store.blob_path(content_checksum)
    # Legacy (pre content-addressable storage) layout.
    logger.debug('Serving layer from legacy v1 path for storage %s', storage_uuid)
    return store.v1_image_layer_path(storage_uuid)
class Predictor(cog.Predictor):
    """Cog predictor exposing the GPEN face restoration, colorization and
    inpainting models behind a single predict() endpoint."""

    def setup(self):
        """Instantiate the three GPEN models once, at container startup."""
        faceenhancer_model = {'name': 'GPEN-BFR-256', 'size': 256, 'channel_multiplier': 1, 'narrow': 0.5}
        self.faceenhancer = FaceEnhancement(size=faceenhancer_model['size'], model=faceenhancer_model['name'],
                                            channel_multiplier=faceenhancer_model['channel_multiplier'],
                                            narrow=faceenhancer_model['narrow'])
        faceinpainter_model = {'name': 'GPEN-Inpainting-1024', 'size': 1024}
        self.faceinpainter = FaceInpainting(size=faceinpainter_model['size'], model=faceinpainter_model['name'],
                                            channel_multiplier=2)
        facecolorizer_model = {'name': 'GPEN-Colorization-1024', 'size': 1024}
        self.facecolorizer = FaceColorization(size=facecolorizer_model['size'], model=facecolorizer_model['name'],
                                              channel_multiplier=2)

    # FIX: the original had bare argument tuples here (a syntax error) —
    # these are cog's @cog.input decorators with the decorator name restored.
    @cog.input('image', type=Path, help='input image')
    @cog.input('task', type=str, options=['Face Restoration', 'Face Colorization', 'Face Inpainting'],
               default='Face Restoration', help='choose task type')
    @cog.input('output_individual', type=bool, default=False,
               help='whether outputs individual enhanced faces, valid for Face Restoration. When set to true, a zip folder of all the enhanced faces in the input will be generated for download.')
    @cog.input('broken_image', type=bool, default=True,
               help="whether the input image is broken, valid for Face Inpainting. When set to True, the output will be the 'fixed' image. When set to False, the image will randomly add brush strokes to simulate a broken image, and the output will be broken + fixed image")
    def predict(self, image, task='Face Restoration', output_individual=False, broken_image=True):
        """Run the selected GPEN task on *image* and return the output file path."""
        out_path = Path(tempfile.mkdtemp()) / 'out.png'
        if task == 'Face Restoration':
            im = cv2.imread(str(image), cv2.IMREAD_COLOR)
            assert isinstance(im, np.ndarray), 'input filename error'
            im = cv2.resize(im, (0, 0), fx=2, fy=2)  # upscale 2x before enhancement
            img, orig_faces, enhanced_faces = self.faceenhancer.process(im)
            cv2.imwrite(str(out_path), img)
            if output_individual:
                # Bundle the whole image plus every enhanced face into a zip.
                zip_folder = 'out_zip'
                os.makedirs(zip_folder, exist_ok=True)
                out_path = Path(tempfile.mkdtemp()) / 'out.zip'
                try:
                    cv2.imwrite(os.path.join(zip_folder, 'whole_image.jpg'), img)
                    for m, ef in enumerate(enhanced_faces):
                        cv2.imwrite(os.path.join(zip_folder, f'face_{m}.jpg'), ef)
                    img_list = sorted(glob.glob(os.path.join(zip_folder, '*')))
                    with ZipFile(str(out_path), 'w') as zipfile:
                        for img in img_list:
                            zipfile.write(img)
                finally:
                    # Always remove the scratch folder, even on write failure.
                    clean_folder(zip_folder)
        elif task == 'Face Colorization':
            grayf = cv2.imread(str(image), cv2.IMREAD_GRAYSCALE)
            grayf = cv2.cvtColor(grayf, cv2.COLOR_GRAY2BGR)  # colorizer expects 3 channels
            colorf = self.facecolorizer.process(grayf)
            cv2.imwrite(str(out_path), colorf)
        else:  # Face Inpainting
            originf = cv2.imread(str(image), cv2.IMREAD_COLOR)
            brokenf = originf
            if not broken_image:
                # Simulate damage by painting random brush strokes on the input.
                brokenf = np.asarray(brush_stroke_mask(Image.fromarray(originf)))
            completef = self.faceinpainter.process(brokenf)
            # NOTE(review): cv2.resize expects (width, height) but shape[:2]
            # is (height, width) — only safe for square outputs; confirm.
            brokenf = cv2.resize(brokenf, completef.shape[:2])
            out_img = completef if broken_image else np.hstack((brokenf, completef))
            cv2.imwrite(str(out_path), out_img)
        return out_path
def run_test(case, m):
    """Translate component *m* with the L2 structural translator and check the
    generated port/wire/connection/struct declarations against the case refs."""
    m.elaborate()
    # Patch the reserved-keyword check to use the shared verilog keyword set.
    VStructuralTranslatorL2.is_verilog_reserved = lambda s, x: x in verilog_reserved
    translator = VStructuralTranslatorL2(m)
    translator.clear(m)
    translator._rtlir_tr_unpacked_q = deque()
    translator.translate_structural(m)
    structural = translator.structural
    check_eq(structural.decl_ports[m], case.REF_PORT)
    check_eq(structural.decl_wires[m], case.REF_WIRE)
    check_eq(structural.connections[m], case.REF_CONN)
    # The most recently declared struct type must match the reference.
    struct_types = structural.decl_type_struct
    assert list(struct_types.keys())[-1] == case.REF_STRUCT[0]
    check_eq(list(struct_types.values())[-1]['def'], case.REF_STRUCT[1])