def print_exclusion_list():
    current = ops.env.get(ops.survey.EXCLUDE, addr='')
    if current is not None:
        current = json.loads(current)
        if len(current[0]) == 0:
            disp = None
        else:
            disp = [[i] for i in current]
    if current is not None:
        pprint(disp, header=['Survey Exclusions'])
    else:
        print('No exclusions.')
class CellAlignment:
    def __init__(self, left, right, left_orientations, right_orientations,
                 left_unique_id, right_unique_id):
        self.left = left
        self.right = right
        left = left.copy()
        right = right.copy()
        if not isinstance(left_orientations, str):
            left['mm_o'] = left_orientations
            left_orientations = 'mm_o'
        self.left_orientations = left[left_orientations]
        if not isinstance(right_orientations, str):
            right['mm_o'] = right_orientations
            right_orientations = 'mm_o'
        self.right_orientations = right[right_orientations]
        self.left_unique_id = left[left_unique_id]
        self.right_unique_id = right[right_unique_id]
        comp = left[[left_unique_id, left_orientations]].merge(
            right[[right_unique_id, right_orientations]],
            left_on=left_unique_id, right_on=right_unique_id, how='left')
        if left_orientations == right_orientations:
            left_orientations = left_orientations + '_x'
            right_orientations = right_orientations + '_y'
        self.series = np.absolute(comp[left_orientations] - comp[right_orientations])
        self.series.index = left.index
def artist_space_transforms(word):
    new_wordlist = []
    if ' ' in word:
        if Config.ARTIST_SPLIT_BY_WORD:
            new_wordlist.extend(word.split(' '))
        new_wordlist.append(word.replace(' ', ''))
        if Config.ARTIST_SPACE_REPLACEMENT and Config.SPACE_REPLACEMENT_CHARSET:
            for character in Config.SPACE_REPLACEMENT_CHARSET:
                new_wordlist.append(word.replace(' ', character))
    return new_wordlist
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, neptune=None):
    model.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    lr_scheduler = None
    if epoch == 0:
        warmup_factor = 1.0 / 1000
        warmup_iters = min(1000, len(data_loader) - 1)
        lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
    for images, targets in metric_logger.log_every(data_loader, print_freq, header):
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all processes for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()
        if neptune is not None:
            neptune.log_metric('train/loss', loss_value)
        if not math.isfinite(loss_value):
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        if lr_scheduler is not None:
            lr_scheduler.step()
        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
def action_detection(sentence, role):
    action_all = []
    action = {}
    entity_all = json.load(open('entity_all.txt', 'r', encoding='utf-8'), strict=False)
    for slot in entity_all.keys():
        values = re.sub('\\s', '', entity_all['' + slot]).split(',')
        for value in values:
            if re.findall(value, sentence):
                action['slot'] = slot
                action['aspect'] = value
    # Dead code kept as a block string, as in the source.
    """
    disease_match = open('../../:disease_new.txt', 'r', encoding='utf-8').read().split(',')
    medicine_match = open('../../medicine_new.txt', 'r', encoding='utf-8').read().split(',')
    check_item_match = open('../../check_item_new.txt', 'r', encoding='utf-8').read().split(',')
    symptom_match = open('../../symptom_new.txt', 'r', encoding='utf-8').read().split(',')

    for disease in disease_match:
        if re.findall(disease, sentence):
            action['slot'] = 'disease'
            action['aspect'] = disease

    for symptom in symptom_match:
        if re.findall(symptom, sentence):
            action['slot'] = 'symptom'
            action['aspect'] = symptom

    for medicine in medicine_match:
        if re.findall(medicine, sentence):
            action['slot'] = 'disease'
            action['aspect'] = medicine

    for check_item in check_item_match:
        if re.findall(check_item, sentence):
            action['slot'] = 'check_item'
            action['aspect'] = check_item
    """
    # The pattern strings below lost their original non-ASCII (Chinese)
    # characters in extraction; they are kept as-is rather than guessed.
    aa = '?||?|?|?|||'
    bb = '().?()'
    rem = ''
    cc = '||||ok||'
    t1 = '().*?(||?|?)'
    t2 = '(,|||?)(|||)|^(|||||)'
    t3 = '|'
    t4 = '([0-9]*|[])(|||||)'
    dep = ''
    degree = '||||'
    fre = '([0-9]*|[])()|'
    br = ''
    dose = 'ml||||'
    pr = 'sd'
    effect = '|'
    dh = ''
    pa = ''
    se = ' '
    mc = ''
    mp = '([][]|[])'
    tm = '|([0-9]*)'
    if re.search(aa, sentence):
        action['intent'] = 'Inquire'
    if role == 'doctor' and 'slot' in action.keys() and action['slot'] in recommend_slot:
        action['intent'] = 'Recommend'
    if role == 'doctor' and 'slot' in action.keys() and action['slot'] == 'disease':
        action['intent'] = 'Diagnosis'
    if re.search(cc, sentence):
        action['intent'] = 'Chitchat'
    if re.search(t1, sentence) or re.search(t2, sentence) or re.search(t3, sentence):
        action['intent'] = 'Inquire'
        action['slot'] = 'time'
    if re.search(t4, sentence):
        action['slot'] = 'time'
        action['value'] = re.search(t4, sentence).group()
    if re.search(dep, sentence):
        action['slot'] = 'department'
        action['aspect'] = re.search(dep, sentence).group()
    if re.search(degree, sentence):
        action['slot'] = 'degree'
        action['value'] = re.search(degree, sentence).group()
    if re.search(fre, sentence):
        action['slot'] = 'frequency'
        action['value'] = re.search(fre, sentence).group()
    if re.search(br, sentence):
        action['slot'] = 'rang_body'
        action['aspect'] = re.search(br, sentence).group()
    if re.search(dose, sentence):
        action['slot'] = 'dose'
        action['value'] = re.search(dose, sentence).group()
    if re.search(mp, sentence):
        action['slot'] = 'medicine_place'
        action['aspect'] = re.search(mp, sentence).group()
    if re.search(tm, sentence):
        action['slot'] = 'temperature'
        action['value'] = re.search(tm, sentence).group()
    if 'intent' not in action.keys():
        action['intent'] = 'Inform'
    return action
class GCSMirrorConfig(BaseModelExtended):
    bucket_uri: str

    # Decorator reconstructed from context: the stripped ('bucket_uri')
    # residue and the (cls, value) signature match a pydantic validator.
    @validator('bucket_uri')
    def check_uri_format(cls, value: str):
        if not value.startswith('gs://'):
            raise ValueError(
                f'Got invalid value "{value}" for bucket_uri. '
                'Expected a URI that starts with "gs://".')
        return value
def parse_args():
    parser = argparse.ArgumentParser(description='MMAction2 demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('video', help='video file/url or rawframes directory')
    parser.add_argument('label', help='label file')
    parser.add_argument('--use-frames', default=False, action='store_true',
                        help='whether to use rawframes as input')
    parser.add_argument('--device', type=str, default='cuda:0',
                        help='CPU/CUDA device option')
    parser.add_argument('--fps', default=30, type=int,
                        help='specify fps value of the output video when using rawframes '
                             'to generate file')
    parser.add_argument('--font-size', default=20, type=int,
                        help='font size of the label text in output video')
    parser.add_argument('--font-color', default='white',
                        help='font color of the label text in output video')
    parser.add_argument('--target-resolution', nargs=2, default=None, type=int,
                        help='Target resolution (w, h) for resizing the frames when using a '
                             'video as input. If either dimension is set to -1, the frames '
                             'are resized by keeping the existing aspect ratio')
    parser.add_argument('--resize-algorithm', default='bicubic',
                        help='resize algorithm applied to generate video')
    parser.add_argument('--out-filename', default=None, help='output filename')
    args = parser.parse_args()
    return args
# Patch decorators reconstructed: their `@mock.patch` prefixes were lost in
# extraction. mock.patch applies bottom-up, so the decorator closest to the
# function supplies the first mock argument.
@mock.patch('%s.visualize_utils.mmcv.imwrite' % __name__)
@mock.patch('%s.visualize_utils.mmcv.imshow' % __name__)
def test_imshow_pred_boundary(mock_imshow, mock_imwrite):
    img = './tests/data/test_img1.jpg'
    boundaries_with_scores = [[0, 0, 1, 0, 1, 1, 0, 1, 1]]
    labels = [1]
    file = tempfile.NamedTemporaryFile().name
    visualize_utils.imshow_pred_boundary(img, boundaries_with_scores, labels,
                                         show=True, out_file=file)
    mock_imwrite.assert_called_once()
    mock_imshow.assert_called_once()
def test_constant_init():
    conv_module = nn.Conv2d(3, 16, 3)
    constant_init(conv_module, 0.1)
    assert conv_module.weight.allclose(torch.full_like(conv_module.weight, 0.1))
    assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias))
    conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
    constant_init(conv_module_no_bias, 0.1)
    # Check the bias-free module itself, not the first module again.
    assert conv_module_no_bias.weight.allclose(
        torch.full_like(conv_module_no_bias.weight, 0.1))
# Decorators reconstructed from context: a nox session parametrized over
# Python versions and over how the package is installed.
@session(python=['3.8', '3.9', '3.10', '3.11', '3.12', 'pypy3'])
@nox.parametrize('installable', INSTALLABLE)
def tests(session, installable):
    env = dict(JSON_SCHEMA_TEST_SUITE=str(ROOT / 'json'))
    session.install('virtue', installable)
    if session.posargs and session.posargs[0] == 'coverage':
        if len(session.posargs) > 1 and session.posargs[1] == 'github':
            posargs = session.posargs[2:]
            github = os.environ['GITHUB_STEP_SUMMARY']
        else:
            posargs, github = session.posargs[1:], None
        session.install('coverage[toml]')
        session.run('coverage', 'run', *posargs, '-m', 'virtue', PACKAGE, env=env)
        if github is None:
            session.run('coverage', 'report')
        else:
            with open(github, 'a') as summary:
                summary.write('### Coverage\n\n')
                summary.flush()
                session.run('coverage', 'report', '--format=markdown', stdout=summary)
    else:
        session.run('virtue', *session.posargs, PACKAGE, env=env)
def crop(img_path, write_path, bbox, size=480, overwrite=False):
    if osp.exists(write_path) and not overwrite:
        print(write_path, 'already exists')
        return
    os.makedirs(osp.join('/', *write_path.split('/')[:-1]), exist_ok=True)
    crop, _ = crop_board(img_path, bbox)
    if size != 'full':
        crop = cv2.resize(crop, (size, size))
    cv2.imwrite(write_path, crop)
    print('Wrote', write_path)
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('schedule', '0045_scheduleitem_youtube_video_id'),
    ]

    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=[
                migrations.RunSQL(
                    sql='DROP INDEX IF EXISTS schedule_scheduleitem_additional_speakers_user_id_ecd7e435;',
                    reverse_sql='CREATE INDEX schedule_scheduleitem_additional_speakers_user_id_ecd7e435 '
                                'ON schedule_scheduleitem_additional_speakers USING btree (user_id);'),
                migrations.RunSQL(
                    sql='DROP INDEX IF EXISTS schedule_scheduleitemstar_user_id_0f52424d;',
                    reverse_sql='CREATE INDEX schedule_scheduleitemstar_user_id_0f52424d '
                                'ON schedule_scheduleitemstar USING btree (user_id);'),
                migrations.AlterField(
                    model_name='scheduleitemadditionalspeaker', name='user_id',
                    field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE,
                                            related_name='+', to=settings.AUTH_USER_MODEL,
                                            verbose_name='user', db_column='user_id')),
                migrations.AlterField(
                    model_name='scheduleitemattendee', name='user_id',
                    field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE,
                                            related_name='+', to=settings.AUTH_USER_MODEL,
                                            verbose_name='user', db_column='user_id')),
                migrations.AlterField(
                    model_name='scheduleitemstar', name='user_id',
                    field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE,
                                            related_name='+', to=settings.AUTH_USER_MODEL,
                                            verbose_name='user', db_column='user_id')),
            ],
            state_operations=[
                migrations.AlterUniqueTogether(name='scheduleitemadditionalspeaker', unique_together=set()),
                migrations.AlterUniqueTogether(name='scheduleitemattendee', unique_together=set()),
                migrations.AlterUniqueTogether(name='scheduleitemstar', unique_together=set()),
                migrations.AddField(
                    model_name='scheduleitemadditionalspeaker', name='user',
                    field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE,
                                            related_name='+', to=settings.AUTH_USER_MODEL,
                                            verbose_name='user'),
                    preserve_default=False),
                migrations.AddField(
                    model_name='scheduleitemattendee', name='user',
                    field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE,
                                            related_name='+', to=settings.AUTH_USER_MODEL,
                                            verbose_name='user'),
                    preserve_default=False),
                migrations.AddField(
                    model_name='scheduleitemstar', name='user',
                    field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE,
                                            related_name='+', to=settings.AUTH_USER_MODEL,
                                            verbose_name='user'),
                    preserve_default=False),
                migrations.AlterUniqueTogether(name='scheduleitemadditionalspeaker',
                                               unique_together={('user', 'scheduleitem')}),
                migrations.AlterUniqueTogether(name='scheduleitemattendee',
                                               unique_together={('user', 'schedule_item')}),
                migrations.AlterUniqueTogether(name='scheduleitemstar',
                                               unique_together={('user', 'schedule_item')}),
                migrations.RemoveField(model_name='scheduleitemadditionalspeaker', name='user_id'),
                migrations.RemoveField(model_name='scheduleitemattendee', name='user_id'),
                migrations.RemoveField(model_name='scheduleitemstar', name='user_id'),
            ],
        ),
    ]
def token_f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
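A minimal usage sketch for token_f1_score. The real normalize_answer is not shown in this snippet, so it is stubbed here as plain lowercasing; the stub and the sample strings are illustrative only.

from collections import Counter

def normalize_answer(s):
    # Stand-in for the SQuAD-style normalizer assumed by token_f1_score.
    return s.lower().strip()

# Two of three tokens overlap: precision = recall = 2/3, so F1 = 2/3.
print(token_f1_score('the quick fox', 'The quick dog'))  # ~0.667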
def parse_args(args: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description='Check URLs in asciidoctor files')
    parser.add_argument('--verbose', '-v', action='store_const', const=True, default=False,
                        help='Output all found links, not only failed ones')
    parser.add_argument('--timeout', '-t', type=int, default=2,
                        help='Timeout in seconds (default 2)')
    parser.add_argument('adoc', type=argparse.FileType('r'), nargs='+',
                        help='Names of asciidoctor files')
    return parser.parse_args(args)
class EvalUnit(AppStateMixin, _OnExceptionMixin, Generic[TEvalData], ABC):
    def __init__(self) -> None:
        super().__init__()
        self.eval_progress = Progress()

    def on_eval_start(self, state: State) -> None:
        pass

    def on_eval_epoch_start(self, state: State) -> None:
        pass

    # Restored as abstract: the body is `...` and the class derives from
    # ABC, so subclasses are expected to implement eval_step.
    @abstractmethod
    def eval_step(self, state: State, data: TEvalData) -> Any:
        ...

    def on_eval_epoch_end(self, state: State) -> None:
        pass

    def on_eval_end(self, state: State) -> None:
        pass

    def get_next_eval_batch(self, state: State,
                            data_iter: Iterator[object]) -> Union[Iterator[TEvalData], TEvalData]:
        pass_data_iter_to_step = _step_requires_iterator(self.eval_step)
        if pass_data_iter_to_step:
            return cast(Iterator[TEvalData], data_iter)
        return cast(TEvalData, next(data_iter))
def get_optimizer(cfg, parameters):
    optimizer = None
    if cfg.TRAIN.OPTIMIZER == 'sgd':
        optimizer = optim.SGD(parameters, lr=cfg.TRAIN.LR, momentum=cfg.TRAIN.MOMENTUM,
                              weight_decay=cfg.TRAIN.WD, nesterov=cfg.TRAIN.NESTEROV)
    elif cfg.TRAIN.OPTIMIZER == 'adam':
        optimizer = optim.Adam(parameters, lr=cfg.TRAIN.LR)
    return optimizer
def get_identifications_by_id(identification_id: MultiInt, **params) -> JsonResponse:
    response = get(f'{API_V1}/identifications', ids=identification_id, **params)
    identifications = response.json()
    identifications['results'] = convert_all_timestamps(identifications['results'])
    return identifications
class ServiceInventoryTests(unittest.TestCase):
    def setUp(self):
        self.mock_filewatcher = mock.Mock(spec=FileWatcher)
        self.inventory = service_discovery.ServiceInventory('/whatever')
        self.inventory._filewatcher = self.mock_filewatcher

    def _set_inventory_contents(self, text):
        parsed = service_discovery._parse(StringIO(text))
        self.mock_filewatcher.get_data.return_value = parsed

    def test_load_backends(self):
        self._set_inventory_contents(TEST_INVENTORY_ONE)
        backends = self.inventory.get_backends()
        self.assertEqual(len(backends), 1)
        self.assertEqual(backends[0].id, 205)
        self.assertEqual(backends[0].name, 'i-258fc8b6')
        self.assertEqual(backends[0].endpoint.address.host, '10.0.1.2')
        self.assertEqual(backends[0].endpoint.address.port, 9090)
        self.assertEqual(backends[0].weight, 1)
        self._set_inventory_contents(TEST_INVENTORY_TWO)
        backends = self.inventory.get_backends()
        self.assertEqual(len(backends), 3)

    def test_single_get(self):
        self._set_inventory_contents(TEST_INVENTORY_ONE)
        backend = self.inventory.get_backend()
        self.assertEqual(backend.id, 205)

    def test_no_backends_available(self):
        self.mock_filewatcher.get_data.side_effect = WatchedFileNotAvailableError('', None)
        with self.assertRaises(service_discovery.NoBackendsAvailableError):
            self.inventory.get_backend()
        self.assertEqual(self.inventory.get_backends(), [])
        self.mock_filewatcher.get_data.side_effect = None
        self._set_inventory_contents('[]')
        with self.assertRaises(service_discovery.NoBackendsAvailableError):
            self.inventory.get_backend()
class TestCephCollectorGettingStats(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('CephCollector', {'interval': 10})
        self.collector = ceph.CephCollector(config, None)

    def test_import(self):
        self.assertTrue(ceph.CephCollector)

    # The decorators below lost their `@` prefixes in extraction; they are
    # restored to match the mock arguments and the availability guard.
    @_only_if_subprocess_check_output_is_available
    @patch('subprocess.check_output')
    def test_load_works(self, check_output):
        expected = {'a': 1, 'b': 2}
        check_output.return_value = json.dumps(expected)
        actual = self.collector._get_stats_from_socket('a_socket_name')
        check_output.assert_called_with(
            ['/usr/bin/ceph', '--admin-daemon', 'a_socket_name', 'perf', 'dump'])
        self.assertEqual(actual, expected)

    @_only_if_subprocess_check_output_is_available
    @patch('subprocess.check_output')
    def test_ceph_command_fails(self, check_output):
        check_output.side_effect = subprocess.CalledProcessError(255, ['/usr/bin/ceph'], 'error!')
        actual = self.collector._get_stats_from_socket('a_socket_name')
        check_output.assert_called_with(
            ['/usr/bin/ceph', '--admin-daemon', 'a_socket_name', 'perf', 'dump'])
        self.assertEqual(actual, {})

    @_only_if_subprocess_check_output_is_available
    @patch('json.loads')
    @patch('subprocess.check_output')
    def test_json_decode_fails(self, check_output, loads):
        input = {'a': 1, 'b': 2}
        check_output.return_value = json.dumps(input)
        loads.side_effect = ValueError('bad data')
        actual = self.collector._get_stats_from_socket('a_socket_name')
        check_output.assert_called_with(
            ['/usr/bin/ceph', '--admin-daemon', 'a_socket_name', 'perf', 'dump'])
        loads.assert_called_with(json.dumps(input))
        self.assertEqual(actual, {})
def get_hash(modname, seed=None):
    'From '  # docstring truncated in the source; left as-is
    env = os.environ.copy()
    if seed is not None:
        env['PYTHONHASHSEED'] = str(seed)
    else:
        env.pop('PYTHONHASHSEED', None)
    cmd_line = [sys.executable, modname]
    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    out, err = p.communicate()
    return (out, err, p.returncode)
class Timer:
    def __init__(self, callback: SignalHandler, timer: int = TIMER,
                 interval: float = INTERVAL_SECONDS,
                 timer_signal: int = TIMER_SIGNAL) -> None:
        assert callable(callback), 'callback must be callable'
        # Store the callback before installing the handler so a signal that
        # fires immediately cannot hit an uninitialized attribute.
        self._callback = callback
        signal.signal(timer_signal, self.callback)
        signal.setitimer(timer, interval, interval)

    def callback(self, signum: int, stack: FrameType) -> None:
        self._callback(signum, stack)

    def stop(self) -> None:
        # Disarm the handler first, then drop the reference, so a late
        # signal cannot run through a deleted attribute.
        signal.signal(TIMER_SIGNAL, signal.SIG_IGN)
        del self._callback

    def __bool__(self) -> bool:
        return True
def raiden_state_changes_search_for_item(raiden: RaidenService, item_type: Type[SC],
                                         attributes: Mapping) -> Optional[SC]:
    assert raiden.wal, 'RaidenService must be started'
    for item in raiden.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES):
        if isinstance(item, item_type) and check_nested_attrs(item, attributes):
            return item
    return None
class JSONStoreDisk(ContentStore, metaclass=ABCMeta):
    def __init__(self, in_folder, key, msg, msg_args) -> None:
        self.in_folder = in_folder
        self.key = key
        self.msg = msg
        self.msg_args = (*msg_args, self.file)

    # `file` is read as an attribute above, so its @property decorator was
    # evidently lost in extraction; likewise `locked` yields, so it needs
    # @contextmanager.
    @property
    def file(self):
        return self.in_folder.path / f'{self.key}.json'

    def exists(self):
        return self.file.exists()

    def read(self):
        data, bad_format = None, False
        try:
            data = json.loads(self.file.read_text(encoding='utf-8'))
        except ValueError:
            bad_format = True
        except Exception:
            pass
        else:
            logging.debug('got %s from %s', self.msg, self.msg_args)
            return data
        if bad_format:
            with suppress(OSError):
                self.remove()
        return None

    def remove(self):
        self.file.unlink()
        logging.debug('removed %s at %s', self.msg, self.msg_args)

    @contextmanager
    def locked(self):
        with self.in_folder.lock_for_key(self.key):
            yield

    def write(self, content):
        folder = self.file.parent
        folder.mkdir(parents=True, exist_ok=True)
        self.file.write_text(json.dumps(content, sort_keys=True, indent=2), encoding='utf-8')
        logging.debug('wrote %s at %s', self.msg, self.msg_args)
class Message(object):
    MSG_ARG_KEY_OPERATION = 'operation'
    MSG_ARG_KEY_TYPE = 'msg_type'
    MSG_ARG_KEY_SENDER = 'sender'
    MSG_ARG_KEY_RECEIVER = 'receiver'

    MSG_OPERATION_SEND = 'send'
    MSG_OPERATION_RECEIVE = 'receive'
    MSG_OPERATION_BROADCAST = 'broadcast'
    MSG_OPERATION_REDUCE = 'reduce'

    MSG_ARG_KEY_MODEL_PARAMS = 'model_params'

    def __init__(self, type=0, sender_id=0, receiver_id=0):
        self.type = type
        self.sender_id = sender_id
        self.receiver_id = receiver_id
        self.msg_params = {}
        self.msg_params[Message.MSG_ARG_KEY_TYPE] = type
        self.msg_params[Message.MSG_ARG_KEY_SENDER] = sender_id
        self.msg_params[Message.MSG_ARG_KEY_RECEIVER] = receiver_id

    def init(self, msg_params):
        self.msg_params = msg_params

    def init_from_json_string(self, json_string):
        self.msg_params = json.loads(json_string)
        self.type = self.msg_params[Message.MSG_ARG_KEY_TYPE]
        self.sender_id = self.msg_params[Message.MSG_ARG_KEY_SENDER]
        self.receiver_id = self.msg_params[Message.MSG_ARG_KEY_RECEIVER]

    def get_sender_id(self):
        return self.sender_id

    def get_receiver_id(self):
        return self.receiver_id

    def add_params(self, key, value):
        self.msg_params[key] = value

    def get_params(self):
        return self.msg_params

    def add(self, key, value):
        self.msg_params[key] = value

    def get(self, key):
        return self.msg_params[key]

    def get_type(self):
        return self.msg_params[Message.MSG_ARG_KEY_TYPE]

    def to_string(self):
        return self.msg_params

    def to_json(self):
        json_string = json.dumps(self.msg_params)
        print('json string size = ' + str(sys.getsizeof(json_string)))
        return json_string

    def get_content(self):
        print_dict = self.msg_params.copy()
        msg_str = str(self.__to_msg_type_string()) + ': ' + str(print_dict)
        return msg_str

    def __to_msg_type_string(self):
        type = self.msg_params[Message.MSG_ARG_KEY_TYPE]
        return type
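A brief round-trip sketch for Message, assuming the surrounding module already imports json and sys (both are used by the class itself); the parameter values are made up.

msg = Message(type=1, sender_id=0, receiver_id=2)
msg.add_params(Message.MSG_ARG_KEY_MODEL_PARAMS, [0.1, 0.2])
wire = msg.to_json()  # also prints the serialized size

received = Message()
received.init_from_json_string(wire)
assert received.get_sender_id() == 0
assert received.get(Message.MSG_ARG_KEY_MODEL_PARAMS) == [0.1, 0.2]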
def conv2d_same(x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None,
                stride: Tuple[int, int] = (1, 1), padding: Tuple[int, int] = (0, 0),
                dilation: Tuple[int, int] = (1, 1), groups: int = 1):
    x = pad_same(x, weight.shape[-2:], stride, dilation)
    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
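A small shape check, assuming pad_same implements TensorFlow-style "SAME" padding as its name suggests; with stride 1 the spatial size is preserved.

import torch

x = torch.randn(1, 3, 7, 7)     # NCHW input
w = torch.randn(8, 3, 3, 3)     # 8 output channels, 3x3 kernel
y = conv2d_same(x, w)           # stride (1, 1) by default
assert y.shape == (1, 8, 7, 7)  # spatial dims unchanged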
class RestrictedAlgoWithoutCheckSetDoNotOrderList(TradingAlgorithm):
    def initialize(self, symbol):
        self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
        self.set_do_not_order_list(self.rl.leveraged_etf_list)
        self.order_count = 0
        self.sid = self.symbol(symbol)

    def handle_data(self, data):
        self.order(self.sid, 100)
        self.order_count += 1
class ProviderOf(Generic[T]):
    def __init__(self, injector: Injector, interface: Type[T]):
        self._injector = injector
        self._interface = interface

    def __repr__(self) -> str:
        return '%s(%r, %r)' % (type(self).__name__, self._injector, self._interface)

    def get(self) -> T:
        return self._injector.get(self._interface)
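A usage sketch with the injector package, which the Injector type hints suggest this class wraps; plain classes are auto-constructible there, so no explicit binding is needed.

from injector import Injector

class Service:
    pass

provider = ProviderOf(Injector(), Service)
service = provider.get()  # instance is created only when get() is called
assert isinstance(service, Service)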
def optlist_to_menuoptions(treestr, optlist, index, mark_category, go_back):
    menuoptions = []
    for option in optlist:
        index_to_add = option[0]
        menuitem = {}
        keystr = index_to_selection(treestr, index_to_add)
        if mark_category and is_category(treestr, index_to_add):
            menuitem['key'] = [keystr + ' [+]', keystr]
        else:
            menuitem['key'] = keystr
        desc = index_to_selection(treestr, index_to_add, desc=True)
        if desc:
            menuitem['desc'] = desc
        menuitem['goto'] = ['menunode_treeselect', {'newindex': index_to_add}]
        menuoptions.append(menuitem)
    if index is not None and go_back:
        gobackitem = {
            'key': ['<< Go Back', 'go back', 'back'],
            'desc': 'Return to the previous menu.',
            'goto': ['menunode_treeselect', {'newindex': go_up_one_category(treestr, index)}],
        }
        menuoptions.append(gobackitem)
    return menuoptions
def test_model_update() -> None:
    from pynamodb.attributes import NumberAttribute, UnicodeAttribute
    from pynamodb.models import Model

    class MyModel(Model):
        my_attr = NumberAttribute()
        my_str_attr = UnicodeAttribute()

    my_model = MyModel()
    my_model.update(actions=[
        MyModel.my_attr.set(MyModel.my_attr + 123),
        MyModel.my_attr.set(123 + MyModel.my_attr),
        MyModel.my_attr.set(MyModel.my_attr - 123),
        MyModel.my_attr.set(123 - MyModel.my_attr),
        MyModel.my_attr.set(MyModel.my_attr | 123),
    ])
    _ = MyModel.my_attr.set('foo')
    _ = MyModel.my_attr.set(MyModel.my_str_attr)
# Registration decorator reconstructed: the bare `(QuestionSet)` residue
# matches Django's @admin.register.
@admin.register(QuestionSet)
class QuestionSetAdmin(admin.ModelAdmin):
    form = QuestionSetAdminForm
    inlines = (QuestionSetQuestionSetInline, QuestionSetQuestionInline)
    search_fields = ['uri', *get_language_fields('title'), *get_language_fields('help')]
    list_display = ('uri', 'attribute', 'is_collection')
    readonly_fields = ('uri',)
    list_filter = ('pages__sections__catalogs', 'pages__sections', 'pages', 'is_collection')
    filter_horizontal = ('editors', 'conditions')
class GraphConvolution(nn.Module):
    def __init__(self, input_dim, output_dim, support, act_func=None,
                 featureless=False, dropout_rate=0.0, bias=False):
        super(GraphConvolution, self).__init__()
        self.support = support
        self.featureless = featureless
        for i in range(len(self.support)):
            setattr(self, 'W{}'.format(i), nn.Parameter(torch.randn(input_dim, output_dim)))
        if bias:
            self.b = nn.Parameter(torch.zeros(1, output_dim))
        self.act_func = act_func
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        x = self.dropout(x)
        for i in range(len(self.support)):
            if self.featureless:
                pre_sup = getattr(self, 'W{}'.format(i))
            else:
                pre_sup = x.mm(getattr(self, 'W{}'.format(i)))
            if i == 0:
                out = self.support[i].mm(pre_sup)
            else:
                out += self.support[i].mm(pre_sup)
        if self.act_func is not None:
            out = self.act_func(out)
        self.embedding = out
        return out
def rtn_strncpy(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    logger.debug('strncpy hooked')
    dst = pstate.get_argument_value(0)
    src = pstate.get_argument_value(1)
    cnt = pstate.get_argument_value(2)
    pstate.concretize_argument(2)
    for index in range(cnt):
        src_sym = pstate.read_symbolic_memory_byte(src + index)
        pstate.write_symbolic_memory_byte(dst + index, src_sym)
        if src_sym.getAst().evaluate() == 0:
            pstate.push_constraint(src_sym.getAst() == 0)
            break
        else:
            pstate.push_constraint(src_sym.getAst() != 0)
    return dst
class F19_NetworkData(F16_NetworkData):
    removedKeywords = F16_NetworkData.removedKeywords
    removedAttrs = F16_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        F16_NetworkData.__init__(self, *args, **kwargs)
        self.bondslaves = kwargs.get('bondslaves', '')
        self.bondopts = kwargs.get('bondopts', '')
        self.vlanid = kwargs.get('vlanid', '')
        self.ipv6gateway = kwargs.get('ipv6gateway', '')

    def _getArgsAsStr(self):
        retval = F16_NetworkData._getArgsAsStr(self)
        if self.bondslaves:
            retval += ' --bondslaves=%s' % self.bondslaves
        if self.bondopts:
            retval += ' --bondopts=%s' % self.bondopts
        if self.vlanid:
            retval += ' --vlanid=%s' % self.vlanid
        if self.ipv6gateway:
            retval += ' --ipv6gateway=%s' % self.ipv6gateway
        return retval
# Registry decorator reconstructed: the bare `_module()` residue is assumed
# to be the mmseg convention @HEADS.register_module() for decode heads.
@HEADS.register_module()
class FPNHead(BaseDecodeHead):
    def __init__(self, feature_strides, **kwargs):
        super(FPNHead, self).__init__(input_transform='multiple_select', **kwargs)
        assert len(feature_strides) == len(self.in_channels)
        assert min(feature_strides) == feature_strides[0]
        self.feature_strides = feature_strides
        self.scale_heads = nn.ModuleList()
        for i in range(len(feature_strides)):
            head_length = max(1, int(np.log2(feature_strides[i]) - np.log2(feature_strides[0])))
            scale_head = []
            for k in range(head_length):
                scale_head.append(
                    ConvModule(self.in_channels[i] if k == 0 else self.channels,
                               self.channels, 3, padding=1, conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
                if feature_strides[i] != feature_strides[0]:
                    scale_head.append(nn.Upsample(scale_factor=2, mode='bilinear',
                                                  align_corners=self.align_corners))
            self.scale_heads.append(nn.Sequential(*scale_head))

    def forward(self, inputs):
        x = self._transform_inputs(inputs)
        output = self.scale_heads[0](x[0])
        for i in range(1, len(self.feature_strides)):
            output = output + resize(self.scale_heads[i](x[i]), size=output.shape[2:],
                                     mode='bilinear', align_corners=self.align_corners)
        output = self.cls_seg(output)
        return output
# The `_task(...)` residue is assumed to be fairseq-style task registration;
# @staticmethod/@classmethod are restored to match the method signatures.
@register_task(constants.MULTILINGUAL_TRANSLATION_TASK)
class PyTorchTranslateMultilingualTranslationTask(PyTorchTranslateMultiTask):
    @staticmethod
    def add_args(parser):
        PyTorchTranslateMultiTask.add_args(parser)
        parser.add_argument('--vocabulary', type=str, metavar='EXPR', action='append',
                            default=[],
                            help='Per-language vocabulary configuration. Path to vocabulary '
                                 'file must be in the format lang:path')
        parser.add_argument('--multilingual-train-text-file', type=str, metavar='EXPR',
                            action='append',
                            help='Path to train text file in the format '
                                 'src_lang-tgt_lang:source-path,target-path')
        parser.add_argument('--multilingual-eval-text-file', type=str, metavar='EXPR',
                            action='append',
                            help='Path to eval text file in the format '
                                 'src_lang-tgt_lang:source-path,target-path')
        parser.add_argument('--multilingual-train-binary-path', type=str, metavar='EXPR',
                            action='append',
                            help='Path to train binary file in the format '
                                 'src_lang-tgt_lang:source-path,target-path')
        parser.add_argument('--multilingual-eval-binary-path', type=str, metavar='EXPR',
                            action='append',
                            help='Path to eval binary file in the format '
                                 'src_lang-tgt_lang:source-path,target-path')

    def __init__(self, args, dicts, training):
        super().__init__(args, dicts, training)
        self.loss_weights = []

    @classmethod
    def setup_task(cls, args, **kwargs):
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        if args.source_lang is not None or args.target_lang is not None:
            if args.lang_pairs is not None:
                raise ValueError('--source-lang/--target-lang implies generation, '
                                 'which is incompatible with --lang-pairs')
            training = False
            args.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        else:
            training = True
            args.lang_pairs = args.lang_pairs.split(',')
            args.source_lang, args.target_lang = args.lang_pairs[0].split('-')
        dicts = tasks_utils.load_multilingual_vocabulary(args)
        return cls(args, dicts, training)

    def load_dataset(self, split, **kwargs):
        lang_pair_to_datasets = {}
        binary_path_arg = ('--multilingual-train-binary-path' if split == 'train'
                           else '--multilingual-eval-binary-path')
        binary_path_value = (self.args.multilingual_train_binary_path if split == 'train'
                             else self.args.multilingual_eval_binary_path)
        format_warning = (f'{binary_path_arg} has to be in the format '
                          'src_lang-tgt_lang:src_dataset_path,tgt_dataset_path')
        for path_config in binary_path_value:
            assert ':' in path_config, format_warning
            lang_pair, dataset_paths = path_config.split(':')
            assert '-' in lang_pair, format_warning
            assert ',' in dataset_paths, format_warning
            src_dataset_path, tgt_dataset_path = dataset_paths.split(',')
            lang_pair_to_datasets[lang_pair] = (src_dataset_path, tgt_dataset_path)
        for lang_pair in self.args.lang_pairs:
            assert lang_pair in lang_pair_to_datasets, \
                'Not all language pairs have dataset binary paths specified!'
        datasets = {}
        for lang_pair in self.args.lang_pairs:
            src, tgt = lang_pair.split('-')
            src_bin_path, tgt_bin_path = lang_pair_to_datasets[lang_pair]
            corpus = pytorch_translate_data.ParallelCorpusConfig(
                source=pytorch_translate_data.CorpusConfig(dialect=src, data_file=src_bin_path),
                target=pytorch_translate_data.CorpusConfig(dialect=tgt, data_file=tgt_bin_path))
            if self.args.log_verbose:
                print('Starting to load binarized data files.', flush=True)
            data_utils.validate_corpus_exists(corpus=corpus, split=split)
            tgt_dataset = pytorch_translate_data.InMemoryIndexedDataset.create_from_file(
                corpus.target.data_file)
            src_dataset = pytorch_translate_data.InMemoryIndexedDataset.create_from_file(
                corpus.source.data_file)
            datasets[lang_pair] = weighted_data.WeightedLanguagePairDataset(
                src=src_dataset, src_sizes=src_dataset.sizes, src_dict=self.dicts[src],
                tgt=tgt_dataset, tgt_sizes=tgt_dataset.sizes, tgt_dict=self.dicts[tgt],
                weights=None, left_pad_source=False)
        self.datasets[split] = RoundRobinZipDatasets(
            OrderedDict([(lang_pair, datasets[lang_pair])
                         for lang_pair in self.args.lang_pairs]),
            eval_key=(None if self.training
                      else f'{self.args.source_lang}-{self.args.target_lang}'))
        if self.args.log_verbose:
            print('Finished loading dataset', flush=True)
        print(f'| {split} {len(self.datasets[split])} examples')
def test_requirement_source_fifo():
    with TemporaryDirectory() as tmp_dir:
        fifo_path = Path(os.path.join(tmp_dir, 'fifo'))
        os.mkfifo(fifo_path)

        def write_to_fifo():
            with open(fifo_path, 'w') as f:
                f.write('flask==2.0.1')

        t = Thread(target=write_to_fifo)
        t.start()
        try:
            source = requirement.RequirementSource([fifo_path])
            specs = list(source.collect())
        finally:
            t.join()
        assert ResolvedDependency('Flask', Version('2.0.1')) in specs
class FC3_HardDrive(KickstartCommand):
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.biospart = kwargs.get('biospart', None)
        self.partition = kwargs.get('partition', None)
        self.dir = kwargs.get('dir', None)
        self.op = self._getParser()
        self.deleteRemovedAttrs()

    def __eq__(self, other):
        if not other:
            return False
        return (self.biospart == other.biospart and
                self.partition == other.partition and
                self.dir == other.dir)

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        retval = KickstartCommand.__str__(self)
        if not self.seen:
            return retval
        retval += '# Use hard drive installation media\n'
        if self.biospart is not None:
            retval += 'harddrive --dir=%s --biospart=%s\n' % (self.dir, self.biospart)
        else:
            retval += 'harddrive --dir=%s --partition=%s\n' % (self.dir, self.partition)
        return retval

    def _getParser(self):
        op = KSOptionParser(prog='harddrive', description="""
            Install from a directory of ISO images on a local drive, which must
            be either vfat or ext2. In addition to this directory, you must also
            provide the install.img in some way. You can either do this by
            booting off the boot.iso or by creating an images/ directory in the
            same directory as the ISO images and placing install.img in there.
            """, version=FC3)
        op.add_argument('--biospart', version=FC3,
                        help='BIOS partition to install from (such as 82p2).')
        op.add_argument('--partition', version=FC3,
                        help='Partition to install from (such as, sdb2).')
        op.add_argument('--dir', required=True, version=FC3, help="""
            Directory containing both the ISO images and the
            images/install.img. For example::

            ``harddrive --partition=hdb2 --dir=/tmp/install-tree``
            """)
        return op

    def parse(self, args):
        ns = self.op.parse_args(args=args, lineno=self.lineno)
        self.set_to_self(ns)
        if ((self.biospart is None and self.partition is None) or
                (self.biospart is not None and self.partition is not None)):
            raise KickstartParseError(
                _('One of biospart or partition options must be specified.'),
                lineno=self.lineno)
        return self
def get_all_tilted_square_lattice_specs(
        *, n_instances=10, n_repetitions=1000, min_side_length=2, max_side_length=8,
        side_length_step=2, macrocycle_depths=None,
        twoq_gate_name='sqrt_iswap') -> List[TiltedSquareLatticeLoschmidtSpec]:
    if macrocycle_depths is None:
        macrocycle_depths = np.arange(2, 8 + 1, 2)
    topologies = _get_all_tilted_square_lattices(
        min_side_length=min_side_length, max_side_length=max_side_length,
        side_length_step=side_length_step)
    return [
        TiltedSquareLatticeLoschmidtSpec(
            topology=topology, macrocycle_depth=macrocycle_depth, instance_i=instance_i,
            n_repetitions=n_repetitions, twoq_gate_name=twoq_gate_name)
        for topology, macrocycle_depth, instance_i in itertools.product(
            topologies, macrocycle_depths, range(n_instances))
    ]
class ExperimentPlanner3D_v21_32GB(ExperimentPlanner3D_v21):
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_v21_32GB, self).__init__(folder_with_cropped_data,
                                                           preprocessed_output_folder)
        self.data_identifier = 'nnFormerData_plans_v2.1_verybig'
        self.plans_fname = join(self.preprocessed_output_folder,
                                'nnFormerPlansv2.1_verybig_plans_3D.pkl')

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape,
                                 num_cases, num_modalities, num_classes):
        new_median_shape = np.round((original_spacing / current_spacing) * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases
        input_patch_size = 1 / np.array(current_spacing)
        input_patch_size /= input_patch_size.mean()
        input_patch_size *= (1 / min(input_patch_size)) * 512
        input_patch_size = np.round(input_patch_size).astype(int)
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]
        (network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp,
         shape_must_be_divisible_by) = get_pool_and_conv_props(
            current_spacing, input_patch_size, self.unet_featuremap_min_edge_length,
            self.unet_max_numpool)
        ref = Generic_UNet.use_this_for_batch_size_computation_3D * 32 / 8
        here = Generic_UNet.compute_approx_vram_consumption(
            new_shp, network_num_pool_per_axis, self.unet_base_num_features,
            self.unet_max_num_filters, num_modalities, num_classes, pool_op_kernel_sizes,
            conv_per_stage=self.conv_per_stage)
        while here > ref:
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            (_, _, _, _, shape_must_be_divisible_by_new) = get_pool_and_conv_props(
                current_spacing, tmp, self.unet_featuremap_min_edge_length,
                self.unet_max_numpool)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
            (network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp,
             shape_must_be_divisible_by) = get_pool_and_conv_props(
                current_spacing, new_shp, self.unet_featuremap_min_edge_length,
                self.unet_max_numpool)
            here = Generic_UNet.compute_approx_vram_consumption(
                new_shp, network_num_pool_per_axis, self.unet_base_num_features,
                self.unet_max_num_filters, num_modalities, num_classes, pool_op_kernel_sizes,
                conv_per_stage=self.conv_per_stage)
        input_patch_size = new_shp
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))
        max_batch_size = np.round(
            self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
            np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = max(1, min(batch_size, max_batch_size))
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[0]) > self.anisotropy_threshold
        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
def test_accessors():
    class SubTestObject:
        attr_obj = 1
        attr_char = 2

    class TestObject:
        basic_attr = 1
        begin_end = [1, 2, 3]
        d = {'operator[object]': 1, 'operator[char *]': 2}
        sub = SubTestObject()

        def func(self, x, *args):
            return self.basic_attr + x + sum(args)

    d = m.accessor_api(TestObject())
    assert d['basic_attr'] == 1
    assert d['begin_end'] == [1, 2, 3]
    assert d['operator[object]'] == 1
    assert d['operator[char *]'] == 2
    assert d['attr(object)'] == 1
    assert d['attr(char *)'] == 2
    assert d['missing_attr_ptr'] == 'raised'
    assert d['missing_attr_chain'] == 'raised'
    assert d['is_none'] is False
    assert d['operator()'] == 2
    assert d['operator*'] == 7
    assert d['implicit_list'] == [1, 2, 3]
    assert all(x in TestObject.__dict__ for x in d['implicit_dict'])

    assert m.tuple_accessor(tuple()) == (0, 1, 2)

    d = m.accessor_assignment()
    assert d['get'] == 0
    assert d['deferred_get'] == 0
    assert d['set'] == 1
    assert d['deferred_set'] == 1
    assert d['var'] == 99
def get_files(**kwargs):
    metadata_directory = kwargs.get('metadata_directory', '')
    package_root = kwargs.get('package_root', '')
    files = []
    for f in get_template_files(**kwargs):
        if str(f.path) == 'LICENSE.txt':
            files.append(File(Path(metadata_directory, 'licenses', f.path), f.contents))
        elif f.path.parts[-1] == '__about__.py':
            files.append(File(Path('zfoo.py'), f.contents))
    pth_file_name = f"_{kwargs['package_name']}.pth"
    loader_file_name = f"_editable_impl_{kwargs['package_name']}.py"
    files.extend((
        File(Path(pth_file_name), f"import _editable_impl_{kwargs['package_name']}"),
        File(Path(loader_file_name),
             f"""from editables.redirector import RedirectingFinder as F
F.install()
F.map_module({kwargs['package_name']!r}, {package_root!r})"""),
        File(Path(metadata_directory, 'WHEEL'),
             f"""Wheel-Version: 1.0
Generator: hatchling {__version__}
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
"""),
        File(Path(metadata_directory, 'METADATA'),
             f"""Metadata-Version: {DEFAULT_METADATA_VERSION}
Name: {kwargs['project_name']}
Version: 0.0.1
License-File: LICENSE.txt
Requires-Dist: editables~=0.3
"""),
    ))
    record_file = File(Path(metadata_directory, 'RECORD'), '')
    update_record_file_contents(record_file, files,
                                generated_files={pth_file_name, loader_file_name})
    files.append(record_file)
    return files
class HoverXRefBibtexDomainMixin(HoverXRefBaseDomain):
    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        textnode = super().resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
        if textnode is None:
            return textnode
        if any([
            self._is_ignored_ref(env, target),
            not (env.config.hoverxref_auto_ref
                 or typ in self.hoverxref_types
                 or 'cite' in env.config.hoverxref_domains),
        ]):
            return textnode
        refnode_index = textnode.first_child_matching_class(docutils.nodes.reference)
        # Compare against None explicitly: a match at child index 0 is falsy
        # but still a valid result.
        if refnode_index is not None:
            refnode = textnode.children[refnode_index]
            self._inject_hoverxref_data(env, refnode, typ)
        return textnode
# Both decorators lost their `@` prefixes in extraction; the parametrize is
# unambiguous, and `_on_failure` is assumed to be raiden's @raise_on_failure.
@raise_on_failure
@pytest.mark.parametrize('enable_rest_api', [True])
def test_get_notification(client, api_server_test_instance):
    notification = create_notification(api_server_test_instance)
    response = client.get(notifications_endpoint)
    assert response.get_json() == [{
        'id': notification.id,
        'summary': notification.summary,
        'body': notification.body,
        'urgency': notification.urgency,
    }]
class PathTestCase(TestCase):
    def test_document_path(self):
        path = Path('foo.bar')
        assert str(path) == 'foo.bar'
        assert repr(path) == "Path(['foo', 'bar'])"

    def test_attribute_name(self):
        path = Path(['foo.bar'])
        assert str(path) == "'foo.bar'"
        assert repr(path) == "Path(['foo.bar'])"

    def test_index_document_path(self):
        path = Path('foo.bar')[0]
        assert str(path) == 'foo.bar[0]'
        assert repr(path) == "Path(['foo', 'bar[0]'])"

    def test_index_attribute_name(self):
        path = Path(['foo.bar'])[0]
        assert str(path) == "'foo.bar'[0]"
        assert repr(path) == "Path(['foo.bar[0]'])"

    def test_index_map_attribute(self):
        path = Path(['foo.bar'])['baz']
        assert str(path) == "'foo.bar'.baz"
        assert repr(path) == "Path(['foo.bar', 'baz'])"

    def test_index_invalid(self):
        with self.assertRaises(TypeError):
            _ = Path('foo.bar')[0.0]
class fileglob_t(ctypes.Structure):
    class list_entry(ctypes.Structure):
        _fields_ = (
            ('le_next', POINTER64),
            ('le_prev', POINTER64),
        )

    _fields_ = (
        ('f_msglist', list_entry),
        ('fg_flag', ctypes.c_int32),
        ('fg_count', ctypes.c_int32),
        ('fg_msgcount', ctypes.c_int32),
        ('fg_lflags', ctypes.c_int32),
        ('fg_cred', POINTER64),
        ('fg_ops', POINTER64),
        ('fg_offset', ctypes.c_int64),
        ('fg_data', POINTER64),
        ('fg_vn_data', POINTER64),
        ('fg_lock', POINTER64),
        ('fg_label', POINTER64),
    )

    def __init__(self, ql, base):
        self.ql = ql
        self.base = base

    def updateToMem(self):
        self.ql.mem.write(self.base, bytes(self))

    def loadFromMem(self):
        data = self.ql.mem.read(self.base, ctypes.sizeof(self))
        newObj = type(self).from_buffer(data)
        newObj.ql = self.ql
        newObj.base = self.base
        return newObj
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-dataset', required=True, type=str, help='Targeting dataset.',
                        choices=['DBLP', 'Freebase', 'PubMed', 'Yelp'])
    parser.add_argument('-model', required=True, type=str, help='Targeting model.',
                        choices=['metapath2vec-ESim', 'PTE', 'HIN2Vec', 'AspEm', 'HEER',
                                 'R-GCN', 'HAN', 'MAGNN', 'HGT', 'TransE', 'DistMult',
                                 'ComplEx', 'ConvE'])
    parser.add_argument('-attributed', required=True, type=str,
                        help='Only R-GCN, HAN, MAGNN, and HGT support attributed training.',
                        choices=['True', 'False'])
    parser.add_argument('-supervised', required=True, type=str,
                        help='Only R-GCN, HAN, MAGNN, and HGT support semi-supervised training.',
                        choices=['True', 'False'])
    return parser.parse_args()
def _file_with_extension(directory, extension):
    matching = (f for f in os.listdir(directory) if f.endswith(extension))
    try:
        (file,) = matching
    except ValueError:
        raise ValueError(
            'No distribution was found. Ensure that `setup.py` is not empty and that it '
            'calls `setup()`.')
    return file
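A quick usage sketch (the directory and file name are made up): the tuple unpacking insists on exactly one match, so zero or multiple candidates raise ValueError.

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, 'pkg-1.0-py3-none-any.whl'), 'w').close()
    assert _file_with_extension(d, '.whl') == 'pkg-1.0-py3-none-any.whl'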
class PlaylistModel(QAbstractListModel):
    def __init__(self, playlist, *args, **kwargs):
        super(PlaylistModel, self).__init__(*args, **kwargs)
        self.playlist = playlist

    def data(self, index, role):
        if role == Qt.DisplayRole:
            media = self.playlist.media(index.row())
            return media.canonicalUrl().fileName()

    def rowCount(self, index):
        return self.playlist.mediaCount()
class Generator:
    def __init__(self, name, is_training, ngf=64, norm='instance', image_size=128,
                 is_dropout=False):
        self.name = name
        self.reuse = False
        self.ngf = ngf
        self.norm = norm
        self.is_training = is_training
        self.image_size = image_size
        self.is_dropout = is_dropout
        self.noise = None

    def __call__(self, input, noise=None):
        with tf.variable_scope(self.name):
            c7s1_32 = ops.c7s1_k(input, self.ngf, is_training=self.is_training,
                                 norm=self.norm, reuse=self.reuse, name='c7s1_32')
            d64 = ops.dk(c7s1_32, 2 * self.ngf, is_training=self.is_training,
                         norm=self.norm, reuse=self.reuse, name='d64')
            d128 = ops.dk(d64, 2 * self.ngf, is_training=self.is_training,
                          norm=self.norm, reuse=self.reuse, name='d128')
            if self.image_size <= 128:
                res_output = ops.n_res_blocks(d128, reuse=self.reuse, norm=None, n=6)
            else:
                res_output = ops.n_res_blocks(d128, reuse=self.reuse, n=9)
            if self.noise is None:
                noise = tf.random_normal(res_output.get_shape().as_list(),
                                         mean=0.0, stddev=1.0, dtype=tf.float32)
            else:
                noise = self.noise
            # Concatenate along the channel axis (3 for NHWC); the source had
            # axis 4, which is out of range for a rank-4 feature map.
            res_output = tf.concat([res_output, noise], 3)
            u64 = ops.uk(res_output, 2 * self.ngf, is_training=self.is_training,
                         norm=self.norm, reuse=self.reuse, name='u64')
            u32 = ops.uk(u64, self.ngf, is_training=self.is_training, norm=self.norm,
                         reuse=self.reuse, name='u32', output_size=self.image_size)
            output = ops.c7s1_k(u32, 1, norm=None, activation='tanh',
                                reuse=self.reuse, name='output')
            if self.is_dropout:
                output = tf.nn.dropout(output, keep_prob=0.9)
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        return output

    def sample(self, input):
        image = utils.batch_convert2int(self.__call__(input))
        image = tf.image.encode_jpeg(tf.squeeze(image, [0]))
        return image
class Effect504(BaseEffect):
    type = 'passive'

    # `handler` takes no `self`, so it is restored as a staticmethod.
    @staticmethod
    def handler(fit, container, context, projectionRange, **kwargs):
        level = container.level if 'skill' in context else 1
        amount = container.getModifiedItemAttr('droneRangeBonus') * level
        fit.extraAttributes.increase('droneControlRange', amount, **kwargs)
def upgrade(op, tables, tester):
    op.create_index('quotanamespacesize_namespace_user_id', 'quotanamespacesize',
                    ['namespace_user_id'], unique=True)
    op.create_index('quotanamespacesize_backfill_start_ms', 'quotanamespacesize',
                    ['backfill_start_ms'], unique=False)
    op.create_index('quotanamespacesize_size_bytes', 'quotanamespacesize',
                    ['size_bytes'], unique=False)
    op.create_index('quotarepositorysize_repository_id', 'quotarepositorysize',
                    ['repository_id'], unique=True)
    op.create_index('quotarepositorysize_size_bytes', 'quotarepositorysize',
                    ['size_bytes'], unique=False)
class Case:
    def __init__(self, project, name, data=None, assert_data=None, save_data=None,
                 del_data=None, pre_time=0, g=None, s=''):
        self.name = name
        # Avoid a shared mutable default argument: each Case gets its own dict.
        self.g = g if g is not None else {}
        self.interface = interf(name, project, self.g)
        self.data = data
        self.assert_data = assert_data
        self.save_data = save_data
        self.del_data = del_data
        self.status = [0, 0]
        self.pre_time = pre_time
        self.s = s

    def run(self):
        try:
            if self.pre_time:
                time.sleep(self.pre_time)
            if self.data:
                if not isinstance(self.data, dict):
                    self.data = json.loads(self.data)
                self.interface.modify_params(self.data)
                self.interface.params = self.data
            if self.interface.request():
                self.status[0] = 1
                if self.assert_data:
                    if not isinstance(self.assert_data, dict):
                        self.assert_data = json.loads(self.assert_data)
                    if self.interface.assert_response(self.assert_data):
                        self.status[1] = 1
                else:
                    self.status[1] = 1
                if self.save_data:
                    self.save_data = self.save_data.replace(' ', '').split(',')
                    self.interface.g_push(self.save_data)
        except Exception:
            print(traceback.format_exc())
        return (self.name, self.status, self.interface.log_file, self.del_data)
def pred_analysis2(pred_json, dev_json):
    ent_all = 0
    ent_type_error = 0
    ent_span_error = 0
    ent_loss = 0
    ent_add = 0
    for preds, dev in zip(pred_json, dev_json):
        pred: dict = preds['label']
        target: dict = dev['label']
        entity2type = dict()
        entity2span = dict()
        for type, ent_spans in target.items():
            for ent, spans in ent_spans.items():
                entity2type[ent] = type
                entity2span[ent] = spans
        for type, ent_spans in pred.items():
            for ent, spans in ent_spans.items():
                ent_all += 1
                if ent not in entity2type:
                    print('ent=', ent)
                    print('entity2type=', entity2type)
                    print('*' * 20)
                    ent_add += 1
                    continue
                if type != entity2type[ent]:
                    ent_type_error += 1
                span_yes = True
                for span in spans:
                    if span not in entity2span[ent]:
                        span_yes = False
                if not span_yes:
                    ent_span_error += 1
    print('ent_type_error={}'.format(ent_type_error))
    print('ent_span_error={}'.format(ent_span_error))
    print('ent_loss={}'.format(ent_loss))
    print('ent_add={}'.format(ent_add))
    print('ent_all={}'.format(ent_all))
def process_compound_tables(c, filenames, reporter):
    start_time = time.time()
    reporter.report('[Stage 1/7] Merging compound records ...')
    create_compound_table(c)
    for db_id, progress_str, filename in enumerate_progress(filenames):
        with transaction(c):
            create_compound_map_table(c, db_id)
            with attach_as_old(c, filename):
                (num_compounds,) = next(c.execute('SELECT count(*) FROM old.compound'))
                with progress(reporter, f'[Stage 1/7] #compounds: {num_compounds} {progress_str}'):
                    process_compound_table(c, db_id, filename)
    with transaction(c):
        (num_compounds,) = next(c.execute('SELECT count(*) FROM compound'))
        with progress(reporter, f'[Stage 1/7] Exporting {num_compounds} compound records'):
            export_compound_table(c)
        clear_compound_table(c)
    end_time = time.time()
    reporter.report(f'[Stage 1/7] Merged {num_compounds} compound records '
                    f'in {SECS(start_time, end_time)}.')
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', pad_token='<pad>', eos_token='</s>',
                 unk_token='<unk>', do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token,
                         pad_token=pad_token, do_lower_case=do_lower_case, **kwargs)
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'No merges files provided. {self.__class__.__name__} can only be '
                        'used for decoding.')
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    # Restored as a property: `vocab_size` takes no arguments and this is
    # the convention for PreTrainedTokenizer subclasses.
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == '\n ' + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')
        word = word.replace(' ', BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so that it can '
                'only be used for decoding, not for encoding. Make sure to provide '
                '`merges.txt` file at instantiation to enable encoding.')
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ' '.join(tokens)
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str,
                        filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory,
            ((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merges_file = os.path.join(
            save_directory,
            ((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, 'w', encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merges_file}: BPE merge indices are not '
                        'consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merges_file)
class CheckpointManagerConstructor(typing_extensions.Protocol):
    def __call__(self,
                 directory: str,
                 train_state: train_state_lib.TrainState,
                 partitioner: partitioning.BasePartitioner,
                 dataset_iterator: Optional[tf.data.Iterator] = None,
                 save_dtype: Optional[jnp.dtype] = None,
                 restore_dtype: Optional[jnp.dtype] = None,
                 keep: Optional[int] = None,
                 period: Optional[int] = None,
                 force_keep_period: Optional[int] = None,
                 options: Optional[orbax.checkpoint.CheckpointManagerOptions] = None
                 ) -> CheckpointManager:
        pass
def match_clusters(dbscan_clusters, clf_file):
    clf2dbscan = dict()
    clf_cluster2phrases = defaultdict(list)
    with clf_file.open('r') as f:
        for line in f:
            phrase, clf_label = line.strip().split('\t')
            clf_cluster2phrases[clf_label].append(phrase)
    for clf_cluster, clf_phrases in clf_cluster2phrases.items():
        clf_phrases = set(clf_phrases)
        dbscan_labels = []
        match_ratios = []
        for dbscan_cluster, dbscan_phrases in dbscan_clusters.items():
            n_common = len(set(dbscan_phrases) & clf_phrases)
            match_ratio = n_common / len(clf_phrases) if len(clf_phrases) > 0 else 0
            match_ratios.append(match_ratio)
            dbscan_labels.append(dbscan_cluster)
        best_match = np.argmax(match_ratios)
        clf2dbscan[clf_cluster] = dbscan_labels[best_match]
    clf_clusters = dict(clf_cluster2phrases)
    return (clf2dbscan, clf_clusters)
def upgrade_source(upgrade_package_names: Set[str], verbose: bool = False,
                   dry_run: bool = False, pre: bool = False) -> None:
    packages_to_sources = _get_sources_for_packages(upgrade_package_names, TargetType.ALL)
    _newline = '\n - '
    missing_packages = upgrade_package_names - packages_to_sources.keys()
    if missing_packages:
        print(f'''Some of the given packages were not found in either source or target files.
Please check that the packages are spelled correctly. If any of these packages were newly
added to any of the source files you need to run '{SCRIPT_NAME} compile' first.

Missing package(s):
 - {_newline.join(missing_packages)}''')
        sys.exit(1)
    grouped_packages_to_sources = [
        (set(package_name for package_name, _ in group), key)
        for key, group in groupby(
            sorted(packages_to_sources.items(), key=itemgetter(1)), key=itemgetter(1))
    ]
    for package_names, source_names in grouped_packages_to_sources:
        print(f'''Upgrading package(s):
 - {_newline.join(package_names)}''')
        for source_name in _resolve_deps(source_names):
            _run_pip_compile(source_name, upgrade_packages=package_names,
                             verbose=verbose, dry_run=dry_run, pre=pre)
class VyperLexer(RegexLexer): name = 'Vyper' aliases = ['vyper'] filenames = ['*.vy'] url = 'https://vyper.readthedocs.io' version_added = '2.17' tokens = {'root': [('\\s+', Whitespace), ('(\\\\)(\\n|\\r\\n|\\r)', bygroups(Text, Whitespace)), ('#.*$', Comment.Single), ('\\"\\"\\"', Comment.Multiline, 'multiline-comment'), ("'", String.Single, 'single-string'), ('"', String.Double, 'double-string'), ('(def)(\\s+)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Keyword, Whitespace, Name.Function)), ('(event|struct|interface|log)(\\s+)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Keyword, Whitespace, Name.Class)), ('(from)(\\s+)(vyper\\.\\w+)(\\s+)(import)(\\s+)(\\w+)', bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, Keyword, Whitespace, Name.Class)), ('\\b0x[0-9a-fA-F]+\\b', Number.Hex), ('\\b(\\d{1,3}(?:_\\d{3})*|\\d+)\\b', Number.Integer), ('\\b\\d+\\.\\d*\\b', Number.Float), (words(('def', 'event', 'pass', 'return', 'for', 'while', 'if', 'elif', 'else', 'assert', 'raise', 'import', 'in', 'struct', 'implements', 'interface', 'from', 'indexed', 'log'), prefix='\\b', suffix='\\b'), Keyword), (words(('public', 'private', 'view', 'pure', 'constant', 'immutable', 'nonpayable'), prefix='\\b', suffix='\\b'), Keyword.Declaration), (words(('bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'shift', 'create_minimal_proxy_to', 'create_copy_of', 'create_from_blueprint', 'ecadd', 'ecmul', 'ecrecover', 'keccak256', 'sha256', 'concat', 'convert', 'uint2str', 'extract32', 'slice', 'abs', 'ceil', 'floor', 'max', 'max_value', 'min', 'min_value', 'pow_mod256', 'sqrt', 'isqrt', 'uint256_addmod', 'uint256_mulmod', 'unsafe_add', 'unsafe_sub', 'unsafe_mul', 'unsafe_div', 'as_wei_value', 'blockhash', 'empty', 'len', 'method_id', '_abi_encode', '_abi_decode', 'print', 'range'), prefix='\\b', suffix='\\b'), Name.Builtin), (words(('msg.sender', 'msg.value', 'block.timestamp', 'block.number', 'msg.gas'), prefix='\\b', suffix='\\b'), Name.Builtin.Pseudo), (words(('uint', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256', 'int', 'int8', 'int16', 'int32', 'int64', 'int128', 'int256', 'bool', 'decimal', 'bytes', 'bytes1', 'bytes2', 'bytes3', 'bytes4', 'bytes5', 'bytes6', 'bytes7', 'bytes8', 'bytes9', 'bytes10', 'bytes11', 'bytes12', 'bytes13', 'bytes14', 'bytes15', 'bytes16', 'bytes17', 'bytes18', 'bytes19', 'bytes20', 'bytes21', 'bytes22', 'bytes23', 'bytes24', 'bytes25', 'bytes26', 'bytes27', 'bytes28', 'bytes29', 'bytes30', 'bytes31', 'bytes32', 'string', 'String', 'address', 'enum', 'struct'), prefix='\\b', suffix='\\b'), Keyword.Type), ('\\b(indexed)\\b(\\s*)(\\()(\\s*)(\\w+)(\\s*)(\\))', bygroups(Keyword, Whitespace, Punctuation, Whitespace, Keyword.Type, Whitespace, Punctuation)), ('(\\+|\\-|\\*|\\/|<=?|>=?|==|!=|=|\\||&|%)', Operator), ('[.,:;()\\[\\]{}]', Punctuation), ('[\\w.]+', Name.Decorator), ('__\\w+__', Name.Magic), ('EMPTY_BYTES32', Name.Constant), ('\\bERC20\\b', Name.Class), ('\\bself\\b', Name.Attribute), ('Bytes\\[\\d+\\]', Keyword.Type), ('\\b[a-zA-Z_]\\w*\\b:', Name.Variable), ('\\b[a-zA-Z_]\\w*\\b', Name)], 'multiline-comment': [('\\"\\"\\"', Comment.Multiline, '#pop'), ('[^"]+', Comment.Multiline), ('\\"', Comment.Multiline)], 'single-string': [("[^\\\\']+", String.Single), ("'", String.Single, '#pop'), ('\\\\.', String.Escape)], 'double-string': [('[^\\\\"]+', String.Double), ('"', String.Double, '#pop'), ('\\\\.', String.Escape)]}
class TradeHandler(object): def __init__(self, part_a, part_b): self.part_a = part_a self.part_b = part_b self.part_a.cmdset.add(CmdsetTrade()) self.trade_started = False self.part_a.ndb.tradehandler = self self.part_a_offers = [] self.part_b_offers = [] self.part_a_accepted = False self.part_b_accepted = False def msg_other(self, sender, string): if (self.part_a == sender): self.part_b.msg(string) elif (self.part_b == sender): self.part_a.msg(string) else: (sender.msg(string) if sender else self.part_a.msg(string)) def get_other(self, party): if (self.part_a == party): return self.part_b if (self.part_b == party): return self.part_a return None def join(self, part_b): if (self.part_b == part_b): self.part_b.ndb.tradehandler = self self.part_b.cmdset.add(CmdsetTrade()) self.trade_started = True return True return False def unjoin(self, part_b): if (self.part_b == part_b): self.finish(force=True) return True return False def offer(self, party, *args): if self.trade_started: self.part_a_accepted = False self.part_b_accepted = False if (party == self.part_a): self.part_a_offers = list(args) elif (party == self.part_b): self.part_b_offers = list(args) else: raise ValueError def list(self): return (self.part_a_offers, self.part_b_offers) def search(self, offername): all_offers = (self.part_a_offers + self.part_b_offers) if isinstance(offername, int): if (0 <= offername < len(all_offers)): return all_offers[offername] all_keys = [offer.key for offer in all_offers] try: imatch = all_keys.index(offername) return all_offers[imatch] except ValueError: for offer in all_offers: if offer.aliases.get(offername): return offer return None def accept(self, party): if self.trade_started: if (party == self.part_a): self.part_a_accepted = True elif (party == self.part_b): self.part_b_accepted = True else: raise ValueError return self.finish() return False def decline(self, party): if self.trade_started: if (party == self.part_a): if self.part_a_accepted: self.part_a_accepted = False return True return False elif (party == self.part_b): if self.part_b_accepted: self.part_b_accepted = False return True return False else: raise ValueError return False def finish(self, force=False): fin = False if (self.trade_started and self.part_a_accepted and self.part_b_accepted): for obj in self.part_a_offers: obj.location = self.part_b for obj in self.part_b_offers: obj.location = self.part_a fin = True if (fin or force): self.part_a.cmdset.delete('cmdset_trade') self.part_b.cmdset.delete('cmdset_trade') self.part_a_offers = None self.part_b_offers = None self.part_a.scripts.stop('trade_request_timeout') del self.part_a.ndb.tradehandler if self.part_b.ndb.tradehandler: del self.part_b.ndb.tradehandler return True return False
class Compare(NodeNG): _astroid_fields = ('left', 'ops') left: NodeNG ops: list[tuple[(str, NodeNG)]] def postinit(self, left: NodeNG, ops: list[tuple[(str, NodeNG)]]) -> None: self.left = left self.ops = ops def get_children(self): (yield self.left) for (_, comparator) in self.ops: (yield comparator) def last_child(self): return self.ops[(- 1)][1] def _to_literal(node: SuccessfulInferenceResult) -> Any: return ast.literal_eval(node.as_string()) def _do_compare(self, left_iter: Iterable[InferenceResult], op: str, right_iter: Iterable[InferenceResult]) -> (bool | util.UninferableBase): retval: (bool | None) = None if (op in UNINFERABLE_OPS): return util.Uninferable op_func = COMPARE_OPS[op] for (left, right) in itertools.product(left_iter, right_iter): if (isinstance(left, util.UninferableBase) or isinstance(right, util.UninferableBase)): return util.Uninferable try: (left, right) = (self._to_literal(left), self._to_literal(right)) except (SyntaxError, ValueError, AttributeError): return util.Uninferable try: expr = op_func(left, right) except TypeError as exc: raise AstroidTypeError from exc if (retval is None): retval = expr elif (retval != expr): return util.Uninferable assert (retval is not None) return retval def _infer(self, context: (InferenceContext | None)=None, **kwargs: Any) -> Generator[((nodes.Const | util.UninferableBase), None, None)]: retval: (bool | util.UninferableBase) = True ops = self.ops left_node = self.left lhs = list(left_node.infer(context=context)) for (op, right_node) in ops: rhs = list(right_node.infer(context=context)) try: retval = self._do_compare(lhs, op, rhs) except AstroidTypeError: retval = util.Uninferable break if (retval is not True): break lhs = rhs if (retval is util.Uninferable): (yield retval) else: (yield Const(retval))
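# --- Hedged demo of chained-comparison inference via astroid's public helpers
# (astroid.extract_node and NodeNG.inferred are the standard entry points).
import astroid

print(astroid.extract_node('1 < 2 < 3').inferred())  # [Const(True)] -- the whole chain folds
print(astroid.extract_node('[] is []').inferred())   # [Uninferable] -- 'is' is in UNINFERABLE_OPS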
def get_model_name(factory_class: FactoryType) -> str: model_cls = factory_class._meta.model if isinstance(model_cls, str): return model_cls model_name = inflection.underscore(model_cls.__name__) if (model_cls in WARN_FOR_MODEL_TYPES): warnings.warn(f'''Using a {model_cls} as model type for {factory_class} is discouraged by pytest-factoryboy, as it assumes that the model name is {model_name!r} when using it as SubFactory or RelatedFactory, which is too generic and probably not what you want. You can give an explicit name to the model by using: model = named_model({model_cls.__name__}, "Foo")''') return model_name
class TestWindow(pyglet.window.Window): def __init__(self, *args, **kwargs): super(TestWindow, self).__init__(*args, **kwargs) self.batch = pyglet.graphics.Batch() self.document = pyglet.text.decode_attributed(doctext) for i in [element_index]: self.document.insert_element(i, TestElement(60, (- 10), 70)) self.margin = 2 self.layout = IncrementalTextLayout(self.document, (self.width - (self.margin * 2)), (self.height - (self.margin * 2)), multiline=True, batch=self.batch) self.caret = caret.Caret(self.layout) self.push_handlers(self.caret) self.set_mouse_cursor(self.get_system_mouse_cursor('text')) def on_draw(self): pyglet.gl.glClearColor(1, 1, 1, 1) self.clear() self.batch.draw() def on_key_press(self, symbol, modifiers): super(TestWindow, self).on_key_press(symbol, modifiers) if (symbol == pyglet.window.key.TAB): self.caret.on_text('\t') self.document.set_style(0, len(self.document.text), dict(bold=None))
class AI(): def __init__(self, bot_user: discord.Member, symbol: str): self.user = bot_user self.symbol = symbol @staticmethod async def get_move(board: dict[(int, str)], _: discord.Message) -> tuple[(bool, int)]: possible_moves = [i for (i, emoji) in board.items() if (emoji in list(Emojis.number_emojis.values()))] for symbol in (Emojis.o_square, Emojis.x_square): for move in possible_moves: board_copy = board.copy() board_copy[move] = symbol if check_win(board_copy): return (False, move) open_corners = [i for i in possible_moves if (i in (1, 3, 7, 9))] if (len(open_corners) > 0): return (False, random.choice(open_corners)) if (5 in possible_moves): return (False, 5) open_edges = [i for i in possible_moves if (i in (2, 4, 6, 8))] return (False, random.choice(open_edges)) def __str__(self) -> str: return self.user.mention
class TMP4Freeform(TestCase): def test_cmp(self): self.assertReallyEqual(MP4FreeForm(b'woooo', 142, 42), MP4FreeForm(b'woooo', 142, 42)) self.assertReallyNotEqual(MP4FreeForm(b'woooo', 142, 43), MP4FreeForm(b'woooo', 142, 42)) self.assertReallyNotEqual(MP4FreeForm(b'woooo', 143, 42), MP4FreeForm(b'woooo', 142, 42)) self.assertReallyNotEqual(MP4FreeForm(b'wooox', 142, 42), MP4FreeForm(b'woooo', 142, 42)) def test_cmp_bytes(self): self.assertReallyEqual(MP4FreeForm(b'woooo'), b'woooo') self.assertReallyNotEqual(MP4FreeForm(b'woooo'), b'foo')
class RandomProcedure(Procedure): iterations = IntegerParameter('Loop Iterations', default=100) delay = FloatParameter('Delay Time', units='s', default=0.001) seed = Parameter('Random Seed', default='12345') DATA_COLUMNS = ['Iteration', 'Random Number'] def startup(self): random.seed(self.seed) def execute(self): for i in range(self.iterations): data = {'Iteration': i, 'Random Number': random.random()} self.emit('results', data) self.emit('progress', ((100.0 * i) / self.iterations)) sleep(self.delay) if self.should_stop(): break
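# --- Running the procedure headlessly; a sketch following pymeasure's documented
# Worker/Results pattern (the output filename is illustrative).
from pymeasure.experiment import Results, Worker

procedure = RandomProcedure()
procedure.iterations = 10
results = Results(procedure, 'random_example.csv')
worker = Worker(results)
worker.start()
worker.join(timeout=60)  # emitted rows end up in random_example.csv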
def test_first_mark_class(item_names_for): tests_content = '\n import pytest\n\n def test_1(): pass\n\n\n @pytest.mark.order("first")\n class TestSuite:\n\n def test_3(self): pass\n\n def test_2(self): pass\n\n ' assert (item_names_for(tests_content) == ['TestSuite::test_3', 'TestSuite::test_2', 'test_1'])
def test_mesh_generation(): from solcore.sesame_drift_diffusion.process_structure import make_mesh from solcore import material, si from solcore.structure import Junction, Layer from solcore.state import State GaAs_n = material('GaAs')() GaAs_p = material('GaAs')() options = State(minimum_spacing=1e-09, maximum_spacing=1e-09) pn_junction = Junction([Layer(si('500nm'), GaAs_n), Layer(si('2000nm'), GaAs_p)]) layer_width = [(layer.width * 100.0) for layer in pn_junction] make_mesh(pn_junction, layer_width, options, [5e-05]) assert (pn_junction.mesh == approx((np.arange(0, 2500.01, 1) * 1e-09)))
def distributed_all_gather(tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None): if (world_size is None): world_size = torch.distributed.get_world_size() if (valid_batch_size is not None): valid_batch_size = min(valid_batch_size, world_size) elif (is_valid is not None): is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) if (not no_barrier): torch.distributed.barrier() tensor_list_out = [] with torch.no_grad(): if (is_valid is not None): is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] torch.distributed.all_gather(is_valid_list, is_valid) is_valid = [x.item() for x in is_valid_list] for tensor in tensor_list: gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] torch.distributed.all_gather(gather_list, tensor) if (valid_batch_size is not None): gather_list = gather_list[:valid_batch_size] elif (is_valid is not None): gather_list = [g for (g, v) in zip(gather_list, is_valid_list) if v] if out_numpy: gather_list = [t.cpu().numpy() for t in gather_list] tensor_list_out.append(gather_list) return tensor_list_out
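# --- Minimal single-process exercise of distributed_all_gather (assumption:
# a world of size 1 on the 'gloo' backend; the address and port are arbitrary).
import torch
import torch.distributed as dist

dist.init_process_group('gloo', init_method='tcp://127.0.0.1:29500', rank=0, world_size=1)
acc = torch.tensor([0.5])
print(distributed_all_gather([acc], out_numpy=True))  # [[array([0.5], dtype=float32)]]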
def test_normal_queue_implicit_top_module(do_test): def tv_in(m, tv): m.enq_en = Bits1(tv[0]) m.enq_msg = Bits32(tv[1]) m.deq_en = Bits1(tv[3]) def tv_out(m, tv): if (tv[2] != '*'): assert (m.enq_rdy == Bits1(tv[2])) if (tv[4] != '*'): assert (m.deq_msg == Bits32(tv[4])) if (tv[5] != '*'): assert (m.deq_rdy == Bits1(tv[5])) class VQueue(Component, VerilogPlaceholder): def construct(s, data_width, num_entries, count_width): s.count = OutPort(mk_bits(count_width)) s.deq_en = InPort(Bits1) s.deq_rdy = OutPort(Bits1) s.deq_msg = OutPort(mk_bits(data_width)) s.enq_en = InPort(Bits1) s.enq_rdy = OutPort(Bits1) s.enq_msg = InPort(mk_bits(data_width)) num_entries = 1 q = VQueue(data_width=32, num_entries=num_entries, count_width=clog2((num_entries + 1))) tv = [[1, 42, 1, 0, 0, 0], [0, 43, 0, 1, 42, 1], [1, 43, 1, 0, 42, 0], [0, 44, 0, 1, 43, 1], [1, 44, 1, 0, 43, 0], [0, 45, 0, 1, 44, 1], [1, 45, 1, 0, 44, 0]] q._tvs = tv q._tv_in = tv_in q._tv_out = tv_out do_test(q)
class EvalUtilTest(tf.test.TestCase): def test_load_categories_from_csv_file(self): csv_data = '\n 0,"cat"\n 1,"dog"\n 2,"bird"\n '.strip(' ') csv_path = os.path.join(self.get_temp_dir(), 'test.csv') with tf.gfile.Open(csv_path, 'w') as f: f.write(csv_data) categories = category_util.load_categories_from_csv_file(csv_path) self.assertTrue(({'id': 0, 'name': 'cat'} in categories)) self.assertTrue(({'id': 1, 'name': 'dog'} in categories)) self.assertTrue(({'id': 2, 'name': 'bird'} in categories)) def test_save_categories_to_csv_file(self): categories = [{'id': 0, 'name': 'cat'}, {'id': 1, 'name': 'dog'}, {'id': 2, 'name': 'bird'}] csv_path = os.path.join(self.get_temp_dir(), 'test.csv') category_util.save_categories_to_csv_file(categories, csv_path) saved_categories = category_util.load_categories_from_csv_file(csv_path) self.assertEqual(saved_categories, categories)
class BaseProgressBar(object): def __init__(self, iterable, epoch=None, prefix=None): self.iterable = iterable self.n = getattr(iterable, 'n', 0) self.epoch = epoch self.prefix = '' if (epoch is not None): self.prefix += 'epoch {:03d}'.format(epoch) if (prefix is not None): self.prefix += ' | {}'.format(prefix) def __len__(self): return len(self.iterable) def __enter__(self): return self def __exit__(self, *exc): return False def __iter__(self): raise NotImplementedError def log(self, stats, tag=None, step=None): raise NotImplementedError def print(self, stats, tag=None, step=None): raise NotImplementedError def _str_commas(self, stats): return ', '.join((((key + '=') + stats[key].strip()) for key in stats.keys())) def _str_pipes(self, stats): return ' | '.join((((key + ' ') + stats[key].strip()) for key in stats.keys())) def _format_stats(self, stats): postfix = OrderedDict(stats) for key in postfix.keys(): postfix[key] = str(format_stat(postfix[key])) return postfix
def is_transaction_invalidated(transaction: ContractSendEvent, state_change: StateChange) -> bool: is_our_failed_update_transfer = (isinstance(state_change, ContractReceiveChannelSettled) and isinstance(transaction, ContractSendChannelUpdateTransfer) and (state_change.token_network_address == transaction.token_network_address) and (state_change.channel_identifier == transaction.channel_identifier)) if is_our_failed_update_transfer: return True is_our_failed_withdraw = (isinstance(state_change, ContractReceiveChannelClosed) and isinstance(transaction, ContractSendChannelWithdraw) and (state_change.token_network_address == transaction.token_network_address) and (state_change.channel_identifier == transaction.channel_identifier)) if is_our_failed_withdraw: return True return False
class MobileNetV2(nn.Module): def __init__(self, num_classes, loss={'xent'}, **kwargs): super(MobileNetV2, self).__init__() self.loss = loss self.conv1 = ConvBlock(3, 32, 3, s=2, p=1) self.block2 = Bottleneck(32, 16, 1, 1) self.block3 = nn.Sequential(Bottleneck(16, 24, 6, 2), Bottleneck(24, 24, 6, 1)) self.block4 = nn.Sequential(Bottleneck(24, 32, 6, 2), Bottleneck(32, 32, 6, 1), Bottleneck(32, 32, 6, 1)) self.block5 = nn.Sequential(Bottleneck(32, 64, 6, 2), Bottleneck(64, 64, 6, 1), Bottleneck(64, 64, 6, 1), Bottleneck(64, 64, 6, 1)) self.block6 = nn.Sequential(Bottleneck(64, 96, 6, 1), Bottleneck(96, 96, 6, 1), Bottleneck(96, 96, 6, 1)) self.block7 = nn.Sequential(Bottleneck(96, 160, 6, 2), Bottleneck(160, 160, 6, 1), Bottleneck(160, 160, 6, 1)) self.block8 = Bottleneck(160, 320, 6, 1) self.conv9 = ConvBlock(320, 1280, 1) self.classifier = nn.Linear(1280, num_classes) self.feat_dim = 1280 def featuremaps(self, x): x = self.conv1(x) x = self.block2(x) x = self.block3(x) x = self.block4(x) x = self.block5(x) x = self.block6(x) x = self.block7(x) x = self.block8(x) x = self.conv9(x) return x def forward(self, x): x = self.featuremaps(x) x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), (- 1)) x = F.dropout(x, training=self.training) if (not self.training): return x y = self.classifier(x) if (self.loss == {'xent'}): return y elif (self.loss == {'xent', 'htri'}): return (y, x) else: raise KeyError('Unsupported loss: {}'.format(self.loss))
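# --- Smoke test (assumes this repo's ConvBlock and Bottleneck layers are importable):
# in eval mode the re-id network returns 1280-d features instead of logits.
import torch

model = MobileNetV2(num_classes=751, loss={'xent'})
model.eval()
with torch.no_grad():
    feats = model(torch.randn(2, 3, 256, 128))
print(feats.shape)  # torch.Size([2, 1280])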
@require_torch @require_retrieval class RagDPRBartTest(RagTestMixin, unittest.TestCase): @property def config_and_inputs(self): question_encoder_tester = DPRModelTester(self) dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs() generator_tester = BartModelTester(self) bart_config_and_inputs = generator_tester.prepare_config_and_inputs_for_common() (question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs (generator_config, bart_inputs_dict) = bart_config_and_inputs (decoder_input_ids, decoder_attention_mask) = (bart_inputs_dict['input_ids'], bart_inputs_dict['attention_mask']) config = RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config, n_docs=self.n_docs, retrieval_vector_size=self.retrieval_vector_size, max_combined_length=self.max_combined_length) return {'config': config, 'input_ids': input_ids, 'attention_mask': input_mask, 'decoder_input_ids': decoder_input_ids, 'decoder_attention_mask': decoder_attention_mask}
def test_all_migrations(tmpfile): io = SQLiteIO(tmpfile, MagicMock(), create_new=True) io.ex('PRAGMA user_version=1') io.ex('\n CREATE TABLE items (\n id INTEGER PRIMARY KEY,\n type TEXT NOT NULL,\n x REAL DEFAULT 0,\n y REAL DEFAULT 0,\n z REAL DEFAULT 0,\n scale REAL DEFAULT 1,\n rotation REAL DEFAULT 0,\n flip INTEGER DEFAULT 1,\n filename TEXT)') io.ex('\n CREATE TABLE sqlar (\n name TEXT PRIMARY KEY,\n item_id INTEGER NOT NULL,\n mode INT,\n mtime INT default current_timestamp,\n sz INT,\n data BLOB,\n FOREIGN KEY (item_id)\n REFERENCES items (id)\n ON DELETE CASCADE\n ON UPDATE NO ACTION)') io.ex('INSERT INTO items (type, x, y, z, scale, rotation, flip, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?) ', ('pixmap', 22.2, 33.3, 0.22, 3.4, 45, (- 1), 'bee.png')) io.ex('INSERT INTO sqlar (item_id, data) VALUES (?, ?)', (1, b'bla')) io.connection.commit() del io io = SQLiteIO(tmpfile, MagicMock(), create_new=False) result = io.fetchone('PRAGMA user_version') assert (result[0] == schema.USER_VERSION) result = io.fetchone('SELECT x, y, items.data, sqlar.data FROM items LEFT OUTER JOIN sqlar on sqlar.item_id = items.id') assert (result[0] == 22.2) assert (result[1] == 33.3) assert (json.loads(result[2]) == {'filename': 'bee.png'}) assert (result[3] == b'bla')
@patch('pypyr.moduleloader.get_module') def test_foreach_evaluates_swallow_decorator(mock_moduleloader): step = Step({'name': 'step1', 'swallow': '{dynamic_swallow_expression}', 'foreach': ['{key1}', '{key2}', 'key3']}) context = get_test_context() context['dynamic_swallow_expression'] = False original_len = len(context) arb_error = ValueError('arb error') def mock_step_deliberate_error(context): if (context['i'] == 'value2'): context['dynamic_swallow_expression'] = True elif (context['i'] == 'key3'): raise arb_error with patch.object(Step, 'invoke_step', side_effect=mock_step_deliberate_error) as mock_invoke: with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info: with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error: step.run_step(context) assert (mock_logger_info.mock_calls == [call('foreach: running step value1'), call('foreach: running step value2'), call('foreach: running step key3'), call('foreach decorator looped 3 times.')]) assert (mock_invoke.call_count == 3) assert (mock_logger_error.call_count == 1) mock_logger_error.assert_called_once_with('step1 Ignoring error because swallow is True for this step.\nValueError: arb error') assert (len(context) == (original_len + 2)) assert (context['i'] == 'key3') assert (step.for_counter == 'key3') assert (context['runErrors'] == [{'col': None, 'customError': {}, 'description': 'arb error', 'exception': arb_error, 'line': None, 'name': 'ValueError', 'step': step.name, 'swallowed': True}])
class ImageOp(Enum): SHEAR_X = auto() SHEAR_Y = auto() TRANSLATE_X = auto() TRANSLATE_Y = auto() ROTATE = auto() AUTO_CONTRAST = auto() INVERT = auto() EQUALIZE = auto() SOLARIZE = auto() POSTERIZE = auto() CONTRAST = auto() COLOR = auto() BRIGHTNESS = auto() SHARPNESS = auto()
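# --- auto() numbers the members 1..14 in declaration order, so augmentation
# policies can sample ops uniformly; a quick check:
import random

print(len(ImageOp))                  # 14
print(ImageOp.ROTATE.value)          # 5
print(random.choice(list(ImageOp)))  # e.g. ImageOp.SOLARIZE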
def init_fake_app() -> None: from quodlibet import app from quodlibet import browsers from quodlibet.player.nullbe import NullPlayer from quodlibet.library import SongFileLibrary from quodlibet.library.librarians import SongLibrarian from quodlibet.qltk.quodlibetwindow import QuodLibetWindow, PlayerOptions from quodlibet.util.cover import CoverManager browsers.init() app.name = 'Quod Libet' app.id = 'io.github.quodlibet.QuodLibet' app.player = NullPlayer() app.library = SongFileLibrary() app.library.librarian = SongLibrarian() app.cover_manager = CoverManager() app.window = QuodLibetWindow(app.library, app.player, headless=True) app.player_options = PlayerOptions(app.window)
def delete_substructure_at_idx(editmol, atom_idx_lst): edit_smiles = Chem.MolToSmiles(editmol) new_mol = Chem.RWMol(Chem.MolFromSmiles('')) old_idx2new_idx = dict() for atom in editmol.GetAtoms(): old_idx = atom.GetIdx() if (old_idx in atom_idx_lst): continue new_atom = copy_atom(atom) new_idx = new_mol.AddAtom(new_atom) old_idx2new_idx[old_idx] = new_idx for bond in editmol.GetBonds(): a1 = bond.GetBeginAtom().GetIdx() a2 = bond.GetEndAtom().GetIdx() if ((a1 in atom_idx_lst) or (a2 in atom_idx_lst)): continue a1_new = old_idx2new_idx[a1] a2_new = old_idx2new_idx[a2] bt = bond.GetBondType() new_mol.AddBond(a1_new, a2_new, bt) if (not is_valid_mol(new_mol)): return None try: new_mol.UpdatePropertyCache() except Exception: return None return (new_mol, old_idx2new_idx)
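# --- Hedged example (assumes the repo's copy_atom and is_valid_mol helpers are in
# scope): drop the oxygen (atom index 2) from ethanol, keeping the C-C core.
from rdkit import Chem

editmol = Chem.RWMol(Chem.MolFromSmiles('CCO'))
result = delete_substructure_at_idx(editmol, [2])
if result is not None:
    new_mol, old2new = result
    print(Chem.MolToSmiles(new_mol), old2new)  # CC {0: 0, 1: 1}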
class TestActivity(): def testHasNoIntentFilter(self, SAMPLE_PATH_14d9f): activity = getActivities(SAMPLE_PATH_14d9f)[0] assert (activity.hasIntentFilter() is False) def testHasIntentFilter(self, SAMPLE_PATH_13667): activity = getActivities(SAMPLE_PATH_13667)[0] assert (activity.hasIntentFilter() is True) def testIsNotExported(self, SAMPLE_PATH_14d9f): activity = getActivities(SAMPLE_PATH_14d9f)[0] assert (activity.isExported() is False) def testIsExported(self, SAMPLE_PATH_13667): activity = getActivities(SAMPLE_PATH_13667)[0] assert (activity.isExported() is True)
class TestDecoderRNN(unittest.TestCase): @classmethod def setUpClass(cls): cls.vocab_size = 100 cls.batch = None def test_input_dropout_WITH_PROB_ZERO(self): rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, input_dropout_p=0) for param in rnn.parameters(): param.data.uniform_((- 1), 1) (output1, _, _) = rnn(self.batch) (output2, _, _) = rnn(self.batch) for (prob1, prob2) in zip(output1, output2): self.assertTrue(torch.equal(prob1.data, prob2.data)) def test_input_dropout_WITH_NON_ZERO_PROB(self): rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, input_dropout_p=0.5) for param in rnn.parameters(): param.data.uniform_((- 1), 1) equal = True for _ in range(50): (output1, _, _) = rnn(self.batch) (output2, _, _) = rnn(self.batch) if (not torch.equal(output1[0].data, output2[0].data)): equal = False break self.assertFalse(equal) def test_dropout_WITH_PROB_ZERO(self): rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, dropout_p=0) for param in rnn.parameters(): param.data.uniform_((- 1), 1) (output1, _, _) = rnn(self.batch) (output2, _, _) = rnn(self.batch) for (prob1, prob2) in zip(output1, output2): self.assertTrue(torch.equal(prob1.data, prob2.data)) def test_dropout_WITH_NON_ZERO_PROB(self): rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, n_layers=2, dropout_p=0.5) for param in rnn.parameters(): param.data.uniform_((- 1), 1) equal = True for _ in range(50): (output1, _, _) = rnn(self.batch) (output2, _, _) = rnn(self.batch) if (not torch.equal(output1[0].data, output2[0].data)): equal = False break self.assertFalse(equal)
class SawyerShelfPlaceV1Policy(Policy): @staticmethod @assert_fully_parsed def _parse_obs(obs): return {'hand_pos': obs[:3], 'block_pos': obs[3:6], 'shelf_x': obs[(- 3)], 'unused_info': obs[[6, 7, 8, 10, 11]]} def get_action(self, obs): o_d = self._parse_obs(obs) action = Action({'delta_pos': np.arange(3), 'grab_effort': 3}) action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0) action['grab_effort'] = self._grab_effort(o_d) return action.array @staticmethod def _desired_pos(o_d): pos_curr = o_d['hand_pos'] pos_block = (o_d['block_pos'] + np.array([0.005, 0.0, 0.015])) pos_shelf_x = o_d['shelf_x'] if (np.linalg.norm((pos_curr[:2] - pos_block[:2])) > 0.04): return (pos_block + np.array([0.0, 0.0, 0.3])) elif (abs((pos_curr[2] - pos_block[2])) > 0.02): return pos_block elif (np.abs((pos_curr[0] - pos_shelf_x)) > 0.02): return np.array([pos_shelf_x, pos_curr[1], pos_curr[2]]) elif (pos_curr[2] < 0.25): pos_new = (pos_curr + np.array([0.0, 0.0, 0.25])) return pos_new else: pos_new = (pos_curr + np.array([0.0, 0.05, 0.0])) return pos_new @staticmethod def _grab_effort(o_d): pos_curr = o_d['hand_pos'] pos_block = o_d['block_pos'] if ((np.linalg.norm((pos_curr[:2] - pos_block[:2])) > 0.04) or (abs((pos_curr[2] - pos_block[2])) > 0.15)): return (- 1.0) else: return 0.7
def test_class_method_containing_test_issue1558(pytester: Pytester) -> None: pytester.makepyfile(test_foo='\n import unittest\n\n class MyTestCase(unittest.TestCase):\n def test_should_run(self):\n pass\n def test_should_not_run(self):\n pass\n test_should_not_run.__test__ = False\n ') reprec = pytester.inline_run() reprec.assertoutcome(passed=1)
class PlayTabCompleteServerBound(Packet): id = 6 to = 0 def __init__(self, transaction_id: int, text: str) -> None: super().__init__() self.transaction_id = transaction_id self.text = text @classmethod def decode(cls, buf: Buffer) -> PlayTabCompleteServerBound: return cls(buf.unpack_varint(), buf.unpack_string())
class Aec(Codec): codec_id = 'imagecodecs_aec' def __init__(self, bitspersample=None, flags=None, blocksize=None, rsi=None): self.bitspersample = bitspersample self.flags = flags self.blocksize = blocksize self.rsi = rsi def encode(self, buf): return imagecodecs.aec_encode(buf, bitspersample=self.bitspersample, flags=self.flags, blocksize=self.blocksize, rsi=self.rsi) def decode(self, buf, out=None): return imagecodecs.aec_decode(buf, bitspersample=self.bitspersample, flags=self.flags, blocksize=self.blocksize, rsi=self.rsi, out=_flat(out))
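# --- Registering the codec makes it resolvable by its string id (e.g. from zarr
# metadata); a sketch assuming imagecodecs was built with AEC support.
import numcodecs
import numpy as np

numcodecs.register_codec(Aec)
codec = numcodecs.get_codec({'id': 'imagecodecs_aec', 'bitspersample': 16})
data = np.arange(100, dtype='uint16')
roundtrip = np.frombuffer(codec.decode(codec.encode(data)), dtype='uint16')
assert (roundtrip == data).all()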
def main(): torch.set_num_threads(3) if (not torch.cuda.is_available()): logging.info('no gpu device available') sys.exit(1) np.random.seed(args.seed) torch.cuda.set_device(args.gpu) cudnn.benchmark = True torch.manual_seed(args.seed) cudnn.enabled = True torch.cuda.manual_seed(args.seed) logging.info(('gpu device = %d' % args.gpu)) logging.info('args = %s', args) if (not ('debug' in args.save)): api = API('pth file path') criterion = nn.CrossEntropyLoss() criterion = criterion.cuda() if ((args.method == 'gdas') or (args.method == 'snas')): tau_step = ((args.tau_min - args.tau_max) / args.epochs) tau_epoch = args.tau_max if (args.method == 'gdas'): model = TinyNetworkGDAS(C=args.init_channels, N=5, max_nodes=4, num_classes=n_classes, criterion=criterion, search_space=NAS_BENCH_201) else: model = TinyNetwork(C=args.init_channels, N=5, max_nodes=4, num_classes=n_classes, criterion=criterion, search_space=NAS_BENCH_201, k=args.k, species='gumbel') elif (args.method == 'dirichlet'): model = TinyNetwork(C=args.init_channels, N=5, max_nodes=4, num_classes=n_classes, criterion=criterion, search_space=NAS_BENCH_201, k=args.k, species='dirichlet', reg_type=args.reg_type, reg_scale=args.reg_scale) elif (args.method == 'darts'): model = TinyNetwork(C=args.init_channels, N=5, max_nodes=4, num_classes=n_classes, criterion=criterion, search_space=NAS_BENCH_201, k=args.k, species='softmax') model = model.cuda() logging.info('param size = %fMB', utils.count_parameters_in_MB(model)) optimizer = torch.optim.SGD(model.get_weights(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) if (args.dataset == 'cifar10'): (train_transform, valid_transform) = utils._data_transforms_cifar10(args) train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform) elif (args.dataset == 'cifar100'): (train_transform, valid_transform) = utils._data_transforms_cifar100(args) train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform) elif (args.dataset == 'svhn'): (train_transform, valid_transform) = utils._data_transforms_svhn(args) train_data = dset.SVHN(root=args.data, split='train', download=True, transform=train_transform) elif (args.dataset == 'imagenet16-120'): import torchvision.transforms as transforms from nasbench201.DownsampledImageNet import ImageNet16 mean = [(x / 255) for x in [122.68, 116.66, 104.01]] std = [(x / 255) for x in [63.22, 61.26, 65.09]] lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(16, padding=2), transforms.ToTensor(), transforms.Normalize(mean, std)] train_transform = transforms.Compose(lists) train_data = ImageNet16(root=os.path.join(args.data, 'imagenet16'), train=True, transform=train_transform, use_num_of_class_only=120) assert (len(train_data) == 151700) num_train = len(train_data) indices = list(range(num_train)) split = int(np.floor((args.train_portion * num_train))) train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True) valid_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), eta_min=args.learning_rate_min) architect = Architect(model, args) for epoch in range(args.epochs): lr = scheduler.get_lr()[0] logging.info('epoch %d lr %e', epoch, lr) genotype = model.genotype() logging.info('genotype = %s', genotype) model.show_arch_parameters() (train_acc, train_obj) = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, epoch) logging.info('train_acc %f', train_acc) (valid_acc, valid_obj) = infer(valid_queue, model, criterion) logging.info('valid_acc %f', valid_acc) if (not ('debug' in args.save)): result = api.query_by_arch(model.genotype()) logging.info('{:}'.format(result)) (cifar10_train, cifar10_test, cifar100_train, cifar100_valid, cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test) = distill(result) logging.info('cifar10 train %f test %f', cifar10_train, cifar10_test) logging.info('cifar100 train %f valid %f test %f', cifar100_train, cifar100_valid, cifar100_test) logging.info('imagenet16 train %f valid %f test %f', imagenet16_train, imagenet16_valid, imagenet16_test) writer.add_scalars('accuracy', {'train': train_acc, 'valid': valid_acc}, epoch) writer.add_scalars('loss', {'train': train_obj, 'valid': valid_obj}, epoch) writer.add_scalars('nasbench201/cifar10', {'train': cifar10_train, 'test': cifar10_test}, epoch) writer.add_scalars('nasbench201/cifar100', {'train': cifar100_train, 'valid': cifar100_valid, 'test': cifar100_test}, epoch) writer.add_scalars('nasbench201/imagenet16', {'train': imagenet16_train, 'valid': imagenet16_valid, 'test': imagenet16_test}, epoch) utils.save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'alpha': model.arch_parameters()}, False, args.save) scheduler.step() if ((args.method == 'gdas') or (args.method == 'snas')): tau_epoch += tau_step logging.info('tau %f', tau_epoch) model.set_tau(tau_epoch) writer.close()
class VideoCaptionDataset(Dataset): def __init__(self, transform, video_root, ann_rpath, num_frames=8, max_words=30, prompt='', pickle_path=None, i3d=False): self.annotation = json.load(open(ann_rpath, 'r')) if i3d: import h5py i3d_path = os.path.join(configs.finetune_root, 'vatex', 'I3D.hdf5') db = h5py.File(i3d_path, 'r') mapping_path = os.path.join(configs.finetune_root, 'vatex', 'vatex_mapping.txt') data = open(mapping_path, 'r').read().strip().split('\n') mapping = {line.split(' ')[0]: line.split(' ')[1] for line in data} print("### Preparing vatex's I3D features ...") self.video2embs = {} for k in db.keys(): video = f'{mapping[k]}.mp4' embs = np.asarray(db[k]) ids = get_uniform_frame_ids(embs.shape[0], num_frames) self.video2embs[video] = embs[ids] elif (pickle_path is not None): data = pickle.load(open(pickle_path, 'rb')) self.video2embs = {line['image']: line['image_emb'] for line in data} self.transform = transform self.video_root = video_root self.num_frames = num_frames self.max_words = max_words self.prompt = prompt def get_item_based_on_id(self, id): for (i, ann) in enumerate(self.annotation): if (ann['image_id'] == id): return self.__getitem__(i) def __len__(self): return len(self.annotation) def __getitem__(self, index): ann = self.annotation[index] out = {'image_id': ann['image_id']} if ('caption' in ann): if (type(ann['caption']) is str): out['caption'] = (self.prompt + pre_caption(ann['caption'], self.max_words)) else: out['caption'] = [(self.prompt + pre_caption(caption, max_words=10000)) for caption in ann['caption']] if (not hasattr(self, 'video2embs')): import decord video_path = os.path.join(self.video_root, ann['image']) reader = decord.VideoReader(video_path) frames = reader.get_batch(get_uniform_frame_ids(len(reader), self.num_frames)).asnumpy() frames = [Image.fromarray(frame) for frame in frames] out['frames'] = frames out['image'] = torch.stack([self.transform(frame) for frame in frames], dim=0) if hasattr(self, 'video2embs'): out['clip_image_embs'] = self.video2embs[ann['image']] lang = ann.get('lang', configs.main_lang) out['lang'] = torch.LongTensor([configs.lang2code[lang]]) return out def collate_fn(self, batch): batch_tensors = {} for key in batch[0].keys(): x = [b[key] for b in batch] if (key in ['caption', 'frames']): batch_tensors[key] = x continue if (x[0] is None): batch_tensors[key] = None elif isinstance(x[0], torch.Tensor): batch_tensors[key] = torch.stack(x) elif isinstance(x[0], np.ndarray): batch_tensors[key] = torch.FloatTensor(np.array(x)) elif isinstance(x[0], Image.Image): batch_tensors[key] = x else: batch_tensors[key] = torch.tensor(x, dtype=torch.long) return batch_tensors
def sc_decoration_hop() -> GoalDirectedBenchmark: specification = uniform_specification(1, 10, 100) benchmark_object = decoration_hop() sa_biased = ScoringFunctionSAWrapper(benchmark_object.objective, SCScoreModifier()) return GoalDirectedBenchmark(name='SC_decoration_hop', objective=sa_biased, contribution_specification=specification)
def create_lmdb_for_rain13k(): folder_path = './datasets/Rain13k/train/input' lmdb_path = './datasets/Rain13k/train/input.lmdb' (img_path_list, keys) = prepare_keys(folder_path, 'jpg') make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) folder_path = './datasets/Rain13k/train/target' lmdb_path = './datasets/Rain13k/train/target.lmdb' (img_path_list, keys) = prepare_keys(folder_path, 'jpg') make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
class SeparableConv2d(nn.Module): def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False): super(SeparableConv2d, self).__init__() self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias) self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias) def forward(self, x): x = self.conv1(x) x = self.pointwise(x) return x
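# --- The depthwise + pointwise factorization above trades one KxK conv for a
# per-channel KxK conv plus a 1x1 mix; the parameter savings are easy to verify:
import torch
import torch.nn as nn

sep = SeparableConv2d(64, 128, kernel_size=3, padding=1)
std = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False)
n_params = lambda m: sum(p.numel() for p in m.parameters())
print(n_params(sep), n_params(std))  # 8768 vs 73728, roughly 8x fewer
x = torch.randn(1, 64, 32, 32)
assert sep(x).shape == std(x).shape  # identical output geometry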
def main(): n1 = int(input()) n2 = int(input()) for i in range(n1, n2): if (i > 1): prime = True for j in range(2, i): if ((i % j) == 0): prime = False break if prime: print(i) return True
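# --- The trial division above tests every j < i; stopping at the integer square
# root is equivalent and far cheaper -- a hedged variant of the same check:
import math

def is_prime(i: int) -> bool:
    if i < 2:
        return False
    for j in range(2, math.isqrt(i) + 1):
        if i % j == 0:
            return False
    return True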
def test_object_from_string(): def check(name, expected): actual = qcore.object_from_string(name) assert_eq(expected, actual) name = name.encode('ascii') with AssertRaises(TypeError): qcore.object_from_string(name) with AssertRaises(ValueError): qcore.object_from_string('FooBar') check('test_helpers._stub_serializable_func', _stub_serializable_func) import socket check('socket.gethostname', socket.gethostname) with AssertRaises(TypeError): qcore.object_from_string({'name': 'socket.gethostname'}) check('test_helpers.asserts_.assert_eq', assert_eq)
def write_resource_database(resource_database: ResourceDatabase) -> dict: errors: list[str] = [] for array in (resource_database.item, resource_database.event, resource_database.trick, resource_database.damage, resource_database.version, resource_database.misc): assert isinstance(array, list) errors.extend(check_for_duplicated_index(array, 'short_name')) if errors: raise ValueError('Errors in resource database: {}'.format('\n'.join(errors))) return {'items': write_array(resource_database.item, write_item_resource), 'events': write_array(resource_database.event, write_simple_resource), 'tricks': write_array(resource_database.trick, write_trick_resource), 'damage': write_array(resource_database.damage, write_simple_resource), 'versions': write_array(resource_database.version, write_simple_resource), 'misc': write_array(resource_database.misc, write_simple_resource), 'requirement_template': {name: write_requirement(requirement) for (name, requirement) in resource_database.requirement_template.items()}, 'damage_reductions': [{'name': resource.short_name, 'reductions': [{'name': (reduction.inventory_item.short_name if (reduction.inventory_item is not None) else None), 'multiplier': reduction.damage_multiplier} for reduction in reductions]} for (resource, reductions) in resource_database.damage_reductions.items()], 'energy_tank_item_index': resource_database.energy_tank_item.short_name}
@cli.command('s1', help='Query SentinelOne') @click.option('--site-id', help='ID of SentinelOne site to query', multiple=True, default=None) @click.option('--account-id', help='ID of SentinelOne account to query', multiple=True, default=None) @click.option('--account-name', help='Name of SentinelOne account to query', multiple=True, default=None) @click.option('--creds', 'creds', help='Path to credential file', type=click.Path(exists=True), required=True) @click.option('--dv', 'dv', help='Use Deep Visibility for queries', is_flag=True, required=False) @click.pass_context def s1(ctx, site_id: Optional[Tuple], account_id: Optional[Tuple], account_name: Optional[Tuple], creds: Optional[str], dv: bool) -> None: ctx.obj.product_args = {'creds_file': creds, 'site_id': (list(site_id) if site_id else None), 'account_id': (list(account_id) if account_id else None), 'account_name': (list(account_name) if account_name else None), 'pq': (not dv)} survey(ctx, 's1')
@with_fixtures(WebFixture, FileUploadInputFixture) def test_async_remove(web_fixture, file_upload_input_fixture): fixture = file_upload_input_fixture web_fixture.reahl_server.set_app(fixture.new_wsgi_app(enable_js=True)) browser = web_fixture.driver_browser browser.open('/') browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload1.name) browser.click(XPath.button_labelled('Upload')) browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload2.name) browser.click(XPath.button_labelled('Upload')) with browser.no_page_load_expected(): browser.click(XPath.button_labelled('Remove', filename=fixture.file_to_upload1_name)) assert (not fixture.uploaded_file_is_listed(fixture.file_to_upload1.name)) assert fixture.uploaded_file_is_listed(fixture.file_to_upload2.name) browser.refresh() with browser.no_page_load_expected(): browser.click(XPath.button_labelled('Remove', filename=fixture.file_to_upload2_name)) assert (not fixture.uploaded_file_is_listed(fixture.file_to_upload1.name)) assert (not fixture.uploaded_file_is_listed(fixture.file_to_upload2.name)) browser.click(XPath.button_labelled('Submit')) assert (list(fixture.domain_object.submitted_file_info.keys()) == [])
@pytest.mark.parametrize('proc_name', ['s1', 's2', 's3']) def test_return_value_on_failure(tcp_port, proc_name, xprocess): class Starter(ProcessStarter): pattern = 'started' args = [sys.executable, server_path, tcp_port] xprocess.ensure(proc_name, Starter) info = xprocess.getinfo(proc_name) assert (info.terminate(timeout=(- 1)) == (- 1)) try: psutil.Process(info.pid).terminate() except psutil.NoSuchProcess: pass
class DicomLocationEndpoint(Resource): parser = reqparse.RequestParser() parser.add_argument('name', required=True, help='Name to identify this Dicom location', location=['args', 'headers', 'values']) parser.add_argument('host', required=True, help='Dicom location host name or IP address', location=['args', 'headers', 'values']) parser.add_argument('port', type=int, required=True, help='The port of the Dicom location', location=['args', 'headers', 'values']) parser.add_argument('ae_title', help='AE Title of the Dicom location', location=['args', 'headers', 'values']) def get(self): key = request.headers['API_KEY'] dl = DicomLocation.query.filter_by(owner_key=key).all() return dl def post(self): args = self.parser.parse_args() key = request.headers['API_KEY'] dl = DicomLocation(owner_key=key, name=args['name'], host=args['host'], port=args['port'], ae_title=args['ae_title']) db.session.add(dl) db.session.commit() return dl
class GenericCopy(TimeStampedModel): key = models.SlugField(_('slug'), max_length=200) content = I18nTextField(_('content'), blank=False) conference = models.ForeignKey('conferences.Conference', on_delete=models.CASCADE, verbose_name=_('conference'), related_name='copy') def __str__(self): return f'{self.key} ({self.conference.name})' class Meta(): unique_together = ['key', 'conference'] verbose_name = _('Generic Copy') verbose_name_plural = _('Generic Copy')
class GeoBoundingBox(BaseModel, extra='forbid'): top_left: 'GeoPoint' = Field(..., description='Geo filter request Matches coordinates inside the rectangle, described by coordinates of top-left and bottom-right edges') bottom_right: 'GeoPoint' = Field(..., description='Geo filter request Matches coordinates inside the rectangle, described by coordinates of top-left and bottom-right edges')
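# --- Hedged construction example; assumes GeoPoint exposes 'lon'/'lat' floats
# as in the qdrant-style models this snippet appears to belong to.
box = GeoBoundingBox(
    top_left={'lon': 13.0, 'lat': 52.6},
    bottom_right={'lon': 13.5, 'lat': 52.3},
)
print(box.top_left.lat)  # 52.6 -- nested dicts are coerced into GeoPoint models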