def test_resnet():
    resnet45_aster = ResNet(
        in_channels=3,
        stem_channels=[64, 128],
        block_cfgs=dict(type='BasicBlock', use_conv1x1=True),  # bool, not the string 'True'
        arch_layers=[3, 4, 6, 6, 3],
        arch_channels=[32, 64, 128, 256, 512],
        strides=[(2, 2), (2, 2), (2, 1), (2, 1), (2, 1)])
    resnet45_abi = ResNet(
        in_channels=3,
        stem_channels=32,
        block_cfgs=dict(type='BasicBlock', use_conv1x1=True),
        arch_layers=[3, 4, 6, 6, 3],
        arch_channels=[32, 64, 128, 256, 512],
        strides=[2, 1, 2, 1, 1])
    resnet_31 = ResNet(
        in_channels=3,
        stem_channels=[64, 128],
        block_cfgs=dict(type='BasicBlock'),
        arch_layers=[1, 2, 5, 3],
        arch_channels=[256, 256, 512, 512],
        strides=[1, 1, 1, 1],
        plugins=[
            dict(cfg=dict(type='Maxpool2d', kernel_size=2, stride=(2, 2)),
                 stages=(True, True, False, False), position='before_stage'),
            dict(cfg=dict(type='Maxpool2d', kernel_size=(2, 1), stride=(2, 1)),
                 stages=(False, False, True, False), position='before_stage'),
            dict(cfg=dict(type='ConvModule', kernel_size=3, stride=1, padding=1,
                          norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU')),
                 stages=(True, True, True, True), position='after_stage')])
    resnet31_master = ResNet(
        in_channels=3,
        stem_channels=[64, 128],
        block_cfgs=dict(type='BasicBlock'),
        arch_layers=[1, 2, 5, 3],
        arch_channels=[256, 256, 512, 512],
        strides=[1, 1, 1, 1],
        plugins=[
            dict(cfg=dict(type='Maxpool2d', kernel_size=2, stride=(2, 2)),
                 stages=(True, True, False, False), position='before_stage'),
            dict(cfg=dict(type='Maxpool2d', kernel_size=(2, 1), stride=(2, 1)),
                 stages=(False, False, True, False), position='before_stage'),
            dict(cfg=dict(type='GCAModule', ratio=0.0625, n_head=1),
                 stages=[True, True, True, True], position='after_stage'),
            dict(cfg=dict(type='ConvModule', kernel_size=3, stride=1, padding=1,
                          norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU')),
                 stages=(True, True, True, True), position='after_stage')])
    img = torch.rand(1, 3, 32, 100)
    assert resnet45_aster(img).shape == torch.Size([1, 512, 1, 25])
    assert resnet45_abi(img).shape == torch.Size([1, 512, 8, 25])
    assert resnet_31(img).shape == torch.Size([1, 512, 4, 25])
    assert resnet31_master(img).shape == torch.Size([1, 512, 4, 25])
def parse_args():
    parser = argparse.ArgumentParser(description='Test CornerNet')
    parser.add_argument('cfg_file', help='config file', type=str)
    parser.add_argument('--testiter', dest='testiter', help='test at iteration i', default=None, type=int)
    parser.add_argument('--split', dest='split', help='which split to use', default='validation', type=str)
    parser.add_argument('--suffix', dest='suffix', default=None, type=str)
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    return args
def test_get_direction_from_center_bottomright_cropped_item(view, item):
    with patch.object(item, 'bounding_rect_unselected',
                      return_value=QtCore.QRectF(5, 5, 100, 80)):
        direction = item.get_direction_from_center(QtCore.QPointF(105, 95))
        assert direction == approx(QtCore.QPointF(1, 1) / math.sqrt(2))
def test_readonly_push_pull(pusher, puller, basic_images, different_images, liveserver_session,
                            app_reloader, api_caller, liveserver, registry_server_executor):
    credentials = ('devtable', 'password')
    pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
                credentials=credentials)
    with ConfigChange('REGISTRY_STATE', 'readonly',
                      registry_server_executor.on(liveserver), liveserver):
        puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
                    credentials=credentials)
        pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', different_images,
                    credentials=credentials, expected_failure=Failures.READONLY_REGISTRY)
    puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
                credentials=credentials)
def test_async_methods_signature(async_file: AsyncIOWrapper[mock.Mock]) -> None:
    assert async_file.read.__name__ == 'read'
    assert async_file.read.__qualname__ == 'AsyncIOWrapper.read'
    assert async_file.read.__doc__ is not None
    assert 'io.StringIO.read' in async_file.read.__doc__
def create_rule(repository, rule_value, rule_type=RepoMirrorRuleType.TAG_GLOB_CSV,
                left_child=None, right_child=None):
    validate_rule(rule_type, rule_value)
    rule_kwargs = {
        'repository': repository,
        'rule_value': rule_value,
        'rule_type': rule_type,
        'left_child': left_child,
        'right_child': right_child,
    }
    rule = RepoMirrorRule.create(**rule_kwargs)
    return rule
def test_evaluated_once(testdir):
    # The '@behaves_like' decorators below were garbled to '_like' in the source;
    # restored to match the 'from pytest_describe import behaves_like' import.
    testdir.makepyfile("""
        from pytest import fixture
        from pytest_describe import behaves_like

        count = 0

        def thing():
            global count
            count += 1

            def is_evaluated_once():
                assert count == 1

        @behaves_like(thing)
        def describe_something():
            pass

        @behaves_like(thing)
        def describe_something_else():
            pass
    """)
    result = testdir.runpytest('-v')
    result.assert_outcomes(passed=2)
def _init_weights(module: nn.Module, name: str, head_bias: float = 0.0, flax=False):
    if isinstance(module, nn.Linear):
        if name.startswith('head'):
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
        elif flax:
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        else:
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                if 'mlp' in name:
                    nn.init.normal_(module.bias, std=1e-06)
                else:
                    nn.init.zeros_(module.bias)
    elif isinstance(module, nn.Conv2d):
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
        nn.init.ones_(module.weight)
        nn.init.zeros_(module.bias)
    elif hasattr(module, 'init_weights'):
        module.init_weights()
class TestSys:

    def test_sys_builtin_module_names(self) -> None:
        node = _extract_single_node("""
        import sys
        sys.builtin_module_names
        """)
        inferred = list(node.infer())
        assert len(inferred) == 1
        assert isinstance(inferred[0], nodes.Tuple)
        assert inferred[0].elts

    def test_sys_modules(self) -> None:
        node = _extract_single_node("""
        import sys
        sys.modules
        """)
        inferred = list(node.infer())
        assert len(inferred) == 1
        assert isinstance(inferred[0], nodes.Dict)
        assert inferred[0].items
class SingleContextWithBottleneckToQuestionModel(MultipleContextModel):

    def __init__(self,
                 encoder: QuestionsAndParagraphsEncoder,
                 word_embed: Optional[WordEmbedder],
                 char_embed: Optional[CharWordEmbedder],
                 embed_mapper: Optional[SequenceMapper],
                 context_to_question_attention: AttentionMapper,
                 question_to_context_attention: Optional[AttentionWithPostMapper],
                 sequence_encoder: SequenceEncoder,
                 rep_merge: MergeLayer,
                 predictor: BinaryFixedPredictor,
                 max_batch_size: Optional[int] = None):
        super().__init__(encoder=encoder, word_embed=word_embed,
                         char_embed=char_embed, max_batch_size=max_batch_size)
        self.embed_mapper = embed_mapper
        self.context_to_question_attention = context_to_question_attention
        self.question_to_context_attention = question_to_context_attention
        self.sequence_encoder = sequence_encoder
        self.rep_merge = rep_merge
        self.predictor = predictor

    def _get_predictions_for(self, is_train, question_embed, question_mask,
                             context_embed, context_mask, answer,
                             question_lm=None, context_lm=None,
                             sentence_segments=None, sentence_mask=None):
        question_rep, context_rep = question_embed, context_embed
        (context1_rep,) = tf.unstack(context_rep, axis=1, num=1)
        (context1_mask,) = tf.unstack(context_mask, axis=1, num=1)
        if self.embed_mapper is not None:
            with tf.variable_scope('map_embed'):
                context1_rep = self.embed_mapper.apply(is_train, context1_rep, context1_mask)
                context1_rep = tf.identity(context1_rep, name='sequence')
                tf.add_to_collection(INTERMEDIATE_LAYER_COLLECTION, context1_rep)
            with tf.variable_scope('map_embed', reuse=True):
                question_rep = self.embed_mapper.apply(is_train, question_rep, question_mask)
                question_rep = tf.identity(question_rep, name='question_sequence')
                tf.add_to_collection(INTERMEDIATE_LAYER_COLLECTION, question_rep)
        with tf.variable_scope('context_encode'):
            fixed_context = self.sequence_encoder.apply(is_train, context1_rep, context1_mask)
            fixed_context = tf.identity(fixed_context, name='fixed')
            tf.add_to_collection(INTERMEDIATE_LAYER_COLLECTION, fixed_context)
        if self.question_to_context_attention is not None:
            with tf.variable_scope('q2c'):
                context1_rep = self.question_to_context_attention.apply(
                    is_train, x=context1_rep, keys=question_rep, memories=question_rep,
                    x_mask=context1_mask, memory_mask=question_mask)
        with tf.variable_scope('c2q'):
            attended_rep = self.context_to_question_attention.apply(
                is_train, x=question_rep, keys=context1_rep, memories=context1_rep,
                x_mask=question_mask, memory_mask=context1_mask)
        with tf.variable_scope('seq_enc'):
            fixed_rep = self.sequence_encoder.apply(is_train, attended_rep, question_mask)
        with tf.variable_scope('rep_merge'):
            fixed_rep = self.rep_merge.apply(is_train, fixed_rep, fixed_context)
        with tf.variable_scope('predictor'):
            return self.predictor.apply(is_train, fixed_rep, answer)
class NotificationBridgePresenter(QObject):

    def __init__(self, parent: QObject = None) -> None:
        super().__init__(parent)
        self._active_notifications: Dict[int, 'QWebEngineNotification'] = {}
        self._adapter: Optional[AbstractNotificationAdapter] = None
        config.instance.changed.connect(self._init_adapter)

    # The decorators in this class were garbled by extraction (leaving residue like
    # '_filter(...)', '(int)', '()' and '(str)'); they are reconstructed here as
    # config.change_filter / pyqtSlot, matching the surrounding Qt/config usage.
    @config.change_filter('content.notifications.presenter')
    def _init_adapter(self) -> None:
        setting = config.val.content.notifications.presenter
        log.misc.debug(f'Setting up notification adapter ({setting})...')
        if setting == 'qt':
            message.error("Can't switch to qt notification presenter at runtime.")
            setting = 'auto'
        for candidate in self._get_adapter_candidates(setting):
            try:
                self._adapter = candidate()
            except Error as e:
                msg = f'Failed to initialize {candidate.NAME} notification adapter: {e}'
                if candidate.NAME == setting:
                    message.error(msg)
                else:
                    log.misc.debug(msg)
            else:
                log.misc.debug(f'Initialized {self._adapter.NAME} notification adapter')
                break
        assert self._adapter is not None
        self._adapter.click_id.connect(self._on_adapter_clicked)
        self._adapter.close_id.connect(self._on_adapter_closed)
        self._adapter.error.connect(self._on_adapter_error)
        self._adapter.clear_all.connect(self._on_adapter_clear_all)

    def _get_adapter_candidates(self, setting: str) -> List[Type[AbstractNotificationAdapter]]:
        candidates: Dict[str, List[Type[AbstractNotificationAdapter]]] = {
            'libnotify': [DBusNotificationAdapter, SystrayNotificationAdapter,
                          MessagesNotificationAdapter],
            'systray': [SystrayNotificationAdapter, DBusNotificationAdapter,
                        MessagesNotificationAdapter],
            'herbe': [HerbeNotificationAdapter, DBusNotificationAdapter,
                      SystrayNotificationAdapter, MessagesNotificationAdapter],
            'messages': [MessagesNotificationAdapter],
        }
        candidates['auto'] = candidates['libnotify']
        return candidates[setting]

    def install(self, profile: 'QWebEngineProfile') -> None:
        profile.setNotificationPresenter(self.present)

    def present(self, qt_notification: 'QWebEngineNotification') -> None:
        if self._adapter is None:
            self._init_adapter()
            assert self._adapter is not None
        replaces_id = self._find_replaces_id(qt_notification)
        qtutils.ensure_valid(qt_notification.origin())
        notification_id = self._adapter.present(qt_notification, replaces_id=replaces_id)
        log.misc.debug(f'New notification ID from adapter: {notification_id}')
        if self._adapter is None:
            log.misc.debug('Adapter vanished, bailing out')
            return
        if replaces_id is None:
            if notification_id in self._active_notifications:
                raise Error(f'Got duplicate id {notification_id}')
        qt_notification.show()
        self._active_notifications[notification_id] = qt_notification
        qt_notification.closed.connect(
            functools.partial(self._adapter.on_web_closed, notification_id))

    def _find_replaces_id(self, new_notification: 'QWebEngineNotification') -> Optional[int]:
        if not new_notification.tag():
            return None
        log.misc.debug(f'Finding notification for tag {new_notification.tag()}, '
                       f'origin {new_notification.origin()}')
        for notification_id, notification in sorted(self._active_notifications.items(), reverse=True):
            if notification.matches(new_notification):
                log.misc.debug(f'Found match: {notification_id}')
                return notification_id
        log.misc.debug('Did not find match')
        return None

    @pyqtSlot(int)
    def _on_adapter_closed(self, notification_id: int) -> None:
        log.misc.debug(f'Notification {notification_id} closed by adapter')
        try:
            notification = self._active_notifications.pop(notification_id)
        except KeyError:
            log.misc.debug('Did not find matching notification, ignoring')
            return
        notification.close()

    @pyqtSlot(int)
    def _on_adapter_clicked(self, notification_id: int) -> None:
        log.misc.debug(f'Notification {notification_id} clicked by adapter')
        try:
            notification = self._active_notifications[notification_id]
        except KeyError:
            log.misc.debug('Did not find matching notification, ignoring')
            return
        notification.click()
        self._focus_first_matching_tab(notification)

    def _focus_first_matching_tab(self, notification: 'QWebEngineNotification') -> None:
        for win_id in objreg.window_registry:
            tabbedbrowser = objreg.get('tabbed-browser', window=win_id, scope='window')
            for idx, tab in enumerate(tabbedbrowser.widgets()):
                if tab.url().matches(notification.origin(), QUrl.UrlFormattingOption.RemovePath):
                    tabbedbrowser.widget.setCurrentIndex(idx)
                    return
        log.misc.debug(f'No matching tab found for {notification.origin()}')

    def _drop_adapter(self) -> None:
        if self._adapter:
            log.misc.debug(f'Dropping adapter {self._adapter.NAME}')
            self._adapter.deleteLater()
            self._adapter = None
        self._on_adapter_clear_all()

    @pyqtSlot()
    def _on_adapter_clear_all(self) -> None:
        for notification_id in list(self._active_notifications):
            self._on_adapter_closed(notification_id)

    @pyqtSlot(str)
    def _on_adapter_error(self, error: str) -> None:
        if self._adapter is None:
            return
        message.error(f'Notification error from {self._adapter.NAME} adapter: {error}')
        self._drop_adapter()
def create_nettree():
    global S
    global ptn_len
    nettree = [[] for i in range(ptn_len + 1)]
    start = [0 for i in range(ptn_len + 1)]
    for i in range(len(S)):
        node0 = node(i)
        if S[i] == sub_ptn_list[0].start:
            node0.toleave = True
            nettree[0].append(deepcopy(node0))
        for j in range(ptn_len):
            if sub_ptn_list[j].end == S[i]:
                if len(nettree[j]) == 0:
                    break
                # advance the window start past parents that are too far away
                for k in range(start[j], len(nettree[j])):
                    if (i - nettree[j][k].name - 1) > sub_ptn_list[j].max:
                        start[j] += 1
                if ((i - nettree[j][len(nettree[j]) - 1].name - 1) > sub_ptn_list[j].max) or \
                        ((i - nettree[j][start[j]].name - 1) < sub_ptn_list[j].min):
                    continue
                node0.toleave = True
                nettree[j + 1].append(deepcopy(node0))
                # link all parents within the gap constraints to the new node
                for k in range(start[j], len(nettree[j])):
                    if (i - nettree[j][k].name - 1) < sub_ptn_list[j].min:
                        break
                    nettree[j][k].children.append(len(nettree[j + 1]) - 1)
                    nettree[j + 1][len(nettree[j + 1]) - 1].parent.append(k)
    del start
    return nettree
# Decorators reconstructed from garbled residue ('_required _POST'); they match
# the standard Django auth/http decorators this view clearly expects.
@login_required
@require_POST
def send(request):
    try:
        form = MessageForm(request.POST)
        if form.is_valid():
            message = form.send()
            if len(message.connections) == 1:
                return HttpResponse('Your message was sent to 1 recipient.')
            else:
                msg = 'Your message was sent to {0} recipients.'.format(len(message.connections))
                return HttpResponse(msg)
        else:
            return HttpResponseBadRequest(str(form.errors))
    except Exception:
        return HttpResponse('Unable to send message.', status=500)
def handle_disk_serialized(pxy: ProxyDetail):
    org_header, frames = pxy.obj
    header = _copy.deepcopy(org_header)
    if header['disk-io-header']['shared-filesystem']:
        from .proxify_host_file import ProxifyHostFile
        assert ProxifyHostFile._spill_to_disk
        new_path = ProxifyHostFile._spill_to_disk.gen_file_path()
        os.link(header['disk-io-header']['path'], new_path)
        header['disk-io-header']['path'] = new_path
    else:
        assert frames == []
        frames = disk_read(header.pop('disk-io-header'))
        if 'compression' in header['serialize-header']:
            frames = decompress(header['serialize-header'], frames)
    header = header['serialize-header']
    pxy.serializer = header['serializer']
    pxy.obj = (header, frames)
    return header, frames
class TestGetAngleBetween:

    def test_get_angle_between(self):
        ray1 = Ray(Point((0, 0)), Point((1, 0)))
        ray2 = Ray(Point((0, 0)), Point((1, 0)))
        assert get_angle_between(ray1, ray2) == 0.0

    def test_get_angle_between_expect45(self):
        ray1 = Ray(Point((0, 0)), Point((1, 0)))
        ray2 = Ray(Point((0, 0)), Point((1, 1)))
        assert math.degrees(get_angle_between(ray1, ray2)) == 45.0

    def test_get_angle_between_expect90(self):
        ray1 = Ray(Point((0, 0)), Point((1, 0)))
        ray2 = Ray(Point((0, 0)), Point((0, 1)))
        assert math.degrees(get_angle_between(ray1, ray2)) == 90.0
class HTMLFormatter(logging.Formatter):

    def __init__(self, fmt: str, datefmt: str, log_colors: Mapping[str, str]) -> None:
        super().__init__(fmt, datefmt)
        self._log_colors: Mapping[str, str] = log_colors
        self._colordict: Mapping[str, str] = {}
        for color in COLORS:
            self._colordict[color] = '<font color="{}">'.format(color)
        self._colordict['reset'] = '</font>'

    def format(self, record: logging.LogRecord) -> str:
        record_clone = copy.copy(record)
        record_clone.__dict__.update(self._colordict)
        if record_clone.levelname in self._log_colors:
            color = self._log_colors[record_clone.levelname]
            color_str = self._colordict[color]
            record_clone.log_color = color_str
        else:
            record_clone.log_color = ''
        for field in ['msg', 'filename', 'funcName', 'levelname',
                      'module', 'name', 'pathname', 'processName', 'threadName']:
            data = str(getattr(record_clone, field))
            setattr(record_clone, field, pyhtml.escape(data))
        msg = super().format(record_clone)
        if not msg.endswith(self._colordict['reset']):
            msg += self._colordict['reset']
        return msg

    def formatTime(self, record: logging.LogRecord, datefmt: str = None) -> str:
        out = super().formatTime(record, datefmt)
        return pyhtml.escape(out)
def main(params):
    rng = np.random.RandomState()
    log_hndlr_stream = logging.StreamHandler()
    log_hndlr_stream.setLevel(logging.DEBUG)
    log_handlr_file = logging.FileHandler(
        path.join(PATH, f'create_datasets_{datetime.datetime.now().isoformat()}.log'))
    log_handlr_file.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_hndlr_stream.setFormatter(formatter)
    log_handlr_file.setFormatter(formatter)
    dag_extraction_utils.logger.addHandler(log_hndlr_stream)
    dag_extraction_utils.logger.addHandler(log_handlr_file)
    dag_extraction_utils.logger.setLevel(logging.DEBUG)
    for ds in params['datasets']:
        dag_extraction_utils.logger.info(f'\n# Working on {ds.name}')
        reactant_to_reactant_id_map = dict(misc.load_json(ds.reactant_to_reactant_id_json_path))
        all_reactions = set()
        for ds_creator in ds.sources:
            dataset = ds_creator()
            reactions, *_ = dag_extraction_utils.extract_reactions(dataset)
            all_reactions.update(set(reactions))
        all_reactions = list(all_reactions)
        rng.shuffle(all_reactions)
        dag_extraction_utils.logger.info(
            f'Finished merging all reaction sets, left with {len(all_reactions)} total reactions.')
        mega_graph = dag_extraction_utils.create_mega_graph(all_reactions, reactant_to_reactant_id_map)
        depth_and_tree_tuples, _ = dag_extraction_utils.extract_tuple_trees_from_mega_dag(
            mega_graph, reactant_to_reactant_id_map)
        rng.shuffle(depth_and_tree_tuples)
        nx.write_gpickle(mega_graph, path.join(PATH, f'{ds.name}-mgraph.gpickle'))
        misc.to_pickle(all_reactions, path.join(PATH, f'{ds.name}-reactions.pick'))
        indics, out_trees_dict = _split_on_proportions_and_save(
            ds.name, ds.split_proportions, dag_extraction_utils.logger, depth_and_tree_tuples)
        misc.to_pickle(
            {'all_depth_and_tree_tuples': depth_and_tree_tuples, 'subset_indices': indics},
            path.join(PATH, f'{ds.name}-all_depth_and_tree_tuples.pick'))
        _create_equiv_train_val_sets_and_save(
            ds.name, out_trees_dict, list(reactant_to_reactant_id_map.keys()), NAME_FOR_TRAIN)
    subprocess.run(
        f'cd {PATH}; shasum -a 256 * > {datetime.datetime.now().isoformat()}_data_checklist.sha256',
        shell=True)
def _get_truncated_description(elements: Iterable[Tag | NavigableString],
                               markdown_converter: DocMarkdownConverter,
                               max_length: int,
                               max_lines: int) -> str:
    result = ''
    markdown_element_ends = []
    rendered_length = 0
    tag_end_index = 0
    for element in elements:
        is_tag = isinstance(element, Tag)
        element_length = len(element.text) if is_tag else len(element)
        if (rendered_length + element_length) < max_length:
            if is_tag:
                element_markdown = markdown_converter.process_tag(element, convert_as_inline=False)
            else:
                element_markdown = markdown_converter.process_text(element)
            rendered_length += element_length
            tag_end_index += len(element_markdown)
            if not element_markdown.isspace():
                markdown_element_ends.append(tag_end_index)
            result += element_markdown
        else:
            break
    if not markdown_element_ends:
        return ''
    newline_truncate_index = find_nth_occurrence(result, '\n', max_lines)
    if (newline_truncate_index is not None) and (newline_truncate_index < (_MAX_DESCRIPTION_LENGTH - 3)):
        truncate_index = newline_truncate_index
    else:
        truncate_index = _MAX_DESCRIPTION_LENGTH - 3
    if truncate_index >= markdown_element_ends[-1]:
        return result
    possible_truncation_indices = [cut for cut in markdown_element_ends if cut < truncate_index]
    if not possible_truncation_indices:
        force_truncated = result[:truncate_index]
        if force_truncated.count('```') % 2:
            force_truncated = force_truncated[:force_truncated.rfind('```')]
        for string_ in ('\n\n', '\n', '. ', ', ', ',', ' '):
            cutoff = force_truncated.rfind(string_)
            if cutoff != -1:
                truncated_result = force_truncated[:cutoff]
                break
        else:
            truncated_result = force_truncated
    else:
        markdown_truncate_index = possible_truncation_indices[-1]
        truncated_result = result[:markdown_truncate_index]
    return truncated_result.strip(_TRUNCATE_STRIP_CHARACTERS) + '...'
def test_format_currency_format_type():
    assert numbers.format_currency(1099.98, 'USD', locale='en_US', format_type='standard') == '$1,099.98'
    assert numbers.format_currency(0, 'USD', locale='en_US', format_type='standard') == '$0.00'
    assert numbers.format_currency(1099.98, 'USD', locale='en_US', format_type='accounting') == '$1,099.98'
    assert numbers.format_currency(0, 'USD', locale='en_US', format_type='accounting') == '$0.00'
    with pytest.raises(numbers.UnknownCurrencyFormatError) as excinfo:
        numbers.format_currency(1099.98, 'USD', locale='en_US', format_type='unknown')
    assert excinfo.value.args[0] == "'unknown' is not a known currency format type"
    assert numbers.format_currency(1099.98, 'JPY', locale='en_US') == '1,100'
    assert numbers.format_currency(1099.98, 'COP', '#,##0.00', locale='es_ES') == '1.099,98'
    assert numbers.format_currency(1099.98, 'JPY', locale='en_US', currency_digits=False) == '1,099.98'
    assert numbers.format_currency(1099.98, 'COP', '#,##0.00', locale='es_ES', currency_digits=False) == '1.099,98'
def _add_kwargs(func: Callable[..., Any],
                kwargs: Dict[str, Any],
                event_loop_fixture_id: str,
                event_loop: asyncio.AbstractEventLoop,
                request: SubRequest) -> Dict[str, Any]:
    sig = inspect.signature(func)
    ret = kwargs.copy()
    if 'request' in sig.parameters:
        ret['request'] = request
    if event_loop_fixture_id in sig.parameters:
        ret[event_loop_fixture_id] = event_loop
    return ret
def add_title(image):
    text = 'Bahot-Hard ESPORTS'
    font = ImageFont.truetype('theboldfont.ttf', 90)
    d1 = ImageDraw.Draw(image)
    w, h = d1.textsize(text, font)
    left = (image.width - w) / 2
    top = 50
    d1.text((left, top), text, font=font)
    w, h = d1.textsize('Overall Standings', font)
    left = (image.width - w) / 2
    d1.text((left, 150), 'Overall Standings', font=font)
class KiteCovariogram(KiteSubplot):
    legend_template = {
        'exponential': 'Model: {0:.2g} e^(-d/{1:.1f}) | RMS: {rms:.4e}',
        'exponential_cosine': 'Model: {0:.2g} e^(-d/{1:.1f}) - cos((d-({2:.1f}))/{3:.1f})| RMS: {rms:.4e}',
    }

    class VarianceLine(pg.InfiniteLine):

        def __init__(self, *args, **kwargs):
            pg.InfiniteLine.__init__(self, *args, **kwargs)
            self.setCursor(QtCore.Qt.SizeVerCursor)

    def __init__(self, parent_plot):
        KiteSubplot.__init__(self, parent_plot)
        self.plot.setLabels(bottom=('Distance', 'm'), left='Covariance (m<sup>2</sup>)')
        self.cov_spectral = pg.PlotDataItem(antialias=True)
        self.cov_spectral.setZValue(10)
        self.cov_spatial = pg.PlotDataItem(antialias=True)
        self.cov_model = pg.PlotDataItem(antialias=True, pen=pen_covariance_model)
        self.variance = self.VarianceLine(
            pen=pen_variance, angle=0, movable=True, hoverPen=pen_variance_highlight,
            label='Variance: {value:.5f}',
            labelOpts={'position': 0.975,
                       'anchors': ((1.0, 0.0), (1.0, 1.0)),
                       'color': pg.mkColor(255, 255, 255, 155)})
        self.variance.setToolTip('Move to change variance')
        self.variance.sigPositionChangeFinished.connect(self.setVariance)
        self.addItem(self.cov_spectral)
        self.addItem(self.cov_spatial)
        self.addItem(self.cov_model)
        self.addItem(self.variance)
        self.legend = pg.LegendItem(offset=(0.0, 0.5))
        self.legend.setParentItem(self.plot.graphicsItem())
        self.legend.addItem(self.cov_model, '')

    def setVariance(self):
        self.model.covariance.variance = self.variance.value()

    @QtCore.pyqtSlot()  # decorator name assumed; the garbled source kept only '()'
    def update(self):
        covariance = self.model.covariance
        cov_spectral, dist = covariance.covariance_spectral
        self.cov_spectral.setData(dist, cov_spectral)
        cov_spatial, dist = covariance.covariance_spatial
        self.cov_spatial.setData(dist, cov_spatial)
        if self.model.covariance.config.sampling_method == 'spatial':
            self.cov_spatial.setPen(pen_covariance_active)
            self.cov_spectral.setPen(pen_covariance)
        else:
            self.cov_spatial.setPen(pen_covariance)
            self.cov_spectral.setPen(pen_covariance_active)
        model = self.model.covariance.getModelFunction()
        self.cov_model.setData(dist, model(dist, *covariance.covariance_model))
        tmpl = self.legend_template[covariance.config.model_function]
        self.legend.items[-1][1].setText(
            tmpl.format(*covariance.covariance_model, rms=covariance.covariance_model_rms))
        self.variance.setValue(covariance.variance)

    def activatePlot(self):
        self.model.sigCovarianceChanged.connect(self.update)
        self.update()

    def deactivatePlot(self):
        self.model.sigCovarianceChanged.disconnect(self.update)
@pytest.mark.supported(
    only_if=lambda backend: not backend.ed448_supported(),
    skip_message='Requires OpenSSL without Ed448 support')
def test_ed448_unsupported(backend):
    with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM):
        Ed448PublicKey.from_public_bytes(b'0' * 57)
    with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM):
        Ed448PrivateKey.from_private_bytes(b'0' * 57)
    with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM):
        Ed448PrivateKey.generate()
class BertweetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertweetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # BPE continuation tokens ('...@@') were stripped from the source; restored
        # so the id lists and expected outputs below are consistent.
        vocab = ['I', 'm', 'V@@', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a m</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BertweetTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'I am VinAI Research'
        output_text = 'I <unk> m V<unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BertweetTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'I am VinAI Research'
        bpe_tokens = 'I a@@ m V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 6, 3, 3, 3, 4, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
class RandomListSearcher(Searcher):

    def __init__(self, param_grid):
        self._configurations = param_grid
        Searcher.__init__(self)

    def suggest(self, trial_id):
        selected_dict = self._configurations[random.randint(0, len(self._configurations) - 1)]
        generated_config = {}
        for key, distribution in selected_dict.items():
            if isinstance(distribution, list):
                generated_config[key] = distribution[random.randint(0, len(distribution) - 1)]
            else:
                generated_config[key] = distribution.rvs(1)[0]
        return generated_config

    def on_trial_complete(self, **kwargs):
        pass
class Ui_Form(object):

    def setupUi(self, Form):
        if not Form.objectName():
            Form.setObjectName(u'Form')
        Form.resize(476, 447)
        self.training_code = QLineEdit(Form)
        self.training_code.setObjectName(u'training_code')
        self.training_code.setGeometry(QRect(100, 40, 301, 20))
        self.label = QLabel(Form)
        self.label.setObjectName(u'label')
        self.label.setGeometry(QRect(10, 40, 81, 16))
        self.label_2 = QLabel(Form)
        self.label_2.setObjectName(u'label_2')
        self.label_2.setGeometry(QRect(20, 70, 61, 20))
        self.training_data = QLineEdit(Form)
        self.training_data.setObjectName(u'training_data')
        self.training_data.setGeometry(QRect(100, 70, 301, 20))
        self.output_path = QLineEdit(Form)
        self.output_path.setObjectName(u'output_path')
        self.output_path.setGeometry(QRect(100, 100, 301, 20))
        self.label_3 = QLabel(Form)
        self.label_3.setObjectName(u'label_3')
        self.label_3.setGeometry(QRect(20, 100, 71, 20))
        self.label_4 = QLabel(Form)
        self.label_4.setObjectName(u'label_4')
        self.label_4.setGeometry(QRect(30, 140, 81, 16))
        self.max_iteration = QLineEdit(Form)
        self.max_iteration.setObjectName(u'max_iteration')
        self.max_iteration.setGeometry(QRect(120, 140, 71, 20))
        self.learning_rate = QLineEdit(Form)
        self.learning_rate.setObjectName(u'learning_rate')
        self.learning_rate.setGeometry(QRect(120, 180, 71, 20))
        self.label_5 = QLabel(Form)
        self.label_5.setObjectName(u'label_5')
        self.label_5.setGeometry(QRect(30, 180, 81, 16))
        self.confirm = QPushButton(Form)
        self.confirm.setObjectName(u'confirm')
        self.confirm.setGeometry(QRect(310, 220, 75, 24))
        self.load_training_code = QPushButton(Form)
        self.load_training_code.setObjectName(u'load_training_code')
        self.load_training_code.setGeometry(QRect(381, 41, 19, 18))
        self.load_training_data = QPushButton(Form)
        self.load_training_data.setObjectName(u'load_training_data')
        self.load_training_data.setGeometry(QRect(381, 71, 19, 18))
        self.error_message = QLabel(Form)
        self.error_message.setObjectName(u'error_message')
        self.error_message.setGeometry(QRect(112, 220, 191, 20))
        self.error_message.setLayoutDirection(Qt.LeftToRight)
        self.error_message.setAlignment((Qt.AlignRight | Qt.AlignTrailing) | Qt.AlignVCenter)
        self.retranslateUi(Form)
        QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        Form.setWindowTitle(QCoreApplication.translate('Form', u'Form', None))
        self.label.setText(QCoreApplication.translate('Form', u'training code', None))
        self.label_2.setText(QCoreApplication.translate('Form', u'traing data', None))
        self.label_3.setText(QCoreApplication.translate('Form', u'output path', None))
        self.label_4.setText(QCoreApplication.translate('Form', u'max iteration', None))
        self.max_iteration.setText(QCoreApplication.translate('Form', u'100', None))
        self.learning_rate.setText(QCoreApplication.translate('Form', u'0.001', None))
        self.label_5.setText(QCoreApplication.translate('Form', u'learning rate', None))
        self.confirm.setText(QCoreApplication.translate('Form', u'train', None))
        self.load_training_code.setText(QCoreApplication.translate('Form', u'...', None))
        self.load_training_data.setText(QCoreApplication.translate('Form', u'...', None))
        self.error_message.setText('')
def iou_calculator(annotation, segmentation, void_pixels=None):
    if void_pixels is not None:
        assert annotation.shape == void_pixels.shape, \
            f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
        void_pixels = void_pixels.astype(bool)  # np.bool was removed in NumPy 1.24; plain bool is equivalent
    else:
        void_pixels = np.zeros_like(segmentation)
    annotation = annotation.astype(bool)
    segmentation = segmentation.astype(bool)
    inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels))
    union = np.sum((segmentation | annotation) & np.logical_not(void_pixels))
    j = inters / union
    if j.ndim == 0:
        j = 1 if np.isclose(union, 0) else j
    else:
        j[np.isclose(union, 0)] = 1
    return j
def list_environments_from_aws(config_obj: dict) -> None:
    try:
        session = boto3.session.Session(profile_name=config_obj['aws_profile'])
        s3_client = session.client('s3')
        bucket_objs = s3_client.list_objects_v2(Bucket=config_obj['bucket'])
        print(':link: Listing your cloud environments.')
        tree = Tree('Cloud Environments')
        if ('Contents' in bucket_objs) and (len(bucket_objs['Contents']) > 0):
            for obj in bucket_objs['Contents']:
                tree.add(obj['Key'])
        else:
            tree.add('No cloud environments found.')
        print(tree)
    except exceptions.ClientError as client_error:
        if client_error.response['Error']['Code'] == 'NoSuchBucket':
            print(':person_shrugging: Bucket does not exist.')
    except exceptions.ProfileNotFound:
        print(':person_shrugging: config profile does not exist.')
class ThreadContext(object):

    def __init__(self, tid: int):
        self.cregs = dict()
        self.sregs = dict()
        self._join_th_id = None
        self.tid = tid
        self.count = 0
        self.state = ThreadState.RUNNING

    def save(self, tt_ctx: TritonContext) -> None:
        self.sregs = tt_ctx.getSymbolicRegisters()
        for r in tt_ctx.getParentRegisters():
            self.cregs.update({r.getId(): tt_ctx.getConcreteRegisterValue(r)})

    def restore(self, tt_ctx: TritonContext) -> None:
        for rid, v in self.cregs.items():
            tt_ctx.setConcreteRegisterValue(tt_ctx.getRegister(rid), v)
        for rid, e in self.sregs.items():
            tt_ctx.assignSymbolicExpressionToRegister(e, tt_ctx.getRegister(rid))

    def kill(self) -> None:
        self.state = ThreadState.DEAD

    def is_dead(self) -> bool:
        return self.state == ThreadState.DEAD

    def join_thread(self, th_id: int) -> None:
        self._join_th_id = th_id
        self.state = ThreadState.JOINING

    def is_waiting_to_join(self) -> bool:
        return self.state == ThreadState.JOINING

    def cancel_join(self) -> None:
        self._join_th_id = None
        self.state = ThreadState.RUNNING

    def is_main_thread(self) -> bool:
        return self.tid == 0

    def is_running(self) -> bool:
        return self.state == ThreadState.RUNNING
def init_weights(net, init_type='normal'):
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
class TestSWA(unittest.TestCase):

    def _test_averaged_model(self, net_device: torch.device, swa_device: torch.device, ema: bool) -> None:
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2),
            torch.nn.BatchNorm2d(5, momentum=0.3),
            torch.nn.Conv2d(5, 2, kernel_size=3),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 5),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 10),
        ).to(net_device)
        averaged_params, averaged_dnn = self._run_averaged_steps(dnn, swa_device, ema)
        for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
            torch.testing.assert_close(p_avg, p_swa, check_device=False)
            self.assertTrue(p_swa.device == swa_device)
            self.assertTrue(p_avg.device == net_device)
        self.assertTrue(averaged_dnn.n_averaged.device == swa_device)

    def _run_averaged_steps(self, dnn: torch.nn.Module, swa_device: torch.device,
                            ema: bool) -> Tuple[List[torch.Tensor], torch.nn.Module]:
        ema_decay = 0.999
        if ema:
            averaged_dnn = AveragedModel(dnn, device=swa_device,
                                         averaging_method='ema', ema_decay=ema_decay)
        else:
            averaged_dnn = AveragedModel(dnn, device=swa_device, averaging_method='swa')
        averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
        n_updates = 10
        for i in range(n_updates):
            for p, p_avg in zip(dnn.parameters(), averaged_params):
                p.detach().add_(torch.randn_like(p))
                if ema:
                    p_avg += (p.detach() * (ema_decay ** (n_updates - i - 1))) * \
                        ((1 - ema_decay) if i > 0 else 1.0)
                else:
                    p_avg += p.detach() / n_updates
            averaged_dnn.update_parameters(dnn)
        return averaged_params, averaged_dnn

    def test_averaged_model_all_devices(self) -> None:
        cpu = torch.device('cpu')
        self._test_averaged_model(cpu, cpu, ema=True)
        self._test_averaged_model(cpu, cpu, ema=False)
        if torch.cuda.is_available():
            cuda = torch.device(0)
            combos = itertools.product([cuda, cpu], [cuda, cpu], [True, False])
            for device1, device2, ema in combos:
                self._test_averaged_model(device1, device2, ema=ema)

    def test_averaged_model_state_dict(self) -> None:
        dnn = torch.nn.Sequential(torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10))
        averaged_dnn = AveragedModel(dnn)
        averaged_dnn2 = AveragedModel(dnn)
        n_updates = 10
        for _ in range(n_updates):
            for p in dnn.parameters():
                p.detach().add_(torch.randn_like(p))
            averaged_dnn.update_parameters(dnn)
        averaged_dnn2.load_state_dict(averaged_dnn.state_dict())
        for p_swa, p_swa2 in zip(averaged_dnn.parameters(), averaged_dnn2.parameters()):
            torch.testing.assert_close(p_swa, p_swa2, check_device=False)
        self.assertTrue(averaged_dnn.n_averaged == averaged_dnn2.n_averaged)

    def test_averaged_model_exponential(self) -> None:
        combos = itertools.product([True, False], [True, False])
        for use_buffers, skip_deepcopy in combos:
            self._test_averaged_model_exponential(use_buffers, skip_deepcopy)

    def _test_averaged_model_exponential(self, use_buffers: bool, skip_deepcopy: bool) -> None:
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.BatchNorm2d(5, momentum=0.3),
            torch.nn.Linear(5, 10),
        )
        decay: float = 0.9
        averaged_dnn = AveragedModel(
            deepcopy(dnn) if skip_deepcopy else dnn,
            averaging_method='ema', ema_decay=decay,
            use_buffers=use_buffers, skip_deepcopy=skip_deepcopy)
        if use_buffers:
            dnn_params = list(itertools.chain(dnn.parameters(), dnn.buffers()))
        else:
            dnn_params = list(dnn.parameters())
        averaged_params = [torch.zeros_like(param) for param in dnn_params
                           if param.size() != torch.Size([])]
        n_updates = 10
        for i in range(n_updates):
            updated_averaged_params = []
            for p, p_avg in zip(dnn_params, averaged_params):
                if p.size() == torch.Size([]):
                    continue
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    updated_averaged_params.append(p.clone())
                else:
                    updated_averaged_params.append(((p_avg * decay) + (p * (1 - decay))).clone())
            averaged_dnn.update_parameters(dnn)
            averaged_params = updated_averaged_params
        if use_buffers:
            for p_avg, p_swa in zip(averaged_params,
                                    itertools.chain(averaged_dnn.module.parameters(),
                                                    averaged_dnn.module.buffers())):
                torch.testing.assert_close(p_avg, p_swa, check_device=False)
        else:
            for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
                torch.testing.assert_close(p_avg, p_swa, check_device=False)
            for b_avg, b_swa in zip(dnn.buffers(), averaged_dnn.module.buffers()):
                torch.testing.assert_close(b_avg, b_swa, check_device=False)

    def test_averaged_model_skip_deepcopy(self) -> None:
        device = torch.device('cpu')
        dnn = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2),
            torch.nn.BatchNorm2d(5, momentum=0.3),
            torch.nn.Conv2d(5, 2, kernel_size=3),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 5),
            torch.nn.ReLU(),
            torch.nn.Linear(5, 10),
        ).to(device)
        averaged_dnn = AveragedModel(dnn, device, skip_deepcopy=True)
        self.assertEqual(id(dnn), id(averaged_dnn.module))
        averaged_dnn2 = AveragedModel(dnn, device)
        self.assertNotEqual(id(dnn), id(averaged_dnn2.module))

    def test_input_checks(self) -> None:
        model = torch.nn.Linear(2, 2)
        with self.assertRaisesRegex(ValueError, 'Decay must be between 0 and 1'):
            AveragedModel(model, averaging_method='ema', ema_decay=1.3)
        with self.assertRaisesRegex(ValueError, 'Unknown averaging method: foo. Only ema and swa are supported.'):
            AveragedModel(model, averaging_method='foo')

    def test_lit_ema(self) -> None:
        model = torch.nn.Linear(10, 10)
        ema_decay = 0.999
        averaged_model = AveragedModel(model, averaging_method='ema',
                                       ema_decay=ema_decay, use_lit=True)
        averaged_params = [torch.zeros_like(param) for param in model.parameters()]
        n_updates = 10
        for i in range(n_updates):
            decay = min(ema_decay, (1 + i + 1) / (10 + i + 1))
            updated_averaged_params = []
            for p, p_avg in zip(model.parameters(), averaged_params):
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    updated_averaged_params.append(p.clone())
                else:
                    updated_averaged_params.append(((p_avg * decay) + (p * (1 - decay))).clone())
            averaged_model.update_parameters(model)
            averaged_params = updated_averaged_params
        for p_avg, p_swa in zip(averaged_params, averaged_model.parameters()):
            torch.testing.assert_close(p_avg, p_swa, check_device=False)
class CharDataset(Dataset):

    def __init__(self, data_cfg: DataConfig):
        data = fsspec.open(data_cfg.path).open().read().decode('utf-8')
        data = data[:int(len(data) * data_cfg.truncate)]
        chars = sorted(list(set(data)))
        data_size, vocab_size = len(data), len(chars)
        print('Data has %d characters, %d unique.' % (data_size, vocab_size))
        self.stoi = {ch: i for i, ch in enumerate(chars)}
        self.itos = {i: ch for i, ch in enumerate(chars)}
        self.block_size = data_cfg.block_size
        self.vocab_size = vocab_size
        self.data = data

    def __len__(self):
        return len(self.data) - self.block_size

    def __getitem__(self, idx):
        chunk = self.data[idx:idx + self.block_size + 1]
        dix = [self.stoi[s] for s in chunk]
        x = torch.tensor(dix[:-1], dtype=torch.long)
        y = torch.tensor(dix[1:], dtype=torch.long)
        return x, y
class TestSafetyRequirement(unittest.TestCase):

    # Decorator name reconstructed (only its arguments survived in the source);
    # the condition/message pair matches unittest.skipIf.
    @unittest.skipIf(tuple(map(int, packaging.__version__.split('.'))) < (22, 0),
                     'not validated in these versions')
    def test_with_invalid_input(self):
        invalid_inputs = ['django*', 'django>=python>=3.6', 'numpy>=3.3python>=3.6', '', '\n']
        for i_input in invalid_inputs:
            with self.assertRaises(InvalidRequirementError):
                SafetyRequirement(i_input)
def _view_to_component(view: Callable | View | str,
                       compatibility: bool,
                       transforms: Sequence[Callable[[VdomDict], Any]],
                       strict_parsing: bool,
                       request: HttpRequest | None,
                       args: Sequence | None,
                       kwargs: dict | None):
    converted_view, set_converted_view = hooks.use_state(cast(Union[VdomDict, None], None))
    _args: Sequence = args or ()
    _kwargs: dict = kwargs or {}
    if request:
        _request: HttpRequest = request
    else:
        _request = HttpRequest()
        _request.method = 'GET'
    resolved_view: Callable = import_module(view) if isinstance(view, str) else view

    # Decorator reconstructed; the garbled source kept only '_effect(dependencies=...)',
    # which matches the hooks.use_effect pattern used with hooks.use_state above.
    @hooks.use_effect(dependencies=[
        json.dumps(vars(_request), default=lambda x: generate_obj_name(x)),
        json.dumps([_args, _kwargs], default=lambda x: generate_obj_name(x)),
    ])
    async def async_render():
        if compatibility:
            return
        response = await render_view(resolved_view, _request, _args, _kwargs)
        set_converted_view(utils.html_to_vdom(
            response.content.decode('utf-8').strip(),
            utils.del_html_head_body_transform,
            *transforms,
            strict=strict_parsing))

    if compatibility:
        warn('view_to_component(compatibility=True) is deprecated and will be removed in a '
             'future version. Please use `view_to_iframe` instead.', DeprecationWarning)
        return view_to_iframe(resolved_view)(*_args, **_kwargs)
    return converted_view
class BaseJobSet(ABC):
    name: str = ''
    job_name: str = ''

    def started_job(self, name: str) -> None:
        pass

    def finished_job(self) -> None:
        pass

    def check_status(self) -> None:
        pass

    # Decorator name assumed; the garbled source kept only the message strings.
    @deprecated('Just use JobSet.job_name attribute/property instead')
    def get_active_job_name(self) -> str:
        pass

    def get_percent_done(self) -> Optional[float]:
        pass

    @deprecated('Just use JobSet.name attribute/property instead')
    def get_name(self) -> str:
        pass

    def increment(self) -> None:
        pass
def ssimloss(X, Y):
    assert not torch.is_complex(X)
    assert not torch.is_complex(Y)
    win_size = 7
    k1 = 0.01
    k2 = 0.03
    # uniform averaging window for local statistics
    w = torch.ones(1, 1, win_size, win_size).to(X) / (win_size ** 2)
    NP = win_size ** 2
    cov_norm = NP / (NP - 1)
    data_range = 1
    C1 = (k1 * data_range) ** 2
    C2 = (k2 * data_range) ** 2
    ux = F.conv2d(X, w)
    uy = F.conv2d(Y, w)
    uxx = F.conv2d(X * X, w)
    uyy = F.conv2d(Y * Y, w)
    uxy = F.conv2d(X * Y, w)
    vx = cov_norm * (uxx - ux * ux)
    vy = cov_norm * (uyy - uy * uy)
    vxy = cov_norm * (uxy - ux * uy)
    A1, A2, B1, B2 = (2 * ux * uy + C1,
                      2 * vxy + C2,
                      ux ** 2 + uy ** 2 + C1,
                      vx + vy + C2)
    D = B1 * B2
    S = (A1 * A2) / D
    return 1 - S.mean()
class Graphsn_GIN(nn.Module):

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(Graphsn_GIN, self).__init__()
        self.nn = Linear(nfeat, nhid)
        self.fc = Linear(nhid, nclass)
        self.dropout = dropout
        self.eps = nn.Parameter(torch.FloatTensor(1))
        self.reset_parameters()

    def reset_parameters(self):
        stdv_eps = 0.44 / math.sqrt(self.eps.size(0))
        nn.init.constant_(self.eps, stdv_eps)

    def forward(self, x, adj):
        # scale the diagonal of the adjacency by the learnable eps
        v = self.eps * torch.diag(adj)
        mask = torch.diag(torch.ones_like(v))
        adj = (mask * torch.diag(v)) + ((1.0 - mask) * adj)
        x = torch.mm(adj, x)
        x = F.relu(self.nn(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.fc(x)
        return F.log_softmax(x, dim=-1)
class MiningYieldViewFull(StatsView):
    name = 'miningyieldViewFull'

    def __init__(self, parent):
        StatsView.__init__(self)
        self.parent = parent
        self._cachedValues = []

    def getHeaderText(self, fit):
        return _t('Mining Yield')

    def getTextExtentW(self, text):
        width, height = self.parent.GetTextExtent(text)
        return width

    def populatePanel(self, contentPanel, headerPanel):
        contentSizer = contentPanel.GetSizer()
        parent = self.panel = contentPanel
        self.headerPanel = headerPanel
        panel = 'full'
        sizerMiningYield = wx.FlexGridSizer(1, 4, 0, 0)
        sizerMiningYield.AddGrowableCol(1)
        contentSizer.Add(sizerMiningYield, 0, wx.EXPAND, 0)
        counter = 0
        for miningType, image in (('miner', 'mining'), ('drone', 'drones')):
            baseBox = wx.BoxSizer(wx.HORIZONTAL)
            sizerMiningYield.Add(baseBox, 1,
                                 wx.ALIGN_LEFT if counter == 0 else wx.ALIGN_CENTER_HORIZONTAL)
            baseBox.Add(BitmapLoader.getStaticBitmap('%s_big' % image, parent, 'gui'),
                        0, wx.ALIGN_CENTER)
            box = wx.BoxSizer(wx.VERTICAL)
            baseBox.Add(box, 0, wx.ALIGN_CENTER)
            box.Add(wx.StaticText(parent, wx.ID_ANY, _t(miningType).capitalize()), 0, wx.ALIGN_LEFT)
            hbox = wx.BoxSizer(wx.HORIZONTAL)
            box.Add(hbox, 1, wx.ALIGN_CENTER)
            lbl = wx.StaticText(parent, wx.ID_ANY, '0.0 m3/s')
            setattr(self, 'label%sminingyield%s' % (panel.capitalize(), miningType.capitalize()), lbl)
            hbox.Add(lbl, 0, wx.ALIGN_CENTER)
            self._cachedValues.append(0)
            counter += 1
        targetSizer = sizerMiningYield
        baseBox = wx.BoxSizer(wx.HORIZONTAL)
        targetSizer.Add(baseBox, 0, wx.ALIGN_LEFT)
        baseBox.Add(BitmapLoader.getStaticBitmap('cargoBay_big', parent, 'gui'), 0, wx.ALIGN_CENTER)
        box = wx.BoxSizer(wx.VERTICAL)
        baseBox.Add(box, 0, wx.EXPAND)
        box.Add(wx.StaticText(parent, wx.ID_ANY, _t('Total')), 0, wx.ALIGN_LEFT)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        box.Add(hbox, 1, wx.EXPAND)
        lbl = wx.StaticText(parent, wx.ID_ANY, '0.0 m3/s')
        setattr(self, 'label%sminingyieldTotal' % panel.capitalize(), lbl)
        hbox.Add(lbl, 0, wx.ALIGN_LEFT)
        self._cachedValues.append(0)
        image = BitmapLoader.getBitmap('turret_small', 'gui')
        firepower = wx.BitmapButton(contentPanel, -1, image)
        firepower.SetToolTip(wx.ToolTip(_t('Click to toggle to Firepower View')))
        firepower.Bind(wx.EVT_BUTTON, self.switchToFirepowerView)
        sizerMiningYield.Add(firepower, 0, wx.ALIGN_LEFT)
        self._cachedValues.append(0)

    def switchToFirepowerView(self, event):
        mainFrame = gui.mainFrame.MainFrame.getInstance()
        sFit = Fit.getInstance()
        fit = sFit.getFit(mainFrame.getActiveFit())
        self.parent.views.remove(self)
        self._cachedValues = []
        self.panel.GetSizer().Clear(True)
        self.panel.GetSizer().Layout()
        view = StatsView.getView('firepowerViewFull')(self.parent)
        view.populatePanel(self.panel, self.headerPanel)
        self.parent.views.append(view)
        tp = self.panel.GetParent()
        tp.SetLabel(view.getHeaderText(fit))
        view.refreshPanel(fit)

    def refreshPanel(self, fit):
        stats = (
            ('labelFullminingyieldMiner', lambda: fit.minerYield, lambda: fit.minerWaste,
             3, 0, 0, '{}{} m3/s', None),
            ('labelFullminingyieldDrone', lambda: fit.droneYield, lambda: fit.droneWaste,
             3, 0, 0, '{}{} m3/s', None),
            ('labelFullminingyieldTotal', lambda: fit.totalYield, lambda: fit.totalWaste,
             3, 0, 0, '{}{} m3/s', None),
        )

        def processValue(value):
            value = value() if fit is not None else 0
            value = value if value is not None else 0
            return value

        counter = 0
        for labelName, yieldValue, wasteValue, prec, lowest, highest, valueFormat, altFormat in stats:
            label = getattr(self, labelName)
            yieldValue = processValue(yieldValue)
            wasteValue = processValue(wasteValue)
            if self._cachedValues[counter] != (yieldValue, wasteValue):
                yps = formatAmount(yieldValue, prec, lowest, highest)
                yph = formatAmount(yieldValue * 3600, prec, lowest, highest)
                wps = formatAmount(wasteValue, prec, lowest, highest)
                wph = formatAmount(wasteValue * 3600, prec, lowest, highest)
                wasteSuffix = 'w' if wasteValue > 0 else ''
                label.SetLabel(valueFormat.format(yps, wasteSuffix))
                tipLines = []
                tipLines.append('{} m3 mining yield per second ({} m3 per hour)'.format(yps, yph))
                if wasteValue > 0:
                    tipLines.append('{} m3 mining waste per second ({} m3 per hour)'.format(wps, wph))
                label.SetToolTip(wx.ToolTip('\n'.join(tipLines)))
                self._cachedValues[counter] = (yieldValue, wasteValue)
            counter += 1
        self.panel.Layout()
        self.headerPanel.Layout()
class FileInfo:

    def __init__(self, filename):
        self._filename = filename

    def FullName(self):
        return os.path.abspath(self._filename).replace('\\', '/')

    def RepositoryName(self):
        fullname = self.FullName()
        if os.path.exists(fullname):
            project_dir = os.path.dirname(fullname)
            if os.path.exists(os.path.join(project_dir, '.svn')):
                # walk up to the topmost directory that is still under .svn control
                root_dir = project_dir
                one_up_dir = os.path.dirname(root_dir)
                while os.path.exists(os.path.join(one_up_dir, '.svn')):
                    root_dir = os.path.dirname(root_dir)
                    one_up_dir = os.path.dirname(one_up_dir)
                prefix = os.path.commonprefix([root_dir, project_dir])
                return fullname[len(prefix) + 1:]
            # otherwise search upward for a .git, .hg, or .svn directory
            root_dir = os.path.dirname(fullname)
            while ((root_dir != os.path.dirname(root_dir)) and
                   not os.path.exists(os.path.join(root_dir, '.git')) and
                   not os.path.exists(os.path.join(root_dir, '.hg')) and
                   not os.path.exists(os.path.join(root_dir, '.svn'))):
                root_dir = os.path.dirname(root_dir)
            if (os.path.exists(os.path.join(root_dir, '.git')) or
                    os.path.exists(os.path.join(root_dir, '.hg')) or
                    os.path.exists(os.path.join(root_dir, '.svn'))):
                prefix = os.path.commonprefix([root_dir, project_dir])
                return fullname[len(prefix) + 1:]
        return fullname

    def Split(self):
        googlename = self.RepositoryName()
        project, rest = os.path.split(googlename)
        return (project,) + os.path.splitext(rest)

    def BaseName(self):
        return self.Split()[1]

    def Extension(self):
        return self.Split()[2]

    def NoExtension(self):
        return '/'.join(self.Split()[0:2])

    def IsSource(self):
        return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def plot_model_metrics_rewards(results, size: int, N: int, split: float = 0.01,
                               reward='scores', si_fig: bool = False):
    xs = [int(size * split * i) for i in range(1, 7)]
    fig, axs = plt.subplots(1, 3, sharex=True, sharey=True, figsize=((4 / 1.5) * 3, 4))
    fmt = 'o-'
    ms = 5
    capsize = 2
    for i, (model, ax) in enumerate(zip(MODELS, axs)):
        for metric in results['retrain'][split][model]:
            if metric == 'greedy':
                metric_ = metric
            elif metric == 'thompson':
                metric_ = 'TS'
            else:
                metric_ = metric.upper()
            if not si_fig:
                ys, y_sds = zip(*results['retrain'][split][model][metric][reward])
                ys = [y * 100 for y in ys]
                y_sds = [y * 100 for y in y_sds]
                ax.errorbar(xs, ys, yerr=y_sds, color=METRIC_COLORS[metric], label=metric_,
                            fmt=fmt, ms=ms, mec='black', capsize=capsize)
            else:
                ys, y_sds = zip(*results['retrain'][split][model][metric][reward])
                ys = [y * 100 for y in ys]
                y_sds = [y * 100 for y in y_sds]
                ax.plot(xs, ys, fmt, color=METRIC_COLORS[metric], ms=ms, mec='black', alpha=0.33)
                ys, y_sds = zip(*results['online'][split][model][metric][reward])
                ys = [y * 100 for y in ys]
                y_sds = [y * 100 for y in y_sds]
                ax.errorbar(xs, ys, yerr=y_sds, color=METRIC_COLORS[metric], fmt=fmt, ms=ms,
                            mec='black', capsize=capsize, label=metric_)
        add_random_trace(ax, results, split, reward, xs, fmt, ms, capsize)
        ax.set_title(model.upper())
        if i == 0:
            ax.set_ylabel(f'Percentage of Top-{N} {reward.capitalize()} Found')
            ax.legend(loc='upper left', title='Metric')
            ax.set_ylim(bottom=0)
        ax.set_xlabel('Molecules explored')
        ax.set_xlim(left=0)
        ax.set_ylim(top=100)
        ax.xaxis.set_major_locator(ticker.MaxNLocator(7))
        ax.xaxis.set_tick_params(rotation=30)
        ax.grid(True)
    fig.tight_layout()
    return fig
def main(args):
    args = parse_args(args)
    if torch.cuda.is_available():
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
    device = init_distributed_device(args)
    if args.name is None:
        model_name_safe = args.model.replace('/', '-')
        date_str = datetime.now().strftime('%Y_%m_%d-%H_%M_%S')
        if args.distributed:
            date_str = broadcast_object(args, date_str)
        args.name = '-'.join([
            date_str,
            f'model_{model_name_safe}',
            f'lr_{args.lr}',
            f'b_{args.batch_size}',
            f'j_{args.workers}',
            f'p_{args.precision}',
        ])
    log_base_path = os.path.join(args.logs, args.name)
    args.log_path = None
    if is_master(args, local=args.log_local):
        os.makedirs(log_base_path, exist_ok=True)
        log_filename = f'out-{args.rank}' if args.log_local else 'out.log'
        args.log_path = os.path.join(log_base_path, log_filename)
        if os.path.exists(args.log_path):
            print('Error. Experiment already exists. Use --name {} to specify a new experiment.')
            return -1
    args.log_level = logging.DEBUG if args.debug else logging.INFO
    setup_logging(args.log_path, args.log_level)
    args.checkpoint_path = os.path.join(log_base_path, 'checkpoints')
    if args.precision == 'fp16':
        logging.warning(
            'It is recommended to use AMP mixed-precision instead of FP16. '
            'FP16 support needs further verification and tuning, especially for train.')
    elif args.distributed:
        logging.info(
            f'Running in distributed mode with multiple processes. Device: {args.device}.'
            f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')
    else:
        logging.info(f'Running with a single process. Device {args.device}.')
    if isinstance(args.force_image_size, (tuple, list)) and len(args.force_image_size) == 1:
        args.force_image_size = args.force_image_size[0]
    random_seed(args.seed, 0)
    model, preprocess_train, preprocess_val = create_model_and_transforms(
        args.model, args.pretrained, precision=args.precision, device=device,
        jit=args.torchscript, force_quick_gelu=args.force_quick_gelu,
        force_custom_text=args.force_custom_text, force_patch_dropout=args.force_patch_dropout,
        force_image_size=args.force_image_size, pretrained_image=args.pretrained_image,
        image_mean=args.image_mean, image_std=args.image_std, aug_cfg=args.aug_cfg,
        output_dict=True, cache_dir=args.cache_dir, det_image_size=args.det_image_size,
        dataset_type=args.dataset_type)
    args.input_size = model.visual.image_size
    if args.dataset_type in ['grid_distill', 'proposals_distill']:
        method = CLIPSelf()
    elif args.dataset_type == 'region_clip':
        method = RegionCLIP(args=args).to(device)
    else:
        raise NotImplementedError
    if args.dataset_type == 'region_clip':
        logging.info(f'{args.dataset_type}, set dist_model as None')
        dist_model = None
    else:
        logging.info(f'{args.dataset_type}, use dist_model')
        dist_model = create_model(args.model, args.pretrained, device=device,
                                  precision=args.precision, output_dict=True,
                                  cache_dir=args.cache_dir)
    random_seed(args.seed, args.rank)
    if args.lock_image:
        model.lock_image_tower(unlocked_groups=args.lock_image_unlocked_groups,
                               freeze_bn_stats=args.lock_image_freeze_bn_stats)
    if args.grad_checkpointing:
        model.set_grad_checkpointing()
    if is_master(args):
        logging.info('Model:')
        logging.info(f'{str(model)}')
        logging.info('Params:')
        params_file = os.path.join(args.logs, args.name, 'params.txt')
        with open(params_file, 'w') as f:
            for name in sorted(vars(args)):
                val = getattr(args, name)
                logging.info(f'  {name}: {val}')
                f.write(f'{name}: {val}\n')
    if args.distributed:
        if args.use_bn_sync:
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        ddp_args = {}
        if args.ddp_static_graph:
            ddp_args['static_graph'] = True
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args)
        if args.dataset_type == 'region_clip':
            method = torch.nn.parallel.DistributedDataParallel(method, device_ids=[device], **ddp_args)
        if dist_model is not None:
            dist_model = torch.nn.parallel.DistributedDataParallel(dist_model, device_ids=[device], **ddp_args)
    optimizer = None
    scaler = None
    if args.train_data:
        exclude = lambda n, p: ((p.ndim < 2) or ('bn' in n) or ('ln' in n)
                                or ('bias' in n) or ('logit_scale' in n))
        include = lambda n, p: not exclude(n, p)
        named_parameters = list(model.named_parameters())
        gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad]
        rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad]
        optimizer = optim.AdamW(
            [{'params': gain_or_bias_params, 'weight_decay': 0.0},
             {'params': rest_params, 'weight_decay': args.wd}],
            lr=args.lr, betas=(args.beta1, args.beta2), eps=args.eps)
        scaler = GradScaler() if args.precision == 'amp' else None
    start_epoch = 0
    if args.resume is not None:
        checkpoint = pt_load(args.resume, map_location='cpu')
        if 'epoch' in checkpoint:
            start_epoch = checkpoint['epoch']
            sd = checkpoint['state_dict']
            if (not args.distributed) and next(iter(sd.items()))[0].startswith('module'):
                sd = {k[len('module.'):]: v for k, v in sd.items()}
            model.load_state_dict(sd)
            if optimizer is not None:
                optimizer.load_state_dict(checkpoint['optimizer'])
            if (scaler is not None) and ('scaler' in checkpoint):
                scaler.load_state_dict(checkpoint['scaler'])
            logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})")
        else:
            model.load_state_dict(checkpoint)
            logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})")
    data = get_data(args, (preprocess_train, preprocess_val), epoch=start_epoch,
                    tokenizer=get_tokenizer(args.model))
    assert len(data), 'At least one train or eval dataset must be specified.'
    scheduler = None
    if ('train' in data) and (optimizer is not None):
        total_steps = (data['train'].dataloader.num_batches // args.accum_freq) * args.epochs
        if args.lr_scheduler == 'cosine':
            scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps)
        elif args.lr_scheduler == 'const':
            scheduler = const_lr(optimizer, args.lr, args.warmup, total_steps)
        elif args.lr_scheduler == 'const-cooldown':
            assert args.epochs_cooldown is not None, \
                'Please specify the number of cooldown epochs for this lr schedule.'
            cooldown_steps = (data['train'].dataloader.num_batches // args.accum_freq) * args.epochs_cooldown
            scheduler = const_lr_cooldown(optimizer, args.lr, args.warmup, total_steps,
                                          cooldown_steps, args.lr_cooldown_power,
                                          args.lr_cooldown_end)
        else:
            logging.error(f'Unknown scheduler, {args.lr_scheduler}. '
                          'Available options are: cosine, const, const-cooldown.')
            exit(1)
    args.save_logs = args.logs and (args.logs.lower() != 'none') and is_master(args)
    logging.info('Evaluate before training')
    os.makedirs(args.checkpoint_path, exist_ok=True)
    if 'train' not in data:
        del dist_model
        evaluate(model, data, start_epoch, args)
        return
    evaluate(model, data, start_epoch, args)
    loss = None
    for epoch in range(start_epoch, args.epochs):
        if is_master(args):
            logging.info(f'Start epoch {epoch}')
        train_one_epoch(model, method, data, loss, epoch, optimizer, scaler, scheduler,
                        dist_model, args)
        completed_epoch = epoch + 1
        student_state_dict = model.module.state_dict() if args.distributed else model.state_dict()
        if args.alpha < 1.0:
            if dist_model is not None:
                teacher_state_dict = (dist_model.module.state_dict() if args.distributed
                                      else dist_model.state_dict())
            else:
                dist_model = create_model(args.model, args.pretrained, device=device,
                                          precision=args.precision, output_dict=True,
                                          cache_dir=args.cache_dir)
                teacher_state_dict = dist_model.state_dict()
                dist_model = None
            target_state_dict = student_teacher_ensemble(student_state_dict, teacher_state_dict,
                                                         args.alpha)
        else:
            target_state_dict = student_state_dict
        if is_master(args):
            checkpoint_dict = {'epoch': completed_epoch, 'name': args.name,
                               'state_dict': target_state_dict,
                               'optimizer': optimizer.state_dict()}
            if scaler is not None:
                checkpoint_dict['scaler'] = scaler.state_dict()
            if (completed_epoch == args.epochs) or \
                    ((args.save_frequency > 0) and ((completed_epoch % args.save_frequency) == 0)):
                torch.save(checkpoint_dict,
                           os.path.join(args.checkpoint_path, f'epoch_{completed_epoch}.pt'))
            if args.delete_previous_checkpoint:
                previous_checkpoint = os.path.join(args.checkpoint_path,
                                                   f'epoch_{completed_epoch - 1}.pt')
                if os.path.exists(previous_checkpoint):
                    os.remove(previous_checkpoint)
            if args.save_most_recent:
                tmp_save_path = os.path.join(args.checkpoint_path, 'tmp.pt')
                latest_save_path = os.path.join(args.checkpoint_path, LATEST_CHECKPOINT_NAME)
                torch.save(checkpoint_dict, tmp_save_path)
                os.replace(tmp_save_path, latest_save_path)
            if (completed_epoch % args.zeroshot_frequency) == 0:
                test_model = create_model(args.model, args.pretrained, device=device,
                                          precision=args.precision, output_dict=True,
                                          cache_dir=args.cache_dir)
                test_model.load_state_dict(target_state_dict)
                if args.distributed:
                    test_model = torch.nn.parallel.DistributedDataParallel(
                        test_model, device_ids=[device], **ddp_args)
                evaluate(test_model, data, completed_epoch, args)
                del test_model
class ForbiddenImportChecker(BaseChecker):
    name = 'forbidden_import'
    msgs = {'E9999': ('You may not import any modules - you imported %s on line %s.',
                      'forbidden-import', 'Used when you use import')}
    options = (
        ('allowed-import-modules', {'default': (), 'type': 'csv', 'metavar': '<modules>',
                                    'help': 'Allowed modules to be imported.'}),
        ('extra-imports', {'default': (), 'type': 'csv', 'metavar': '<extra-modules>',
                           'help': 'Extra allowed modules to be imported.'}),
    )

    @only_required_for_messages('forbidden-import')
    def visit_import(self, node: nodes.Import) -> None:
        temp = [name for name in node.names
                if name[0] not in self.linter.config.allowed_import_modules
                and name[0] not in self.linter.config.extra_imports]
        if temp != []:
            self.add_message('forbidden-import', node=node,
                             args=(', '.join(map(lambda x: x[0], temp)), node.lineno))

    @only_required_for_messages('forbidden-import')
    def visit_importfrom(self, node: nodes.ImportFrom) -> None:
        if (node.modname not in self.linter.config.allowed_import_modules
                and node.modname not in self.linter.config.extra_imports):
            self.add_message('forbidden-import', node=node, args=(node.modname, node.lineno))

    @only_required_for_messages('forbidden-import')
    def visit_call(self, node: nodes.Call) -> None:
        if isinstance(node.func, nodes.Name):
            name = node.func.name
            if not (name in node.frame() or name in node.root()):
                if name == '__import__':
                    if (node.args[0].value not in self.linter.config.allowed_import_modules
                            and node.args[0].value not in self.linter.config.extra_imports):
                        args = (node.args[0].value, node.lineno)
                        self.add_message('forbidden-import', node=node, args=args)
class Effect989(BaseEffect):
    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Small Hybrid Turret'),
            'maxRange', ship.getModifiedItemAttr('eliteBonusGunship1'),
            skill='Assault Frigates', **kwargs)
class TestBooleanAttribute:
    def test_boolean_attribute(self):
        attr = BooleanAttribute(default=True)
        assert attr.attr_type == BOOLEAN
        assert attr.default is True

    def test_boolean_serialize(self):
        attr = BooleanAttribute()
        assert attr.serialize(True) is True
        assert attr.serialize(False) is False
        assert attr.serialize(None) is None

    def test_boolean_deserialize(self):
        attr = BooleanAttribute()
        assert attr.deserialize(True) is True
        assert attr.deserialize(False) is False
def main(args):
    train_loader, test_loader, DATASET_CONFIG = get_loader(args)
    n_data = len(train_loader.dataset)
    logger.info(f'length of training dataset: {n_data}')
    n_data = len(test_loader.dataset)
    logger.info(f'length of testing dataset: {n_data}')
    model, criterion = get_model(args, DATASET_CONFIG)
    if dist.get_rank() == 0:
        logger.info(str(model))
    if args.optimizer == 'adamW':
        param_dicts = [
            {'params': [p for n, p in model.named_parameters() if 'decoder' not in n and p.requires_grad]},
            {'params': [p for n, p in model.named_parameters() if 'decoder' in n and p.requires_grad],
             'lr': args.decoder_learning_rate},
        ]
        optimizer = optim.AdamW(param_dicts, lr=args.learning_rate, weight_decay=args.weight_decay)
    else:
        raise NotImplementedError
    scheduler = get_scheduler(optimizer, len(train_loader), args)
    model = model.cuda()
    model = DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False)
    if args.checkpoint_path:
        assert os.path.isfile(args.checkpoint_path)
        load_checkpoint(args, model, optimizer, scheduler)
    CONFIG_DICT = {'remove_empty_box': False, 'use_3d_nms': True, 'nms_iou': 0.25,
                   'use_old_type_nms': False, 'cls_nms': True, 'per_class_proposal': True,
                   'conf_thresh': 0.0, 'dataset_config': DATASET_CONFIG}
    for epoch in range(args.start_epoch, args.max_epoch + 1):
        train_loader.sampler.set_epoch(epoch)
        tic = time.time()
        train_one_epoch(epoch, train_loader, DATASET_CONFIG, model, criterion, optimizer, scheduler, args)
        logger.info('epoch {}, total time {:.2f}, lr_base {:.5f}, lr_decoder {:.5f}'.format(
            epoch, time.time() - tic, optimizer.param_groups[0]['lr'], optimizer.param_groups[1]['lr']))
        if epoch % args.val_freq == 0:
            evaluate_one_epoch(test_loader, DATASET_CONFIG, CONFIG_DICT, args.ap_iou_thresholds,
                               model, criterion, args)
        if dist.get_rank() == 0:
            save_checkpoint(args, epoch, model, optimizer, scheduler)
    evaluate_one_epoch(test_loader, DATASET_CONFIG, CONFIG_DICT, args.ap_iou_thresholds,
                       model, criterion, args)
    save_checkpoint(args, 'last', model, optimizer, scheduler, save_cur=True)
    logger.info('Saved in {}'.format(os.path.join(args.log_dir, 'ckpt_epoch_last.pth')))
    return os.path.join(args.log_dir, 'ckpt_epoch_last.pth')
class FIR2(Stage):
    _format = [
        E(1, 4, x_fixed(b'FIR2'), dummy=True),
        E(6, 7, 'i2'),
        E(9, 18, 'e10.2'),
        E(20, 23, 'i4'),
        E(25, 32, 'f8.3'),
        E(34, 34, 'a1'),
        E(36, 39, 'i4'),
        E(41, None, 'a25+'),
    ]
    gain = Float.T(help='filter gain (relative factor, not in dB)')
    decimation = Int.T(optional=True, help='decimation')
    correction = Float.T(help='group correction applied [s]')
    symmetry = SymmetryFlag.T(help='symmetry flag (A=asymmetric, B=symmetric (odd), C=symmetric (even))')
    nfactors = Int.T(help='number of factors')
    description = String.T(default='', optional=True, help='description')
    comments = List.T(String.T(optional=True))
    factors = List.T(Float.T())

    def append_dataline(self, line, version_dialect):
        d = FIR2Data.deserialize(line, version_dialect)
        self.factors.extend(d.factors)

    def write_datalines(self, writer):
        i = 0
        while i < len(self.factors):
            FIR2Data(factors=self.factors[i:i + 5]).write(writer)
            i += 5
class VarEarlyStopper:
    def __init__(self, eps: float = 0.15, window: int = 200):
        self.eps = eps
        self.window = window
        self.stopped = False
        self.history = np.array([])
        self.normalized_var = 1

    def __call__(self, loss: float):
        self.history = np.append(self.history, loss)
        if len(self.history) >= self.window:
            self.normalized_var = np.var(self.history[-self.window:]) / np.var(self.history)
            if self.normalized_var < self.eps:
                self.stopped = True
        return self.normalized_var
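# Hedged usage sketch for VarEarlyStopper above: feed it one loss value per
# step and stop once the variance of the trailing window flattens out relative
# to the whole history. The loss stream and hyperparameters are illustrative,
# not from the source.
def run_until_converged(losses, eps=0.15, window=200):
    stopper = VarEarlyStopper(eps=eps, window=window)
    for step, loss in enumerate(losses):
        stopper(loss)
        if stopper.stopped:
            return step  # step at which normalized variance dropped below eps
    return None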
def export_plugin_maintainers(request, **kwargs):
    if not request.user.is_superuser:
        raise PermissionDenied()
    import csv
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=plugin_maintainers.csv'
    writer = csv.writer(response, dialect='excel-tab')
    for u in (User.objects.filter(plugins_created_by__isnull=False, email__isnull=False)
              .exclude(email='').order_by('email').distinct()):
        writer.writerow([u.email])
    return response
class BotRepoConfigTest(TestCase):
    def test_fetches_file_success(self):
        bot = bot_factory()
        bot.provider.get_file.return_value = ('foo: bar', None)
        self.assertEqual(bot.get_repo_config(bot.user_repo), {'foo': 'bar'})

    def test_yaml_error(self):
        bot = bot_factory()
        bot.provider.get_file.return_value = ('foo: bar: baz: fii:', None)
        with self.assertRaises(ConfigError):
            self.assertEqual(bot.get_repo_config(bot.user_repo), None)

    def test_fetches_file_error(self):
        bot = bot_factory()
        bot.provider.get_file.return_value = (None, None)
        self.assertEqual(bot.get_repo_config(bot.user_repo), None)
class FairseqLMDecoder(BaseDecoder):
    def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
        super().__init__(tgt_dict)
        self.nbest = cfg.nbest
        self.unitlm = cfg.unitlm
        self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None
        self.idx_to_wrd = {}
        checkpoint = torch.load(cfg.lmpath, map_location='cpu')
        if 'cfg' in checkpoint and checkpoint['cfg'] is not None:
            lm_args = checkpoint['cfg']
        else:
            lm_args = convert_namespace_to_omegaconf(checkpoint['args'])
        if not OmegaConf.is_dict(lm_args):
            lm_args = OmegaConf.create(lm_args)
        with open_dict(lm_args.task):
            lm_args.task.data = osp.dirname(cfg.lmpath)
        task = tasks.setup_task(lm_args.task)
        model = task.build_model(lm_args.model)
        model.load_state_dict(checkpoint['model'], strict=False)
        self.trie = Trie(self.vocab_size, self.silence)
        self.word_dict = task.dictionary
        self.unk_word = self.word_dict.unk()
        self.lm = FairseqLM(self.word_dict, model)
        if self.lexicon:
            start_state = self.lm.start(False)
            for i, (word, spellings) in enumerate(self.lexicon.items()):
                if self.unitlm:
                    word_idx = i
                    self.idx_to_wrd[i] = word
                    score = 0
                else:
                    word_idx = self.word_dict.index(word)
                    _, score = self.lm.score(start_state, word_idx, no_cache=True)
                for spelling in spellings:
                    spelling_idxs = [tgt_dict.index(token) for token in spelling]
                    assert tgt_dict.unk() not in spelling_idxs, f'{spelling} {spelling_idxs}'
                    self.trie.insert(spelling_idxs, word_idx, score)
            self.trie.smear(SmearingMode.MAX)
            self.decoder_opts = LexiconDecoderOptions(
                beam_size=cfg.beam,
                beam_size_token=cfg.beamsizetoken or len(tgt_dict),
                beam_threshold=cfg.beamthreshold,
                lm_weight=cfg.lmweight,
                word_score=cfg.wordscore,
                unk_score=cfg.unkweight,
                sil_score=cfg.silweight,
                log_add=False,
                criterion_type=CriterionType.CTC)
            self.decoder = LexiconDecoder(self.decoder_opts, self.trie, self.lm, self.silence,
                                          self.blank, self.unk_word, [], self.unitlm)
        else:
            assert self.unitlm, 'Lexicon-free decoding requires unit LM'
            d = {w: [[w]] for w in tgt_dict.symbols}
            self.word_dict = create_word_dict(d)
            self.lm = KenLM(cfg.lmpath, self.word_dict)
            self.decoder_opts = LexiconFreeDecoderOptions(
                beam_size=cfg.beam,
                beam_size_token=cfg.beamsizetoken or len(tgt_dict),
                beam_threshold=cfg.beamthreshold,
                lm_weight=cfg.lmweight,
                sil_score=cfg.silweight,
                log_add=False,
                criterion_type=CriterionType.CTC)
            self.decoder = LexiconFreeDecoder(self.decoder_opts, self.lm, self.silence, self.blank, [])

    def decode(self, emissions: torch.FloatTensor) -> List[List[Dict[str, torch.LongTensor]]]:
        B, T, N = emissions.size()
        hypos = []

        def make_hypo(result: DecodeResult) -> Dict[str, Any]:
            hypo = {'tokens': self.get_tokens(result.tokens), 'score': result.score}
            if self.lexicon:
                hypo['words'] = [self.idx_to_wrd[x] if self.unitlm else self.word_dict[x]
                                 for x in result.words if x >= 0]
            return hypo

        for b in range(B):
            emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[:self.nbest]
            hypos.append([make_hypo(result) for result in nbest_results])
            self.lm.empty_cache()
        return hypos
class RHEL4_NetworkData(FC3_NetworkData):
    removedKeywords = FC3_NetworkData.removedKeywords
    removedAttrs = FC3_NetworkData.removedAttrs

    def __init__(self, *args, **kwargs):
        FC3_NetworkData.__init__(self, *args, **kwargs)
        self.notksdevice = kwargs.get('notksdevice', False)

    def _getArgsAsStr(self):
        retval = FC3_NetworkData._getArgsAsStr(self)
        if self.notksdevice:
            retval += ' --notksdevice'
        return retval
class CmdFight(Command):
    key = 'fight'
    help_category = 'combat'

    def func(self):
        here = self.caller.location
        fighters = []
        if not self.caller.db.hp:
            self.caller.msg("You can't start a fight if you've been defeated!")
            return
        if is_in_combat(self.caller):
            self.caller.msg("You're already in a fight!")
            return
        for thing in here.contents:
            if thing.db.hp:  # anything with the hp attribute set can fight
                fighters.append(thing)
        if len(fighters) <= 1:
            self.caller.msg("There's nobody here to fight!")
            return
        if here.db.combat_turnhandler:
            here.msg_contents('%s joins the fight!' % self.caller)
            here.db.combat_turnhandler.join_fight(self.caller)
            return
        here.msg_contents('%s starts a fight!' % self.caller)
        here.scripts.add('contrib.turnbattle.tb_magic.TBMagicTurnHandler')
class Application(tornado.web.Application):
    def __init__(self, db: DB, default_version=None):
        settings = dict(
            template_path=os.path.join(os.path.dirname(__file__), 'tpl'),
            static_path=os.path.join(os.path.dirname(__file__), 'static'),
            static_url_prefix=config.static_url_prefix,
            debug=config.debug,
            gzip=config.gzip,
            autoreload=config.autoreload,
            cookie_secret=config.cookie_secret,
            login_url='/login',
            websocket_ping_interval=config.websocket.ping_interval,
            websocket_ping_timeout=config.websocket.ping_timeout,
            websocket_max_message_size=config.websocket.max_message_size)
        super(Application, self).__init__(handlers, **settings)
        self.jinja_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(settings['template_path']),
            extensions=['jinja2.ext.loopcontrols'],
            autoescape=True,
            auto_reload=config.autoreload)
        self.db = db
        self.version = default_version or 'Debug'
        self.fetcher = Fetcher()
        self.jinja_env.globals.update({
            'config': config,
            'format_date': utils.format_date,
            'varbinary2ip': utils.varbinary2ip,
            'version': self.version})
        self.jinja_env.filters.update(ui_methods)
def _get(package: str, resource: str, name: str) -> dict[str, t.Any]:
    try:
        return t.cast('dict[str, t.Any]',
                      json.loads(importlib_resources.files(package).joinpath(resource).read_bytes()))
    except (FileNotFoundError, ModuleNotFoundError):
        raise NoSuchSchemaError(f'no builtin schema named {name} was found')
class TestBinaryBinnedAUROC(MetricClassTester):
    def _test_auroc_class_with_input(self, input: torch.Tensor, target: torch.Tensor,
                                     num_tasks: int,
                                     threshold: Union[int, List[float], torch.Tensor],
                                     compute_result: Tuple[torch.Tensor, torch.Tensor]) -> None:
        self.run_class_implementation_tests(
            metric=BinaryBinnedAUROC(num_tasks=num_tasks, threshold=threshold),
            state_names={'inputs', 'targets'},
            update_kwargs={'input': input, 'target': target},
            compute_result=compute_result)

    def test_auroc_class_valid_input(self) -> None:
        torch.manual_seed(123)
        input = torch.randint(high=2, size=(NUM_TOTAL_UPDATES, BATCH_SIZE))
        target = torch.randint(high=2, size=(NUM_TOTAL_UPDATES, BATCH_SIZE))
        threshold = 5
        self._test_auroc_class_with_input(
            input, target, num_tasks=1, threshold=threshold,
            compute_result=(torch.tensor([0.], dtype=torch.float64),
                            torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])))
        torch.manual_seed(123)
        num_tasks = 2
        input = torch.rand(NUM_TOTAL_UPDATES, num_tasks, BATCH_SIZE)
        target = torch.randint(high=2, size=(NUM_TOTAL_UPDATES, num_tasks, BATCH_SIZE))
        self._test_auroc_class_with_input(
            input, target, num_tasks=2, threshold=threshold,
            compute_result=(torch.tensor([0., 0.], dtype=torch.float64),
                            torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])))
        num_classes = 2
        threshold = 5
        torch.manual_seed(123)
        update_input = [torch.rand(5), torch.rand(8), torch.rand(2), torch.rand(5)]
        update_target = [torch.randint(high=num_classes, size=(5,)),
                         torch.randint(high=num_classes, size=(8,)),
                         torch.randint(high=num_classes, size=(2,)),
                         torch.randint(high=num_classes, size=(5,))]
        compute_result = (torch.tensor([0.], dtype=torch.float64),
                          torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        self.run_class_implementation_tests(
            metric=BinaryBinnedAUROC(threshold=threshold),
            state_names={'inputs', 'targets'},
            update_kwargs={'input': update_input, 'target': update_target},
            compute_result=compute_result,
            num_total_updates=4,
            num_processes=2)

    def test_auroc_class_invalid_input(self) -> None:
        with self.assertRaisesRegex(ValueError, '`num_tasks` has to be at least 1.'):
            metric = BinaryBinnedAUROC(num_tasks=-1)
        with self.assertRaisesRegex(ValueError,
                                    'The `input` and `target` should have the same shape, '
                                    'got shapes torch.Size\\(\\[4\\]\\) and torch.Size\\(\\[3\\]\\).'):
            metric = BinaryBinnedAUROC()
            metric.update(torch.rand(4), torch.rand(3))
        with self.assertRaisesRegex(ValueError,
                                    '`num_tasks = 1`, `input` is expected to be one-dimensional tensor, '
                                    'but got shape torch.Size\\(\\[3, 2\\]\\).'):
            metric = BinaryBinnedAUROC()
            metric.update(torch.rand(3, 2), torch.rand(3, 2))
        with self.assertRaisesRegex(ValueError, 'The `threshold` should be a sorted tensor.'):
            metric = BinaryBinnedAUROC(threshold=torch.tensor([0.1, 0.2, 0.5, 0.7, 0.6]))
        with self.assertRaisesRegex(ValueError, 'The values in `threshold` should be in the range of \\[0, 1\\].'):
            metric = BinaryBinnedAUROC(threshold=torch.tensor([-0.1, 0.2, 0.5, 0.7]))
class AbstractAudioPlayer(metaclass=ABCMeta):
    audio_sync_required_measurements = 8
    audio_desync_time_critical = 0.28
    audio_desync_time_minor = 0.03
    audio_minor_desync_correction_time = 0.012
    audio_buffer_length = 0.9

    def __init__(self, source, player):
        self.source = weakref.proxy(source)
        self.player = weakref.proxy(player)
        afmt = source.audio_format
        self._buffered_data_ideal_size = max(32768, afmt.timestamp_to_bytes_aligned(self.audio_buffer_length))
        self._buffered_data_comfortable_limit = int(self._buffered_data_ideal_size * (2 / 3))
        self._events = deque()
        self.desync_bytes_critical = afmt.timestamp_to_bytes_aligned(self.audio_desync_time_critical)
        self.desync_bytes_minor = afmt.timestamp_to_bytes_aligned(self.audio_desync_time_minor)
        self.desync_correction_bytes_minor = afmt.timestamp_to_bytes_aligned(self.audio_minor_desync_correction_time)
        self.audio_sync_measurements = deque(maxlen=self.audio_sync_required_measurements)
        self.audio_sync_cumul_measurements = 0
        self._compensated_bytes = 0

    def on_driver_destroy(self):
        pass

    def on_driver_reset(self):
        pass

    def set_source(self, source):
        assert self.source.audio_format == source.audio_format
        self.clear()
        self.source = weakref.proxy(source)

    # The following bodies were lost in extraction; they are abstract hooks
    # overridden by concrete audio driver subclasses.
    def prefill_audio(self):
        pass

    def work(self):
        pass

    def play(self):
        pass

    def stop(self):
        pass

    def clear(self):
        self._events.clear()
        self._compensated_bytes = 0
        self.audio_sync_measurements.clear()
        self.audio_sync_cumul_measurements = 0

    def delete(self):
        pass

    def get_play_cursor(self):
        pass

    def get_time(self):
        return self._raw_play_cursor_to_time(self.get_play_cursor()) + self.player.last_seek_time

    def _raw_play_cursor_to_time(self, cursor):
        if cursor is None:
            return None
        return self._to_perceived_play_cursor(cursor) / self.source.audio_format.bytes_per_second

    def _to_perceived_play_cursor(self, play_cursor):
        return play_cursor - self._compensated_bytes

    def _play_group(self, audio_players):
        for player in audio_players:
            player.play()

    def _stop_group(self, audio_players):
        for player in audio_players:
            player.stop()

    def append_events(self, start_index, events):
        bps = self.source.audio_format.bytes_per_second
        lst = self.player.last_seek_time
        for event in events:
            event_cursor = start_index + max(0.0, event.timestamp - lst) * bps
            assert _debug(f'AbstractAudioPlayer: Adding event {event} at {event_cursor}')
            self._events.append((event_cursor, event))

    def dispatch_media_events(self, until_cursor):
        until_cursor = self._to_perceived_play_cursor(until_cursor)
        while self._events and self._events[0][0] <= until_cursor:
            self._events.popleft()[1].sync_dispatch_to_player(self.player)

    def get_audio_time_diff(self, audio_time):
        required_measurement_count = self.audio_sync_measurements.maxlen
        if audio_time is not None:
            p_time = self.player.time
            audio_time += self.player.last_seek_time
            diff_bytes = self.source.audio_format.timestamp_to_bytes_aligned(audio_time - p_time)
            if abs(diff_bytes) >= self.desync_bytes_critical:
                self.audio_sync_measurements.clear()
                self.audio_sync_cumul_measurements = 0
                return (diff_bytes, True)
            if len(self.audio_sync_measurements) == required_measurement_count:
                self.audio_sync_cumul_measurements -= self.audio_sync_measurements[0]
            self.audio_sync_measurements.append(diff_bytes)
            self.audio_sync_cumul_measurements += diff_bytes
            if len(self.audio_sync_measurements) == required_measurement_count:
                avg_diff = self.source.audio_format.align(
                    self.audio_sync_cumul_measurements // required_measurement_count)
                if abs(avg_diff) > self.desync_bytes_minor:
                    return (avg_diff, False)
        return (0, False)

    def _get_and_compensate_audio_data(self, requested_size, audio_position=None):
        audio_time = self._raw_play_cursor_to_time(audio_position)
        desync_bytes, extreme_desync = self.get_audio_time_diff(audio_time)
        if desync_bytes == 0:
            return self.source.get_audio_data(requested_size)
        compensated_bytes = 0
        afmt = self.source.audio_format
        assert _debug(f'Audio desync, desync_bytes={desync_bytes!r}, extreme_desync={extreme_desync!r}')
        assert desync_bytes % afmt.bytes_per_frame == 0
        if desync_bytes > 0:
            # Audio is ahead: pad by repeating the first frame.
            compensated_bytes = min(requested_size - afmt.align_ceil(1024),
                                    desync_bytes, self.desync_correction_bytes_minor)
            audio_data = self.source.get_audio_data(requested_size - compensated_bytes)
            if audio_data is not None:
                if audio_data.length < afmt.bytes_per_frame:
                    raise RuntimeError('Partial audio frame returned?')
                first_frame = ctypes.string_at(audio_data.pointer, afmt.bytes_per_frame)
                ad = bytearray(audio_data.length + compensated_bytes)
                ad[0:compensated_bytes] = first_frame * (compensated_bytes // afmt.bytes_per_frame)
                ad[compensated_bytes:] = audio_data.data
                audio_data = AudioData(ad, len(ad), audio_data.events)
        elif desync_bytes < 0:
            # Audio is behind: skip data.
            compensated_bytes = -desync_bytes if extreme_desync else min(-desync_bytes, self.desync_correction_bytes_minor)
            audio_data = self.source.get_audio_data(requested_size + compensated_bytes)
            if audio_data is not None:
                if audio_data.length <= compensated_bytes:
                    compensated_bytes = -audio_data.length
                    audio_data = None
                else:
                    audio_data = AudioData(
                        ctypes.string_at(audio_data.pointer + compensated_bytes,
                                         audio_data.length - compensated_bytes),
                        audio_data.length - compensated_bytes,
                        audio_data.events)
                    compensated_bytes *= -1
        assert _debug(f'Compensated {compensated_bytes} after audio desync')
        self._compensated_bytes += compensated_bytes
        return audio_data

    def set_volume(self, volume):
        pass

    def set_position(self, position):
        pass

    def set_min_distance(self, min_distance):
        pass

    def set_max_distance(self, max_distance):
        pass

    def set_pitch(self, pitch):
        pass

    def set_cone_orientation(self, cone_orientation):
        pass

    def set_cone_inner_angle(self, cone_inner_angle):
        pass

    def set_cone_outer_angle(self, cone_outer_angle):
        pass

    def set_cone_outer_gain(self, cone_outer_gain):
        pass
def resnetv2_50x1_vit(pretrained=False, strict=False, progress=False, **kwargs):
    model = ResNetV2(layers=(3, 4, 9), num_classes=0, global_pool='avg',
                     in_chans=kwargs.get('in_chans', 3), preact=False, stem_type='same')
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnetv2_50x1_vit'], progress=progress, map_location='cpu')
        state_dict = {'.'.join(k.split('.')[2:]): v for k, v in state_dict.items()
                      if k.startswith('patch_embed.backbone')}
        miss, unexp = model.load_state_dict(state_dict, strict=strict)
    return model
def _build_family_tree(dirlist, parent_DID, child_DID):
    if child_DID < 0:
        return
    _build_family_tree(dirlist, parent_DID, dirlist[child_DID].left_DID)
    dirlist[parent_DID].children.append(child_DID)
    dirlist[child_DID].parent = parent_DID
    _build_family_tree(dirlist, parent_DID, dirlist[child_DID].right_DID)
    if dirlist[child_DID].etype == 1:
        _build_family_tree(dirlist, child_DID, dirlist[child_DID].root_DID)
class Finder:
    ref_types = {r.type: r for r in (Ref('call', 5), Ref('lea', 7))}
    STR_SAMPLE_LEN = 100
    NULL = b'\x00'

    def __init__(self, file: File, sig: Sig):
        self.file = file
        self.sig = sig
        it = re.finditer(self.sig.pattern, self.file.data, flags=re.DOTALL)
        match = next(it, None)
        if not match:
            raise ValueError(f'Could not find signature {self.sig}')
        if next(it, None):
            raise ValueError(f'Found multiple matches for signature {self.sig}')
        self.offset = match.start() + self.sig.offset
        if self.sig.ref:
            ref = self.ref_types.get(self.sig.ref)
            if not ref:
                raise ValueError(f'Unsupported ref type {self.sig.ref}')
            logger.debug('Processing ref for signature %s...', self.sig)
            matched_bytes = match[0]
            logger.debug('Found %s: %s', ref.type, PrettyBytes(matched_bytes))
            matched_bytes = matched_bytes[self.sig.offset:]
            rel_addr = self.get_addr(ref, matched_bytes)
            logger.debug('Found relative address: %s', hex(rel_addr))
            if ref.type == 'lea':
                self.offset = self.off_to_rva(self.offset, '.text')
                self.offset = self.offset + ref.total_size + rel_addr
                self.offset = self.rva_to_off(self.offset, '.rdata')
            else:
                self.offset = self.offset + ref.total_size + rel_addr
            self.offset %= 2 ** 32
            logger.debug('Determined actual offset: %s', hex(self.offset))

    def find(self):
        return self.offset

    def get_string(self):
        sample = self.file.data[self.offset:self.offset + self.STR_SAMPLE_LEN]
        return sample[:sample.tobytes().find(self.NULL)].tobytes().decode()

    def off_to_rva(self, value: int, section: str):
        return value - self.file.sections[section].PointerToRawData + self.file.sections[section].VirtualAddress

    def rva_to_off(self, value: int, section: str):
        return value - self.file.sections[section].VirtualAddress + self.file.sections[section].PointerToRawData

    @staticmethod
    def get_addr(ref: Ref, matched_bytes: bytes):
        rel_addr = matched_bytes[ref.op_size:ref.total_size]
        return int.from_bytes(rel_addr, byteorder='little')
def set_requires_grad(requires_grad, *models):
    for model in models:
        if isinstance(model, torch.nn.Module):
            for param in model.parameters():
                param.requires_grad = requires_grad
        elif isinstance(model, (torch.nn.Parameter, torch.Tensor)):
            model.requires_grad = requires_grad
        else:
            assert False, 'unknown type %r' % type(model)
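# Minimal illustration of set_requires_grad above: freeze a discriminator for
# the generator's update, then unfreeze it. The two Linear modules are
# stand-ins, not models from the source.
import torch

generator = torch.nn.Linear(8, 8)
discriminator = torch.nn.Linear(8, 1)
set_requires_grad(False, discriminator)  # D is frozen while G trains
set_requires_grad(True, discriminator)   # D is trainable again for its own step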
def test_invalid_def_file(runner, mocker):
    mocker.patch('products.vmware_cb_response.CbResponse._authenticate')
    mocked_nested_process_search = mocker.patch('products.vmware_cb_response.CbResponse.nested_process_search')
    result = runner.invoke(cli, ['--deffile', 'nonexistent.json'])
    assert "The deffile doesn't exist" in result.output
    mocked_nested_process_search.assert_not_called()
def build_function(name: str,
                   args: list[str] | None = None,
                   posonlyargs: list[str] | None = None,
                   defaults: list[Any] | None = None,
                   doc: str | None = None,
                   kwonlyargs: list[str] | None = None,
                   kwonlydefaults: list[Any] | None = None) -> nodes.FunctionDef:
    func = nodes.FunctionDef(name, lineno=0, col_offset=0, parent=node_classes.Unknown(),
                             end_col_offset=0, end_lineno=0)
    argsnode = nodes.Arguments(parent=func, vararg=None, kwarg=None)
    if args is not None:
        arguments = [nodes.AssignName(name=arg, parent=argsnode, lineno=0, col_offset=0,
                                      end_lineno=None, end_col_offset=None) for arg in args]
    else:
        arguments = None
    default_nodes: list[nodes.NodeNG] | None
    if defaults is None:
        default_nodes = None
    else:
        default_nodes = []
        for default in defaults:
            default_node = nodes.const_factory(default)
            default_node.parent = argsnode
            default_nodes.append(default_node)
    kwonlydefault_nodes: list[nodes.NodeNG | None] | None
    if kwonlydefaults is None:
        kwonlydefault_nodes = None
    else:
        kwonlydefault_nodes = []
        for kwonlydefault in kwonlydefaults:
            kwonlydefault_node = nodes.const_factory(kwonlydefault)
            kwonlydefault_node.parent = argsnode
            kwonlydefault_nodes.append(kwonlydefault_node)
    argsnode.postinit(
        args=arguments,
        defaults=default_nodes,
        kwonlyargs=[nodes.AssignName(name=arg, parent=argsnode, lineno=0, col_offset=0,
                                     end_lineno=None, end_col_offset=None)
                    for arg in (kwonlyargs or ())],
        kw_defaults=kwonlydefault_nodes,
        annotations=[],
        posonlyargs=[nodes.AssignName(name=arg, parent=argsnode, lineno=0, col_offset=0,
                                      end_lineno=None, end_col_offset=None)
                     for arg in (posonlyargs or ())],
        kwonlyargs_annotations=[],
        posonlyargs_annotations=[])
    func.postinit(args=argsnode, body=[], doc_node=nodes.Const(value=doc) if doc else None)
    if args:
        register_arguments(func)
    return func
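# Hedged usage sketch for build_function above (astroid-style raw node
# synthesis): build a FunctionDef equivalent to `def greet(name='world')`.
# The argument values here are illustrative; only the signature defined above
# is assumed.
synthetic = build_function('greet', args=['name'], defaults=['world'], doc='Say hello.')
assert synthetic.name == 'greet'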
def write_to_outfile(out_path: str, data: InputExample, mode: str) -> None:
    Path(out_path).mkdir(parents=True, exist_ok=True)
    fp = os.path.join(out_path, f'en_ewt-ud-{mode}.conllu')
    comment = '# Cats and oats'
    col2 = 'c2'
    with open(fp, 'w', encoding='utf-8') as out:
        for section in data:
            out.write(comment + '\n')
            words, labels = section.words, section.labels
            for e, (word, label) in enumerate(zip(words, labels)):
                out.write(f'{e + 1} {word} {col2} {label}\n')
            out.write('\n')
class SeparationNet(nn.Module):
    def __init__(self, encoder: nn.Module, decoder_fg: nn.Module, decoder_bg: nn.Module) -> None:
        super().__init__()
        self.encoder = encoder
        self.decoder_fg = decoder_fg
        self.decoder_bg = decoder_bg

    def encode(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        encoded = self.encoder(x)
        hidden_shape = list(encoded.size())
        hidden_shape[1] //= 2
        half_dim = self.encoder.num_features[-1] // 2
        split = torch.split(encoded, half_dim, dim=1)
        hidden_fg = split[0].reshape(hidden_shape)
        hidden_bg = split[1].reshape(hidden_shape)
        return (hidden_fg, hidden_bg)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        hidden_fg, hidden_bg = self.encode(x)
        decoded_fg = self.decoder_fg(hidden_fg)
        decoded_bg = self.decoder_bg(hidden_bg)
        return (decoded_fg, decoded_bg)
def download_delta_manifest_entry(delta_like: Union[Delta, DeltaLocator],
                                  entry_index: int,
                                  table_type: TableType = TableType.PYARROW,
                                  columns: Optional[List[str]] = None,
                                  file_reader_kwargs_provider: Optional[ReadKwargsProvider] = None,
                                  *args, **kwargs) -> LocalTable:
    cur, con = _get_sqlite3_cursor_con(kwargs)
    manifest = get_delta_manifest(delta_like, *args, **kwargs)
    if entry_index >= len(manifest.entries):
        raise IndexError(f'Manifest entry index {entry_index} does not exist. '
                         f'Valid values: [0, {len(manifest.entries)}]')
    entry = manifest.entries[entry_index]
    res = cur.execute('SELECT value FROM data WHERE uri = ?', (entry.uri,))
    serialized_data = res.fetchone()
    if serialized_data is None:
        raise ValueError(f'Invalid value of delta locator: {delta_like.canonical_string()}')
    serialized_data = serialized_data[0]
    if entry.meta.content_type == ContentType.PARQUET:
        if table_type == TableType.PYARROW_PARQUET:
            table = pa.parquet.ParquetFile(io.BytesIO(serialized_data))
        else:
            table = pa.parquet.read_table(io.BytesIO(serialized_data), columns=columns)
    elif entry.meta.content_type == ContentType.UNESCAPED_TSV:
        assert table_type != TableType.PYARROW_PARQUET, f'uTSV table cannot be read as {table_type}'
        parse_options = pa.csv.ParseOptions(delimiter='\t')
        convert_options = pa.csv.ConvertOptions(null_values=[''], strings_can_be_null=True,
                                                include_columns=columns)
        table = pa.csv.read_csv(io.BytesIO(serialized_data), parse_options=parse_options,
                                convert_options=convert_options)
    else:
        raise ValueError(f'Content type: {entry.meta.content_type} not supported.')
    if table_type == TableType.PYARROW:
        return table
    elif table_type == TableType.PYARROW_PARQUET:
        return table
    elif table_type == TableType.NUMPY:
        raise NotImplementedError(f'Table type={table_type} not supported')
    elif table_type == TableType.PANDAS:
        return table.to_pandas()
    return table
def test_object_parking_space():
    parking_space_object = xodr.Object(s=0, t=0, length=5, width=3, height=0.0,
                                       Type=xodr.ObjectType.parkingSpace, name='parkingSpace')
    parking_space = xodr.ParkingSpace(xodr.Access.all, 'test string')
    parking_space_object.add_parking_space(parking_space)
    road = xodr.create_road(xodr.Line(100), 0)
    road.add_object(parking_space_object)
    prettyprint(road.get_element())
    assert (version_validation('t_road_objects_object', parking_space_object, wanted_schema='xodr')
            == ValidationResponse.OK)
@pytest.mark.django_project(project_root='django_project_root', create_manage_py=True)
def test_django_project_found(django_pytester: DjangoPytester) -> None:
    django_pytester.create_test_module('\n    def test_foobar():\n        assert 1 + 1 == 2\n    ')
    result = django_pytester.runpytest_subprocess('django_project_root')
    assert result.ret == 0
    outcomes = result.parseoutcomes()
    assert outcomes['passed'] == 1
def get_status_view(process_id, start_time):
    url = BH_URL + '/api/v1/client-view/status'
    payload = {'processId': process_id, 'startTime': start_time}
    try:
        r = requests.get(url, params=payload, headers=json_auth_headers())
        status_view_json = json.dumps(r.json())
        return StatusView.from_JSON(status_view_json)
    except ConnectionError as error:
        print("Sorry, looks like there's a connection error")
        return None
    except Exception as error:
        if r.status_code in (403, 401):
            print('Permissions issue. Run bashhub setup to re-login.')
        else:
            print('Sorry, an error occurred communicating with Bashhub. Response Code: ' + str(r.status_code))
        return None
def test_learnerND_log_works():
    loss = curvature_loss_function()
    learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss)
    learner.ask(4)
    learner.tell((-1, -1), -1.0)
    learner.ask(1)
    learner.tell((-1, 1), -1.0)
    learner.tell((1, -1), 1.0)
    learner.ask(2)
class IoUBalancedNegSampler(RandomSampler):
    def __init__(self, num, pos_fraction, floor_thr=-1, floor_fraction=0, num_bins=3, **kwargs):
        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, **kwargs)
        assert floor_thr >= 0 or floor_thr == -1
        assert 0 <= floor_fraction <= 1
        assert num_bins >= 1
        self.floor_thr = floor_thr
        self.floor_fraction = floor_fraction
        self.num_bins = num_bins

    def sample_via_interval(self, max_overlaps, full_set, num_expected):
        max_iou = max_overlaps.max()
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)
        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(np.where(np.logical_and(max_overlaps >= start_iou, max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds, per_num_expected)
            else:
                tmp_sampled_set = np.array(tmp_inds, dtype=int)  # np.int was removed in modern NumPy
            sampled_inds.append(tmp_sampled_set)
        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])
        return sampled_inds

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        neg_inds = torch.nonzero(assign_result.gt_inds == 0)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            neg_set = set(neg_inds.cpu().numpy())
            if self.floor_thr > 0:
                floor_set = set(np.where(np.logical_and(max_overlaps >= 0, max_overlaps < self.floor_thr))[0])
                iou_sampling_set = set(np.where(max_overlaps >= self.floor_thr)[0])
            elif self.floor_thr == 0:
                floor_set = set(np.where(max_overlaps == 0)[0])
                iou_sampling_set = set(np.where(max_overlaps > self.floor_thr)[0])
            else:
                floor_set = set()
                iou_sampling_set = set(np.where(max_overlaps > self.floor_thr)[0])
            floor_neg_inds = list(floor_set & neg_set)
            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
            num_expected_iou_sampling = int(num_expected * (1 - self.floor_fraction))
            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
                if self.num_bins >= 2:
                    iou_sampled_inds = self.sample_via_interval(max_overlaps, set(iou_sampling_neg_inds),
                                                                num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=int)
            num_expected_floor = num_expected - len(iou_sampled_inds)
            if len(floor_neg_inds) > num_expected_floor:
                sampled_floor_inds = self.random_choice(floor_neg_inds, num_expected_floor)
            else:
                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
            sampled_inds = np.concatenate((sampled_floor_inds, iou_sampled_inds))
            if len(sampled_inds) < num_expected:
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(list(neg_set - set(sampled_inds)))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(assign_result.gt_inds.device)
            return sampled_inds
class CheckpointParams(FairseqDataclass):
    save_dir: str = field(default='checkpoints', metadata={'help': 'path to save checkpoints'})
    restore_file: str = field(default='checkpoint_last.pt', metadata={'help': 'filename from which to load checkpoint (default: <save-dir>/checkpoint_last.pt)'})
    finetune_from_model: Optional[str] = field(default=None, metadata={'help': 'finetune from a pretrained model; note that meters and lr scheduler will be reset'})
    reset_dataloader: bool = field(default=False, metadata={'help': 'if set, does not reload dataloader state from the checkpoint'})
    reset_lr_scheduler: bool = field(default=False, metadata={'help': 'if set, does not load lr scheduler state from the checkpoint'})
    reset_meters: bool = field(default=False, metadata={'help': 'if set, does not load meters from the checkpoint'})
    reset_optimizer: bool = field(default=False, metadata={'help': 'if set, does not load optimizer state from the checkpoint'})
    optimizer_overrides: str = field(default='{}', metadata={'help': 'a dictionary used to override optimizer args when loading a checkpoint'})
    save_interval: int = field(default=1, metadata={'help': 'save a checkpoint every N epochs'})
    save_interval_updates: int = field(default=0, metadata={'help': 'save a checkpoint (and validate) every N updates'})
    keep_interval_updates: int = field(default=-1, metadata={'help': 'keep the last N checkpoints saved with --save-interval-updates'})
    keep_last_epochs: int = field(default=-1, metadata={'help': 'keep last N epoch checkpoints'})
    keep_best_checkpoints: int = field(default=-1, metadata={'help': 'keep best N checkpoints based on scores'})
    no_save: bool = field(default=False, metadata={'help': "don't save models or checkpoints"})
    no_epoch_checkpoints: bool = field(default=False, metadata={'help': 'only store last and best checkpoints'})
    no_last_checkpoints: bool = field(default=False, metadata={'help': "don't store last checkpoints"})
    no_save_optimizer_state: bool = field(default=False, metadata={'help': "don't save optimizer-state as part of checkpoint"})
    best_checkpoint_metric: str = field(default='loss', metadata={'help': 'metric to use for saving "best" checkpoints'})
    maximize_best_checkpoint_metric: bool = field(default=False, metadata={'help': 'select the largest metric value for saving "best" checkpoints'})
    patience: int = field(default=-1, metadata={'help': "early stop training if valid performance doesn't improve for N consecutive validation runs; note that this is influenced by --validate-interval"})
class INatDataset(ImageFolder):
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        path_json = os.path.join(root, f"{'train' if train else 'val'}{year}.json")
        with open(path_json) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        path_json_for_targeter = os.path.join(root, f'train{year}.json')
        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)
        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            king = []
            king.append(data_catg[int(elem['category_id'])][category])
            if king[0] not in targeter.keys():
                targeter[king[0]] = indexer
                indexer += 1
        self.nb_classes = len(targeter)
        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))
def main(sample):
    try:
        pathserv = fs.get_path_info_for_active_session()
    except mpexceptions.ExceptionUndefinedSamplesDir:
        print("The env var 'pyglet_mp_samples_dir' is not defined.")
        return 1
    except mpexceptions.ExceptionNoSessionIsActive:
        print('*** Error, no session active.')
        return 1
    bokeh_render_timeline(pathserv, sample)
def get_task_dict(task_name_list: List[Union[str, lm_eval.base.Task]]):
    task_name_dict = {task_name: get_task(task_name)()
                      for task_name in task_name_list if isinstance(task_name, str)}
    task_name_from_object_dict = {get_task_name_from_object(task_object): task_object
                                  for task_object in task_name_list if not isinstance(task_object, str)}
    assert set(task_name_dict.keys()).isdisjoint(set(task_name_from_object_dict.keys()))
    return {**task_name_dict, **task_name_from_object_dict}
class PicklingMixin:
    filename = None

    def load(self, filename):
        self.filename = filename
        print_d('Loading contents of %r.' % filename, self)
        items = _load_items(filename)
        self._load_init(items)
        print_d(f'Done loading contents of {filename!r}', self._name)

    def save(self, filename=None):
        if filename is None:
            filename = self.filename
        print_d(f'Saving contents to {filename!r}', self._name)
        try:
            dirname = os.path.dirname(filename)
            mkdir(dirname)
            with atomic_save(filename, 'wb') as fileobj:
                fileobj.write(dump_audio_files(self.get_content()))
        except SerializationError:
            util.print_exc()
        except OSError:
            print_w(f"Couldn't save library to path {filename!r}")
        else:
            self.dirty = False
def delayed_import():
    global _ServerSession, _AccountDB, _ServerConfig, _ScriptDB
    if not _ServerSession:
        modulename, classname = settings.SERVER_SESSION_CLASS.rsplit('.', 1)
        _ServerSession = variable_from_module(modulename, classname)
    if not _AccountDB:
        from evennia.accounts.models import AccountDB as _AccountDB
    if not _ServerConfig:
        from evennia.server.models import ServerConfig as _ServerConfig
    if not _ScriptDB:
        from evennia.scripts.models import ScriptDB as _ScriptDB
    assert _ServerSession
    assert _AccountDB
    assert _ServerConfig
    assert _ScriptDB
class TimeRange(BaseElement):
    tag: ClassVar[str] = ns('C', 'time-range')

    def __init__(self, start: Optional[datetime] = None, end: Optional[datetime] = None) -> None:
        super(TimeRange, self).__init__()
        if self.attributes is None:
            raise ValueError('Unexpected value None for self.attributes')
        if start is not None:
            self.attributes['start'] = _to_utc_date_string(start)
        if end is not None:
            self.attributes['end'] = _to_utc_date_string(end)
def iou_pytorch(outputs: torch.Tensor, labels: torch.Tensor):
    outputs = outputs.squeeze(1)
    intersection = (outputs & labels).float().sum((1, 2))
    union = (outputs | labels).float().sum((1, 2))
    iou = (intersection + SMOOTH) / (union + SMOOTH)
    thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10
    return thresholded
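# Hedged example for iou_pytorch above: integer binary masks, with the extra
# channel dimension on `outputs` that the function squeezes away. SMOOTH is
# assumed to be a small module-level constant; 1e-6 here is a guess.
import torch

SMOOTH = 1e-6  # assumption; must match the constant referenced by iou_pytorch
preds = (torch.rand(4, 1, 32, 32) > 0.5).int()
masks = (torch.rand(4, 32, 32) > 0.5).int()
scores = iou_pytorch(preds, masks)  # one thresholded IoU score per sample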
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter,
                    optim_cfg, rank, tbar, total_it_each_epoch, dataloader_iter,
                    tb_log=None, leave_pbar=False):
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        loss, tb_dict, disp_dict = model_func(model, batch)
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
class BoolQGen:
    def __init__(self):
        self.tokenizer = T5Tokenizer.from_pretrained('t5-base')
        model = T5ForConditionalGeneration.from_pretrained('ramsrigouthamg/t5_boolean_questions')
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model.to(device)
        self.device = device
        self.model = model
        self.set_seed(42)

    def set_seed(self, seed):
        numpy.random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)

    def random_choice(self):
        a = random.choice([0, 1])
        return bool(a)

    def predict_boolq(self, payload):
        start = time.time()
        inp = {'input_text': payload.get('input_text'),
               'max_questions': payload.get('max_questions', 4)}
        text = inp['input_text']
        num = inp['max_questions']
        sentences = tokenize_sentences(text)
        joiner = ' '
        modified_text = joiner.join(sentences)
        answer = self.random_choice()
        form = 'truefalse: %s passage: %s </s>' % (modified_text, answer)
        encoding = self.tokenizer.encode_plus(form, return_tensors='pt')
        input_ids, attention_masks = encoding['input_ids'].to(self.device), encoding['attention_mask'].to(self.device)
        output = beam_search_decoding(input_ids, attention_masks, self.model, self.tokenizer)
        if self.device.type == 'cuda':  # was `torch.device == 'cuda'`, which always compared False
            torch.cuda.empty_cache()
        final = {}
        final['Text'] = text
        final['Count'] = num
        final['Boolean Questions'] = output
        return final
@app.route('/api/chat_xlang_webot', methods=['POST'])
def chat_xlang_webot() -> Dict:
    try:
        request_json = request.get_json()
        user_id = request_json.pop('user_id', DEFAULT_USER_ID)
        chat_id = request_json['chat_id']
        user_intent = request_json['user_intent']
        parent_message_id = request_json['parent_message_id']
        llm_name = request_json['llm_name']
        temperature = request_json.get('temperature', 0.4)
        stop_words = ['[RESPONSE_BEGIN]', 'TOOL RESPONSE']
        kwargs = {'temperature': temperature, 'stop': stop_words}
        llm = get_llm(llm_name, **kwargs)
        logger.bind(user_id=user_id, chat_id=chat_id, api='/chat', msg_head='Request json').debug(request_json)
        human_message_id = message_id_register.add_variable(user_intent)
        ai_message_id = message_id_register.add_variable('')
        stream_handler = AgentStreamingStdOutCallbackHandler()
        reset_webot(user_id=user_id, chat_id=chat_id)
        reset_webot_status(user_id=user_id, chat_id=chat_id)
        interaction_executor = create_webot_interaction_executor(llm=llm, llm_name=llm_name,
                                                                 chat_id=chat_id, user_id=user_id)
        activated_message_list = message_pool.get_activated_message_list(user_id, chat_id, list(),
                                                                         parent_message_id)
        message_pool.load_agent_memory_from_list(interaction_executor.memory, activated_message_list)
        return stream_with_context(Response(
            single_round_chat_with_agent_streaming(
                interaction_executor=interaction_executor,
                user_intent=user_intent,
                human_message_id=human_message_id,
                ai_message_id=ai_message_id,
                user_id=user_id,
                chat_id=chat_id,
                message_list=activated_message_list,
                parent_message_id=parent_message_id,
                stream_handler=stream_handler,
                llm_name=llm_name,
                app_type='webot'),
            content_type='application/json'))
    except Exception as e:
        import traceback
        traceback.print_exc()
        return Response(response=None, status=f'{OVERLOAD} backend is currently overloaded')
class MapReduce:
    def __init__(self, map_func: Callable, iterable: Iterable, *iterables,
                 reduce_func: Optional[Callable] = None,
                 reduce_kwargs: Optional[dict] = None,
                 parallel: bool = True,
                 ordered: bool = False,
                 total: Optional[int] = None,
                 chunksize: Optional[int] = None,
                 sequential_threshold: int = 1,
                 max_depth: Optional[int] = None,
                 max_size: Optional[int] = None,
                 max_leaves: Optional[int] = None,
                 branch_factor: int = 2,
                 shortcircuit_func: Callable = false,
                 shortcircuit_callback: Optional[Callable] = None,
                 shortcircuit_callback_args: Any = None,
                 inflight_limit: int = 1000,
                 progress: Optional[bool] = None,
                 desc: Optional[str] = None,
                 map_kwargs: Optional[dict] = None):
        self.map_func = map_func
        self.iterables = (iterable,) + iterables
        self.reduce_func = fallback(reduce_func, _flatten)
        self.reduce_kwargs = fallback(reduce_kwargs, dict())
        self.parallel = parallel
        self.ordered = ordered
        self.total = fallback(try_len(*self.iterables), total)
        self.shortcircuit_func = shortcircuit_func
        self.shortcircuit_callback = shortcircuit_callback
        self.shortcircuit_callback_args = shortcircuit_callback_args
        self.inflight_limit = inflight_limit
        self.progress = fallback(progress, config.PROGRESS_BARS)
        self.desc = desc
        self.map_kwargs = fallback(map_kwargs, dict())
        self._shortcircuit_callback = shortcircuit_callback
        if self.parallel:
            self.constraints = get_constraints(total=self.total, chunksize=chunksize,
                                               sequential_threshold=sequential_threshold,
                                               max_depth=max_depth, max_size=max_size,
                                               max_leaves=max_leaves, branch_factor=branch_factor)
            self.tree = self.constraints.simulate()
            self.chunksize = self.constraints.get_initial_chunksize()
            if self.shortcircuit_callback is None:
                self.shortcircuit_callback = cancel_all
        self.progress_bar = None
        self.error = None
        self.done = False
        self.result = None

    def _repr_attrs(self):
        attrs = ['map_func', 'map_kwargs', 'iterables', 'reduce_func', 'reduce_kwargs',
                 'parallel', 'ordered', 'total', 'shortcircuit_func', 'shortcircuit_callback',
                 'shortcircuit_callback_args', 'inflight_limit', 'progress', 'desc']
        if self.parallel:
            attrs += ['constraints', 'tree']
        return attrs

    def __repr__(self):
        data = [f'{attr}={getattr(self, attr)}' for attr in self._repr_attrs()]
        return '\n'.join([f'{self.__class__.__name__}(', indent('\n'.join(data), ' '), ')'])

    def _run_parallel(self):
        init()
        if self.progress:
            self.progress_bar = ProgressBar(self.total, desc=self.desc)
            self.shortcircuit_callback = progress_hook(self.progress_bar)(self.shortcircuit_callback)
        try:
            self.result = _map_reduce_tree(self.iterables, self.map_func, self.reduce_func,
                                           self.constraints, self.tree, self.chunksize,
                                           self.shortcircuit_func, self.shortcircuit_callback,
                                           self.shortcircuit_callback_args, self.ordered,
                                           self.inflight_limit, self.map_kwargs,
                                           self.reduce_kwargs, self.progress_bar)
            self.done = True
            return self.result
        except Exception as e:
            self.error = e
            raise e
        finally:
            if self.progress:
                self.progress_bar.actor.__ray_terminate__.remote()

    def _run_sequential(self):
        try:
            results = _map_sequential(self.map_func, *self.iterables, **self.map_kwargs)
            if self.progress:
                results = tqdm(results, total=self.total, desc=self.desc)
            results = get(results, remote=False,
                          shortcircuit_func=self.shortcircuit_func,
                          shortcircuit_callback=self.shortcircuit_callback,
                          shortcircuit_callback_args=self.shortcircuit_callback_args)
            self.result = _reduce(results, self.reduce_func, self.reduce_kwargs, branch=False)
            self.done = True
            return self.result
        except Exception as e:
            self.error = e
            raise e

    def run(self):
        if self.done:
            return self.result
        if self.parallel and self.tree.depth > 1:
            return self._run_parallel()
        return self._run_sequential()
def get_error(output, target, topk=(1,)):
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        res.append(100.0 - correct_k.mul_(100.0 / batch_size))
    return res
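# Small sketch exercising get_error above: top-1 and top-5 error rates on
# random logits. Shapes and class count are illustrative only.
import torch

logits = torch.randn(16, 10)
targets = torch.randint(0, 10, (16,))
top1_err, top5_err = get_error(logits, targets, topk=(1, 5))  # each a 1-element tensor in [0, 100]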
def test_bool_type_factory():
    o = MyHarderConfigurable(required_str='yes', also_required='True')
    with inspect_node(o) as ni:
        assert not ni.partial
        assert o.required_str == 'yes'
        assert o.default_str == 'foo'
        assert o.integer is None
        assert o.also_required is True
def get_backend_name():
    display = Gdk.Display.get_default()
    if display is not None:
        name = display.__gtype__.name
        if name.startswith('Gdk'):
            name = name[3:]
        if name.endswith('Display'):
            name = name[:-7]
        return name
    return 'Unknown'
def is_a_tf_op_lambda_layer(layer: tf.keras.layers.Layer) -> bool:
    if version.parse(tf.version.VERSION) >= version.parse('2.10'):
        from keras.layers.core.tf_op_layer import TFOpLambda
    else:
        from tensorflow.python.keras.layers.core import TFOpLambda
    return isinstance(layer, TFOpLambda)
class Model(nn.Module):
    def __init__(self, n_cont_features: int, cat_cardinalities: List[int],
                 bins: Optional[List[Tensor]], mlp_kwargs: dict) -> None:
        super().__init__()
        self.cat_cardinalities = cat_cardinalities
        d_cat = sum(cat_cardinalities)
        d_embedding = 24
        self.cont_embeddings = rtdl_num_embeddings.PeriodicEmbeddings(n_cont_features, d_embedding, lite=False)
        d_num = n_cont_features * d_embedding
        self.backbone = rtdl_revisiting_models.MLP(d_in=d_num + d_cat, **mlp_kwargs)

    def forward(self, x_cont: Tensor, x_cat: Optional[Tensor]) -> Tensor:
        x = []
        x.append(self.cont_embeddings(x_cont).flatten(1))
        if x_cat is not None:
            x.extend(F.one_hot(column, cardinality)
                     for column, cardinality in zip(x_cat.T, self.cat_cardinalities))
        x = torch.column_stack(x)
        return self.backbone(x)
class CronTabSchedule(object):
    def __init__(self, crontab, delimiter='\n'):
        self.entries = []
        entry_lines = [s for s in (s.strip() for s in crontab.split(delimiter))
                       if s and s[0] != '#']
        self.smallest_change_gap = None
        for line in entry_lines:
            self.add_entry(line)

    def __len__(self):
        return len(self.entries)

    def check(self):
        if not self.entries:
            raise CronTabScheduleException('Schedule has no entries')

    def add_entry(self, entry_line):
        fields = entry_line.split(None, 5)
        if len(fields) < 6:
            raise CronTabScheduleException(
                '{} does not have six fields'.format(entry_line))
        # Track the coarsest granularity at which the schedule can change state.
        if fields[0] != '*':
            gap = ONE_MINUTE
        elif fields[1] != '*':
            gap = ONE_HOUR
        elif fields[2] == '*' and fields[3] == '*' and fields[4] == '*':
            gap = LIKE_FOREVER
        else:
            gap = ONE_DAY
        if self.smallest_change_gap is None:
            self.smallest_change_gap = gap
        else:
            self.smallest_change_gap = min(self.smallest_change_gap, gap)
        e = FastCronTab(' '.join(fields[0:5]))
        self.entries.append((e, fields[5]))

    def next_minute(self, now=None, multi=True):
        self.check()
        if now is None:
            now = datetime.now().replace(second=0, microsecond=0)
        elif now.second or now.microsecond:
            now = now.replace(second=0, microsecond=0)
        matches = []
        for i in range(len(self.entries)):
            if self.entries[i][0].next(now) == 60:
                matches.append(i)
        if len(matches) == 0:
            return None
        if len(matches) > 1 and not multi:
            raise CronTabScheduleException(
                'Multiple schedule matches at {}'.format(now + ONE_MINUTE))
        return matches[0] if not multi else matches

    def soonest(self, now=None):
        self.check()
        if now is None:
            now = datetime.now()
        if not (now.second or now.microsecond):
            now -= ONE_MINUTE
        soonest = self.entries[0][0].next(now)
        for entry in self.entries[1:]:
            soonest = min(soonest, entry[0].next(now))
        return now + timedelta(seconds=soonest)

    def round_up(self, now):
        self.check()
        if self.smallest_change_gap == ONE_MINUTE:
            return now
        if self.smallest_change_gap == LIKE_FOREVER:
            return now + LIKE_FOREVER
        if self.smallest_change_gap == ONE_HOUR:
            return now + timedelta(minutes=60 - now.minute)
        if self.smallest_change_gap == ONE_DAY:
            return now + timedelta(hours=24 - now.hour) - timedelta(minutes=now.minute)
        raise CronTabScheduleException(
            'Unrecognized smallest change gap {}'.format(self.smallest_change_gap))

    def key_of(self, entry):
        return self.entries[entry][1] if entry is not None else None

    @staticmethod  # called as self.fix_key(...), so it must not take self
    def fix_key(key, multi):
        return key[0] if (not multi or key[0] is None) else key

    def schedule_iter(self, start=None, end=None, multi=True, endless=False):
        self.check()
        if start is None:
            start = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
        else:
            start = start.replace(second=0, microsecond=0)
        if end is None:
            hard_stop = False
        else:
            if endless:
                raise ValueError("Can't specify both 'end' and 'endless'")
            end = end.replace(second=0, microsecond=0)
            hard_stop = True
        used_rules = set()
        current_rules = set()
        num_rules = len(self.entries)
        current_start = start
        current_entries = self.next_minute(current_start - ONE_MINUTE, multi)
        if not multi or current_entries is None:
            current_entries = [current_entries]
        if current_entries != [None]:
            current_rules.update(current_entries)
        current_key = tuple(sorted(set(self.key_of(e) for e in current_entries)))
        next_start = self.round_up(current_start)
        while ((not hard_stop and (endless or len(used_rules) < num_rules))
                or (hard_stop and next_start < end)):
            new_entries = self.next_minute(next_start - ONE_MINUTE, multi)
            if not multi or new_entries is None:
                new_entries = [new_entries]
            new_key = tuple(sorted(set(self.key_of(e) for e in new_entries)))
            if new_key != current_key or self.smallest_change_gap == LIKE_FOREVER:
                yield (current_start, next_start - ONE_MINUTE,
                       self.fix_key(current_key, multi))
                used_rules.update(current_rules)
                current_rules = set(new_entries)
                current_start = next_start
                current_entries = new_entries
                current_key = new_key
            elif new_entries != [None]:
                current_rules.update(new_entries)
            next_start += self.smallest_change_gap
        if not hard_stop:
            return
        if current_start < end:
            yield (current_start, end, self.fix_key(current_key, multi))
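A usage sketch for CronTabSchedule, assuming the FastCronTab wrapper and the ONE_MINUTE / ONE_HOUR / ONE_DAY / LIKE_FOREVER constants defined elsewhere in the module; the crontab text is illustrative:

from datetime import datetime

crontab = '\n'.join([
    '0 9 * * 1-5 work',     # the sixth field is the entry's key
    '0 17 * * 1-5 evening',
])
schedule = CronTabSchedule(crontab)
for start, end, key in schedule.schedule_iter(start=datetime(2024, 1, 1),
                                              end=datetime(2024, 1, 8)):
    print(start, end, key)  # contiguous intervals labeled by the matching key(s)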
class FullImageSampler(PatchSampler):
    def __init__(self):
        super(FullImageSampler, self).__init__()
        self.full_indices = True

    def __call__(self, nbatch, wh, device):
        # Dense (h, w) coordinate grid normalized to [-1, 1].
        w, h = torch.meshgrid([torch.linspace(-1, 1, wh[1]),
                               torch.linspace(-1, 1, wh[0])])
        h = h[None, ..., None]
        w = w[None, ..., None]
        coords = torch.cat([h, w], dim=-1)
        coords = coords.repeat(nbatch, 1, 1, 1).to(device)
        scales = torch.ones((nbatch, 1, 1, 1), device=device)
        return coords.contiguous(), scales.contiguous()
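For reference, the full-grid sampler returns one normalized coordinate per pixel and unit scales (a sketch, assuming PatchSampler needs no constructor arguments):

import torch

sampler = FullImageSampler()
coords, scales = sampler(nbatch=2, wh=(32, 32), device=torch.device('cpu'))
print(coords.shape)  # torch.Size([2, 32, 32, 2]), values in [-1, 1]
print(scales.shape)  # torch.Size([2, 1, 1, 1])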
def _symmetric_two_body_terms(quad, complex_valued):
    p, q, r, s = quad
    yield (p, q, r, s)
    yield (q, p, s, r)
    yield (s, r, q, p)
    yield (r, s, p, q)
    if not complex_valued:
        yield (p, s, r, q)
        yield (q, r, s, p)
        yield (s, p, q, r)
        yield (r, q, p, s)
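A quick check of the symmetry this generator encodes: a real-valued two-body term is invariant under eight index permutations, a complex-valued one under only the first four:

real_terms = set(_symmetric_two_body_terms((0, 1, 2, 3), complex_valued=False))
assert len(real_terms) == 8
assert (1, 0, 3, 2) in real_terms  # the (q, p, s, r) permutation

complex_terms = set(_symmetric_two_body_terms((0, 1, 2, 3), complex_valued=True))
assert len(complex_terms) == 4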
def test_set_after_show(skip_qtbot):
    label = DelayedTextLabel()
    skip_qtbot.addWidget(label)
    label.setText('Foo')
    assert label.text() == 'Foo'
    assert label._delayed_text == 'Foo'
    assert not label._already_shown
    label.showEvent(QtGui.QShowEvent())
    assert label.text() == 'Foo'
    assert label._delayed_text is None
    assert label._already_shown
class ChangeEmailForm(forms.Form):
    email1 = forms.EmailField(max_length=254, label=_('new e-mail address'))
    email2 = forms.EmailField(max_length=254, label=_('new e-mail address (again)'))
    password_confirm = forms.CharField(label=_('confirm your password'), strip=False,
                                       widget=forms.PasswordInput)

    def clean(self):
        form_data = self.cleaned_data
        if form_data.get('email1') != form_data.get('email2'):
            raise forms.ValidationError(gettext("e-mails didn't match."))
        if Author.objects.filter(email=form_data.get('email1')).exists():
            raise forms.ValidationError(gettext('this e-mail is already in use.'))
        return super().clean()
class CoinChooserBase(Logger):
    def __init__(self, *, enable_output_value_rounding: bool):
        Logger.__init__(self)
        self.enable_output_value_rounding = enable_output_value_rounding

    def keys(self, coins: Sequence[PartialTxInput]) -> Sequence[str]:
        raise NotImplementedError

    def bucketize_coins(self, coins: Sequence[PartialTxInput], *, fee_estimator_vb):
        keys = self.keys(coins)
        buckets = defaultdict(list)  # type: Dict[str, List[PartialTxInput]]
        for key, coin in zip(keys, coins):
            buckets[key].append(coin)
        # A fee estimator that returns the same fee for 200 and 2000 vbytes
        # is treated as a constant (zero-rate) estimator.
        constant_fee = fee_estimator_vb(2000) == fee_estimator_vb(200)

        def make_Bucket(desc: str, coins: List[PartialTxInput]):
            witness = any(coin.is_segwit(guess_for_address=True) for coin in coins)
            weight = sum(Transaction.estimated_input_weight(coin, witness)
                         for coin in coins)
            value = sum(coin.value_sats() for coin in coins)
            min_height = min(coin.block_height for coin in coins)
            assert min_height is not None
            if constant_fee:
                effective_value = value
            else:
                # keep the fractional vbyte part to avoid overestimating the fee
                fee = fee_estimator_vb(Decimal(weight) / 4)
                effective_value = value - fee
            return Bucket(desc=desc, weight=weight, value=value,
                          effective_value=effective_value, coins=coins,
                          min_height=min_height, witness=witness)

        return list(map(make_Bucket, buckets.keys(), buckets.values()))

    def penalty_func(self, base_tx, *,
                     tx_from_buckets: Callable[[List[Bucket]],
                                               Tuple[PartialTransaction, List[PartialTxOutput]]]
                     ) -> Callable[[List[Bucket]], ScoredCandidate]:
        raise NotImplementedError

    def _change_amounts(self, tx: PartialTransaction, count: int,
                        fee_estimator_numchange) -> List[int]:
        output_amounts = [o.value for o in tx.outputs()]
        # Break the change up if it is bigger than max_change.
        max_change = max(max(output_amounts) * 1.25, 0.02 * COIN)
        # Find how many change outputs (up to `count`) keep each below max_change.
        for n in range(1, count + 1):
            change_amount = max(0, tx.get_fee() - fee_estimator_numchange(n))
            if change_amount // n <= max_change:
                break

        # Round change amounts to a precision similar to the payment outputs'.
        def trailing_zeroes(val):
            s = str(val)
            return len(s) - len(s.rstrip('0'))

        zeroes = [trailing_zeroes(i) for i in output_amounts]
        min_zeroes = min(zeroes)
        max_zeroes = max(zeroes)
        if n > 1:
            zeroes = range(max(0, min_zeroes - 1), (max_zeroes + 1) + 1)
        else:
            zeroes = [min_zeroes]
        remaining = change_amount
        amounts = []
        while n > 1:
            average = remaining / n
            amount = self.p.randint(int(average * 0.7), int(average * 1.3))
            precision = min(self.p.choice(zeroes), int(floor(log10(amount))))
            amount = int(round(amount, -precision))
            amounts.append(amount)
            remaining -= amount
            n -= 1
        # Last change output: round down, losing at most
        # 10**max_dp_to_round_for_privacy satoshis to fees.
        max_dp_to_round_for_privacy = 2 if self.enable_output_value_rounding else 0
        N = int(pow(10, min(max_dp_to_round_for_privacy, zeroes[0])))
        amount = (remaining // N) * N
        amounts.append(amount)
        assert sum(amounts) <= change_amount
        return amounts

    def _change_outputs(self, tx: PartialTransaction, change_addrs,
                        fee_estimator_numchange, dust_threshold) -> List[PartialTxOutput]:
        amounts = self._change_amounts(tx, len(change_addrs), fee_estimator_numchange)
        assert min(amounts) >= 0
        assert len(change_addrs) >= len(amounts)
        assert all([isinstance(amt, int) for amt in amounts])
        # Drop change below the dust threshold (it goes to fees instead).
        amounts = [amount for amount in amounts if amount >= dust_threshold]
        change = [PartialTxOutput.from_address_and_value(addr, amount)
                  for addr, amount in zip(change_addrs, amounts)]
        return change

    def _construct_tx_from_selected_buckets(self, *, buckets: Sequence[Bucket],
                                            base_tx: PartialTransaction, change_addrs,
                                            fee_estimator_w, dust_threshold,
                                            base_weight, gas_fee
                                            ) -> Tuple[PartialTransaction, List[PartialTxOutput]]:
        # Copy base_tx so it does not get mutated.
        tx = PartialTransaction.from_io(base_tx.inputs()[:], base_tx.outputs()[:])
        tx.add_inputs([coin for b in buckets for coin in b.coins])
        tx_weight = self._get_tx_weight(buckets, base_weight=base_weight)
        # Change is sent back to the sending address unless specified.
        if not change_addrs:
            change_addrs = [tx.inputs()[0].address]
            assert is_address(change_addrs[0])
        output_weight = 4 * Transaction.estimated_output_size(change_addrs[0])
        fee_estimator_numchange = lambda count: (
            fee_estimator_w(tx_weight + count * output_weight) + gas_fee)
        change = self._change_outputs(tx, change_addrs, fee_estimator_numchange,
                                      dust_threshold)
        tx.add_outputs(change)
        return tx, change

    def _get_tx_weight(self, buckets: Sequence[Bucket], *, base_weight: int) -> int:
        total_weight = base_weight + sum(bucket.weight for bucket in buckets)
        is_segwit_tx = any(bucket.witness for bucket in buckets)
        if is_segwit_tx:
            total_weight += 2  # marker and flag bytes
            # each legacy input now needs one byte for its empty witness field
            num_legacy_inputs = sum((not bucket.witness) * len(bucket.coins)
                                    for bucket in buckets)
            total_weight += num_legacy_inputs
        return total_weight

    def make_tx(self, *, coins: Sequence[PartialTxInput], inputs: List[PartialTxInput],
                outputs: List[PartialTxOutput], change_addrs: Sequence[str],
                fee_estimator_vb: Callable, dust_threshold: int,
                gas_fee=0, sender=None) -> PartialTransaction:
        assert outputs, 'tx outputs cannot be empty'
        # Deterministic randomness seeded from the coins, so selection is repeatable.
        utxos = [c.prevout.serialize_to_network() for c in coins]
        self.p = PRNG(b''.join(sorted(utxos)))
        # Copy the outputs so adding change does not modify `outputs`.
        base_tx = PartialTransaction.from_io(inputs[:], outputs[:])
        input_value = base_tx.input_value()
        base_weight = base_tx.estimated_weight()
        spent_amount = base_tx.output_value()

        def fee_estimator_w(weight):
            return fee_estimator_vb(Transaction.virtual_size_from_weight(weight))

        def sufficient_funds(buckets, *, bucket_value_sum):
            total_input = input_value + bucket_value_sum
            if total_input < spent_amount:  # shortcut for performance
                return False
            if not buckets and not inputs:
                return False
            total_weight = self._get_tx_weight(buckets, base_weight=base_weight)
            return total_input >= spent_amount + fee_estimator_w(total_weight) + gas_fee

        def tx_from_buckets(buckets):
            return self._construct_tx_from_selected_buckets(
                buckets=buckets, base_tx=base_tx, change_addrs=change_addrs,
                fee_estimator_w=fee_estimator_w, dust_threshold=dust_threshold,
                base_weight=base_weight, gas_fee=gas_fee)

        # Bucketize the coins and drop buckets whose value is eaten by fees.
        all_buckets = self.bucketize_coins(coins, fee_estimator_vb=fee_estimator_vb)
        all_buckets = list(filter(lambda b: b.effective_value > 0, all_buckets))
        scored_candidate = self.choose_buckets(
            all_buckets, sufficient_funds,
            self.penalty_func(base_tx, tx_from_buckets=tx_from_buckets))
        tx = scored_candidate.tx
        self.logger.info(f'using {len(tx.inputs())} inputs')
        self.logger.info(f'using buckets: {[bucket.desc for bucket in scored_candidate.buckets]}')
        return tx

    def choose_buckets(self, buckets: List[Bucket],
                       sufficient_funds: Callable,
                       penalty_func: Callable[[List[Bucket]], ScoredCandidate]
                       ) -> ScoredCandidate:
        raise NotImplementedError('To be subclassed')
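A standalone illustration (not part of the class) of the trailing-zeroes heuristic used by _change_amounts: change is rounded to a precision similar to the payment outputs', so change outputs are harder to fingerprint:

def trailing_zeroes(val):
    s = str(val)
    return len(s) - len(s.rstrip('0'))

assert trailing_zeroes(150000) == 4  # a "round" amount: four trailing zeroes
assert trailing_zeroes(123) == 0     # an "odd" amount: none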
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('Transformers are awesome!')
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        self.assertEqual(mock_stdout.getvalue(),
                         'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        self.assertEqual(mock_stdout.getvalue(),
                         'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering),
                         ['start_positions', 'end_positions'])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering),
                         ['start_positions', 'end_positions'])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models have no loss-computing heads, so no label names are found.
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
def _get_version_from_arguments(arguments):
    if len(arguments) != 1:
        raise ValueError('Expected exactly 1 argument')
    version = arguments[0]
    parts = version.split('.')
    if len(parts) != 2:
        raise ValueError('not of the form: YY.N')
    if not all(part.isdigit() for part in parts):
        raise ValueError('non-integer segments')
    return version
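Expected behaviour, for reference:

assert _get_version_from_arguments(['24.1']) == '24.1'

for bad in (['24.1', 'extra'], ['1.2.3'], ['24.x']):
    try:
        _get_version_from_arguments(bad)
    except ValueError as exc:
        print(bad, '->', exc)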
def customize_compiler_for_nvcc(self):
    # Save the original compile method and swap in one that dispatches
    # .cu sources to the nvcc flag set and everything else to the gcc set.
    super = self.compile

    def compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):
        postfix = os.path.splitext(sources[0])[1]
        if postfix == '.cu':
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        return super(sources, output_dir, macros, include_dirs, debug,
                     extra_preargs, postargs, depends)

    self.compile = compile
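The usual way to apply this monkey-patch is from a build_ext subclass, as in the widely circulated setup.py recipe for mixing .cu and C/C++ sources (a sketch, assuming setuptools):

from setuptools.command.build_ext import build_ext

class cuda_build_ext(build_ext):
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)

# The Extension must then pass a dict for extra_compile_args, e.g.
# extra_compile_args={'gcc': ['-O3'], 'nvcc': ['-arch=sm_70']}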
class Migration(migrations.Migration):
    dependencies = [('core', '0005_auto__1730')]

    operations = [
        migrations.RenameField(model_name='currentsong', old_name='url', new_name='internal_url'),
        migrations.RenameField(model_name='queuedsong', old_name='url', new_name='internal_url'),
        migrations.AddField(model_name='currentsong', name='external_url',
                            field=models.CharField(blank=True, max_length=200)),
        migrations.AddField(model_name='queuedsong', name='external_url',
                            field=models.CharField(blank=True, max_length=200)),
        migrations.RemoveField(model_name='currentsong', name='location'),
        migrations.AlterField(model_name='currentsong', name='duration',
                              field=models.IntegerField()),
        migrations.AlterField(model_name='queuedsong', name='duration',
                              field=models.IntegerField()),
    ]
def se_resnet50(num_classes, loss, pretrained='imagenet', **kwargs):
    model = SENet(num_classes=num_classes, loss=loss, block=SEResNetBottleneck,
                  layers=[3, 4, 6, 3], groups=1, reduction=16, dropout_p=None,
                  inplanes=64, input_3x3=False, downsample_kernel_size=1,
                  downsample_padding=0, last_stride=2, fc_dims=None, **kwargs)
    if pretrained == 'imagenet':
        model_url = pretrained_settings['se_resnet50']['imagenet']['url']
        init_pretrained_weights(model, model_url)
    return model
class Yelp_f_Processor(DataProcessor):
    def get_train_examples(self, data_dir):
        train_data = pd.read_csv(os.path.join(data_dir, 'train.csv'),
                                 header=None, sep=',').values
        return self._create_examples(train_data, 'train')

    def get_dev_examples(self, data_dir):
        dev_data = pd.read_csv(os.path.join(data_dir, 'test.csv'),
                               header=None, sep=',').values
        return self._create_examples(dev_data, 'dev')

    def get_labels(self):
        return ['1', '2', '3', '4', '5']

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            guid = '%s-%s' % (set_type, i)
            text_a = tokenization.convert_to_unicode(str(line[1]))
            label = tokenization.convert_to_unicode(str(line[0]))
            if i % 1000 == 0:  # progress/debug output
                print(i)
                print('guid=', guid)
                print('text_a=', text_a)
                print('label=', label)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
def modified_resnet(arch, block, layers, pretrained, progress, **kwargs):
    model = ModifiedResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        # strict=False: the modified architecture may not match every checkpoint key
        model.load_state_dict(state_dict, strict=False)
    return model
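A hypothetical wrapper in the style of torchvision's resnet helpers, assuming a Bottleneck block class in scope and a 'resnet50' entry in model_urls:

def modified_resnet50(pretrained=False, progress=True, **kwargs):
    return modified_resnet('resnet50', Bottleneck, [3, 4, 6, 3],
                           pretrained, progress, **kwargs)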