Dataset columns:
    code      : string, length 75 to 104k
    docstring : string, length 1 to 46.9k
def destroy(self):
    """ Cleanup the activity lifecycle listener """
    if self.widget:
        self.set_active(False)
    super(AndroidBarcodeView, self).destroy()
def create_context_plot(ra, dec, name="Your object"):
    """Creates a K2FootprintPlot showing a given position in context with
    respect to the campaigns."""
    plot = K2FootprintPlot()
    plot.plot_galactic()
    plot.plot_ecliptic()
    for c in range(0, 20):
        plot.plot_campaign_outline(c, facecolor="#666666")
    # for c in [11, 12, 13, 14, 15, 16]:
    #     plot.plot_campaign_outline(c, facecolor="green")
    plot.ax.scatter(ra, dec, marker='x', s=250, lw=3, color="red", zorder=500)
    plot.ax.text(ra, dec - 2, name, ha="center", va="top",
                 color="red", fontsize=20, fontweight='bold', zorder=501)
    return plot
def perr(self, *args, **kwargs):
    """ Console to STDERR """
    kwargs['file'] = self.err
    self.print(*args, **kwargs)
    sys.stderr.flush()
def build_sanitiser_node_dict(
    cfg,
    sinks_in_file
):
    """Build a dict of string -> TriggerNode pairs, where the string
    is the sanitiser and the TriggerNode is a TriggerNode of the sanitiser.

    Args:
        cfg(CFG): cfg to traverse.
        sinks_in_file(list[TriggerNode]): list of TriggerNodes containing
                                          the sinks in the file.

    Returns:
        A string -> TriggerNode dict.
    """
    sanitisers = list()
    for sink in sinks_in_file:
        sanitisers.extend(sink.sanitisers)

    sanitisers_in_file = list()
    for sanitiser in sanitisers:
        for cfg_node in cfg.nodes:
            if sanitiser in cfg_node.label:
                sanitisers_in_file.append(Sanitiser(sanitiser, cfg_node))

    sanitiser_node_dict = dict()
    for sanitiser in sanitisers:
        sanitiser_node_dict[sanitiser] = list(find_sanitiser_nodes(
            sanitiser,
            sanitisers_in_file
        ))
    return sanitiser_node_dict
def rm_special(user, cmd, special=None, identifier=None):
    '''
    Remove a special cron job for a specified user.

    CLI Example:

    .. code-block:: bash

        salt '*' cron.rm_special root /usr/bin/foo
    '''
    lst = list_tab(user)
    ret = 'absent'
    rm_ = None
    for ind in range(len(lst['special'])):
        if rm_ is not None:
            break
        if _cron_matched(lst['special'][ind], cmd, identifier=identifier):
            if special is None:
                # No special param was specified
                rm_ = ind
            else:
                if lst['special'][ind]['spec'] == special:
                    rm_ = ind
    if rm_ is not None:
        lst['special'].pop(rm_)
        ret = 'removed'
        comdat = _write_cron_lines(user, _render_tab(lst))
        if comdat['retcode']:
            # Failed to commit, return the error
            return comdat['stderr']
    return ret
def send_to_default_exchange(self, sess_id, message=None):
    """
    Send messages through RabbitMQ's default exchange,
    which will be delivered through routing_key (sess_id).

    This method is only used for un-authenticated users,
    i.e. during the login process.

    Args:
        sess_id string: Session id
        message dict: Message object.
    """
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following message to %s queue through default exchange:\n%s" % (
        sess_id, msg))
    self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)
def replace_drive_enclosure(self, information):
    """
    When a drive enclosure has been physically replaced, initiate the
    replacement operation that enables the new drive enclosure to take
    over as a replacement for the prior drive enclosure. The request
    requires specification of both the serial numbers of the original
    drive enclosure and its replacement to be provided.

    Args:
        information: Options to replace the drive enclosure.

    Returns:
        dict: SAS Logical Interconnect.
    """
    uri = "{}/replaceDriveEnclosure".format(self.data["uri"])
    result = self._helper.create(information, uri)
    self.refresh()
    return result
def get(issue_id, issue_type_id):
    """Return issue by ID

    Args:
        issue_id (str): Unique Issue identifier
        issue_type_id (str): Type of issue to get

    Returns:
        :obj:`Issue`: Returns Issue object if found, else None
    """
    return db.Issue.find_one(
        Issue.issue_id == issue_id,
        Issue.issue_type_id == issue_type_id
    )
def get_tan_media(self, media_type=TANMediaType2.ALL, media_class=TANMediaClass4.ALL):
    """Get information about TAN lists/generators.

    Returns tuple of fints.formals.TANUsageOption and a list of
    fints.formals.TANMedia4 or fints.formals.TANMedia5 objects."""
    with self._get_dialog() as dialog:
        hktab = self._find_highest_supported_command(HKTAB4, HKTAB5)

        seg = hktab(
            tan_media_type=media_type,
            tan_media_class=str(media_class),
        )

        response = dialog.send(seg)

        for resp in response.response_segments(seg, 'HITAB'):
            return resp.tan_usage_option, list(resp.tan_media_list)
def get_previous_tag(cls, el):
    """Get previous sibling tag."""
    sibling = el.previous_sibling
    while not cls.is_tag(sibling) and sibling is not None:
        sibling = sibling.previous_sibling
    return sibling
def File(self, path):
    """Returns a reference to a file with a given path on client's VFS."""
    return vfs.FileRef(
        client_id=self.client_id, path=path, context=self._context)
def num_compositions(m, n):
    """
    The total number of m-part compositions of n, which is equal to
    (n+m-1) choose (m-1).

    Parameters
    ----------
    m : scalar(int)
        Number of parts of composition.

    n : scalar(int)
        Integer to decompose.

    Returns
    -------
    scalar(int)
        Total number of m-part compositions of n.

    """
    # docs.scipy.org/doc/scipy/reference/generated/scipy.special.comb.html
    return scipy.special.comb(n+m-1, m-1, exact=True)
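A quick sanity check of the counting formula (an illustrative sketch, assuming only scipy): for m = 3 parts and n = 4 the count is C(4+3-1, 3-1) = C(6, 2) = 15.

from scipy.special import comb

# 3-part compositions of 4: C(4 + 3 - 1, 3 - 1) = C(6, 2) = 15
assert comb(4 + 3 - 1, 3 - 1, exact=True) == 15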
def _get_win_argv():
    """Returns a unicode argv under Windows and standard sys.argv otherwise

    Returns:
        List[`fsnative`]
    """
    assert is_win

    argc = ctypes.c_int()
    try:
        argv = winapi.CommandLineToArgvW(
            winapi.GetCommandLineW(), ctypes.byref(argc))
    except WindowsError:
        return []

    if not argv:
        return []

    res = argv[max(0, argc.value - len(sys.argv)):argc.value]

    winapi.LocalFree(argv)

    return res
def backup_file(*, file, host):
    """
    Backup a file on S3

    :param file: full path to the file to be backed up
    :param host: this will be used to locate the file on S3
    :raises TypeError: if an argument in kwargs does not have the type expected
    :raises ValueError: if an argument within kwargs has an invalid value
    """
    # This driver won't do a thing unless it has been properly initialised
    # via load()
    if not _has_init:
        raise RuntimeError("This driver has not been properly initialised!")

    # If the destination bucket does not exist, create one
    try:
        if not _dry_run:
            bucket = _boto_conn.get_bucket(_bucket_name)
    except boto.exception.S3ResponseError:
        log.msg_warn("Bucket '{bucket_name}' does not exist!, creating it..."
                     .format(bucket_name=_bucket_name))
        if not _dry_run:
            bucket = _boto_conn.create_bucket(_bucket_name)
            log.msg("Created bucket '{bucket}'".format(bucket=_bucket_name))

    # The key is the name of the file itself, stripped of its full path
    key_path = "{key}/{file}".format(key=host, file=ntpath.basename(file))

    # Create a new bucket key
    if not _dry_run:
        k = boto.s3.key.Key(bucket)
        k.key = key_path

    # Upload the file to Amazon
    log.msg("Uploading '{key_path}' to bucket '{bucket_name}' ..."
            .format(key_path=key_path, bucket_name=_bucket_name))

    # It is important to encrypt the data on the server side
    if not _dry_run:
        k.set_contents_from_filename(file, encrypt_key=True)

    # Log the thing
    log.msg("The file '{key_path}' has been successfully uploaded to S3!"
            .format(key_path=key_path))
def main():
    """Run the core."""
    parser = ArgumentParser()
    subs = parser.add_subparsers(dest='cmd')
    setup_parser = subs.add_parser('setup')
    setup_parser.add_argument('-e', '--email', dest='email', required=True,
                              help='Email of the Google user.', type=str)
    setup_parser.add_argument('-p', '--password', dest='pwd', required=True,
                              help='Password of the Google user.', type=str)
    setup_parser = subs.add_parser('seed')
    setup_parser.add_argument('-d', '--driver', dest='driver', required=True, type=str,
                              help='Location of the Chrome driver. This can be downloaded by visiting http://chromedriver.chromium.org/downloads')
    setup_parser = subs.add_parser('list')
    setup_parser = subs.add_parser('create')
    setup_parser.add_argument('-t', '--term', dest='term', required=True,
                              help='Term to store.', type=str)
    setup_parser.add_argument('--exact', dest='exact', action='store_true',
                              help='Exact matches only for term.')
    setup_parser.add_argument('-d', '--delivery', dest='delivery', required=True,
                              choices=['rss', 'mail'],
                              help='Delivery method of results.')
    setup_parser.add_argument('-f', '--frequency', dest='frequency', default="realtime",
                              choices=['realtime', 'daily', 'weekly'],
                              help='Frequency to send results. RSS only allows for realtime alerting.')
    setup_parser = subs.add_parser('delete')
    setup_parser.add_argument('--id', dest='term_id', required=True,
                              help='ID of the term to find for deletion.', type=str)
    args = parser.parse_args()

    if args.cmd == 'setup':
        if not os.path.exists(CONFIG_PATH):
            os.makedirs(CONFIG_PATH)
        if not os.path.exists(CONFIG_FILE):
            json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'),
                      indent=4, separators=(',', ': '))
        config = CONFIG_DEFAULTS
        config['email'] = args.email
        config['password'] = str(obfuscate(args.pwd, 'store'))
        json.dump(config, open(CONFIG_FILE, 'w'),
                  indent=4, separators=(',', ': '))

    config = json.load(open(CONFIG_FILE))
    if config.get('py2', PY2) != PY2:
        raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
    if config['password'] == '':
        raise Exception("Run setup before any other actions!")

    if args.cmd == 'seed':
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        with contextlib.closing(webdriver.Chrome(args.driver)) as driver:
            driver.get(ga.LOGIN_URL)
            wait = ui.WebDriverWait(driver, 10)  # timeout after 10 seconds
            inputElement = driver.find_element_by_name('Email')
            inputElement.send_keys(config['email'])
            inputElement.submit()
            time.sleep(3)
            inputElement = driver.find_element_by_id('Passwd')
            inputElement.send_keys(config['password'])
            inputElement.submit()
            print("[!] Waiting 15 seconds for authentication to complete")
            time.sleep(15)
            cookies = driver.get_cookies()
            collected = dict()
            for cookie in cookies:
                collected[str(cookie['name'])] = str(cookie['value'])
            with open(SESSION_FILE, 'wb') as f:
                pickle.dump(collected, f, protocol=2)
            print("Session has been seeded.")

    if args.cmd == 'list':
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        ga.authenticate()
        print(json.dumps(ga.list(), indent=4))

    if args.cmd == 'create':
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        ga.authenticate()
        alert_frequency = 'as_it_happens'
        if args.frequency == 'realtime':
            alert_frequency = 'as_it_happens'
        elif args.frequency == 'daily':
            alert_frequency = 'at_most_once_a_day'
        else:
            alert_frequency = 'at_most_once_a_week'
        monitor = ga.create(args.term, {'delivery': args.delivery.upper(),
                                        'alert_frequency': alert_frequency.upper(),
                                        'exact': args.exact})
        print(json.dumps(monitor, indent=4))

    if args.cmd == 'delete':
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        ga.authenticate()
        result = ga.delete(args.term_id)
        if result:
            print("%s was deleted" % args.term_id)
def getTemplates(fnames, blend=True):
    """ Process all headers to produce a set of combined headers
        that follows the rules defined by each instrument.
    """
    if not blend:
        newhdrs = blendheaders.getSingleTemplate(fnames[0])
        newtab = None
    else:
        # apply rules to create final version of headers, plus table
        newhdrs, newtab = blendheaders.get_blended_headers(inputs=fnames)

    cleanTemplates(newhdrs[1], newhdrs[2], newhdrs[3])

    return newhdrs, newtab
def make_lat_lons(cvects):
    """ Convert from directional cosines to latitude and longitude

    Parameters
    ----------
    cvects : directional cosine (i.e., x, y, z component) values

    Returns
    -------
    np.ndarray(2, nsrc) with the latitude and longitude values
    """
    lats = np.degrees(np.arcsin(cvects[2]))
    lons = np.degrees(np.arctan2(cvects[0], cvects[1]))
    # vstack (rather than hstack) yields the (2, nsrc) shape the
    # docstring documents; hstack would concatenate into (2*nsrc,)
    return np.vstack([lats, lons])
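A small sanity check of the conversion on unit vectors (illustrative only; the vstack fix above is an inference from the documented (2, nsrc) return shape):

import numpy as np

# columns are the unit vectors (0, 0, 1) and (1, 0, 0)
cvects = np.array([[0.0, 1.0],
                   [0.0, 0.0],
                   [1.0, 0.0]])
print(np.degrees(np.arcsin(cvects[2])))              # [90.  0.] -> latitudes
print(np.degrees(np.arctan2(cvects[0], cvects[1])))  # [ 0. 90.] -> longitudes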
def import_sqlite(db_file, older_than=None, **kwargs):
    """Reads the content of the database file and returns imported data."""
    conn = _open_sqlite(db_file)
    cur = conn.cursor()
    # get rows that were not exported yet
    select = "SELECT * FROM testcases WHERE exported != 'yes'"
    if older_than:
        cur.execute(" ".join((select, "AND sqltime < ?")), (older_than,))
    else:
        cur.execute(select)
    columns = [description[0] for description in cur.description]
    rows = cur.fetchall()

    # map data to columns
    results = []
    for row in rows:
        record = OrderedDict(list(zip(columns, row)))
        results.append(record)

    testrun = _get_testrun_from_sqlite(conn)

    conn.close()

    return xunit_exporter.ImportedData(results=results, testrun=testrun)
def _backspace(self):
    """Erase the last character in the snippet command."""
    if self.command == ':':
        return
    logger.log(5, "Snippet keystroke `Backspace`.")
    self.command = self.command[:-1]
def triple(self):
    """
    This module's target "triple" specification, as a string.
    """
    # LLVMGetTarget() points inside a std::string managed by LLVM.
    with ffi.OutputString(owned=False) as outmsg:
        ffi.lib.LLVMPY_GetTarget(self, outmsg)
        return str(outmsg)
def binary_classification_metrics(y_true, y_pred, y_prob):  # TODO: update description
    """classification_metrics. This function cal...

    Parameters
    ----------
    y_true : array-like
        Ground truth (correct) labels.

    y_pred : array-like
        Predicted labels, as returned by a classifier.

    y_prob : array-like
        Predicted probabilities, as returned by a classifier.

    Returns
    -------
    dict(tp, fp, fn, tn, accuracy, recall, precision, f1score, auc, brier_loss)

    Examples
    --------
    >>> from costcla.metrics import binary_classification_metrics
    >>> y_pred = [0, 1, 0, 0]
    >>> y_true = [0, 1, 1, 0]
    >>> y_prob = [0.2, 0.8, 0.4, 0.3]
    >>> binary_classification_metrics(y_true, y_pred, y_prob)
    {'accuracy': 0.75, 'auc': 0.75, 'brier_loss': 0.13249999999999998, 'f1score': 0.6666666666666666, 'fn': 1.0, 'fp': 0.0, 'precision': 1.0, 'recall': 0.5, 'tn': 2.0, 'tp': 1.0}
    """
    y_true = column_or_1d(y_true)
    y_pred = column_or_1d(y_pred)
    y_prob = column_or_1d(y_prob)

    n_samples = y_true.shape[0]

    tp = float((y_pred * y_true).sum())
    fp = float((y_pred[np.nonzero(y_true == 0)[0]]).sum())
    fn = float((y_true[np.nonzero(y_pred == 0)[0]]).sum())
    tn = float(n_samples - tp - fn - fp)

    accuracy = (tp + tn) / n_samples
    auc = roc_auc_score(y_true, y_pred)
    brier_loss = brier_score_loss(y_true, y_prob)

    if (tp + fp) == 0:
        precision = 0
    else:
        precision = tp / (tp + fp)

    if (tp + fn) == 0:
        recall = 0
    else:
        recall = tp / (tp + fn)

    if (recall + precision) == 0:
        f1score = 0
    else:
        f1score = 2 * (precision * recall) / (precision + recall)

    return dict(tp=tp, fp=fp, fn=fn, tn=tn, accuracy=accuracy, recall=recall,
                precision=precision, f1score=f1score, auc=auc,
                brier_loss=brier_loss)
def load_configs(self):
    """Load configurations for all widgets in General, Scrolling
    and Appearance tabs from dconf.
    """
    self._load_default_shell_settings()

    # restore tabs startup
    value = self.settings.general.get_boolean('restore-tabs-startup')
    self.get_widget('restore-tabs-startup').set_active(value)

    # restore tabs notify
    value = self.settings.general.get_boolean('restore-tabs-notify')
    self.get_widget('restore-tabs-notify').set_active(value)

    # save tabs when changed
    value = self.settings.general.get_boolean('save-tabs-when-changed')
    self.get_widget('save-tabs-when-changed').set_active(value)

    # login shell
    value = self.settings.general.get_boolean('use-login-shell')
    self.get_widget('use_login_shell').set_active(value)

    # tray icon
    value = self.settings.general.get_boolean('use-trayicon')
    self.get_widget('use_trayicon').set_active(value)

    # popup
    value = self.settings.general.get_boolean('use-popup')
    self.get_widget('use_popup').set_active(value)

    # workspace-specific tab sets
    value = self.settings.general.get_boolean('workspace-specific-tab-sets')
    self.get_widget('workspace-specific-tab-sets').set_active(value)

    # prompt on quit
    value = self.settings.general.get_boolean('prompt-on-quit')
    self.get_widget('prompt_on_quit').set_active(value)

    # prompt on close_tab
    value = self.settings.general.get_int('prompt-on-close-tab')
    self.get_widget('prompt_on_close_tab').set_active(value)
    self.get_widget('prompt_on_quit').set_sensitive(value != 2)

    # gtk theme
    value = self.settings.general.get_string('gtk-theme-name')
    combo = self.get_widget('gtk_theme_name')
    for i in combo.get_model():
        if i[0] == value:
            combo.set_active_iter(i.iter)
            break

    # prefer dark gtk theme
    value = self.settings.general.get_boolean('gtk-prefer-dark-theme')
    self.get_widget('gtk_prefer_dark_theme').set_active(value)

    # ontop
    value = self.settings.general.get_boolean('window-ontop')
    self.get_widget('window_ontop').set_active(value)

    # tab ontop
    value = self.settings.general.get_boolean('tab-ontop')
    self.get_widget('tab_ontop').set_active(value)

    # refocus
    value = self.settings.general.get_boolean('window-refocus')
    self.get_widget('window_refocus').set_active(value)

    # losefocus
    value = self.settings.general.get_boolean('window-losefocus')
    self.get_widget('window_losefocus').set_active(value)

    # use VTE titles
    value = self.settings.general.get_boolean('use-vte-titles')
    self.get_widget('use_vte_titles').set_active(value)

    # set window title
    value = self.settings.general.get_boolean('set-window-title')
    self.get_widget('set_window_title').set_active(value)

    # abbreviate tab names
    self.get_widget('abbreviate_tab_names').set_sensitive(value)
    value = self.settings.general.get_boolean('abbreviate-tab-names')
    self.get_widget('abbreviate_tab_names').set_active(value)

    # max tab name length
    value = self.settings.general.get_int('max-tab-name-length')
    self.get_widget('max_tab_name_length').set_value(value)

    self.update_vte_subwidgets_states()

    value = self.settings.general.get_int('window-height')
    self.get_widget('window_height').set_value(value)

    value = self.settings.general.get_int('window-width')
    self.get_widget('window_width').set_value(value)

    # window displacements
    value = self.settings.general.get_int('window-vertical-displacement')
    self.get_widget('window_vertical_displacement').set_value(value)

    value = self.settings.general.get_int('window-horizontal-displacement')
    self.get_widget('window_horizontal_displacement').set_value(value)

    value = self.settings.general.get_int('window-halignment')
    which_button = {
        ALIGN_RIGHT: 'radiobutton_align_right',
        ALIGN_LEFT: 'radiobutton_align_left',
        ALIGN_CENTER: 'radiobutton_align_center'
    }
    self.get_widget(which_button[value]).set_active(True)
    self.get_widget("window_horizontal_displacement").set_sensitive(value != ALIGN_CENTER)

    value = self.settings.general.get_boolean('open-tab-cwd')
    self.get_widget('open_tab_cwd').set_active(value)

    # tab bar
    value = self.settings.general.get_boolean('window-tabbar')
    self.get_widget('window_tabbar').set_active(value)

    # start fullscreen
    value = self.settings.general.get_boolean('start-fullscreen')
    self.get_widget('start_fullscreen').set_active(value)

    # start at GNOME login
    value = self.settings.general.get_boolean('start-at-login')
    self.get_widget('start_at_login').set_active(value)

    # use audible bell
    value = self.settings.general.get_boolean('use-audible-bell')
    self.get_widget('use_audible_bell').set_active(value)

    self._load_screen_settings()

    value = self.settings.general.get_boolean('quick-open-enable')
    self.get_widget('quick_open_enable').set_active(value)
    self.get_widget('quick_open_command_line').set_sensitive(value)
    self.get_widget('quick_open_in_current_terminal').set_sensitive(value)
    text = Gtk.TextBuffer()
    text = self.get_widget('quick_open_supported_patterns').get_buffer()
    for title, matcher, _useless in QUICK_OPEN_MATCHERS:
        text.insert_at_cursor("%s: %s\n" % (title, matcher))
    self.get_widget('quick_open_supported_patterns').set_buffer(text)

    value = self.settings.general.get_string('quick-open-command-line')
    if value is None:
        value = "subl %(file_path)s:%(line_number)s"
    self.get_widget('quick_open_command_line').set_text(value)

    value = self.settings.general.get_boolean('quick-open-in-current-terminal')
    self.get_widget('quick_open_in_current_terminal').set_active(value)

    value = self.settings.general.get_string('startup-script')
    if value:
        self.get_widget('startup_script').set_text(value)

    # use display where the mouse is currently
    value = self.settings.general.get_boolean('mouse-display')
    self.get_widget('mouse_display').set_active(value)

    # scrollbar
    value = self.settings.general.get_boolean('use-scrollbar')
    self.get_widget('use_scrollbar').set_active(value)

    # history size
    value = self.settings.general.get_int('history-size')
    self.get_widget('history_size').set_value(value)

    # infinite history
    value = self.settings.general.get_boolean('infinite-history')
    self.get_widget('infinite_history').set_active(value)

    # scroll output
    value = self.settings.general.get_boolean('scroll-output')
    self.get_widget('scroll_output').set_active(value)

    # scroll keystroke
    value = self.settings.general.get_boolean('scroll-keystroke')
    self.get_widget('scroll_keystroke').set_active(value)

    # default font
    value = self.settings.general.get_boolean('use-default-font')
    self.get_widget('use_default_font').set_active(value)
    self.get_widget('font_style').set_sensitive(not value)

    # font
    value = self.settings.styleFont.get_string('style')
    if value:
        self.get_widget('font_style').set_font_name(value)

    # allow bold font
    value = self.settings.styleFont.get_boolean('allow-bold')
    self.get_widget('allow_bold').set_active(value)

    # bold is bright
    value = self.settings.styleFont.get_boolean('bold-is-bright')
    self.get_widget('bold_is_bright').set_active(value)

    # palette
    self.fill_palette_names()
    value = self.settings.styleFont.get_string('palette-name')
    self.set_palette_name(value)
    value = self.settings.styleFont.get_string('palette')
    self.set_palette_colors(value)
    self.update_demo_palette(value)

    # cursor shape
    value = self.settings.style.get_int('cursor-shape')
    self.set_cursor_shape(value)

    # cursor blink
    value = self.settings.style.get_int('cursor-blink-mode')
    self.set_cursor_blink_mode(value)

    value = self.settings.styleBackground.get_int('transparency')
    self.get_widget('background_transparency').set_value(MAX_TRANSPARENCY - value)

    value = self.settings.general.get_int('window-valignment')
    self.get_widget('top_align').set_active(value)

    # it's a separated method, to be reused.
    self.reload_erase_combos()

    # custom command context-menu configuration file
    custom_command_file = self.settings.general.get_string('custom-command-file')
    if custom_command_file:
        custom_command_file_name = os.path.expanduser(custom_command_file)
    else:
        custom_command_file_name = None
    custom_cmd_filter = Gtk.FileFilter()
    custom_cmd_filter.set_name(_("JSON files"))
    custom_cmd_filter.add_pattern("*.json")
    self.get_widget('custom_command_file_chooser').add_filter(custom_cmd_filter)
    all_files_filter = Gtk.FileFilter()
    all_files_filter.set_name(_("All files"))
    all_files_filter.add_pattern("*")
    self.get_widget('custom_command_file_chooser').add_filter(all_files_filter)
    if custom_command_file_name:
        self.get_widget('custom_command_file_chooser').set_filename(custom_command_file_name)

    # hooks
    self._load_hooks_settings()
def _get_calculated_value(self, value):
    """
    Gets the final value of the field and runs the lambda functions
    recursively until a final value is derived.

    :param value: The value to calculate/expand
    :return: The final value
    """
    if isinstance(value, types.LambdaType):
        expanded_value = value(self.structure)
        return self._get_calculated_value(expanded_value)
    else:
        # perform one final parsing of the value in case lambda value
        # returned a different type
        return self._parse_value(value)
def make_seekable(fileobj):
    """
    If the file-object is not seekable, return an ArchiveTemp of the
    file-object, otherwise return the file-object itself.
    """
    if sys.version_info < (3, 0) and isinstance(fileobj, file):
        filename = fileobj.name
        # Python 2 file objects lack seekable(); wrap the descriptor
        fileobj = io.FileIO(fileobj.fileno(), closefd=False)
        fileobj.name = filename
    assert isinstance(fileobj, io.IOBase), \
        "fileobj must be an instance of io.IOBase or a file, got %s" \
        % type(fileobj)
    return fileobj if fileobj.seekable() \
        else ArchiveTemp(fileobj)
def get_list(self, section, option):
    """
    This allows for loading of Pyramid list style configuration options:

    [foo]
    bar =
        baz
        qux
        zap

    ``get_list('foo', 'bar')`` returns ``['baz', 'qux', 'zap']``

    :param str section: The section to read.
    :param str option: The option to read from the section.
    :returns: list
    """
    value = self.get(section, option)
    return list(filter(None, (x.strip() for x in value.splitlines())))
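The splitlines/filter behaviour can be reproduced with the standard library's ConfigParser (a minimal sketch, independent of the class above):

from configparser import ConfigParser

parser = ConfigParser()
parser.read_string("[foo]\nbar =\n    baz\n    qux\n    zap\n")
value = parser.get('foo', 'bar')  # "\nbaz\nqux\nzap"
# the leading blank line is dropped by filter(None, ...)
print(list(filter(None, (x.strip() for x in value.splitlines()))))  # ['baz', 'qux', 'zap']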
def get_backoff_time(self):
    """ Formula for computing the current backoff

    :rtype: float
    """
    if self._observed_errors <= 1:
        return 0

    backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
    return min(self.BACKOFF_MAX, backoff_value)
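Worked out for a backoff_factor of 0.5 (a sketch; the cap of 120 seconds is an assumption matching urllib3's default BACKOFF_MAX):

backoff_factor, BACKOFF_MAX = 0.5, 120
for observed_errors in range(1, 6):
    delay = 0 if observed_errors <= 1 else min(BACKOFF_MAX, backoff_factor * 2 ** (observed_errors - 1))
    print(observed_errors, delay)  # 1->0, 2->1.0, 3->2.0, 4->4.0, 5->8.0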
def diff_list(self, list1, list2):
    """Extracts differences between lists. For debug purposes"""
    for key in list1:
        if key in list2 and list2[key] != list1[key]:
            print(key)
        elif key not in list2:
            print(key)
def warp(self, warp_matrix, img, iflag=cv2.INTER_NEAREST):
    """ Function to warp input image given an estimated 2D linear transformation

    :param warp_matrix: Linear 2x3 matrix to use to linearly warp the input images
    :type warp_matrix: ndarray
    :param img: Image to be warped with estimated transformation
    :type img: ndarray
    :param iflag: Interpolation flag, specifies the interpolation used during resampling of the warped image
    :type iflag: cv2.INTER_*
    :return: Warped image using the linear matrix
    """
    height, width = img.shape[:2]
    warped_img = np.zeros_like(img, dtype=img.dtype)
    # Check if image to warp is 2D or 3D. If 3D, need to loop over channels
    if (self.interpolation_type == InterpolationType.LINEAR) or img.ndim == 2:
        warped_img = cv2.warpAffine(img.astype(np.float32), warp_matrix, (width, height),
                                    flags=iflag).astype(img.dtype)
    elif img.ndim == 3:
        for idx in range(img.shape[-1]):
            warped_img[..., idx] = cv2.warpAffine(img[..., idx].astype(np.float32), warp_matrix,
                                                  (width, height), flags=iflag).astype(img.dtype)
    else:
        raise ValueError('Image has incorrect number of dimensions: {}'.format(img.ndim))
    return warped_img
def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM):
    '''
    .. versionadded:: 2014.1.0

    Ensure the value is present in the list-type grain. Note: If the grain that is
    provided in ``name`` is not present on the system, this new grain will be
    created with the corresponding provided value.

    name
        The grain name.

    value
        The value is present in the list type grain.

    delimiter
        A delimiter different from the default ``:`` can be provided.

        .. versionadded:: v2015.8.2

    The grain should be `list type <http://docs.python.org/2/tutorial/datastructures.html#data-structures>`_

    .. code-block:: yaml

        roles:
          grains.list_present:
            - value: web

    For multiple grains, the syntax looks like:

    .. code-block:: yaml

        roles:
          grains.list_present:
            - value:
                - web
                - dev
    '''
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    grain = __salt__['grains.get'](name)

    if grain:
        # check whether grain is a list
        if not isinstance(grain, list):
            ret['result'] = False
            ret['comment'] = 'Grain {0} is not a valid list'.format(name)
            return ret
        if isinstance(value, list):
            if set(value).issubset(set(__salt__['grains.get'](name))):
                ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
                return ret
            elif name in __context__.get('pending_grains', {}):
                # elements common to both
                intersection = set(value).intersection(__context__.get('pending_grains', {})[name])
                if intersection:
                    value = list(set(value).difference(__context__['pending_grains'][name]))
                    ret['comment'] = 'Removed value {0} from update due to context found in "{1}".\n'.format(value, name)
            if 'pending_grains' not in __context__:
                __context__['pending_grains'] = {}
            if name not in __context__['pending_grains']:
                __context__['pending_grains'][name] = set()
            __context__['pending_grains'][name].update(value)
        else:
            if value in grain:
                ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
                return ret
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Value {1} is set to be appended to grain {0}'.format(name, value)
                ret['changes'] = {'new': grain}
                return ret
    else:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Grain {0} is set to be added'.format(name)
            ret['changes'] = {'new': grain}
            return ret
    new_grains = __salt__['grains.append'](name, value)
    if isinstance(value, list):
        if not set(value).issubset(set(__salt__['grains.get'](name))):
            ret['result'] = False
            ret['comment'] = 'Failed append value {1} to grain {0}'.format(name, value)
            return ret
    else:
        if value not in __salt__['grains.get'](name, delimiter=DEFAULT_TARGET_DELIM):
            ret['result'] = False
            ret['comment'] = 'Failed append value {1} to grain {0}'.format(name, value)
            return ret
    ret['comment'] = 'Append value {1} to grain {0}'.format(name, value)
    ret['changes'] = {'new': new_grains}
    return ret
def get_upgrade(self, using=None, **kwargs):
    """
    Monitor how much of the index is upgraded.

    Any additional keyword arguments will be passed to
    ``Elasticsearch.indices.get_upgrade`` unchanged.
    """
    return self._get_connection(using).indices.get_upgrade(index=self._name, **kwargs)
def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer2dAffine'):
    """Batch Spatial Transformer function for `2D Affine Transformation <https://en.wikipedia.org/wiki/Affine_transformation>`__.

    Parameters
    ----------
    U : list of float
        tensor of inputs [batch, height, width, num_channels]
    thetas : list of float
        a set of transformations for each input [batch, num_transforms, 6]
    out_size : list of int
        the size of the output [out_height, out_width]
    name : str
        optional function name

    Returns
    -------
    float
        Tensor of size [batch * num_transforms, out_height, out_width, num_channels]

    """
    with tf.variable_scope(name):
        num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2])
        indices = [[i] * num_transforms for i in xrange(num_batch)]
        input_repeated = tf.gather(U, tf.reshape(indices, [-1]))
        return transformer(input_repeated, thetas, out_size)
def route(self, path=None, method='GET', callback=None, name=None,
          apply=None, skip=None, **config):
    """ A decorator to bind a function to a request URL. Example::

            @app.route('/hello/:name')
            def hello(name):
                return 'Hello %s' % name

        The ``:name`` part is a wildcard. See :class:`Router` for syntax
        details.

        :param path: Request path or a list of paths to listen to. If no
          path is specified, it is automatically generated from the
          signature of the function.
        :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
          methods to listen to. (default: `GET`)
        :param callback: An optional shortcut to avoid the decorator
          syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
        :param name: The name for this route. (default: None)
        :param apply: A decorator or plugin or a list of plugins. These are
          applied to the route callback in addition to installed plugins.
        :param skip: A list of plugins, plugin classes or names. Matching
          plugins are not installed to this route. ``True`` skips all.

        Any additional keyword arguments are stored as route-specific
        configuration and passed to plugins (see :meth:`Plugin.apply`).
    """
    if callable(path):
        path, callback = None, path
    plugins = makelist(apply)
    skiplist = makelist(skip)
    if 'decorate' in config:
        depr("The 'decorate' parameter was renamed to 'apply'")  # 0.9
        plugins += makelist(config.pop('decorate'))
    if config.pop('no_hooks', False):
        depr("The no_hooks parameter is no longer used. Add 'hooks' to the"
             " list of skipped plugins instead.")  # 0.9
        skiplist.append('hooks')
    static = config.get('static', False)  # depr 0.9

    def decorator(callback):
        for rule in makelist(path) or yieldroutes(callback):
            for verb in makelist(method):
                verb = verb.upper()
                cfg = dict(rule=rule, method=verb, callback=callback,
                           name=name, app=self, config=config,
                           apply=plugins, skip=skiplist)
                self.routes.append(cfg)
                cfg['id'] = self.routes.index(cfg)
                self.router.add(rule, verb, cfg['id'], name=name, static=static)
                if DEBUG:
                    self.ccache[cfg['id']] = self._build_callback(cfg)
        return callback

    return decorator(callback) if callback else decorator
def condition(condition=None, statement=None, _else=None, **kwargs):
    """
    Run a statement if the input condition is checked and return the
    statement result.

    :param condition: condition to check.
    :type condition: str or dict
    :param statement: statement to process if condition is checked.
    :type statement: str or dict
    :param _else: else statement.
    :type _else: str or dict
    :param kwargs: condition and statement additional parameters.

    :return: statement result.
    """
    result = None

    checked = False

    if condition is not None:
        checked = run(condition, **kwargs)

    if checked:  # if condition is checked
        if statement is not None:  # process statement
            result = run(statement, **kwargs)

    elif _else is not None:  # else process _else statement
        result = run(_else, **kwargs)

    return result
def patch(self, item, byte_order=BYTEORDER):
    """ Returns a memory :class:`Patch` for the given *item* that shall be
    patched in the `data source`.

    :param item: item to patch.

    :param byte_order: encoding :class:`Byteorder` for the item.
    :type byte_order: :class:`Byteorder`, :class:`str`
    """
    # Re-index the data object
    self.index_data()

    if is_container(item):
        length = item.container_size()
        if length[1] != 0:
            # Incomplete container
            raise ContainerLengthError(item, length)

        field = item.first_field()
        if field is None:
            # Empty container?
            return None

        index = field.index
        if index.bit != 0:
            # Badly placed container
            raise FieldIndexError(field, index)

        # Create a dummy byte array filled with zero bytes.
        # The dummy byte array is necessary because the length of
        # the buffer must correlate to the field indexes of the
        # appending fields.
        buffer = bytearray(b'\x00' * index.byte)

        # Append to the buffer the content mapped by the container fields
        item.serialize(buffer, index, byte_order=byte_order)

        # Content of the buffer mapped by the container fields
        content = buffer[index.byte:]

        if len(content) != length[0]:
            # Not correctly filled buffer!
            raise BufferError(len(content), length[0])

        return Patch(content, index.address, byte_order, length[0] * 8, 0, False)
    elif is_field(item):
        # Field index
        index = item.index

        # Field alignment
        alignment = item.alignment

        if index.bit != alignment.bit_offset:
            # Badly aligned field?
            raise FieldGroupOffsetError(
                item, index, Alignment(alignment.byte_size, index.bit))

        # Create a dummy byte array filled with zero bytes.
        # The dummy byte array is necessary because the length of
        # the buffer must correlate to the field index of the
        # appending field group.
        buffer = bytearray(b'\x00' * index.byte)

        # Append to the buffer the content mapped by the field
        item.serialize(buffer, index, byte_order=byte_order)

        # Content of the buffer mapped by the field group
        content = buffer[index.byte:]

        if len(content) != alignment.byte_size:
            # Not correctly filled buffer!
            raise BufferError(len(content), alignment.byte_size)

        # Patch size in bytes for the field in the content buffer
        patch_size, bit_offset = divmod(item.bit_size, 8)
        if bit_offset != 0:
            inject = True
            patch_size += 1
        else:
            inject = False

        # Patch offset in bytes for the field in the content buffer
        patch_offset, bit_offset = divmod(alignment.bit_offset, 8)
        if bit_offset != 0:
            inject = True

        if byte_order is Byteorder.big:
            start = alignment.byte_size - (patch_offset + patch_size)
            stop = alignment.byte_size - patch_offset
        else:
            start = patch_offset
            stop = patch_offset + patch_size

        return Patch(content[start:stop], index.address + start, byte_order,
                     item.bit_size, bit_offset, inject)
    else:
        raise MemberTypeError(self, item)
def modify_module(channel, module_name, module_state):
    """
    Creates an embed UI containing the module modified message

    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        module_name (str): The name of the module that was updated
        module_state (bool): The current state of the module

    Returns:
        embed: The created embed
    """
    # Create embed UI object
    gui = ui_embed.UI(
        channel,
        "{} updated".format(module_name),
        "{} is now {}".format(module_name, "activated" if module_state else "deactivated"),
        modulename=modulename
    )

    return gui
def create_message(self):
    """Returns a message body to send in this email. Should be from email.mime.*"""
    body = dedent("""\
        Received exception {exception} on {queue} from worker {worker}:

        {traceback}

        Payload:
        {payload}
        """).format(exception=self._exception, traceback=self._traceback,
                    queue=self._queue, payload=self._payload,
                    worker=self._worker)
    return MIMEText(body)
def set_encoding(self, encoding):
    """!
    @brief Change clusters encoding to specified type (index list, object list, labeling).

    @param[in] encoding (type_encoding): New type of clusters representation.

    """
    if encoding == self.__type_representation:
        return

    if self.__type_representation == type_encoding.CLUSTER_INDEX_LABELING:
        if encoding == type_encoding.CLUSTER_INDEX_LIST_SEPARATION:
            self.__clusters = self.__convert_label_to_index()
        else:
            self.__clusters = self.__convert_label_to_object()

    elif self.__type_representation == type_encoding.CLUSTER_INDEX_LIST_SEPARATION:
        if encoding == type_encoding.CLUSTER_INDEX_LABELING:
            self.__clusters = self.__convert_index_to_label()
        else:
            self.__clusters = self.__convert_index_to_object()

    else:
        if encoding == type_encoding.CLUSTER_INDEX_LABELING:
            self.__clusters = self.__convert_object_to_label()
        else:
            self.__clusters = self.__convert_object_to_index()

    self.__type_representation = encoding
def errorhandle(self, resp):
    """Parse API error responses and raise appropriate exceptions."""
    if self.format == 'json':
        parsed = xmltodict.parse(resp)
        errors = parsed[self.RESPONSE_TOKEN][self.ERROR_TOKEN]
        # Create list of errors if more than one error response is given
        if type(errors) is list and len(errors) > 1:
            messages = ", ".join([" ".join(["{}: {}".format(k, v) for k, v in e.items()])
                                  for e in errors])
        else:
            overlimit = any('transaction limit' in msg.lower() for msg in errors.values())
            if overlimit:
                raise APILimitExceeded("This API key has used up its daily quota of calls.")
            else:
                messages = " ".join(["{}: {}".format(k, v) for k, v in errors.items()])
    elif self.format == 'xml':
        import xml.etree.ElementTree as ET
        errors = ET.fromstring(resp).findall(self.ERROR_TOKEN)
        messages = ", ".join(err.find('msg').text for err in errors)
    else:
        raise ValueError("Invalid API response format specified: {}.".format(self.format))
    raise BustimeError("API returned: {}".format(messages))
def intertwine(*iterables):
    """Constructs an iterable which intertwines given iterables.

    The resulting iterable will return an item from first sequence,
    then from second, etc. until the last one - and then another item
    from first, then from second, etc. - up until all iterables
    are exhausted.
    """
    iterables = tuple(imap(ensure_iterable, iterables))

    empty = object()
    return (item
            for iterable in izip_longest(*iterables, fillvalue=empty)
            for item in iterable
            if item is not empty)
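The interleaving order is easy to see with the itertools equivalents of imap/izip_longest (a Python 3 sketch):

from itertools import zip_longest

empty = object()
a, b = [1, 2, 3], ['x', 'y']
print([item
       for group in zip_longest(a, b, fillvalue=empty)
       for item in group
       if item is not empty])  # [1, 'x', 2, 'y', 3]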
def waliki_box(context, slug, show_edit=True, *args, **kwargs):
    """
    A templatetag to render a wiki page content as a box in any webpage,
    and allow rapid edition if you have permission.

    It's inspired by `django-boxes`_

    .. _django-boxes: https://github.com/eldarion/django-boxes
    """
    request = context["request"]
    try:
        page = Page.objects.get(slug=slug)
    except Page.DoesNotExist:
        page = None
    if (page and check_perms_helper('change_page', request.user, slug)
            or (not page and check_perms_helper('add_page', request.user, slug))):
        form = PageForm(instance=page, initial={'slug': slug})
        form_action = reverse("waliki_edit", args=[slug])
    else:
        form = None
        form_action = None
    return {
        "request": request,
        "slug": slug,
        "label": slug.replace('/', '_'),
        "page": page,
        "form": form,
        "form_action": form_action,
    }
def load_images(input_dir, batch_shape):
    """Read png images from input directory in batches.

    Args:
        input_dir: input directory
        batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

    Yields:
        filenames: list of file names without path of each image
            Length of this list could be less than batch_size, in this case only
            first few images of the result are elements of the minibatch.
        images: array with all images from this batch
    """
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    batch_size = batch_shape[0]
    for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
        with tf.gfile.Open(filepath) as f:
            image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
        # Images for inception classifier are normalized to be in [-1, 1] interval.
        images[idx, :, :, :] = image * 2.0 - 1.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_size:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape)
            idx = 0
    if idx > 0:
        yield filenames, images
def deserialize_assign(self, workflow, start_node):
    """
    Reads the "pre-assign" or "post-assign" tag from the given node.

    start_node -- the xml node (xml.dom.minidom.Node)
    """
    name = start_node.getAttribute('name')
    attrib = start_node.getAttribute('field')
    value = start_node.getAttribute('value')
    kwargs = {}
    if name == '':
        _exc('name attribute required')
    if attrib != '' and value != '':
        _exc('Both, field and right-value attributes found')
    elif attrib == '' and value == '':
        _exc('field or value attribute required')
    elif value != '':
        kwargs['right'] = value
    else:
        kwargs['right_attribute'] = attrib
    return operators.Assign(name, **kwargs)
Reads the "pre-assign" or "post-assign" tag from the given node. start_node -- the xml node (xml.dom.minidom.Node)
def run(self):
    """Run the log monitor.

    This will query Redis once every second to check if there are new log
    files to monitor. It will also store those log files in Redis.
    """
    while True:
        self.update_log_filenames()
        self.open_closed_files()
        anything_published = self.check_log_files_and_publish_updates()
        # If nothing was published, then wait a little bit before checking
        # for logs to avoid using too much CPU.
        if not anything_published:
            time.sleep(0.05)
def fasta_files_equal(seq_file1, seq_file2):
    """Check equality of a FASTA file to another FASTA file

    Args:
        seq_file1: Path to a FASTA file
        seq_file2: Path to another FASTA file

    Returns:
        bool: If the sequences are the same

    """
    # Load already set representative sequence
    seq1 = SeqIO.read(open(seq_file1), 'fasta')

    # Load kegg sequence
    seq2 = SeqIO.read(open(seq_file2), 'fasta')

    # Test equality
    if str(seq1.seq) == str(seq2.seq):
        return True
    else:
        return False
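Note that only the sequences are compared, not the record IDs; a minimal in-memory check with Biopython (illustrative sketch):

from io import StringIO
from Bio import SeqIO

seq1 = SeqIO.read(StringIO(">a\nMKV\n"), 'fasta')
seq2 = SeqIO.read(StringIO(">b\nMKV\n"), 'fasta')
print(str(seq1.seq) == str(seq2.seq))  # True -- headers differ, sequences match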
def ks(self, num_ngrams):
    # type: (int) -> List[int]
    """ Provide a k for each ngram in the field value.

    :param num_ngrams: number of ngrams in the field value
    :return: [ k, ... ] a k value for each of num_ngrams such that
        the sum is exactly num_bits
    """
    if self.num_bits:
        k = int(self.num_bits / num_ngrams)
        residue = self.num_bits % num_ngrams
        return ([k + 1] * residue) + ([k] * (num_ngrams - residue))
    else:
        return [self.k if self.k else 0] * num_ngrams
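Worked example of the split (pure arithmetic, no assumptions about the class): 20 bits over 6 ngrams gives k = 3 with residue 2, so two ngrams get an extra bit.

num_bits, num_ngrams = 20, 6
k, residue = divmod(num_bits, num_ngrams)                # k = 3, residue = 2
print([k + 1] * residue + [k] * (num_ngrams - residue))  # [4, 4, 3, 3, 3, 3], sums to 20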
def _add_in_streams(self, bolt):
    """Adds inputs to a given protobuf Bolt message"""
    if self.inputs is None:
        return
    # sanitize inputs and get a map <GlobalStreamId -> Grouping>
    input_dict = self._sanitize_inputs()

    for global_streamid, gtype in input_dict.items():
        in_stream = bolt.inputs.add()
        in_stream.stream.CopyFrom(self._get_stream_id(global_streamid.component_id,
                                                      global_streamid.stream_id))
        if isinstance(gtype, Grouping.FIELDS):
            # it's a field grouping
            in_stream.gtype = gtype.gtype
            in_stream.grouping_fields.CopyFrom(self._get_stream_schema(gtype.fields))
        elif isinstance(gtype, Grouping.CUSTOM):
            # it's a custom grouping
            in_stream.gtype = gtype.gtype
            in_stream.custom_grouping_object = gtype.python_serialized
            in_stream.type = topology_pb2.CustomGroupingObjectType.Value("PYTHON_OBJECT")
        else:
            in_stream.gtype = gtype
def unlocked(self):
    """ Is the store unlocked so that I can decrypt the content?
    """
    if self.password is not None:
        return bool(self.password)
    else:
        if (
            "UNLOCK" in os.environ
            and os.environ["UNLOCK"]
            and self.config_key in self.config
            and self.config[self.config_key]
        ):
            log.debug("Trying to use environmental "
                      "variable to unlock wallet")
            self.unlock(os.environ.get("UNLOCK"))
            return bool(self.password)
    return False
def addConstraint(self, constraint, variables=None):
    """
    Add a constraint to the problem

    Example:

    >>> problem = Problem()
    >>> problem.addVariables(["a", "b"], [1, 2, 3])
    >>> problem.addConstraint(lambda a, b: b == a+1, ["a", "b"])
    >>> solutions = problem.getSolutions()
    >>>

    @param constraint: Constraint to be included in the problem
    @type  constraint: instance of a L{Constraint} subclass or a
                       function to be wrapped by L{FunctionConstraint}
    @param variables: Variables affected by the constraint (default to
                      all variables). Depending on the constraint type
                      the order may be important.
    @type  variables: set or sequence of variables
    """
    if not isinstance(constraint, Constraint):
        if callable(constraint):
            constraint = FunctionConstraint(constraint)
        else:
            msg = "Constraints must be instances of subclasses " \
                  "of the Constraint class"
            raise ValueError(msg)
    self._constraints.append((constraint, variables))
def plot_sector_exposures_net(net_exposures, sector_dict=None, ax=None):
    """
    Plots output of compute_sector_exposures as line graphs

    Parameters
    ----------
    net_exposures : arrays
        Arrays of net sector exposures (output of compute_sector_exposures).

    sector_dict : dict or OrderedDict
        Dictionary of all sectors
        - See full description in compute_sector_exposures
    """
    if ax is None:
        ax = plt.gca()

    if sector_dict is None:
        sector_names = list(SECTORS.values())  # list() so the view is indexable on Python 3
    else:
        sector_names = list(sector_dict.values())

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 11))

    for i in range(len(net_exposures)):
        ax.plot(net_exposures[i], color=color_list[i], alpha=0.8,
                label=sector_names[i])
    ax.set(title='Net exposures to sectors',
           ylabel='Proportion of net exposure \n in sectors')

    return ax
def _check_hint_bounds(self, ds):
    '''
    Checks for variables ending with ``_bounds``; if they are not cell
    boundary variables, make the recommendation.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    '''
    ret_val = []
    boundary_variables = cfutil.get_cell_boundary_variables(ds)
    for name in ds.variables:
        if name.endswith('_bounds') and name not in boundary_variables:
            msg = ('{} might be a cell boundary variable but there are no variables that define it '
                   'as a boundary using the `bounds` attribute.'.format(name))
            result = Result(BaseCheck.LOW, True, self.section_titles['7.1'], [msg])
            ret_val.append(result)

    return ret_val
Checks for variables ending with _bounds; if they are not cell boundary
variables, make the recommendation.

:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
def class_associations(self, cn: ClassDefinitionName, must_render: bool=False) -> str:
        """ Emit all associations for a focus class. If no focus classes are specified,
        associations for all classes are generated.

        @param cn: Name of class to be emitted
        @param must_render: True means render even if this is a target (class is specifically requested)
        @return: YUML representation of the association
        """

        # NOTE: YUML diagrams draw in the opposite order in which they are created, so we work from bottom to top and
        # from right to left
        assocs: List[str] = []
        if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes):
            cls = self.schema.classes[cn]

            # Slots
            for slotname in self.filtered_cls_slots(cn, False)[::-1]:
                slot = self.schema.slots[slotname]
                if slot.range in self.schema.classes:
                    assocs.append(self.class_box(cn) + (yuml_inline if slot.inlined else yuml_ref) +
                                  self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) +
                                  self.cardinality(slot) + '>' + self.class_box(slot.range))

            # Referencing slots
            if cn in self.synopsis.rangerefs:
                for slotname in sorted(self.synopsis.rangerefs[cn]):
                    slot = self.schema.slots[slotname]
                    if slot.domain in self.schema.classes and (slot.range != cls.name or must_render):
                        assocs.append(self.class_box(slot.domain) + (yuml_inline if slot.inlined else yuml_ref) +
                                      self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) +
                                      self.cardinality(slot) + '>' + self.class_box(cn))

            # Mixins used in the class
            for mixin in cls.mixins:
                assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin))

            # Classes that use the class as a mixin
            if cls.name in self.synopsis.mixinrefs:
                for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True):
                    assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn))

            # Classes that inject information
            if cn in self.synopsis.applytos:
                for injector in sorted(self.synopsis.applytos[cn].classrefs, reverse=True):
                    assocs.append(self.class_box(cn) + yuml_injected + self.class_box(ClassDefinitionName(injector)))
            self.associations_generated.add(cn)

            # Children
            if cn in self.synopsis.isarefs:
                for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True):
                    assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls)))

            # Parent
            if cls.is_a:
                assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn))

        return ', '.join(assocs)
Emit all associations for a focus class. If no focus classes are specified,
associations for all classes are generated.

@param cn: Name of class to be emitted
@param must_render: True means render even if this is a target (class is specifically requested)
@return: YUML representation of the association
def _process_state_final_run(self, job_record): """method takes care of processing job records in STATE_FINAL_RUN state""" uow = self.uow_dao.get_one(job_record.related_unit_of_work) if uow.is_processed: self.update_job(job_record, uow, job.STATE_PROCESSED) elif uow.is_noop: self.update_job(job_record, uow, job.STATE_NOOP) elif uow.is_canceled: self.update_job(job_record, uow, job.STATE_SKIPPED) elif uow.is_invalid: msg = 'Job {0}: UOW for {1}@{2} is in {3}; ' \ 'relying on the Garbage Collector to either recycle or cancel the UOW.' \ .format(job_record.db_id, job_record.process_name, job_record.timeperiod, uow.state) self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg) else: msg = 'Suppressed creating UOW for {0}@{1}; Job is in {2}; uow is in {3}' \ .format(job_record.process_name, job_record.timeperiod, job_record.state, uow.state) self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg) timetable_tree = self.timetable.get_tree(job_record.process_name) timetable_tree.build_tree()
method takes care of processing job records in STATE_FINAL_RUN state
def merge(directory, message, branch_label, rev_id, revisions): """Merge two revisions together, creating a new revision file""" _merge(directory, revisions, message, branch_label, rev_id)
Merge two revisions together, creating a new revision file
def get_core_api(): """ Create instance of Core V1 API of kubernetes: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CoreV1Api.md :return: instance of client """ global core_api if core_api is None: config.load_kube_config() if API_KEY is not None: # Configure API key authorization: BearerToken configuration = client.Configuration() configuration.api_key['authorization'] = API_KEY configuration.api_key_prefix['authorization'] = 'Bearer' core_api = client.CoreV1Api(client.ApiClient(configuration)) else: core_api = client.CoreV1Api() return core_api
Create instance of Core V1 API of kubernetes: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CoreV1Api.md :return: instance of client
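A usage sketch, assuming a cluster reachable through the credentials in ~/.kube/config; list_pod_for_all_namespaces is the standard CoreV1Api call for enumerating pods.

api = get_core_api()
for pod in api.list_pod_for_all_namespaces(watch=False).items:
    print(pod.metadata.namespace, pod.metadata.name, pod.status.phase)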
def collect_results(self, data_values):
        """Receive the data from the consumers polled and process it.

        :param dict data_values: The poll data returned from the consumer

        """
        self.last_poll_results['timestamp'] = self.poll_data['timestamp']

        # Get the name and consumer name and remove it from what is reported
        consumer_name = data_values['consumer_name']
        del data_values['consumer_name']
        process_name = data_values['name']
        del data_values['name']

        # Add it to our last poll global data
        if consumer_name not in self.last_poll_results:
            self.last_poll_results[consumer_name] = dict()
        self.last_poll_results[consumer_name][process_name] = data_values

        # Calculate the stats
        self.stats = self.calculate_stats(self.last_poll_results)
Receive the data from the consumers polled and process it.

:param dict data_values: The poll data returned from the consumer
def read(filename):
    """Reads an unstructured mesh with added data.

    :param filename: The file to read from.
    :type filename: str
    :returns mesh{2,3}d: The mesh data.
    :returns point_data: Point data read from file.
    :type point_data: dict
    :returns cell_data: Cell data read from file.
    :type cell_data: dict
    :returns field_data: Field data read from file.
    :type field_data: dict
    """
    mesh = meshio.read(filename)

    # make sure to include the used nodes only
    if "tetra" in mesh.cells:
        points, cells = _sanitize(mesh.points, mesh.cells["tetra"])
        return (
            MeshTetra(points, cells),
            mesh.point_data,
            mesh.cell_data,
            mesh.field_data,
        )
    elif "triangle" in mesh.cells:
        points, cells = _sanitize(mesh.points, mesh.cells["triangle"])
        return (
            MeshTri(points, cells),
            mesh.point_data,
            mesh.cell_data,
            mesh.field_data,
        )
    else:
        raise RuntimeError("Unknown mesh type.")
Reads an unstructured mesh with added data.

:param filename: The file to read from.
:type filename: str
:returns mesh{2,3}d: The mesh data.
:returns point_data: Point data read from file.
:type point_data: dict
:returns cell_data: Cell data read from file.
:type cell_data: dict
:returns field_data: Field data read from file.
:type field_data: dict
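A hedged usage sketch; 'pacman.vtk' is a hypothetical file holding a triangle mesh, and the call unpacks the four-tuple documented above.

mesh, point_data, cell_data, field_data = read('pacman.vtk')
print(type(mesh).__name__)        # MeshTri for a triangle mesh
print(sorted(point_data.keys()))  # any point data carried in the file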
def purge_queue(self, name): """Create message content and properties to purge queue with QMFv2 :param name: Name of queue to purge :type name: str :returns: Tuple containing content and method properties """ content = {"_object_id": {"_object_name": "org.apache.qpid.broker:queue:{0}".format(name)}, "_method_name": "purge", "_arguments": {"type": "queue", "name": name, "filter": dict()}} logger.debug("Message content -> {0}".format(content)) return content, self.method_properties
Create message content and properties to purge queue with QMFv2 :param name: Name of queue to purge :type name: str :returns: Tuple containing content and method properties
def process_point_value(cls, command_type, command, index, op_type): """ A PointValue was received from the Master. Process its payload. :param command_type: (string) Either 'Select' or 'Operate'. :param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.). :param index: (integer) DNP3 index of the payload's data definition. :param op_type: An OperateType, or None if command_type == 'Select'. """ _log.debug('Processing received point value for index {}: {}'.format(index, command))
A PointValue was received from the Master. Process its payload. :param command_type: (string) Either 'Select' or 'Operate'. :param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.). :param index: (integer) DNP3 index of the payload's data definition. :param op_type: An OperateType, or None if command_type == 'Select'.
def harvest_openaire_projects(source=None, setspec=None): """Harvest grants from OpenAIRE and store as authority records.""" loader = LocalOAIRELoader(source=source) if source \ else RemoteOAIRELoader(setspec=setspec) for grant_json in loader.iter_grants(): register_grant.delay(grant_json)
Harvest grants from OpenAIRE and store as authority records.
def evaluate(self, dataset): """ Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` """ if not isinstance(dataset, DataFrame): raise ValueError("dataset must be a DataFrame but got %s." % type(dataset)) java_glr_summary = self._call_java("evaluate", dataset) return GeneralizedLinearRegressionSummary(java_glr_summary)
Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame`
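A sketch of the intended call pattern, assuming train_df and test_df are Spark DataFrames with the usual 'features'/'label' columns (illustrative names):

from pyspark.ml.regression import GeneralizedLinearRegression

glr = GeneralizedLinearRegression(family='gaussian', link='identity')
model = glr.fit(train_df)            # train_df: DataFrame of features/label
summary = model.evaluate(test_df)    # test_df must share the schema
print(summary.aic, summary.deviance)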
def fileImport(filepath, ignore=None):
        """
        Imports the module located at the given filepath.

        :param      filepath | <str>
                    ignore   | [<str>, ..] || None

        :return     <module> || None
        """
        basepath, package = EnvManager.packageSplit(filepath)
        if not (basepath and package):
            return None

        # make sure this is not part of the ignored package list
        if ignore and package in ignore:
            return None

        basepath = os.path.normcase(basepath)
        if basepath not in sys.path:
            sys.path.insert(0, basepath)

        logger.debug('Importing: %s' % package)
        try:
            __import__(package)
            module = sys.modules[package]
        except ImportError:
            logger.exception('ImportError: %s' % package)
            return None
        except KeyError:
            logger.exception('Could not find sys.modules package: %s' % package)
            return None
        except StandardError:
            logger.exception('Unknown error occurred while importing %s' % package)
            return None

        return module
Imports the module located at the given filepath. :param filepath | <str> ignore | [<str>, ..] || None :return <module> || None
def get_parent_element(self): """Signatures and Audit elements share sub-elements, we need to know which to set attributes on""" return {AUDIT_REF_STATE: self.context.audit_record, SIGNATURE_REF_STATE: self.context.signature}[self.ref_state]
Signatures and Audit elements share sub-elements, we need to know which to set attributes on
def path(self): """Getter property for the URL path to this Task. :rtype: string :returns: The URL path to this task. """ if not self.id: raise ValueError('Cannot determine path without a task id.') return self.path_helper(self.taskqueue.path, self.id)
Getter property for the URL path to this Task. :rtype: string :returns: The URL path to this task.
def _Resample(self, stats, target_size): """Resamples the stats to have a specific number of data points.""" t_first = stats[0][0] t_last = stats[-1][0] interval = (t_last - t_first) / target_size result = [] current_t = t_first current_v = 0 i = 0 while i < len(stats): stat_t = stats[i][0] stat_v = stats[i][1] if stat_t <= (current_t + interval): # Always add the last value in an interval to the result. current_v = stat_v i += 1 else: result.append([current_t + interval, current_v]) current_t += interval result.append([current_t + interval, current_v]) return result
Resamples the stats to have a specific number of data points.
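A worked sketch of the bucketing with fabricated data: six evenly spaced samples resampled down to two points.

# stats spans t = 0..5, so target_size=2 gives interval = (5 - 0) / 2 = 2.5.
# Each emitted point carries the last value observed inside its interval:
stats = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]
# _Resample(stats, 2) -> [[2.5, 3], [5.0, 6]]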
def console_blit( src: tcod.console.Console, x: int, y: int, w: int, h: int, dst: tcod.console.Console, xdst: int, ydst: int, ffade: float = 1.0, bfade: float = 1.0, ) -> None: """Blit the console src from x,y,w,h to console dst at xdst,ydst. .. deprecated:: 8.5 Call the :any:`Console.blit` method instead. """ lib.TCOD_console_blit( _console(src), x, y, w, h, _console(dst), xdst, ydst, ffade, bfade )
Blit the console src from x,y,w,h to console dst at xdst,ydst. .. deprecated:: 8.5 Call the :any:`Console.blit` method instead.
def service_list(auth=None, **kwargs): ''' List services CLI Example: .. code-block:: bash salt '*' keystoneng.service_list ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_services(**kwargs)
List services CLI Example: .. code-block:: bash salt '*' keystoneng.service_list
def create_key(self, title, key): """Create a new key for the authenticated user. :param str title: (required), key title :param key: (required), actual key contents, accepts path as a string or file-like object :returns: :class:`Key <github3.users.Key>` """ created = None if title and key: url = self._build_url('user', 'keys') req = self._post(url, data={'title': title, 'key': key}) json = self._json(req, 201) if json: created = Key(json, self) return created
Create a new key for the authenticated user. :param str title: (required), key title :param key: (required), actual key contents, accepts path as a string or file-like object :returns: :class:`Key <github3.users.Key>`
def get_zip_data(self, filename): """Get data from `filename` if it is a zip file path. Returns the string data read from the zip file, or None if no zip file could be found or `filename` isn't in it. The data returned will be an empty string if the file is empty. """ import zipimport markers = ['.zip'+os.sep, '.egg'+os.sep] for marker in markers: if marker in filename: parts = filename.split(marker) try: zi = zipimport.zipimporter(parts[0]+marker[:-1]) except zipimport.ZipImportError: continue try: data = zi.get_data(parts[1]) except IOError: continue return to_string(data) return None
Get data from `filename` if it is a zip file path. Returns the string data read from the zip file, or None if no zip file could be found or `filename` isn't in it. The data returned will be an empty string if the file is empty.
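A hedged sketch of the call; cov stands in for whatever object carries this method, and the egg path is hypothetical. The method returns None unless the path actually routes through a zip or egg archive.

data = cov.get_zip_data('/deps/pkg-1.0.egg/pkg/module.py')
if data is None:
    print('not inside a zip/egg, or member missing')
else:
    print('%d characters of source recovered' % len(data))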
def check_unique_tokens(sender, instance, **kwargs): """ Ensures that mobile and email tokens are unique or tries once more to generate. """ if isinstance(instance, CallbackToken): if CallbackToken.objects.filter(key=instance.key, is_active=True).exists(): instance.key = generate_numeric_token()
Ensures that mobile and email tokens are unique or tries once more to generate.
def save_plain_image_as_file(self, filepath, format='png', quality=90): """Used for generating thumbnails. Does not include overlaid graphics. """ pixbuf = self.get_plain_image_as_pixbuf() options, values = [], [] if format == 'jpeg': options.append('quality') values.append(str(quality)) pixbuf.savev(filepath, format, options, values)
Used for generating thumbnails. Does not include overlaid graphics.
def parse_wiki_terms(doc): '''who needs an html parser. fragile hax, but checks the result at the end''' results = [] last3 = ['', '', ''] header = True for line in doc.split('\n'): last3.pop(0) last3.append(line.strip()) if all(s.startswith('<td>') and not s == '<td></td>' for s in last3): if header: header = False continue last3 = [s.replace('<td>', '').replace('</td>', '').strip() for s in last3] rank, term, count = last3 rank = int(rank.split()[0]) term = term.replace('</a>', '') term = term[term.index('>')+1:].lower() results.append(term) assert len(results) in [1000, 2000, 1284] # early docs have 1k entries, later have 2k, last doc has 1284 return results
who needs an html parser. fragile hax, but checks the result at the end
def analyse(file, length=None): """Analyse application layer packets. Keyword arguments: * file -- bytes or file-like object, packet to be analysed * length -- int, length of the analysing packet Returns: * Analysis -- an Analysis object from `pcapkit.analyser` """ if isinstance(file, bytes): file = io.BytesIO(file) io_check(file) int_check(length or sys.maxsize) return analyse2(file, length)
Analyse application layer packets. Keyword arguments: * file -- bytes or file-like object, packet to be analysed * length -- int, length of the analysing packet Returns: * Analysis -- an Analysis object from `pcapkit.analyser`
def create_session(self):
        """Create a session.

        First we look in self.key_file for a path to a json file with the
        credentials. The key file should be a JSON object whose keys are
        valid ``boto3.session.Session`` keyword arguments (typically
        'aws_access_key_id' and 'aws_secret_access_key'), since its parsed
        contents are passed directly to the Session constructor.

        Next we look at self.profile for a profile name and try
        to use the Session call to automatically pick up the keys for the profile from
        the user default keys file ~/.aws/config.

        Finally, boto3 will look for the keys in environment variables:
        AWS_ACCESS_KEY_ID: The access key for your AWS account.
        AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.
        AWS_SESSION_TOKEN: The session key for your AWS account.
        This is only needed when you are using temporary credentials.
        The AWS_SECURITY_TOKEN environment variable can also be used,
        but is only supported for backwards compatibility purposes.
        AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
        """
        session = None

        if self.key_file is not None:
            credfile = os.path.expandvars(os.path.expanduser(self.key_file))

            try:
                with open(credfile, 'r') as f:
                    creds = json.load(f)
            except json.JSONDecodeError as e:
                logger.error(
                    "EC2Provider '{}': json decode error in credential file {}".format(self.label, credfile)
                )
                raise e

            except Exception as e:
                logger.debug(
                    "EC2Provider '{0}' caught exception while reading credential file: {1}".format(
                        self.label, credfile
                    )
                )
                raise e

            logger.debug("EC2Provider '{}': Using credential file to create session".format(self.label))
            session = boto3.session.Session(region_name=self.region, **creds)
        elif self.profile is not None:
            logger.debug("EC2Provider '{}': Using profile name to create session".format(self.label))
            session = boto3.session.Session(
                profile_name=self.profile, region_name=self.region
            )
        else:
            logger.debug("EC2Provider '{}': Using environment variables to create session".format(self.label))
            session = boto3.session.Session(region_name=self.region)

        return session
Create a session.

First we look in self.key_file for a path to a json file with the
credentials. The key file should be a JSON object whose keys are valid
``boto3.session.Session`` keyword arguments (typically
'aws_access_key_id' and 'aws_secret_access_key'), since its parsed
contents are passed directly to the Session constructor.

Next we look at self.profile for a profile name and try
to use the Session call to automatically pick up the keys for the profile from
the user default keys file ~/.aws/config.

Finally, boto3 will look for the keys in environment variables:
AWS_ACCESS_KEY_ID: The access key for your AWS account.
AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.
AWS_SESSION_TOKEN: The session key for your AWS account.
This is only needed when you are using temporary credentials.
The AWS_SECURITY_TOKEN environment variable can also be used,
but is only supported for backwards compatibility purposes.
AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
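A sketch of the key-file route described above; provider stands in for a configured instance, and the file path is hypothetical. Because the loaded JSON is splatted straight into boto3.session.Session(**creds), the keys must be valid Session keyword arguments.

# ~/.aws/ec2_keys.json (hypothetical):
# {
#     "aws_access_key_id": "AKIA...",
#     "aws_secret_access_key": "..."
# }
provider.key_file = '~/.aws/ec2_keys.json'
session = provider.create_session()
print(session.region_name)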
def jdbc(self, url, table, mode=None, properties=None): """Saves the content of the :class:`DataFrame` to an external database table via JDBC. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: Name of the table in the external database. :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) self.mode(mode)._jwrite.jdbc(url, table, jprop)
Saves the content of the :class:`DataFrame` to an external database table via JDBC. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: Name of the table in the external database. :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
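A usage sketch with an illustrative PostgreSQL URL; df is any Spark DataFrame, and the JDBC driver for the target database must be on the classpath.

df.write.jdbc(
    url='jdbc:postgresql://db.example.com:5432/analytics',
    table='public.events',
    mode='append',
    properties={'user': 'SYSTEM', 'password': 'mypassword'},
)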
def fetch(clobber=False):
    """
    Downloads the Marshall et al. (2006) dust map, which is based on 2MASS
    stellar photometry.

    Args:
        clobber (Optional[:obj:`bool`]): If ``True``, any existing file will
            be overwritten, even if it appears to match. If ``False`` (the
            default), :obj:`fetch()` will attempt to determine if the dataset
            already exists. This determination is not 100% robust against
            data corruption.
    """
    table_dir = os.path.join(data_dir(), 'marshall')

    # Check if file already exists
    if not clobber:
        h5_fname = os.path.join(table_dir, 'marshall.h5')
        h5_size = 5033290  # Guess, in Bytes
        h5_dsets = {
            'l': (801, 81),
            'b': (801, 81),
            'chi2_all': (801, 81),
            'chi2_giants': (801, 81),
            'A': (801, 81, 33),
            'sigma_A': (801, 81, 33),
            'dist': (801, 81, 33),
            'sigma_dist': (801, 81, 33)
        }

        if fetch_utils.h5_file_exists(h5_fname, h5_size, dsets=h5_dsets):
            print('File appears to exist already. Call ``fetch(clobber=True)`` '
                  'to force overwriting of existing file.')
            return

    # Download the ASCII table
    url = 'ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/453/635/table1.dat.gz'
    md5 = '637b95b025517a8b9757b6465b632285'
    table_fname = os.path.join(table_dir, 'table1.dat.gz')
    fetch_utils.download_and_verify(url, md5, fname=table_fname)

    # Download the README
    url = 'ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/453/635/ReadMe'
    md5 = '3b7c1296b181b3d77106ab50193dc7ee'
    readme_fname = os.path.join(table_dir, 'ReadMe')
    fetch_utils.download_and_verify(url, md5, fname=readme_fname)

    # Convert from ASCII table to HDF5
    dat2hdf5(table_dir)

    # Cleanup
    print('Cleaning up ...')
    os.remove(table_fname)
    os.remove(readme_fname)
Downloads the Marshall et al. (2006) dust map, which is based on 2MASS
stellar photometry.

Args:
    clobber (Optional[:obj:`bool`]): If ``True``, any existing file will be
        overwritten, even if it appears to match. If ``False`` (the default),
        :obj:`fetch()` will attempt to determine if the dataset already
        exists. This determination is not 100% robust against data corruption.
def big_rnn_lm_2048_512(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs): r"""Big 1-layer LSTMP language model. Both embedding and projection size are 512. Hidden size is 2048. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'gbw'. If specified, then the returned vocabulary is extracted from the training set of the dataset. If None, then vocab is required, for specifying embedding weight size, and is directly returned. The pre-trained model achieves 44.05 ppl on Test of GBW dataset. vocab : gluonnlp.Vocab or None, default None Vocabulary object to be used with the language model. Required when dataset_name is not specified. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block, gluonnlp.Vocab """ predefined_args = {'embed_size': 512, 'hidden_size': 2048, 'projection_size': 512, 'num_layers': 1, 'embed_dropout': 0.1, 'encode_dropout': 0.1} mutable_args = ['embed_dropout', 'encode_dropout'] assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \ 'Cannot override predefined model settings.' predefined_args.update(kwargs) return _get_rnn_model(BigRNN, 'big_rnn_lm_2048_512', dataset_name, vocab, pretrained, ctx, root, **predefined_args)
r"""Big 1-layer LSTMP language model. Both embedding and projection size are 512. Hidden size is 2048. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'gbw'. If specified, then the returned vocabulary is extracted from the training set of the dataset. If None, then vocab is required, for specifying embedding weight size, and is directly returned. The pre-trained model achieves 44.05 ppl on Test of GBW dataset. vocab : gluonnlp.Vocab or None, default None Vocabulary object to be used with the language model. Required when dataset_name is not specified. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block, gluonnlp.Vocab
def t_php_START_HEREDOC(t): r'<<<[ \t]*(?P<label>[A-Za-z_][\w_]*)\n' t.lexer.lineno += t.value.count("\n") t.lexer.push_state('heredoc') t.lexer.heredoc_label = t.lexer.lexmatch.group('label') return t
r'<<<[ \t]*(?P<label>[A-Za-z_][\w_]*)\n
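For reference, a sketch of the PHP source this rule matches; the regex consumes the '<<<EOT\n' opener, records 'EOT' as t.lexer.heredoc_label, and pushes the lexer into the 'heredoc' state until the closing label is consumed by the rules of that state.

php_src = '$s = <<<EOT\nfirst line\nsecond line\nEOT;\n'
# The token fires on '<<<EOT\n' only; the body and terminator are handled
# by the rules registered for the pushed 'heredoc' lexer state.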
def httpapi_request(client, **params) -> 'Response': """Send a request to AniDB HTTP API. https://wiki.anidb.net/w/HTTP_API_Definition """ return requests.get( _HTTPAPI, params={ 'client': client.name, 'clientver': client.version, 'protover': 1, **params })
Send a request to AniDB HTTP API. https://wiki.anidb.net/w/HTTP_API_Definition
def posttrans_hook(conduit): """ Hook after the package installation transaction. :param conduit: :return: """ # Integrate Yum with Salt if 'SALT_RUNNING' not in os.environ: with open(CK_PATH, 'w') as ck_fh: ck_fh.write('{chksum} {mtime}\n'.format(chksum=_get_checksum(), mtime=_get_mtime()))
Hook after the package installation transaction. :param conduit: :return:
def add_role(ctx, role): """Grant a role to an existing user""" if role is None: log('Specify the role with --role') return if ctx.obj['username'] is None: log('Specify the username with --username') return change_user = ctx.obj['db'].objectmodels['user'].find_one({ 'name': ctx.obj['username'] }) if role not in change_user.roles: change_user.roles.append(role) change_user.save() log('Done') else: log('User already has that role!', lvl=warn)
Grant a role to an existing user
def debug(self): '''Retrieve the debug information from the charmstore.''' url = '{}/debug/status'.format(self.url) data = self._get(url) return data.json()
Retrieve the debug information from the charmstore.
def run(items, background=None): """Detect copy number variations from batched set of samples using WHAM. """ if not background: background = [] background_bams = [] paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items) if paired: inputs = [paired.tumor_data] if paired.normal_bam: background = [paired.normal_data] background_bams = [paired.normal_bam] else: assert not background inputs, background = shared.find_case_control(items) background_bams = [x["align_bam"] for x in background] orig_vcf = _run_wham(inputs, background_bams) out = [] for data in inputs: if "sv" not in data: data["sv"] = [] final_vcf = shared.finalize_sv(orig_vcf, data, items) data["sv"].append({"variantcaller": "wham", "vrn_file": final_vcf}) out.append(data) return out
Detect copy number variations from batched set of samples using WHAM.
def load_env_from_file(filename): """ Read an env file into a collection of (name, value) tuples. """ if not os.path.exists(filename): raise FileNotFoundError("Environment file {} does not exist.".format(filename)) with open(filename) as f: for lineno, line in enumerate(f): line = line.strip() if not line or line.startswith("#"): continue if "=" not in line: raise SyntaxError("Invalid environment file syntax in {} at line {}.".format(filename, lineno + 1)) name, value = parse_var(line) yield name, value
Read an env file into a collection of (name, value) tuples.
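A self-contained usage sketch with a throwaway file; parse_var (not shown here) is assumed to split 'NAME=value' lines.

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.env', delete=False) as f:
    f.write('# comment, skipped\nDATABASE_URL=postgres://localhost/app\n')
    path = f.name

for name, value in load_env_from_file(path):
    print(name, '=', value)   # DATABASE_URL = postgres://localhost/app

os.unlink(path)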
def buildFileListOrig(input, output=None, ivmlist=None, wcskey=None,
                      updatewcs=True, **workinplace):
    """
    Builds a file list which has undergone various instrument-specific
    checks for input to MultiDrizzle, including splitting STIS associations.
    Compared to buildFileList, this version returns the list of the
    original file names as specified by the user (e.g., before GEIS->MEF, or
    WAIVER FITS->MEF conversion).
    """
    # NOTE: original file name is required in order to correctly associate
    # user catalog files (e.g., user masks to be used with 'skymatch') with
    # corresponding imageObjects.

    filelist, output, ivmlist, oldasndict = processFilenames(input, output)

    # verify that all input images specified can be updated as needed
    filelist = util.verifyFilePermissions(filelist)
    if filelist is None or len(filelist) == 0:
        return None, None, None, None, None

    manageInputCopies(filelist, **workinplace)

    # to keep track of the original file names we do the following trick:
    # pack filelist with the ivmlist using zip and later unpack the zipped list.
    #
    # NOTE: this required a small modification of the checkStisFiles function
    # in stsci.tools.check_files to be able to handle ivmlists that are tuples.
    if ivmlist is None:
        ivmlist = len(filelist) * [None]
    else:
        assert(len(filelist) == len(ivmlist))  # TODO: remove after debugging

    ivmlist = list(zip(ivmlist, filelist))

    # Check format of FITS files - convert Waiver/GEIS to MEF if necessary
    filelist, ivmlist = check_files.checkFITSFormat(filelist, ivmlist)

    # check for non-polynomial distortion correction
    if not updatewcs:
        # with updatewcs turned on, any problems will get resolved
        # so we do not need to be concerned about the state of the DGEOFILEs
        filelist = checkDGEOFile(filelist)

    # run all WCS updating
    updated_input = _process_input_wcs(filelist, wcskey, updatewcs)

    newfilelist, ivmlist = check_files.checkFiles(updated_input, ivmlist)

    if updatewcs:
        uw.updatewcs(','.join(set(newfilelist) - set(filelist)))

    if len(ivmlist) > 0:
        ivmlist, filelist = list(zip(*ivmlist))
    else:
        filelist = []  # ensure that both filelist and ivmlist are defined as empty lists

    return newfilelist, ivmlist, output, oldasndict, filelist
Builds a file list which has undergone various instrument-specific checks for input to MultiDrizzle, including splitting STIS associations. Compared to buildFileList, this version returns the list of the original file names as specified by the user (e.g., before GEIS->MEF, or WAIVER FITS->MEF conversion).
def _build_date_header_string(self, date_value): """Gets the date_value (may be None, basestring, float or datetime.datetime instance) and returns a valid date string as per RFC 2822.""" if isinstance(date_value, datetime): date_value = time.mktime(date_value.timetuple()) if not isinstance(date_value, basestring): date_value = formatdate(date_value, localtime=True) # Encode it here to avoid this: # Date: =?utf-8?q?Sat=2C_01_Sep_2012_13=3A08=3A29_-0300?= return native(date_value)
Gets the date_value (may be None, basestring, float or datetime.datetime instance) and returns a valid date string as per RFC 2822.
def repr_values(condition: Callable[..., bool], lambda_inspection: Optional[ConditionLambdaInspection], condition_kwargs: Mapping[str, Any], a_repr: reprlib.Repr) -> List[str]: # pylint: disable=too-many-locals """ Represent function arguments and frame values in the error message on contract breach. :param condition: condition function of the contract :param lambda_inspection: inspected lambda AST node corresponding to the condition function (None if the condition was not given as a lambda function) :param condition_kwargs: condition arguments :param a_repr: representation instance that defines how the values are represented. :return: list of value representations """ if _is_lambda(a_function=condition): assert lambda_inspection is not None, "Expected a lambda inspection when given a condition as a lambda function" else: assert lambda_inspection is None, "Expected no lambda inspection in a condition given as a non-lambda function" reprs = dict() # type: MutableMapping[str, Any] if lambda_inspection is not None: # Collect the variable lookup of the condition function: variable_lookup = [] # type: List[Mapping[str, Any]] # Add condition arguments to the lookup variable_lookup.append(condition_kwargs) # Add closure to the lookup closure_dict = dict() # type: Dict[str, Any] if condition.__closure__ is not None: # type: ignore closure_cells = condition.__closure__ # type: ignore freevars = condition.__code__.co_freevars assert len(closure_cells) == len(freevars), \ "Number of closure cells of a condition function ({}) == number of free vars ({})".format( len(closure_cells), len(freevars)) for cell, freevar in zip(closure_cells, freevars): closure_dict[freevar] = cell.cell_contents variable_lookup.append(closure_dict) # Add globals to the lookup if condition.__globals__ is not None: # type: ignore variable_lookup.append(condition.__globals__) # type: ignore # pylint: disable=protected-access recompute_visitor = icontract._recompute.Visitor(variable_lookup=variable_lookup) recompute_visitor.visit(node=lambda_inspection.node.body) recomputed_values = recompute_visitor.recomputed_values repr_visitor = Visitor( recomputed_values=recomputed_values, variable_lookup=variable_lookup, atok=lambda_inspection.atok) repr_visitor.visit(node=lambda_inspection.node.body) reprs = repr_visitor.reprs else: for key, val in condition_kwargs.items(): if _representable(value=val): reprs[key] = val parts = [] # type: List[str] for key in sorted(reprs.keys()): parts.append('{} was {}'.format(key, a_repr.repr(reprs[key]))) return parts
Represent function arguments and frame values in the error message on contract breach. :param condition: condition function of the contract :param lambda_inspection: inspected lambda AST node corresponding to the condition function (None if the condition was not given as a lambda function) :param condition_kwargs: condition arguments :param a_repr: representation instance that defines how the values are represented. :return: list of value representations
def _recursively_apply_get_cassette_subclass(self, replacement_dict_or_obj): """One of the subtleties of this class is that it does not directly replace HTTPSConnection with `VCRRequestsHTTPSConnection`, but a subclass of the aforementioned class that has the `cassette` class attribute assigned to `self._cassette`. This behavior is necessary to properly support nested cassette contexts. This function exists to ensure that we use the same class object (reference) to patch everything that replaces VCRRequestHTTP[S]Connection, but that we can talk about patching them with the raw references instead, and without worrying about exactly where the subclass with the relevant value for `cassette` is first created. The function is recursive because it looks in to dictionaries and replaces class values at any depth with the subclass described in the previous paragraph. """ if isinstance(replacement_dict_or_obj, dict): for key, replacement_obj in replacement_dict_or_obj.items(): replacement_obj = self._recursively_apply_get_cassette_subclass( replacement_obj) replacement_dict_or_obj[key] = replacement_obj return replacement_dict_or_obj if hasattr(replacement_dict_or_obj, 'cassette'): replacement_dict_or_obj = self._get_cassette_subclass( replacement_dict_or_obj) return replacement_dict_or_obj
One of the subtleties of this class is that it does not directly replace HTTPSConnection with `VCRRequestsHTTPSConnection`, but a subclass of the aforementioned class that has the `cassette` class attribute assigned to `self._cassette`. This behavior is necessary to properly support nested cassette contexts. This function exists to ensure that we use the same class object (reference) to patch everything that replaces VCRRequestHTTP[S]Connection, but that we can talk about patching them with the raw references instead, and without worrying about exactly where the subclass with the relevant value for `cassette` is first created. The function is recursive because it looks in to dictionaries and replaces class values at any depth with the subclass described in the previous paragraph.
def copy_dir( src_fs, # type: Union[FS, Text] src_path, # type: Text dst_fs, # type: Union[FS, Text] dst_path, # type: Text walker=None, # type: Optional[Walker] on_copy=None, # type: Optional[_OnCopy] workers=0, # type: int ): # type: (...) -> None """Copy a directory from one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a directory on the source filesystem. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a directory on the destination filesystem. walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable, optional): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy. """ on_copy = on_copy or (lambda *args: None) walker = walker or Walker() _src_path = abspath(normpath(src_path)) _dst_path = abspath(normpath(dst_path)) def src(): return manage_fs(src_fs, writeable=False) def dst(): return manage_fs(dst_fs, create=True) from ._bulk import Copier with src() as _src_fs, dst() as _dst_fs: with _src_fs.lock(), _dst_fs.lock(): _thread_safe = is_thread_safe(_src_fs, _dst_fs) with Copier(num_workers=workers if _thread_safe else 0) as copier: _dst_fs.makedir(_dst_path, recreate=True) for dir_path, dirs, files in walker.walk(_src_fs, _src_path): copy_path = combine(_dst_path, frombase(_src_path, dir_path)) for info in dirs: _dst_fs.makedir(info.make_path(copy_path), recreate=True) for info in files: src_path = info.make_path(dir_path) dst_path = info.make_path(copy_path) copier.copy(_src_fs, src_path, _dst_fs, dst_path) on_copy(_src_fs, src_path, _dst_fs, dst_path)
Copy a directory from one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a directory on the source filesystem. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a directory on the destination filesystem. walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable, optional): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy.
def inverse_transform(self, X):
        """Transform data back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
        """
        check_is_fitted(self, "mean_")

        if self.whiten:
            return (
                da.dot(
                    X,
                    np.sqrt(self.explained_variance_[:, np.newaxis])
                    * self.components_,
                )
                + self.mean_
            )
        else:
            return da.dot(X, self.components_) + self.mean_
Transform data back to its original space.

Returns an array X_original whose transform would be X.

Parameters
----------
X : array-like, shape (n_samples, n_components)
    New data, where n_samples is the number of samples
    and n_components is the number of components.

Returns
-------
X_original : array-like, shape (n_samples, n_features)

Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
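A round-trip sketch assuming the dask-ml style PCA this method belongs to; shapes are illustrative, and the reconstruction is lossy whenever n_components < n_features.

import dask.array as da

X = da.random.random((100, 5), chunks=(50, 5))
pca = PCA(n_components=2).fit(X)
X_back = pca.inverse_transform(pca.transform(X))
print(X_back.shape)   # (100, 5) -- back in the original feature space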
def _sumterm(lexer):
    """Return a sum term expression."""
    xorterm = _xorterm(lexer)
    sumterm_prime = _sumterm_prime(lexer)
    if sumterm_prime is None:
        return xorterm
    else:
        return ('or', xorterm, sumterm_prime)
Return a sum term expression.
def consensus(aln, weights=None, gap_threshold=0.5, simple=False,
              trim_ends=True):
    """Get the consensus of an alignment, as a string.

    Emit gap characters for majority-gap columns; apply various strategies to
    choose the consensus amino acid type for the remaining columns.

    Parameters
    ----------
    simple : bool
        If True, use simple plurality to determine the consensus amino acid
        type, without weighting sequences for similarity. Otherwise, weight
        sequences for similarity and use relative entropy to choose the
        consensus amino acid type.
    weights : dict or None
        Sequence weights. If given, used to calculate amino acid frequencies;
        otherwise calculated within this function (i.e. this is a way to
        speed up the function if sequence weights have already been
        calculated). Ignored in 'simple' mode.
    trim_ends : bool
        If False, stretch the consensus sequence to include the N- and
        C-tails of the alignment, even if those flanking columns are mostly
        gap characters. This avoids terminal gaps in the consensus (needed
        for MAPGAPS).
    gap_threshold : float
        If the proportion of gap characters in a column is greater than or
        equal to this value (after sequence weighting, if applicable), then
        the consensus character emitted will be a gap instead of an amino
        acid type.
    """
    # Choose your algorithms!
    if simple:
        # Use the simple, unweighted algorithm
        col_consensus = make_simple_col_consensus(alnutils.aa_frequencies(aln))

        def is_majority_gap(col):
            return (float(col.count('-')) / len(col) >= gap_threshold)
        # ENH (alternatively/additionally): does any aa occur more than once?
        # ENH: choose gap-decisionmaking separately from col_consensus
    else:
        # Use the entropy-based, weighted algorithm
        if weights is None:
            seq_weights = alnutils.sequence_weights(aln, 'avg1')
        else:
            seq_weights = weights
        aa_frequencies = alnutils.aa_frequencies(aln, weights=seq_weights)
        col_consensus = make_entropy_col_consensus(aa_frequencies)

        def is_majority_gap(col):
            gap_count = 0.0
            for wt, char in zip(seq_weights, col):
                if char == '-':
                    gap_count += wt
            return (gap_count / sum(seq_weights) >= gap_threshold)

    # Traverse the alignment, handling gaps etc.
    def col_wise_consensus(columns):
        """Calculate the consensus chars for an iterable of columns."""
        if not trim_ends:
            # Track if we're in the N-term or C-term end of the sequence
            in_left_end = True
            maybe_right_tail = []
        # prev_col = None
        # prev_char = None
        for col in columns:
            # Lowercase cols mean explicitly, "don't include in consensus"
            if all(c.islower() for c in col if c not in '.-'):
                yield '-'
                continue
            if any(c.islower() for c in col):
                logging.warn('Mixed lowercase and uppercase letters in a '
                             'column: ' + ''.join(col))
                # Materialize the uppercased column so it can be counted and
                # iterated again below (a bare map() would be an exhaustible
                # iterator on Python 3)
                col = [c.upper() for c in col]
            # Gap chars
            is_gap = is_majority_gap(col)
            if not trim_ends:
                # Avoid N-terminal gaps in the consensus sequence
                if in_left_end:
                    if not is_gap:
                        # Match -- we're no longer in the left end
                        in_left_end = False
                    is_gap = False
            # When to yield a gap here:
            # ----------- --------- ------- ----------
            # in_left_end trim_ends is_gap  yield gap?
            # ----------- --------- ------- ----------
            # True        True      (True)  yes
            # True        False     (False) (no -- def. char)
            # False       True      T/F     yes, if is_gap
            # False       False     (T/F)   NO! use maybe_right_tail
            # ----------- --------- ------- ----------
            if is_gap and trim_ends:
                yield '-'
                continue
            # Get the consensus character, using the chosen algorithm
            cons_char = col_consensus(col)
            if trim_ends:
                yield cons_char
            else:
                # Avoid C-terminal gaps in the consensus sequence
                if is_gap:
                    maybe_right_tail.append(cons_char)
                else:
                    # Match -> gaps weren't the right tail; emit all gaps
                    for char in maybe_right_tail:
                        yield '-'
                    maybe_right_tail = []
                    yield cons_char
            # prev_col = col
            # prev_char = cons_char
        # Finally, if we were keeping a right (C-term) tail, emit it
        if not trim_ends:
            for char in maybe_right_tail:
                yield char

    return ''.join(col_wise_consensus(zip(*aln)))
Get the consensus of an alignment, as a string. Emit gap characters for majority-gap columns; apply various strategies to choose the consensus amino acid type for the remaining columns. Parameters ---------- simple : bool If True, use simple plurality to determine the consensus amino acid type, without weighting sequences for similarity. Otherwise, weight sequences for similarity and use relative entropy to choose the consensus amino acid type. weights : dict or None Sequence weights. If given, used to calculate amino acid frequencies; otherwise calculated within this function (i.e. this is a way to speed up the function if sequence weights have already been calculated). Ignored in 'simple' mode. trim_ends : bool If False, stretch the consensus sequence to include the N- and C-tails of the alignment, even if those flanking columns are mostly gap characters. This avoids terminal gaps in the consensus (needed for MAPGAPS). gap_threshold : float If the proportion of gap characters in a column is greater than or equal to this value (after sequence weighting, if applicable), then the consensus character emitted will be a gap instead of an amino acid type.
def save(self): """ Save the current instance to the DB """ with rconnect() as conn: try: self.validate() except ValidationError as e: log.warn(e.messages) raise except ModelValidationError as e: log.warn(e.messages) raise except ModelConversionError as e: log.warn(e.messages) raise except ValueError as e: log.warn(e) raise except FrinkError as e: log.warn(e.messages) raise except Exception as e: log.warn(e) raise else: # If this is a new unsaved object, it'll likely have an # id of None, which RethinkDB won't like. So if it's None, # generate a UUID for it. If the save fails, we should re-set # it to None. if self.id is None: self.id = str(uuid.uuid4()) log.debug(self.id) try: query = r.db(self._db).table(self._table).insert( self.to_primitive(), conflict="replace" ) log.debug(query) rv = query.run(conn) # Returns something like this: # { # u'errors': 0, # u'deleted': 0, # u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'], # u'unchanged': 0, # u'skipped': 0, # u'replaced': 0, # u'inserted': 1 # } log.debug(rv) except Exception as e: log.warn(e) self.id = None raise else: return self
Save the current instance to the DB
def scan_dir(self, path): r"""Scan a directory on disk for color table files and add them to the registry. Parameters ---------- path : str The path to the directory with the color tables """ for fname in glob.glob(os.path.join(path, '*' + TABLE_EXT)): if os.path.isfile(fname): with open(fname, 'r') as fobj: try: self.add_colortable(fobj, os.path.splitext(os.path.basename(fname))[0]) log.debug('Added colortable from file: %s', fname) except RuntimeError: # If we get a file we can't handle, assume we weren't meant to. log.info('Skipping unparsable file: %s', fname)
r"""Scan a directory on disk for color table files and add them to the registry. Parameters ---------- path : str The path to the directory with the color tables
def setEmergencyDecel(self, vehID, decel): """setEmergencyDecel(string, double) -> None Sets the maximal physically possible deceleration in m/s^2 for this vehicle. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EMERGENCY_DECEL, vehID, decel)
setEmergencyDecel(string, double) -> None Sets the maximal physically possible deceleration in m/s^2 for this vehicle.
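A usage sketch against a live SUMO/TraCI connection; the config path and vehicle ID are illustrative.

import traci

traci.start(['sumo', '-c', 'scenario.sumocfg'])   # hypothetical scenario
traci.vehicle.setEmergencyDecel('veh0', 9.0)      # cap hard braking at 9 m/s^2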
def index(): """Generate a list of all crawlers, alphabetically, with op counts.""" crawlers = [] for crawler in manager: data = Event.get_counts(crawler) data['last_active'] = crawler.last_run data['total_ops'] = crawler.op_count data['running'] = crawler.is_running data['crawler'] = crawler crawlers.append(data) return render_template('index.html', crawlers=crawlers)
Generate a list of all crawlers, alphabetically, with op counts.
def triggered_token(self) -> 'CancelToken':
        """
        Return the token which was triggered, or None if no token has been
        triggered yet.

        The returned token may be this token or one that it was chained with.
        """
        if self._triggered.is_set():
            return self

        for token in self._chain:
            if token.triggered:
                # Use token.triggered_token here to make the lookup recursive
                # as self._chain may contain other chains.
                return token.triggered_token

        return None
Return the token which was triggered, or None if no token has been
triggered yet.

The returned token may be this token or one that it was chained with.
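A hedged sketch of the recursive lookup, assuming the surrounding CancelToken API (a constructor taking a name, chain(), and trigger()); when a parent in a chain fires, the chained token reports the parent as its trigger source.

parent = CancelToken('parent')
combined = parent.chain(CancelToken('child'))
parent.trigger()
assert combined.triggered_token is parent   # resolved through self._chain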
def cli(ctx): """PyHardLinkBackup""" click.secho("\nPyHardLinkBackup v%s\n" % PyHardLinkBackup.__version__, bg="blue", fg="white", bold=True)
PyHardLinkBackup
def _get_django_queryset(self): """Return Django QuerySet with prefetches properly configured.""" prefetches = [] for field, fprefetch in self.prefetches.items(): has_query = hasattr(fprefetch, 'query') qs = fprefetch.query.queryset if has_query else None prefetches.append( Prefetch(field, queryset=qs) ) queryset = self.queryset if prefetches: queryset = queryset.prefetch_related(*prefetches) return queryset
Return Django QuerySet with prefetches properly configured.
def main():
    """
    Phenologs
    """
    parser = argparse.ArgumentParser(
        description='Phenologs'
        """
        By default, ontologies are cached locally and synced from a remote sparql endpoint
        """,
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('-r', '--resource1', type=str, required=False,
                        help='Name of ontology1')
    parser.add_argument('-R', '--resource2', type=str, required=False,
                        help='Name of ontology2')
    parser.add_argument('-T', '--taxon', type=str, default='NCBITaxon:10090', required=False,
                        help='NCBITaxon ID')
    parser.add_argument('-s', '--search', type=str, default='', required=False,
                        help='Search type. p=partial, r=regex')
    parser.add_argument('-b', '--background', type=str, default=None, required=False,
                        help='Class to use for background')
    parser.add_argument('-p', '--pthreshold', type=float, default=0.05, required=False,
                        help='P-value threshold')
    parser.add_argument('-v', '--verbosity', default=0, action='count',
                        help='Increase output verbosity')
    parser.add_argument('ids', nargs='*')

    args = parser.parse_args()

    if args.verbosity >= 2:
        logging.basicConfig(level=logging.DEBUG)
    if args.verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    logging.info("Welcome!")

    ofactory = OntologyFactory()
    afactory = AssociationSetFactory()
    handle = args.resource1
    ont1 = ofactory.create(args.resource1)
    ont2 = ofactory.create(args.resource2)
    logging.info("onts: {} {}".format(ont1, ont2))

    searchp = args.search

    category = 'gene'

    aset1 = afactory.create(ontology=ont1,
                            subject_category=category,
                            object_category='phenotype',
                            taxon=args.taxon)
    aset2 = afactory.create(ontology=ont2,
                            subject_category=category,
                            object_category='function',
                            taxon=args.taxon)

    bg_cls = None
    if args.background is not None:
        bg_ids = resolve(ont1, [args.background], searchp)
        if len(bg_ids) == 0:
            logging.error("Cannot resolve: '{}' using {} in {}".format(args.background, searchp, ont1))
            sys.exit(1)
        elif len(bg_ids) > 1:
            logging.error("Multiple matches: '{}' using {} MATCHES={}".format(args.background, searchp, bg_ids))
            sys.exit(1)
        else:
            # Assign before logging so the resolved class (not None) is reported
            [bg_cls] = bg_ids
            logging.info("Background: {}".format(bg_cls))

    for id in resolve(ont1, args.ids, searchp):
        sample = aset1.query([id], [])
        print("Gene set class: {} Gene set: {}".format(id, sample))
        bg = None
        if bg_cls is not None:
            bg = aset1.query([bg_cls], [])
            print("BACKGROUND SUBJECTS: {}".format(bg))
        rs = aset2.enrichment_test(sample, bg, threshold=args.pthreshold, labels=True)
        print("RESULTS: {} < {}".format(len(rs), args.pthreshold))
        for r in rs:
            print(str(r))
Phenologs
def timescales_from_eigenvalues(evals, tau=1):
    r"""Compute implied time scales from given eigenvalues

    Parameters
    ----------
    evals : eigenvalues
    tau : lag time

    Returns
    -------
    ts : ndarray
        The implied time scales to the given eigenvalues, in the same order.

    """
    # Check for dominant eigenvalues with large imaginary part
    if not np.allclose(evals.imag, 0.0):
        warnings.warn('Using eigenvalues with non-zero imaginary part',
                      ImaginaryEigenValueWarning)

    # Check for multiple eigenvalues of magnitude one
    ind_abs_one = np.isclose(np.abs(evals), 1.0, rtol=0.0, atol=1e-14)
    if sum(ind_abs_one) > 1:
        warnings.warn('Multiple eigenvalues with magnitude one.',
                      SpectralWarning)

    # Compute implied time scales
    ts = np.zeros(len(evals))

    # Eigenvalues of magnitude one imply an infinite timescale
    ts[ind_abs_one] = np.inf

    # All other eigenvalues give rise to finite timescales
    ts[np.logical_not(ind_abs_one)] = \
        -1.0 * tau / np.log(np.abs(evals[np.logical_not(ind_abs_one)]))
    return ts
r"""Compute implied time scales from given eigenvalues Parameters ---------- evals : eigenvalues tau : lag time Returns ------- ts : ndarray The implied time scales to the given eigenvalues, in the same order.