Columns: Unnamed: 0 (int64, 0-389k) · code (string, lengths 26-79.6k) · docstring (string, lengths 1-46.9k)
373,100
def destroy(self):
    if self.widget:
        self.set_active(False)
    super(AndroidBarcodeView, self).destroy()
Clean up the activity lifecycle listener
373,101
def create_context_plot(ra, dec, name="Your object"):
    plot = K2FootprintPlot()
    plot.plot_galactic()
    plot.plot_ecliptic()
    for c in range(0, 20):
        plot.plot_campaign_outline(c, facecolor="#666666")  # facecolor string lost in extraction; value assumed
    plot.ax.scatter(ra, dec, marker="x", s=250, lw=3, color="red", zorder=500)  # marker assumed
    plot.ax.text(ra, dec - 2, name, ha="center", va="top", color="red",
                 fontsize=20, fontweight="bold", zorder=501)  # fontweight assumed
    return plot
Creates a K2FootprintPlot showing a given position in context with respect to the campaigns.
373,102
def perr(self, *args, **kwargs):
    kwargs["file"] = self.err  # key lost in extraction; "file" assumed from the print() call
    self.print(*args, **kwargs)
    sys.stderr.flush()
Print to the console on STDERR
373,103
def build_sanitiser_node_dict(cfg, sinks_in_file):
    sanitisers = list()
    for sink in sinks_in_file:
        sanitisers.extend(sink.sanitisers)
    sanitisers_in_file = list()
    for sanitiser in sanitisers:
        for cfg_node in cfg.nodes:
            if sanitiser in cfg_node.label:
                sanitisers_in_file.append(Sanitiser(sanitiser, cfg_node))
    sanitiser_node_dict = dict()
    for sanitiser in sanitisers:
        sanitiser_node_dict[sanitiser] = list(find_sanitiser_nodes(
            sanitiser,
            sanitisers_in_file
        ))
    return sanitiser_node_dict
Build a dict of string -> TriggerNode pairs, where the string is the sanitiser and the TriggerNode is a TriggerNode of the sanitiser. Args: cfg(CFG): cfg to traverse. sinks_in_file(list[TriggerNode]): list of TriggerNodes containing the sinks in the file. Returns: A string -> TriggerNode dict.
373,104
def rm_special(user, cmd, special=None, identifier=None):
    # quoted literals below were stripped in extraction; restored from
    # Salt's cron module (assumed)
    lst = list_tab(user)
    ret = 'absent'
    rm_ = None
    for ind in range(len(lst['special'])):
        if rm_ is not None:
            break
        if _cron_matched(lst['special'][ind], cmd, identifier=identifier):
            if special is None:
                rm_ = ind
            else:
                if lst['special'][ind]['spec'] == special:
                    rm_ = ind
    if rm_ is not None:
        lst['special'].pop(rm_)
        ret = 'removed'
    comdat = _write_cron_lines(user, _render_tab(lst))
    if comdat['retcode']:
        return comdat['stderr']
    return ret
Remove a special cron job for a specified user. CLI Example: .. code-block:: bash salt '*' cron.rm_special root /usr/bin/foo
373,105
def send_to_default_exchange(self, sess_id, message=None):
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following message to %s queue through default exchange:\n%s" % (
        sess_id, msg))
    self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)  # AMQP default exchange is the empty string
Send messages through RabbitMQ's default exchange, which will be delivered through routing_key (sess_id). This method is only used for unauthenticated users, i.e. during the login process. Args: sess_id string: Session id message dict: Message object.
373,106
def replace_drive_enclosure(self, information):
    uri = "{}/replaceDriveEnclosure".format(self.data["uri"])
    result = self._helper.create(information, uri)
    self.refresh()
    return result
When a drive enclosure has been physically replaced, initiate the replacement operation that enables the new drive enclosure to take over for the prior one. The request requires the serial numbers of both the original drive enclosure and its replacement. Args: information: Options to replace the drive enclosure. Returns: dict: SAS Logical Interconnect.
373,107
def get(issue_id, issue_type_id):
    return db.Issue.find_one(
        Issue.issue_id == issue_id,
        Issue.issue_type_id == issue_type_id
    )
Return issue by ID Args: issue_id (str): Unique Issue identifier issue_type_id (str): Type of issue to get Returns: :obj:`Issue`: Returns Issue object if found, else None
373,108
def get_tan_media(self, media_type=TANMediaType2.ALL, media_class=TANMediaClass4.ALL):
    with self._get_dialog() as dialog:
        hktab = self._find_highest_supported_command(HKTAB4, HKTAB5)
        seg = hktab(
            tan_media_type=media_type,
            tan_media_class=str(media_class),
        )
        response = dialog.send(seg)
        for resp in response.response_segments(seg, 'HITAB'):  # segment name lost in extraction; 'HITAB' assumed
            return resp.tan_usage_option, list(resp.tan_media_list)
Get information about TAN lists/generators. Returns tuple of fints.formals.TANUsageOption and a list of fints.formals.TANMedia4 or fints.formals.TANMedia5 objects.
373,109
def get_previous_tag(cls, el):
    sibling = el.previous_sibling
    while not cls.is_tag(sibling) and sibling is not None:
        sibling = sibling.previous_sibling
    return sibling
Get previous sibling tag.
373,110
def File(self, path):
    return vfs.FileRef(
        client_id=self.client_id,
        path=path,
        context=self._context)
Returns a reference to a file with a given path on client's VFS.
373,111
def num_compositions(m, n):
    return scipy.special.comb(n + m - 1, m - 1, exact=True)
The total number of m-part compositions of n, which is equal to (n+m-1) choose (m-1). Parameters ---------- m : scalar(int) Number of parts of composition. n : scalar(int) Integer to decompose. Returns ------- scalar(int) Total number of m-part compositions of n.
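A quick sanity check of the formula: the number of 3-part (weak) compositions of 4 is C(6, 2) = 15, which matches a brute-force count. The re-statement below is a standalone sketch, not the library's code:

    import itertools
    import scipy.special

    def num_compositions(m, n):
        return scipy.special.comb(n + m - 1, m - 1, exact=True)

    # brute force: 3-tuples of nonnegative integers summing to 4
    brute = sum(1 for parts in itertools.product(range(5), repeat=3)
                if sum(parts) == 4)
    assert num_compositions(3, 4) == brute == 15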
373,112
def _get_win_argv():
    assert is_win
    argc = ctypes.c_int()
    try:
        argv = winapi.CommandLineToArgvW(
            winapi.GetCommandLineW(), ctypes.byref(argc))
    except WindowsError:
        return []
    if not argv:
        return []
    res = argv[max(0, argc.value - len(sys.argv)):argc.value]
    winapi.LocalFree(argv)
    return res
Returns a unicode argv under Windows and standard sys.argv otherwise Returns: List[`fsnative`]
373,113
def backup_file(*, file, host):
    # body lost in extraction; only this trailing fragment survives
    .format(key_path=key_path))
Backup a file on S3 :param file: full path to the file to be backed up :param host: this will be used to locate the file on S3 :raises TypeError: if an argument in kwargs does not have the type expected :raises ValueError: if an argument within kwargs has an invalid value
373,114
def main():
    # quoted string literals throughout this function (subcommand names,
    # argument flags, config keys) were lost in extraction; the empty
    # argument slots below are preserved as gaps
    parser = ArgumentParser()
    subs = parser.add_subparsers(dest=)
    setup_parser = subs.add_parser()
    setup_parser.add_argument(, , dest=, required=True, help=, type=str)
    setup_parser.add_argument(, , dest=, required=True, help=, type=str)
    setup_parser = subs.add_parser()
    setup_parser.add_argument(, , dest=, required=True, type=str, help=,)
    setup_parser = subs.add_parser()
    setup_parser = subs.add_parser()
    setup_parser.add_argument(, , dest=, required=True, help=, type=str)
    setup_parser.add_argument(, dest=, action=, help=)
    setup_parser.add_argument(, , dest=, required=True, choices=[, ], help=)
    setup_parser.add_argument(, , dest=, default="realtime", choices=[, , ], help=)
    setup_parser = subs.add_parser()
    setup_parser.add_argument(, dest=, required=True, help=, type=str)
    args = parser.parse_args()
    if args.cmd == :
        if not os.path.exists(CONFIG_PATH):
            os.makedirs(CONFIG_PATH)
        if not os.path.exists(CONFIG_FILE):
            json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, ), indent=4, separators=(, ))
        config = CONFIG_DEFAULTS
        config[] = args.email
        config[] = str(obfuscate(args.pwd, ))
        json.dump(config, open(CONFIG_FILE, ), indent=4, separators=(, ))
    config = json.load(open(CONFIG_FILE))
    if config.get(, PY2) != PY2:
        raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
    if config[] == :
        raise Exception("Run setup before any other actions!")
    if args.cmd == :
        config[] = obfuscate(str(config[]), )
        ga = GoogleAlerts(config[], config[])
        with contextlib.closing(webdriver.Chrome(args.driver)) as driver:
            driver.get(ga.LOGIN_URL)
            wait = ui.WebDriverWait(driver, 10)
            inputElement = driver.find_element_by_name()
            inputElement.send_keys(config[])
            inputElement.submit()
            time.sleep(3)
            inputElement = driver.find_element_by_id()
            inputElement.send_keys(config[])
            inputElement.submit()
            print("[!] Waiting 15 seconds for authentication to complete")
            time.sleep(15)
            cookies = driver.get_cookies()
            collected = dict()
            for cookie in cookies:
                collected[str(cookie[])] = str(cookie[])
            with open(SESSION_FILE, ) as f:
                pickle.dump(collected, f, protocol=2)
            print("Session has been seeded.")
    if args.cmd == :
        config[] = obfuscate(str(config[]), )
        ga = GoogleAlerts(config[], config[])
        ga.authenticate()
        print(json.dumps(ga.list(), indent=4))
    if args.cmd == :
        config[] = obfuscate(str(config[]), )
        ga = GoogleAlerts(config[], config[])
        ga.authenticate()
        alert_frequency = 
        if args.frequency == :
            alert_frequency = 
        elif args.frequency == :
            alert_frequency = 
        else:
            alert_frequency = 
        monitor = ga.create(args.term, {: args.delivery.upper(),
                                        : alert_frequency.upper(),
                                        : args.exact})
        print(json.dumps(monitor, indent=4))
    if args.cmd == :
        config[] = obfuscate(str(config[]), )
        ga = GoogleAlerts(config[], config[])
        ga.authenticate()
        result = ga.delete(args.term_id)
        if result:
            print("%s was deleted" % args.term_id)
Run the core.
373,115
def getTemplates(fnames, blend=True):
    if not blend:
        newhdrs = blendheaders.getSingleTemplate(fnames[0])
        newtab = None
    else:
        newhdrs, newtab = blendheaders.get_blended_headers(inputs=fnames)
    cleanTemplates(newhdrs[1], newhdrs[2], newhdrs[3])
    return newhdrs, newtab
Process all headers to produce a set of combined headers that follows the rules defined by each instrument.
373,116
def make_lat_lons(cvects):
    lats = np.degrees(np.arcsin(cvects[2]))
    lons = np.degrees(np.arctan2(cvects[0], cvects[1]))
    return np.hstack([lats, lons])
Convert from directional cosines to latitude and longitude Parameters ---------- cvects : directional cosine (i.e., x,y,z component) values Returns ------- np.ndarray(2,nsrc) with the latitude and longitude values
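A small worked example of the convention used above (values chosen by hand, not from the source): a unit vector along +z maps to latitude 90°, and because longitude is computed as arctan2(x, y) here, a unit vector along +x maps to longitude 90°:

    import numpy as np

    cvects = np.array([[1.0, 0.0],   # x components
                       [0.0, 0.0],   # y components
                       [0.0, 1.0]])  # z components
    lats = np.degrees(np.arcsin(cvects[2]))              # [ 0., 90.]
    lons = np.degrees(np.arctan2(cvects[0], cvects[1]))  # [90.,  0.]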
373,117
def import_sqlite(db_file, older_than=None, **kwargs):
    conn = _open_sqlite(db_file)
    cur = conn.cursor()
    select = "SELECT * FROM testcases WHERE exported != 'yes'"  # flag literal lost in extraction; 'yes' assumed
    if older_than:
        cur.execute(" ".join((select, "AND sqltime < ?")), (older_than,))
    else:
        cur.execute(select)
    columns = [description[0] for description in cur.description]
    rows = cur.fetchall()
    results = []
    for row in rows:
        record = OrderedDict(list(zip(columns, row)))
        results.append(record)
    testrun = _get_testrun_from_sqlite(conn)
    conn.close()
    return xunit_exporter.ImportedData(results=results, testrun=testrun)
Reads the content of the database file and returns imported data.
373,118
def _backspace(self):
    if self.command == '':  # literal lost in extraction; empty string assumed
        return
    logger.log(5, "Snippet keystroke `Backspace`.")
    self.command = self.command[:-1]
Erase the last character in the snippet command.
373,119
def triple(self):
    with ffi.OutputString(owned=False) as outmsg:
        ffi.lib.LLVMPY_GetTarget(self, outmsg)
        return str(outmsg)
This module's target "triple" specification, as a string.
373,120
def binary_classification_metrics(y_true, y_pred, y_prob):
    y_true = column_or_1d(y_true)
    y_pred = column_or_1d(y_pred)
    y_prob = column_or_1d(y_prob)
    n_samples = y_true.shape[0]
    tp = float((y_pred * y_true).sum())
    fp = float((y_pred[np.nonzero(y_true == 0)[0]]).sum())
    fn = float((y_true[np.nonzero(y_pred == 0)[0]]).sum())
    tn = float(n_samples - tp - fn - fp)
    accuracy = (tp + tn) / n_samples
    auc = roc_auc_score(y_true, y_pred)
    brier_loss = brier_score_loss(y_true, y_prob)
    if (tp + fp) == 0:
        precision = 0
    else:
        precision = tp / (tp + fp)
    if (tp + fn) == 0:
        recall = 0
    else:
        recall = tp / (tp + fn)
    if (recall + precision) == 0:
        f1score = 0
    else:
        f1score = 2 * (precision * recall) / (precision + recall)
    return dict(tp=tp, fp=fp, fn=fn, tn=tn, accuracy=accuracy, recall=recall,
                precision=precision, f1score=f1score, auc=auc, brier_loss=brier_loss)
classification_metrics. This function cal... Parameters ---------- y_true : array-like Ground truth (correct) labels. y_pred : array-like Predicted labels, as returned by a classifier. y_prob : array-like Predicted probabilities, as returned by a classifier. Returns ------- dict(tp, fp, fn, tn, accuracy, recall, precision, f1score, auc, brier_loss) Examples -------- >>> from costcla.metrics import binary_classification_metrics >>> y_pred = [0, 1, 0, 0] >>> y_true = [0, 1, 1, 0] >>> y_prob = [0.2, 0.8, 0.4, 0.3] >>> binary_classification_metrics(y_true, y_pred, y_prob) {'accuracy': 0.75, 'auc': 0.75, 'brier_loss': 0.13249999999999998, 'f1score': 0.6666666666666666, 'fn': 1.0, 'fp': 0.0, 'precision': 1.0, 'recall': 0.5, 'tn': 2.0, 'tp': 1.0}
373,121
def load_configs(self): self._load_default_shell_settings() value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_int() self.get_widget().set_active(value) self.get_widget().set_sensitive(value != 2) value = self.settings.general.get_string() combo = self.get_widget() for i in combo.get_model(): if i[0] == value: combo.set_active_iter(i.iter) break value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) self.get_widget().set_sensitive(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_int() self.get_widget().set_value(value) self.update_vte_subwidgets_states() value = self.settings.general.get_int() self.get_widget().set_value(value) value = self.settings.general.get_int() self.get_widget().set_value(value) value = self.settings.general.get_int() self.get_widget().set_value(value) value = self.settings.general.get_int() self.get_widget().set_value(value) value = self.settings.general.get_int() which_button = { ALIGN_RIGHT: , ALIGN_LEFT: , ALIGN_CENTER: } self.get_widget(which_button[value]).set_active(True) self.get_widget("window_horizontal_displacement").set_sensitive(value != ALIGN_CENTER) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) self._load_screen_settings() value = self.settings.general.get_boolean() self.get_widget().set_active(value) self.get_widget().set_sensitive(value) self.get_widget().set_sensitive(value) text = Gtk.TextBuffer() text = self.get_widget().get_buffer() for title, matcher, _useless in QUICK_OPEN_MATCHERS: text.insert_at_cursor("%s: %s\n" % (title, matcher)) self.get_widget().set_buffer(text) value = self.settings.general.get_string() if value is None: value = "subl %(file_path)s:%(line_number)s" self.get_widget().set_text(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_string() if value: self.get_widget().set_text(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() 
self.get_widget().set_active(value) value = self.settings.general.get_int() self.get_widget().set_value(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) value = self.settings.general.get_boolean() self.get_widget().set_active(value) self.get_widget().set_sensitive(not value) value = self.settings.styleFont.get_string() if value: self.get_widget().set_font_name(value) value = self.settings.styleFont.get_boolean() self.get_widget().set_active(value) value = self.settings.styleFont.get_boolean() self.get_widget().set_active(value) self.fill_palette_names() value = self.settings.styleFont.get_string() self.set_palette_name(value) value = self.settings.styleFont.get_string() self.set_palette_colors(value) self.update_demo_palette(value) value = self.settings.style.get_int() self.set_cursor_shape(value) value = self.settings.style.get_int() self.set_cursor_blink_mode(value) value = self.settings.styleBackground.get_int() self.get_widget().set_value(MAX_TRANSPARENCY - value) value = self.settings.general.get_int() self.get_widget().set_active(value) self._load_hooks_settings()
Load configurations for all widgets in General, Scrolling and Appearance tabs from dconf.
373,122
def _get_calculated_value(self, value):
    if isinstance(value, types.LambdaType):
        expanded_value = value(self.structure)
        return self._get_calculated_value(expanded_value)
    else:
        return self._parse_value(value)
Gets the final value of the field, running the lambda functions recursively until a final value is derived. :param value: The value to calculate/expand :return: The final value
373,123
def make_seekable(fileobj):
    if sys.version_info < (3, 0) and isinstance(fileobj, file):
        filename = fileobj.name
        fileobj = io.FileIO(fileobj.fileno(), closefd=False)
        fileobj.name = filename
    assert isinstance(fileobj, io.IOBase), \
        "fileobj must be an instance of io.IOBase or a file, got %s" \
        % type(fileobj)
    return fileobj if fileobj.seekable() \
        else ArchiveTemp(fileobj)
If the file-object is not seekable, return ArchiveTemp of the fileobject, otherwise return the file-object itself
373,124
def get_list(self, section, option):
    value = self.get(section, option)
    return list(filter(None, (x.strip() for x in value.splitlines())))
This allows for loading of Pyramid list style configuration options: [foo] bar = baz qux zap ``get_list('foo', 'bar')`` returns ``['baz', 'qux', 'zap']`` :param str section: The section to read. :param str option: The option to read from the section. :returns: list
373,125
def get_backoff_time(self):
    if self._observed_errors <= 1:
        return 0
    backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
    return min(self.BACKOFF_MAX, backoff_value)
Formula for computing the current backoff :rtype: float
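Worked values for the formula, as a standalone sketch (the backoff_factor and cap used here are illustrative, not the library's defaults):

    BACKOFF_MAX = 120

    def backoff_time(observed_errors, backoff_factor=0.5):
        if observed_errors <= 1:
            return 0
        return min(BACKOFF_MAX, backoff_factor * (2 ** (observed_errors - 1)))

    assert [backoff_time(n) for n in range(1, 6)] == [0, 1.0, 2.0, 4.0, 8.0]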
373,126
def diff_list(self, list1, list2):
    for key in list1:
        if key in list2 and list2[key] != list1[key]:
            print(key)
        elif key not in list2:
            print(key)
Extracts differences between lists. For debug purposes
373,127
def warp(self, warp_matrix, img, iflag=cv2.INTER_NEAREST):
    height, width = img.shape[:2]
    warped_img = np.zeros_like(img, dtype=img.dtype)
    if (self.interpolation_type == InterpolationType.LINEAR) or img.ndim == 2:
        warped_img = cv2.warpAffine(img.astype(np.float32), warp_matrix, (width, height),
                                    flags=iflag).astype(img.dtype)
    elif img.ndim == 3:
        for idx in range(img.shape[-1]):
            warped_img[..., idx] = cv2.warpAffine(img[..., idx].astype(np.float32), warp_matrix,
                                                  (width, height), flags=iflag).astype(img.dtype)
    else:
        # error message lost in extraction; text assumed
        raise ValueError('Image has unsupported number of dimensions: {}'.format(img.ndim))
    return warped_img
Function to warp input image given an estimated 2D linear transformation :param warp_matrix: Linear 2x3 matrix to use to linearly warp the input images :type warp_matrix: ndarray :param img: Image to be warped with estimated transformation :type img: ndarray :param iflag: Interpolation flag, specified interpolation using during resampling of warped image :type iflag: cv2.INTER_* :return: Warped image using the linear matrix
373,128
def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM): name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name) ret = {: name, : {}, : True, : } grain = __salt__[](name) if grain: if not isinstance(grain, list): ret[] = False ret[] = .format(name) return ret if isinstance(value, list): if set(value).issubset(set(__salt__[](name))): ret[] = .format(name, value) return ret elif name in __context__.get(, {}): intersection = set(value).intersection(__context__.get(, {})[name]) if intersection: value = list(set(value).difference(__context__[][name])) ret[] = .format(value, name) if not in __context__: __context__[] = {} if name not in __context__[]: __context__[][name] = set() __context__[][name].update(value) else: if value in grain: ret[] = .format(name, value) return ret if __opts__[]: ret[] = None ret[] = .format(name, value) ret[] = {: grain} return ret if __opts__[]: ret[] = None ret[] = .format(name) ret[] = {: grain} return ret new_grains = __salt__[](name, value) if isinstance(value, list): if not set(value).issubset(set(__salt__[](name))): ret[] = False ret[] = .format(name, value) return ret else: if value not in __salt__[](name, delimiter=DEFAULT_TARGET_DELIM): ret[] = False ret[] = .format(name, value) return ret ret[] = .format(name, value) ret[] = {: new_grains} return ret
.. versionadded:: 2014.1.0 Ensure the value is present in the list-type grain. Note: If the grain that is provided in ``name`` is not present on the system, this new grain will be created with the corresponding provided value. name The grain name. value The value is present in the list type grain. delimiter A delimiter different from the default ``:`` can be provided. .. versionadded:: v2015.8.2 The grain should be `list type <http://docs.python.org/2/tutorial/datastructures.html#data-structures>`_ .. code-block:: yaml roles: grains.list_present: - value: web For multiple grains, the syntax looks like: .. code-block:: yaml roles: grains.list_present: - value: - web - dev
373,129
def get_upgrade(self, using=None, **kwargs):
    return self._get_connection(using).indices.get_upgrade(index=self._name, **kwargs)
Monitor how much of the index is upgraded. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_upgrade`` unchanged.
373,130
def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'):
    # default name string lost in extraction; value assumed
    with tf.variable_scope(name):
        num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2])
        indices = [[i] * num_transforms for i in xrange(num_batch)]
        input_repeated = tf.gather(U, tf.reshape(indices, [-1]))
        return transformer(input_repeated, thetas, out_size)
Batch Spatial Transformer function for `2D Affine Transformation <https://en.wikipedia.org/wiki/Affine_transformation>`__. Parameters ---------- U : list of float tensor of inputs [batch, height, width, num_channels] thetas : list of float a set of transformations for each input [batch, num_transforms, 6] out_size : list of int the size of the output [out_height, out_width] name : str optional function name Returns ------ float Tensor of size [batch * num_transforms, out_height, out_width, num_channels]
373,131
def route(self, path=None, method='GET', callback=None, name=None,
          apply=None, skip=None, **config):
    # quoted literals below were stripped in extraction; restored from
    # Bottle's source (assumed)
    if callable(path):
        path, callback = None, path
    plugins = makelist(apply)
    skiplist = makelist(skip)
    if 'decorate' in config:
        depr("The 'decorate' parameter was renamed to 'apply'")
        plugins += makelist(config.pop('decorate'))
    if config.pop('no_hooks', False):
        depr("The no_hooks parameter is no longer used. Add 'hooks' to the"
             " list of skipped plugins instead.")
        skiplist.append('hooks')
    static = config.get('static', False)

    def decorator(callback):
        for rule in makelist(path) or yieldroutes(callback):
            for verb in makelist(method):
                verb = verb.upper()
                cfg = dict(rule=rule, method=verb, callback=callback,
                           name=name, app=self, config=config,
                           apply=plugins, skip=skiplist)
                self.routes.append(cfg)
                cfg['id'] = self.routes.index(cfg)
                self.router.add(rule, verb, cfg['id'], name=name, static=static)
                if DEBUG:
                    self.ccache[cfg['id']] = self._build_callback(cfg)
        return callback

    return decorator(callback) if callback else decorator
A decorator to bind a function to a request URL. Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`).
373,132
def condition(condition=None, statement=None, _else=None, **kwargs):
    result = None
    checked = False
    if condition is not None:
        checked = run(condition, **kwargs)
    if checked:
        if statement is not None:
            result = run(statement, **kwargs)
    elif _else is not None:
        result = run(_else, **kwargs)
    return result
Run an statement if input condition is checked and return statement result. :param condition: condition to check. :type condition: str or dict :param statement: statement to process if condition is checked. :type statement: str or dict :param _else: else statement. :type _else: str or dict :param kwargs: condition and statement additional parameters. :return: statement result.
373,133
def patch(self, item, byte_order=BYTEORDER): self.index_data() if is_container(item): length = item.container_size() if length[1] is not 0: raise ContainerLengthError(item, length) field = item.first_field() if field is None: return None index = field.index if index.bit is not 0: raise FieldIndexError(field, index) buffer = bytearray(b * index.byte) item.serialize(buffer, index, byte_order=byte_order) content = buffer[index.byte:] if len(content) != length[0]: raise BufferError(len(content), length[0]) return Patch(content, index.address, byte_order, length[0] * 8, 0, False) elif is_field(item): index = item.index alignment = item.alignment if index.bit != alignment.bit_offset: raise FieldGroupOffsetError( item, index, Alignment(alignment.byte_size, index.bit)) buffer = bytearray(b * index.byte) item.serialize(buffer, index, byte_order=byte_order) content = buffer[index.byte:] if len(content) != alignment.byte_size: raise BufferError(len(content), alignment.byte_size) patch_size, bit_offset = divmod(item.bit_size, 8) if bit_offset is not 0: inject = True patch_size += 1 else: inject = False patch_offset, bit_offset = divmod(alignment.bit_offset, 8) if bit_offset is not 0: inject = True if byte_order is Byteorder.big: start = alignment.byte_size - (patch_offset + patch_size) stop = alignment.byte_size - patch_offset else: start = patch_offset stop = patch_offset + patch_size return Patch(content[start:stop], index.address + start, byte_order, item.bit_size, bit_offset, inject) else: raise MemberTypeError(self, item)
Returns a memory :class:`Patch` for the given *item* that shall be patched in the `data source`. :param item: item to patch. :param byte_order: encoding :class:`Byteorder` for the item. :type byte_order: :class:`Byteorder`, :class:`str`
373,134
def modify_module(channel, module_name, module_state):
    gui = ui_embed.UI(
        channel,
        "{} updated".format(module_name),
        "{} is now {}".format(module_name,
                              "activated" if module_state else "deactivated"),
        modulename=modulename
    )
    return gui
Creates an embed UI containing the module modified message Args: channel (discord.Channel): The Discord channel to bind the embed to module_name (str): The name of the module that was updated module_state (bool): The current state of the module Returns: embed: The created embed
373,135
def create_message(self):
    # message template string was lost in extraction
    body = dedent().format(exception=self._exception,
                           traceback=self._traceback,
                           queue=self._queue,
                           payload=self._payload,
                           worker=self._worker)
    return MIMEText(body)
Returns a message body to send in this email. Should be from email.mime.*
373,136
def set_encoding(self, encoding):
    if encoding == self.__type_representation:
        return
    if self.__type_representation == type_encoding.CLUSTER_INDEX_LABELING:
        if encoding == type_encoding.CLUSTER_INDEX_LIST_SEPARATION:
            self.__clusters = self.__convert_label_to_index()
        else:
            self.__clusters = self.__convert_label_to_object()
    elif self.__type_representation == type_encoding.CLUSTER_INDEX_LIST_SEPARATION:
        if encoding == type_encoding.CLUSTER_INDEX_LABELING:
            self.__clusters = self.__convert_index_to_label()
        else:
            self.__clusters = self.__convert_index_to_object()
    else:
        if encoding == type_encoding.CLUSTER_INDEX_LABELING:
            self.__clusters = self.__convert_object_to_label()
        else:
            self.__clusters = self.__convert_object_to_index()
    self.__type_representation = encoding
! @brief Change clusters encoding to specified type (index list, object list, labeling). @param[in] encoding (type_encoding): New type of clusters representation.
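For reference, the three encodings the converter switches between, shown for one clustering of five objects (values are illustrative):

    objects = [0.1, 0.2, 0.9, 1.0, 1.1]

    index_list = [[0, 1], [2, 3, 4]]             # CLUSTER_INDEX_LIST_SEPARATION
    labeling = [0, 0, 1, 1, 1]                   # CLUSTER_INDEX_LABELING: one label per object
    object_list = [[0.1, 0.2], [0.9, 1.0, 1.1]]  # object list: the objects themselves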
373,137
def errorhandle(self, resp):
    # format names, the over-limit substring and the error tag literal were
    # lost in extraction; gaps preserved
    if self.format == :
        parsed = xmltodict.parse(resp)
        errors = parsed[self.RESPONSE_TOKEN][self.ERROR_TOKEN]
        if type(errors) is list and len(errors) > 1:
            messages = ", ".join([" ".join(["{}: {}".format(k, v) for k, v in e.items()])
                                  for e in errors])
        else:
            overlimit = any( in msg.lower() for msg in errors.values())
            if overlimit:
                raise APILimitExceeded("This API key has used up its daily quota of calls.")
            else:
                messages = " ".join(["{}: {}".format(k, v) for k, v in errors.items()])
    elif self.format == :
        import xml.etree.ElementTree as ET
        errors = ET.fromstring(resp).findall(self.ERROR_TOKEN)
        messages = ", ".join(err.find().text for err in errors)
    else:
        raise ValueError("Invalid API response format specified: {}.".format(self.format))
    raise BustimeError("API returned: {}".format(messages))
Parse API error responses and raise appropriate exceptions.
373,138
def intertwine(*iterables):
    iterables = tuple(imap(ensure_iterable, iterables))
    empty = object()
    return (item
            for iterable in izip_longest(*iterables, fillvalue=empty)
            for item in iterable if item is not empty)
Constructs an iterable which intertwines given iterables. The resulting iterable will return an item from first sequence, then from second, etc. until the last one - and then another item from first, then from second, etc. - up until all iterables are exhausted.
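A quick check of the interleaving behavior, restated with the Python 3 itertools names in place of imap/izip_longest (and without the ensure_iterable wrapper):

    from itertools import zip_longest

    def intertwine(*iterables):
        empty = object()
        return (item
                for group in zip_longest(*iterables, fillvalue=empty)
                for item in group if item is not empty)

    assert list(intertwine([1, 2, 3], "ab")) == [1, "a", 2, "b", 3]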
373,139
def waliki_box(context, slug, show_edit=True, *args, **kwargs):
    # permission names and string literals were lost in extraction; gaps preserved
    request = context["request"]
    try:
        page = Page.objects.get(slug=slug)
    except Page.DoesNotExist:
        page = None
    if (page and check_perms_helper(, request.user, slug)
            or (not page and check_perms_helper(, request.user, slug))):
        form = PageForm(instance=page, initial={: slug})
        form_action = reverse("waliki_edit", args=[slug])
    else:
        form = None
        form_action = None
    return {
        "request": request,
        "slug": slug,
        "label": slug.replace(, ),
        "page": page,
        "form": form,
        "form_action": form_action,
    }
A templatetag to render a wiki page content as a box in any webpage, and allow rapid edition if you have permission. It's inspired in `django-boxes`_ .. _django-boxes: https://github.com/eldarion/django-boxes
373,140
def load_images(input_dir, batch_shape):
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    batch_size = batch_shape[0]
    # file pattern and color mode were stripped in extraction; '*.png' and
    # 'RGB' restored from the docstring context (assumed)
    for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
        with tf.gfile.Open(filepath) as f:
            image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
        images[idx, :, :, :] = image * 2.0 - 1.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_size:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape)
            idx = 0
    if idx > 0:
        yield filenames, images
Read png images from input directory in batches. Args: input_dir: input directory batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3] Yields: filenames: list file names without path of each image Length of this list could be less than batch_size, in this case only first few images of the result are elements of the minibatch. images: array with all images from this batch
373,141
def deserialize_assign(self, workflow, start_node):
    # attribute names and error messages were stripped in extraction;
    # restored from SpiffWorkflow's XML serializer (assumed)
    name = start_node.getAttribute('name')
    attrib = start_node.getAttribute('field')
    value = start_node.getAttribute('value')
    kwargs = {}
    if name == '':
        _exc('name attribute required')
    if attrib != '' and value != '':
        _exc('Both, field and value attributes found')
    elif attrib == '' and value == '':
        _exc('field or value attribute required')
    elif value != '':
        kwargs['right'] = value
    else:
        kwargs['right_attribute'] = attrib
    return operators.Assign(name, **kwargs)
Reads the "pre-assign" or "post-assign" tag from the given node. start_node -- the xml node (xml.dom.minidom.Node)
373,142
def run(self):
    while True:
        self.update_log_filenames()
        self.open_closed_files()
        anything_published = self.check_log_files_and_publish_updates()
        if not anything_published:
            time.sleep(0.05)
Run the log monitor. This will query Redis once every second to check if there are new log files to monitor. It will also store those log files in Redis.
373,143
def fasta_files_equal(seq_file1, seq_file2):
    # format string lost in extraction; 'fasta' restored from the docstring
    seq1 = SeqIO.read(open(seq_file1), 'fasta')
    seq2 = SeqIO.read(open(seq_file2), 'fasta')
    if str(seq1.seq) == str(seq2.seq):
        return True
    else:
        return False
Check equality of a FASTA file to another FASTA file Args: seq_file1: Path to a FASTA file seq_file2: Path to another FASTA file Returns: bool: If the sequences are the same
373,144
def ks(self, num_ngrams):
    if self.num_bits:
        k = int(self.num_bits / num_ngrams)
        residue = self.num_bits % num_ngrams
        return ([k + 1] * residue) + ([k] * (num_ngrams - residue))
    else:
        return [self.k if self.k else 0] * num_ngrams
Provide a k for each ngram in the field value. :param num_ngrams: number of ngrams in the field value :return: [ k, ... ] a k value for each of num_ngrams such that the sum is exactly num_bits
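A standalone restatement of the allocation rule: 10 bits across 3 ngrams gives [4, 3, 3], with the residue (10 % 3 = 1) topping up the first entry so the sum is exactly num_bits:

    def ks(num_bits, num_ngrams):
        k = num_bits // num_ngrams
        residue = num_bits % num_ngrams
        return [k + 1] * residue + [k] * (num_ngrams - residue)

    assert ks(10, 3) == [4, 3, 3]
    assert sum(ks(10, 3)) == 10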
373,145
def _add_in_streams(self, bolt):
    if self.inputs is None:
        return
    input_dict = self._sanitize_inputs()
    for global_streamid, gtype in input_dict.items():
        in_stream = bolt.inputs.add()
        in_stream.stream.CopyFrom(self._get_stream_id(global_streamid.component_id,
                                                      global_streamid.stream_id))
        if isinstance(gtype, Grouping.FIELDS):
            in_stream.gtype = gtype.gtype
            in_stream.custom_grouping_object = gtype.python_serialized
            in_stream.type = topology_pb2.CustomGroupingObjectType.Value("PYTHON_OBJECT")
        else:
            in_stream.gtype = gtype
Adds inputs to a given protobuf Bolt message
373,146
def unlocked(self):
    if self.password is not None:
        return bool(self.password)
    else:
        if (
            "UNLOCK" in os.environ
            and os.environ["UNLOCK"]
            and self.config_key in self.config
            and self.config[self.config_key]
        ):
            log.debug("Trying to use environmental "
                      "variable to unlock wallet")
            self.unlock(os.environ.get("UNLOCK"))
            return bool(self.password)
    return False
Is the store unlocked so that I can decrypt the content?
373,147
def addConstraint(self, constraint, variables=None):
    if not isinstance(constraint, Constraint):
        if callable(constraint):
            constraint = FunctionConstraint(constraint)
        else:
            msg = ("Constraints must be instances of subclasses "
                   "of the Constraint class")
            raise ValueError(msg)
    self._constraints.append((constraint, variables))
Add a constraint to the problem Example: >>> problem = Problem() >>> problem.addVariables(["a", "b"], [1, 2, 3]) >>> problem.addConstraint(lambda a, b: b == a+1, ["a", "b"]) >>> solutions = problem.getSolutions() >>> @param constraint: Constraint to be included in the problem @type constraint: instance a L{Constraint} subclass or a function to be wrapped by L{FunctionConstraint} @param variables: Variables affected by the constraint (default to all variables). Depending on the constraint type the order may be important. @type variables: set or sequence of variables
373,148
def plot_sector_exposures_net(net_exposures, sector_dict=None, ax=None):
    if ax is None:
        ax = plt.gca()
    if sector_dict is None:
        sector_names = SECTORS.values()
    else:
        sector_names = sector_dict.values()
    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 11))
    for i in range(len(net_exposures)):
        ax.plot(net_exposures[i], color=color_list[i], alpha=0.8,
                label=sector_names[i])
    # axis label strings were lost in extraction; text assumed
    ax.set(title='Net exposures to sectors',
           ylabel='Proportion of net exposure in sectors')
    return ax
Plots output of compute_sector_exposures as line graphs Parameters ---------- net_exposures : arrays Arrays of net sector exposures (output of compute_sector_exposures). sector_dict : dict or OrderedDict Dictionary of all sectors - See full description in compute_sector_exposures
373,149
def _check_hint_bounds(self, ds):
    ret_val = []
    boundary_variables = cfutil.get_cell_boundary_variables(ds)
    for name in ds.variables:
        # suffix restored from the docstring; message text and section key
        # below are assumed
        if name.endswith('_bounds') and name not in boundary_variables:
            msg = ('{} might be a cell boundary variable but there are no '
                   'variables that define it as a boundary using the '
                   '`bounds` attribute.'.format(name))
            result = Result(BaseCheck.LOW, True,
                            self.section_titles['7.1'], [msg])
            ret_val.append(result)
    return ret_val
Checks for variables ending with _bounds, if they are not cell methods, make the recommendation :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
373,150
def class_associations(self, cn: ClassDefinitionName, must_render: bool=False) -> str: assocs: List[str] = [] if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slotname in self.filtered_cls_slots(cn, False)[::-1]: slot = self.schema.slots[slotname] if slot.range in self.schema.classes: assocs.append(self.class_box(cn) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + + self.class_box(slot.range)) if cn in self.synopsis.rangerefs: for slotname in sorted(self.synopsis.rangerefs[cn]): slot = self.schema.slots[slotname] if slot.domain in self.schema.classes and (slot.range != cls.name or must_render): assocs.append(self.class_box(slot.domain) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + + self.class_box(cn)) for mixin in cls.mixins: assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin)) if cls.name in self.synopsis.mixinrefs: for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True): assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn)) if cn in self.synopsis.applytos: for injector in sorted(self.synopsis.applytos[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_injected + self.class_box(ClassDefinitionName(injector))) self.associations_generated.add(cn) if cn in self.synopsis.isarefs: for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls))) if cls.is_a: assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn)) return .join(assocs)
Emit all associations for a focus class. If none are specified, all classes are generated @param cn: Name of class to be emitted @param must_render: True means render even if this is a target (class is specifically requested) @return: YUML representation of the association
373,151
def _process_state_final_run(self, job_record):
    uow = self.uow_dao.get_one(job_record.related_unit_of_work)
    if uow.is_processed:
        self.update_job(job_record, uow, job.STATE_PROCESSED)
    elif uow.is_noop:
        self.update_job(job_record, uow, job.STATE_NOOP)
    elif uow.is_canceled:
        self.update_job(job_record, uow, job.STATE_SKIPPED)
    elif uow.is_invalid:
        # message template lost in extraction
        msg = .format(job_record.db_id, job_record.process_name,
                      job_record.timeperiod, uow.state)
        self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
    else:
        # message template lost in extraction
        msg = .format(job_record.process_name, job_record.timeperiod,
                      job_record.state, uow.state)
        self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
    timetable_tree = self.timetable.get_tree(job_record.process_name)
    timetable_tree.build_tree()
method takes care of processing job records in STATE_FINAL_RUN state
373,152
def merge(directory, message, branch_label, rev_id, revisions):
    _merge(directory, revisions, message, branch_label, rev_id)
Merge two revisions together, creating a new revision file
373,153
def get_core_api():
    global core_api
    if core_api is None:
        config.load_kube_config()
        if API_KEY is not None:
            configuration = client.Configuration()
            # stripped key names restored from the kubernetes client docs:
            # both are 'authorization', with a 'Bearer' prefix (assumed)
            configuration.api_key['authorization'] = API_KEY
            configuration.api_key_prefix['authorization'] = 'Bearer'
            core_api = client.CoreV1Api(client.ApiClient(configuration))
        else:
            core_api = client.CoreV1Api()
    return core_api
Create instance of Core V1 API of kubernetes: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CoreV1Api.md :return: instance of client
373,154
def collect_results(self, data_values):
    # dict keys were lost in extraction; gaps preserved
    self.last_poll_results[] = self.poll_data[]
    consumer_name = data_values[]
    del data_values[]
    process_name = data_values[]
    del data_values[]
    if consumer_name not in self.last_poll_results:
        self.last_poll_results[consumer_name] = dict()
    self.last_poll_results[consumer_name][process_name] = data_values
    self.stats = self.calculate_stats(self.last_poll_results)
Receive the data from the consumers polled and process it. :param dict data_values: The poll data returned from the consumer :type data_values: dict
373,155
def read(filename):
    mesh = meshio.read(filename)
    if "tetra" in mesh.cells:
        points, cells = _sanitize(mesh.points, mesh.cells["tetra"])
        return (
            MeshTetra(points, cells),
            mesh.point_data,
            mesh.cell_data,
            mesh.field_data,
        )
    elif "triangle" in mesh.cells:
        points, cells = _sanitize(mesh.points, mesh.cells["triangle"])
        return (
            MeshTri(points, cells),
            mesh.point_data,
            mesh.cell_data,
            mesh.field_data,
        )
    else:
        raise RuntimeError("Unknown mesh type.")
Reads an unstructured mesh with added data. :param filenames: The files to read from. :type filenames: str :returns mesh{2,3}d: The mesh data. :returns point_data: Point data read from file. :type point_data: dict :returns field_data: Field data read from file. :type field_data: dict
373,156
def purge_queue(self, name):
    content = {"_object_id": {"_object_name": "org.apache.qpid.broker:queue:{0}".format(name)},
               "_method_name": "purge",
               "_arguments": {"type": "queue", "name": name, "filter": dict()}}
    logger.debug("Message content -> {0}".format(content))
    return content, self.method_properties
Create message content and properties to purge queue with QMFv2 :param name: Name of queue to purge :type name: str :returns: Tuple containing content and method properties
373,157
def process_point_value(cls, command_type, command, index, op_type):
    # log message template lost in extraction; text assumed
    _log.debug('Processing point value for index {}: {}'.format(index, command))
A PointValue was received from the Master. Process its payload. :param command_type: (string) Either 'Select' or 'Operate'. :param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.). :param index: (integer) DNP3 index of the payload's data definition. :param op_type: An OperateType, or None if command_type == 'Select'.
373,158
def harvest_openaire_projects(source=None, setspec=None):
    loader = LocalOAIRELoader(source=source) if source \
        else RemoteOAIRELoader(setspec=setspec)
    for grant_json in loader.iter_grants():
        register_grant.delay(grant_json)
Harvest grants from OpenAIRE and store as authority records.
373,159
def evaluate(self, dataset):
    if not isinstance(dataset, DataFrame):
        raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
    java_glr_summary = self._call_java("evaluate", dataset)
    return GeneralizedLinearRegressionSummary(java_glr_summary)
Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame`
373,160
def fileImport(filepath, ignore=None):
    # log message templates were lost in extraction; gaps preserved
    basepath, package = EnvManager.packageSplit(filepath)
    if not (basepath and package):
        return None
    if ignore and package in ignore:
        return None
    basepath = os.path.normcase(basepath)
    if basepath not in sys.path:
        sys.path.insert(0, basepath)
    logger.debug( % package)
    try:
        __import__(package)
        module = sys.modules[package]
    except ImportError:
        logger.exception( % package)
        return None
    except KeyError:
        logger.exception( % package)
        return None
    except StandardError:
        logger.exception( % package)
        return None
    return module
Imports the module located at the given filepath. :param filepath | <str> ignore | [<str>, ..] || None :return <module> || None
373,161
def get_parent_element(self):
    return {AUDIT_REF_STATE: self.context.audit_record,
            SIGNATURE_REF_STATE: self.context.signature}[self.ref_state]
Signatures and Audit elements share sub-elements, we need to know which to set attributes on
373,162
def path(self):
    if not self.id:
        raise ValueError()  # error message lost in extraction
    return self.path_helper(self.taskqueue.path, self.id)
Getter property for the URL path to this Task. :rtype: string :returns: The URL path to this task.
373,163
def _Resample(self, stats, target_size):
    t_first = stats[0][0]
    t_last = stats[-1][0]
    interval = (t_last - t_first) / target_size
    result = []
    current_t = t_first
    current_v = 0
    i = 0
    while i < len(stats):
        stat_t = stats[i][0]
        stat_v = stats[i][1]
        if stat_t <= (current_t + interval):
            current_v = stat_v
            i += 1
        else:
            result.append([current_t + interval, current_v])
            current_t += interval
    result.append([current_t + interval, current_v])
    return result
Resamples the stats to have a specific number of data points.
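A standalone sketch of the same bucketing logic, traced on a small input: four samples downsampled to two points keep the last value seen in each interval:

    def resample(stats, target_size):
        t_first, t_last = stats[0][0], stats[-1][0]
        interval = (t_last - t_first) / target_size
        result, current_t, current_v, i = [], t_first, 0, 0
        while i < len(stats):
            stat_t, stat_v = stats[i]
            if stat_t <= current_t + interval:
                current_v = stat_v
                i += 1
            else:
                result.append([current_t + interval, current_v])
                current_t += interval
        result.append([current_t + interval, current_v])
        return result

    assert resample([[0, 1], [1, 2], [2, 3], [3, 4]], 2) == [[1.5, 2], [3.0, 4]]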
373,164
def console_blit(
    src: tcod.console.Console,
    x: int,
    y: int,
    w: int,
    h: int,
    dst: tcod.console.Console,
    xdst: int,
    ydst: int,
    ffade: float = 1.0,
    bfade: float = 1.0,
) -> None:
    lib.TCOD_console_blit(
        _console(src), x, y, w, h, _console(dst), xdst, ydst, ffade, bfade
    )
Blit the console src from x,y,w,h to console dst at xdst,ydst. .. deprecated:: 8.5 Call the :any:`Console.blit` method instead.
373,165
def service_list(auth=None, **kwargs):
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    return cloud.list_services(**kwargs)
List services CLI Example: .. code-block:: bash salt '*' keystoneng.service_list
373,166
def create_key(self, title, key):
    created = None
    if title and key:
        # stripped literals restored from github3.py (assumed)
        url = self._build_url('user', 'keys')
        req = self._post(url, data={'title': title, 'key': key})
        json = self._json(req, 201)
        if json:
            created = Key(json, self)
    return created
Create a new key for the authenticated user. :param str title: (required), key title :param key: (required), actual key contents, accepts path as a string or file-like object :returns: :class:`Key <github3.users.Key>`
373,167
def get_zip_data(self, filename):
    import zipimport
    # stripped literals restored from coverage.py (assumed)
    markers = ['.zip' + os.sep, '.egg' + os.sep]
    for marker in markers:
        if marker in filename:
            parts = filename.split(marker)
            try:
                zi = zipimport.zipimporter(parts[0] + marker[:-1])
            except zipimport.ZipImportError:
                continue
            try:
                data = zi.get_data(parts[1])
            except IOError:
                continue
            return to_string(data)
    return None
Get data from `filename` if it is a zip file path. Returns the string data read from the zip file, or None if no zip file could be found or `filename` isn't in it. The data returned will be an empty string if the file is empty.
373,168
def check_unique_tokens(sender, instance, **kwargs):
    if isinstance(instance, CallbackToken):
        if CallbackToken.objects.filter(key=instance.key, is_active=True).exists():
            instance.key = generate_numeric_token()
Ensures that mobile and email tokens are unique or tries once more to generate.
373,169
def save_plain_image_as_file(self, filepath, format='png', quality=90):
    # the default format and the 'jpeg'/'quality' literals were stripped in
    # extraction; restored from context (assumed)
    pixbuf = self.get_plain_image_as_pixbuf()
    options, values = [], []
    if format == 'jpeg':
        options.append('quality')
        values.append(str(quality))
    pixbuf.savev(filepath, format, options, values)
Used for generating thumbnails. Does not include overlaid graphics.
373,170
def parse_wiki_terms(doc):
    # string literals (markers and replacement characters) were lost in
    # extraction; gaps preserved
    results = []
    last3 = [, , ]
    header = True
    for line in doc.split():
        last3.pop(0)
        last3.append(line.strip())
        if all(s.startswith() and not s == for s in last3):
            if header:
                header = False
                continue
            last3 = [s.replace(, ).replace(, ).strip() for s in last3]
            rank, term, count = last3
            rank = int(rank.split()[0])
            term = term.replace(, )
            term = term[term.index()+1:].lower()
            results.append(term)
    assert len(results) in [1000, 2000, 1284]
    return results
who needs an html parser. fragile hax, but checks the result at the end
373,171
def analyse(file, length=None):
    if isinstance(file, bytes):
        file = io.BytesIO(file)
    io_check(file)
    int_check(length or sys.maxsize)
    return analyse2(file, length)
Analyse application layer packets. Keyword arguments: * file -- bytes or file-like object, packet to be analysed * length -- int, length of the analysing packet Returns: * Analysis -- an Analysis object from `pcapkit.analyser`
373,172
def create_session(self):
    session = None
    if self.key_file is not None:
        credfile = os.path.expandvars(os.path.expanduser(self.key_file))
        try:
            with open(credfile, 'r') as f:  # file mode lost in extraction; 'r' assumed
                creds = json.load(f)
        except json.JSONDecodeError as e:
            logger.error(
                "EC2Provider : json decode error in credential file {}".format(self.label, credfile)
            )
            raise e
        except Exception as e:
            logger.debug(
                "EC2Provider caught exception while reading credential file: {1}".format(
                    self.label, credfile
                )
            )
            raise e
        logger.debug("EC2Provider : Using credential file to create session".format(self.label))
        session = boto3.session.Session(region_name=self.region, **creds)
    elif self.profile is not None:
        logger.debug("EC2Provider : Using profile name to create session".format(self.label))
        session = boto3.session.Session(
            profile_name=self.profile, region_name=self.region
        )
    else:
        logger.debug("EC2Provider : Using environment variables to create session".format(self.label))
        session = boto3.session.Session(region_name=self.region)
    return session
Create a session. First we look in self.key_file for a path to a json file with the credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'. Next we look at self.profile for a profile name and try to use the Session call to automatically pick up the keys for the profile from the user default keys file ~/.aws/config. Finally, boto3 will look for the keys in environment variables: AWS_ACCESS_KEY_ID: The access key for your AWS account. AWS_SECRET_ACCESS_KEY: The secret key for your AWS account. AWS_SESSION_TOKEN: The session key for your AWS account. This is only needed when you are using temporary credentials. The AWS_SECURITY_TOKEN environment variable can also be used, but is only supported for backwards compatibility purposes. AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
373,173
def jdbc(self, url, table, mode=None, properties=None):
    if properties is None:
        properties = dict()
    jprop = JavaClass("java.util.Properties",
                      self._spark._sc._gateway._gateway_client)()
    for k in properties:
        jprop.setProperty(k, properties[k])
    self.mode(mode)._jwrite.jdbc(url, table, jprop)
Saves the content of the :class:`DataFrame` to an external database table via JDBC. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: Name of the table in the external database. :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
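Typical call, assuming a reachable PostgreSQL instance and a matching JDBC driver on the classpath (URL, table and credentials below are placeholders):

    df.write.jdbc(
        "jdbc:postgresql://localhost:5432/mydb",
        table="public.events",
        mode="append",
        properties={"user": "SYSTEM", "password": "mypassword"},
    )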
373,174
def fetch(clobber=False): table_dir = os.path.join(data_dir(), ) if not clobber: h5_fname = os.path.join(table_dir, ) h5_size = 5033290 h5_dsets = { : (801, 81), : (801, 81), : (801, 81), : (801, 81), : (801, 81, 33), : (801, 81, 33), : (801, 81, 33), : (801, 81, 33) } if fetch_utils.h5_file_exists(h5_fname, h5_size, dsets=h5_dsets): print( ) return url = md5 = table_fname = os.path.join(table_dir, ) fetch_utils.download_and_verify(url, md5, fname=table_fname) url = md5 = readme_fname = os.path.join(table_dir, ) fetch_utils.download_and_verify(url, md5, fname=readme_fname) dat2hdf5(table_dir) print() os.remove(table_fname) os.remove(readme_fname)
Downloads the Marshall et al. (2006) dust map, which is based on 2MASS stellar photometry. Args: clobber (Optional[:obj:`bool`]): If ``True``, any existing file will be overwritten, even if it appears to match. If ``False`` (the default), :obj:`fetch()` will attempt to determine if the dataset already exists. This determination is not 100\% robust against data corruption.
373,175
def big_rnn_lm_2048_512(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(),
                        root=os.path.join(get_home_dir(), 'models'), **kwargs):
    # stripped literals restored from GluonNLP's source (assumed)
    predefined_args = {'embed_size': 512,
                       'hidden_size': 2048,
                       'projection_size': 512,
                       'num_layers': 1,
                       'embed_dropout': 0.1,
                       'encode_dropout': 0.1}
    mutable_args = ['embed_dropout', 'encode_dropout']
    assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
        'Cannot override predefined model settings.'
    predefined_args.update(kwargs)
    return _get_rnn_model(BigRNN, 'big_rnn_lm_2048_512', dataset_name, vocab,
                          pretrained, ctx, root, **predefined_args)
Big 1-layer LSTMP language model. Both embedding and projection size are 512. Hidden size is 2048. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'gbw'. If specified, then the returned vocabulary is extracted from the training set of the dataset. If None, then vocab is required, for specifying embedding weight size, and is directly returned. The pre-trained model achieves 44.05 ppl on Test of GBW dataset. vocab : gluonnlp.Vocab or None, default None Vocabulary object to be used with the language model. Required when dataset_name is not specified. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block, gluonnlp.Vocab
373,176
def t_php_START_HEREDOC(t):
    t.lexer.lineno += t.value.count("\n")
    t.lexer.push_state('php_heredoc')  # state name lost in extraction; assumed from phply
    t.lexer.heredoc_label = t.lexer.lexmatch.group('label')  # group name from the token regex
    return t
r'<<<[ \t]*(?P<label>[A-Za-z_][\w_]*)\n
373,177
def httpapi_request(client, **params) -> 'requests.Response':
    # return annotation and param keys were stripped in extraction; restored
    # from the AniDB HTTP API definition ('client', 'clientver', 'protover'); assumed
    return requests.get(
        _HTTPAPI,
        params={
            'client': client.name,
            'clientver': client.version,
            'protover': 1,
            **params,
        })
Send a request to AniDB HTTP API. https://wiki.anidb.net/w/HTTP_API_Definition
373,178
def posttrans_hook(conduit):
    # the environment-variable name, file mode and output template were lost
    # in extraction; gaps preserved
    if not in os.environ:
        with open(CK_PATH, ) as ck_fh:
            ck_fh.write(.format(chksum=_get_checksum(), mtime=_get_mtime()))
Hook after the package installation transaction. :param conduit: :return:
373,179
def add_role(ctx, role):
    # string literals (log messages and dict keys) were lost in extraction;
    # gaps preserved
    if role is None:
        log()
        return
    if ctx.obj[] is None:
        log()
        return
    change_user = ctx.obj[].objectmodels[].find_one({
        : ctx.obj[]
    })
    if role not in change_user.roles:
        change_user.roles.append(role)
        change_user.save()
        log()
    else:
        log(, lvl=warn)
Grant a role to an existing user
373,180
def debug(self):
    url = '{}/debug'.format(self.url)  # path template lost in extraction; '/debug' assumed from the docstring
    data = self._get(url)
    return data.json()
Retrieve the debug information from the charmstore.
373,181
def run(items, background=None):
    if not background:
        background = []
    background_bams = []
    paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
    if paired:
        inputs = [paired.tumor_data]
        if paired.normal_bam:
            background = [paired.normal_data]
            background_bams = [paired.normal_bam]
    else:
        assert not background
        inputs, background = shared.find_case_control(items)
        background_bams = [x["align_bam"] for x in background]
    orig_vcf = _run_wham(inputs, background_bams)
    out = []
    for data in inputs:
        if "sv" not in data:
            data["sv"] = []
        final_vcf = shared.finalize_sv(orig_vcf, data, items)
        data["sv"].append({"variantcaller": "wham", "vrn_file": final_vcf})
        out.append(data)
    return out
Detect copy number variations from batched set of samples using WHAM.
373,182
def load_env_from_file(filename):
    if not os.path.exists(filename):
        raise FileNotFoundError("Environment file {} does not exist.".format(filename))
    with open(filename) as f:
        for lineno, line in enumerate(f):
            line = line.strip()
            if not line or line.startswith("#"):  # comment marker lost in extraction; '#' assumed
                continue
            if "=" not in line:
                raise SyntaxError("Invalid environment file syntax in {} at line {}.".format(
                    filename, lineno + 1))
            name, value = parse_var(line)
            yield name, value
Read an env file into a collection of (name, value) tuples.
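Example usage with a minimal stand-in for parse_var, which is external to this snippet (the split-on-first-'=' behavior is an assumption):

    def parse_var(line):
        name, _, value = line.partition("=")
        return name.strip(), value.strip()

    # given a .env file with NAME=value lines and '#' comments:
    # env = dict(load_env_from_file(".env"))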
373,183
def buildFileListOrig(input, output=None, ivmlist=None, wcskey=None,
                      updatewcs=True, **workinplace):
    filelist, output, ivmlist, oldasndict = processFilenames(input, output)
    filelist = util.verifyFilePermissions(filelist)
    if filelist is None or len(filelist) == 0:
        return None, None, None, None, None
    manageInputCopies(filelist, **workinplace)
    if ivmlist is None:
        ivmlist = len(filelist) * [None]
    else:
        assert(len(filelist) == len(ivmlist))
    ivmlist = list(zip(ivmlist, filelist))
    filelist, ivmlist = check_files.checkFITSFormat(filelist, ivmlist)
    if not updatewcs:
        filelist = checkDGEOFile(filelist)
    updated_input = _process_input_wcs(filelist, wcskey, updatewcs)
    newfilelist, ivmlist = check_files.checkFiles(updated_input, ivmlist)
    if updatewcs:
        uw.updatewcs(','.join(set(newfilelist) - set(filelist)))  # separator lost in extraction; ',' assumed
    if len(ivmlist) > 0:
        ivmlist, filelist = list(zip(*ivmlist))
    else:
        filelist = []
    return newfilelist, ivmlist, output, oldasndict, filelist
Builds a file list which has undergone various instrument-specific checks for input to MultiDrizzle, including splitting STIS associations. Compared to buildFileList, this version returns the list of the original file names as specified by the user (e.g., before GEIS->MEF, or WAIVER FITS->MEF conversion).
373,184
def _build_date_header_string(self, date_value):
    if isinstance(date_value, datetime):
        date_value = time.mktime(date_value.timetuple())
    if not isinstance(date_value, basestring):
        date_value = formatdate(date_value, localtime=True)
    return native(date_value)
Gets the date_value (may be None, basestring, float or datetime.datetime instance) and returns a valid date string as per RFC 2822.
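A quick standard-library check of the conversion path this helper relies on (datetime -> POSIX timestamp -> RFC 2822 string):

from datetime import datetime
from email.utils import formatdate
import time

dt = datetime(2020, 1, 2, 3, 4, 5)
print(formatdate(time.mktime(dt.timetuple()), localtime=True))
# e.g. 'Thu, 02 Jan 2020 03:04:05 +0100' -- a valid RFC 2822 date string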
373,185
def repr_values(condition: Callable[..., bool],
                lambda_inspection: Optional[ConditionLambdaInspection],
                condition_kwargs: Mapping[str, Any],
                a_repr: reprlib.Repr) -> List[str]:
    if _is_lambda(a_function=condition):
        assert lambda_inspection is not None, \
            "Expected a lambda inspection when given a condition as a lambda function"
    else:
        assert lambda_inspection is None, \
            "Expected no lambda inspection in a condition given as a non-lambda function"

    reprs = dict()
    if lambda_inspection is not None:
        variable_lookup = []
        variable_lookup.append(condition_kwargs)

        closure_dict = dict()
        if condition.__closure__ is not None:
            closure_cells = condition.__closure__
            freevars = condition.__code__.co_freevars
            assert len(closure_cells) == len(freevars), \
                "Number of closure cells of a condition function ({}) == number of free vars ({})".format(
                    len(closure_cells), len(freevars))
            for cell, freevar in zip(closure_cells, freevars):
                closure_dict[freevar] = cell.cell_contents
        variable_lookup.append(closure_dict)

        if condition.__globals__ is not None:
            variable_lookup.append(condition.__globals__)

        recompute_visitor = icontract._recompute.Visitor(variable_lookup=variable_lookup)
        recompute_visitor.visit(node=lambda_inspection.node.body)
        recomputed_values = recompute_visitor.recomputed_values

        repr_visitor = Visitor(
            recomputed_values=recomputed_values,
            variable_lookup=variable_lookup,
            atok=lambda_inspection.atok)
        repr_visitor.visit(node=lambda_inspection.node.body)
        reprs = repr_visitor.reprs
    else:
        for key, val in condition_kwargs.items():
            if _representable(value=val):
                reprs[key] = val

    parts = []
    for key in sorted(reprs.keys()):
        # Message format reconstructed; the original literal was stripped.
        parts.append('{} was {}'.format(key, a_repr.repr(reprs[key])))
    return parts
Represent function arguments and frame values in the error message on contract breach.

:param condition: condition function of the contract
:param lambda_inspection:
    inspected lambda AST node corresponding to the condition function
    (None if the condition was not given as a lambda function)
:param condition_kwargs: condition arguments
:param a_repr: representation instance that defines how the values are represented.
:return: list of value representations
373,186
def _recursively_apply_get_cassette_subclass(self, replacement_dict_or_obj):
    if isinstance(replacement_dict_or_obj, dict):
        for key, replacement_obj in replacement_dict_or_obj.items():
            replacement_obj = self._recursively_apply_get_cassette_subclass(
                replacement_obj)
            replacement_dict_or_obj[key] = replacement_obj
        return replacement_dict_or_obj
    # Attribute name reconstructed; the original literal was stripped.
    if hasattr(replacement_dict_or_obj, 'cassette'):
        replacement_dict_or_obj = self._get_cassette_subclass(
            replacement_dict_or_obj)
    return replacement_dict_or_obj
One of the subtleties of this class is that it does not directly replace
HTTPSConnection with `VCRRequestsHTTPSConnection`, but a subclass of the
aforementioned class that has the `cassette` class attribute assigned to
`self._cassette`. This behavior is necessary to properly support nested
cassette contexts.

This function exists to ensure that we use the same class object (reference)
to patch everything that replaces VCRRequestHTTP[S]Connection, but that we
can talk about patching them with the raw references instead, and without
worrying about exactly where the subclass with the relevant value for
`cassette` is first created.

The function is recursive because it looks in to dictionaries and replaces
class values at any depth with the subclass described in the previous
paragraph.
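A minimal sketch of the per-cassette subclass pattern the docstring describes, using a toy base class rather than vcrpy's real connection classes:

class FakeHTTPSConnection:
    cassette = None

def get_cassette_subclass(base, cassette):
    # Build one subclass per cassette; the class attribute pins the cassette
    # without mutating the shared base class.
    return type(base.__name__, (base,), {'cassette': cassette})

Patched = get_cassette_subclass(FakeHTTPSConnection, cassette='outer-cassette')
print(Patched.cassette)                    # 'outer-cassette'
print(Patched is not FakeHTTPSConnection)  # True -- base stays pristine for nesting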
373,187
def copy_dir(
    src_fs,
    src_path,
    dst_fs,
    dst_path,
    walker=None,
    on_copy=None,
    workers=0,
):
    on_copy = on_copy or (lambda *args: None)
    walker = walker or Walker()
    _src_path = abspath(normpath(src_path))
    _dst_path = abspath(normpath(dst_path))

    def src():
        return manage_fs(src_fs, writeable=False)

    def dst():
        return manage_fs(dst_fs, create=True)

    from ._bulk import Copier

    with src() as _src_fs, dst() as _dst_fs:
        with _src_fs.lock(), _dst_fs.lock():
            _thread_safe = is_thread_safe(_src_fs, _dst_fs)
            with Copier(num_workers=workers if _thread_safe else 0) as copier:
                _dst_fs.makedir(_dst_path, recreate=True)
                for dir_path, dirs, files in walker.walk(_src_fs, _src_path):
                    copy_path = combine(_dst_path, frombase(_src_path, dir_path))
                    for info in dirs:
                        _dst_fs.makedir(info.make_path(copy_path), recreate=True)
                    for info in files:
                        src_path = info.make_path(dir_path)
                        dst_path = info.make_path(copy_path)
                        copier.copy(_src_fs, src_path, _dst_fs, dst_path)
                        on_copy(_src_fs, src_path, _dst_fs, dst_path)
Copy a directory from one filesystem to another.

Arguments:
    src_fs (FS or str): Source filesystem (instance or URL).
    src_path (str): Path to a directory on the source filesystem.
    dst_fs (FS or str): Destination filesystem (instance or URL).
    dst_path (str): Path to a directory on the destination filesystem.
    walker (~fs.walk.Walker, optional): A walker object that will be
        used to scan for files in ``src_fs``. Set this if you only want
        to consider a sub-set of the resources in ``src_fs``.
    on_copy (callable, optional): A function callback called after a
        single file copy is executed. Expected signature is
        ``(src_fs, src_path, dst_fs, dst_path)``.
    workers (int): Use ``worker`` threads to copy data, or ``0``
        (default) for a single-threaded copy.
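A usage sketch with two in-memory filesystems (requires the `fs` package; the paths and callback are illustrative):

from fs import open_fs
from fs.copy import copy_dir

src = open_fs('mem://')
src.makedirs('project/docs')
src.writetext('project/docs/readme.txt', 'hello')

dst = open_fs('mem://')
copy_dir(src, 'project', dst, 'backup',
         on_copy=lambda sfs, sp, dfs, dp: print('copied', sp, '->', dp))
print(dst.exists('backup/docs/readme.txt'))  # True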
373,188
def inverse_transform(self, X):
    check_is_fitted(self, "mean_")
    if self.whiten:
        return (
            da.dot(
                X,
                np.sqrt(self.explained_variance_[:, np.newaxis])
                * self.components_,
            )
            + self.mean_
        )
    else:
        return da.dot(X, self.components_) + self.mean_
Transform data back to its original space.

Returns an array X_original whose transform would be X.

Parameters
----------
X : array-like, shape (n_samples, n_components)
    New data, where n_samples is the number of samples
    and n_components is the number of components.

Returns
-------
X_original : array-like, shape (n_samples, n_features)

Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
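A round-trip sketch, assuming this is dask-ml's PCA (the API mirrors scikit-learn's):

import dask.array as da
from dask_ml.decomposition import PCA

X = da.random.random((100, 5), chunks=(50, 5))
pca = PCA(n_components=2).fit(X)
X_reduced = pca.transform(X)
X_back = pca.inverse_transform(X_reduced)  # shape (100, 5); lossy for k < d
print(X_back.shape)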
373,189
def _sumterm(lexer):
    xorterm = _xorterm(lexer)
    sumterm_prime = _sumterm_prime(lexer)
    if sumterm_prime is None:
        return xorterm
    else:
        # Operator tag reconstructed; the original literal was stripped.
        return ('or', xorterm, sumterm_prime)
Return a sum term expression.
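A toy illustration of the right-recursive pattern used above (SumTerm -> XorTerm SumTerm'), on plain token lists rather than the parser's real lexer and helpers:

def parse_sum(tokens):
    left = tokens.pop(0)            # stands in for _xorterm(lexer)
    tail = parse_sum_prime(tokens)  # stands in for _sumterm_prime(lexer)
    return left if tail is None else ('or', left, tail)

def parse_sum_prime(tokens):
    if not tokens or tokens[0] != '|':
        return None                 # empty production
    tokens.pop(0)
    return parse_sum(tokens)

print(parse_sum(['a', '|', 'b', '|', 'c']))  # ('or', 'a', ('or', 'b', 'c'))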
373,190
def consensus(aln, weights=None, gap_threshold=0.5, simple=False, trim_ends=True):
    if simple:
        col_consensus = make_simple_col_consensus(alnutils.aa_frequencies(aln))

        def is_majority_gap(col):
            # Gap character literal reconstructed; the original was stripped.
            return (float(col.count('-')) / len(col) >= gap_threshold)
    else:
        if weights is None:
            # Weighting-scheme argument was stripped in extraction; 'avg1'
            # is a best-effort reconstruction.
            seq_weights = alnutils.sequence_weights(aln, 'avg1')
        else:
            seq_weights = weights
        aa_frequencies = alnutils.aa_frequencies(aln, weights=seq_weights)
        col_consensus = make_entropy_col_consensus(aa_frequencies)

        def is_majority_gap(col):
            gap_count = 0.0
            for wt, char in zip(seq_weights, col):
                if char == '-':
                    gap_count += wt
            return (gap_count / sum(seq_weights) >= gap_threshold)

    def col_wise_consensus(columns):
        if not trim_ends:
            # Track flanking gap columns so they can still be emitted when
            # stretching the consensus to the alignment ends.
            in_left_end = True
            maybe_right_tail = []
        for col in columns:
            # All-lowercase columns are explicitly excluded from the consensus.
            if all(c.islower() for c in col if c not in '-.'):
                yield '-'
                continue
            if any(c.islower() for c in col):
                logging.warn('Mixed lowercase and uppercase letters in a column: '
                             + ''.join(col))
                col = list(map(str.upper, col))  # materialize so col can be re-iterated
            is_gap = is_majority_gap(col)
            # NOTE: the original selection of cons_char and the full
            # is_gap/in_left_end bookkeeping were truncated during extraction;
            # the lines below sketch the surviving control flow.
            cons_char = '-' if is_gap else col_consensus(col)
            if not trim_ends:
                if in_left_end:
                    if not is_gap:
                        in_left_end = False
                for char in maybe_right_tail:
                    yield char
                maybe_right_tail = []
            yield cons_char
        if not trim_ends:
            for char in maybe_right_tail:
                yield char

    return ''.join(col_wise_consensus(zip(*aln)))
Get the consensus of an alignment, as a string.

Emit gap characters for majority-gap columns; apply various strategies to
choose the consensus amino acid type for the remaining columns.

Parameters
----------
simple : bool
    If True, use simple plurality to determine the consensus amino acid
    type, without weighting sequences for similarity. Otherwise, weight
    sequences for similarity and use relative entropy to choose the
    consensus amino acid type.
weights : dict or None
    Sequence weights. If given, used to calculate amino acid frequencies;
    otherwise calculated within this function (i.e. this is a way to speed
    up the function if sequence weights have already been calculated).
    Ignored in 'simple' mode.
trim_ends : bool
    If False, stretch the consensus sequence to include the N- and C-tails
    of the alignment, even if those flanking columns are mostly gap
    characters. This avoids terminal gaps in the consensus (needed for
    MAPGAPS).
gap_threshold : float
    If the proportion of gap characters in a column is greater than or
    equal to this value (after sequence weighting, if applicable), then
    the consensus character emitted will be a gap instead of an amino acid
    type.
373,191
def save(self):
    with rconnect() as conn:
        try:
            self.validate()
        except ValidationError as e:
            log.warn(e.messages)
            raise
        except ModelValidationError as e:
            log.warn(e.messages)
            raise
        except ModelConversionError as e:
            log.warn(e.messages)
            raise
        except ValueError as e:
            log.warn(e)
            raise
        except FrinkError as e:
            log.warn(e.messages)
            raise
        except Exception as e:
            log.warn(e)
            raise
        else:
            # The actual database write was truncated during extraction; this
            # inner block sketches the surviving structure: a nested try that
            # performs the insert/update and logs the result as `rv`.
            try:
                rv = self._do_save(conn)  # hypothetical helper standing in for the lost call
                log.debug(rv)
            except Exception as e:
                log.warn(e)
                self.id = None
                raise
            else:
                return self
Save the current instance to the DB
373,192
def scan_dir(self, path):
    # Log message texts and the open mode below are reconstructed; the
    # original string literals were stripped in extraction.
    for fname in glob.glob(os.path.join(path, '*' + TABLE_EXT)):
        if os.path.isfile(fname):
            with open(fname, 'r') as fobj:
                try:
                    self.add_colortable(fobj,
                                        os.path.splitext(os.path.basename(fname))[0])
                    log.debug('Added colortable from file: %s', fname)
                except RuntimeError:
                    log.info('Skipping unparsable file: %s', fname)
r"""Scan a directory on disk for color table files and add them to the registry. Parameters ---------- path : str The path to the directory with the color tables
373,193
def setEmergencyDecel(self, vehID, decel):
    self._connection._sendDoubleCmd(
        tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EMERGENCY_DECEL, vehID, decel)
setEmergencyDecel(string, double) -> None

Sets the maximal physically possible deceleration in m/s^2 for this vehicle.
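A usage sketch inside a running TraCI session (requires SUMO; the config path and vehicle ID are hypothetical):

import traci

traci.start(["sumo", "-c", "scenario.sumocfg"])
traci.vehicle.setEmergencyDecel("veh0", 9.0)  # max physically possible decel, m/s^2
traci.close()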
373,194
def index():
    # Dict keys and template name reconstructed; the original string
    # literals were stripped in extraction.
    crawlers = []
    for crawler in manager:
        data = Event.get_counts(crawler)
        data['last_run'] = crawler.last_run
        data['total_ops'] = crawler.op_count
        data['running'] = crawler.is_running
        data['crawler'] = crawler
        crawlers.append(data)
    return render_template('index.html', crawlers=crawlers)
Generate a list of all crawlers, alphabetically, with op counts.
373,195
def triggered_token(self) -> Optional['CancelToken']:
    # Return annotation reconstructed (the original was stripped); the
    # method returns None when no token in the chain has triggered.
    if self._triggered.is_set():
        return self
    for token in self._chain:
        if token.triggered:
            # Recurse so a trigger deep in a nested chain is found.
            return token.triggered_token
    return None
Return the token which was triggered. The returned token may be this token or one that it was chained with.
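A hedged sketch of chained-token lookup. The API shown follows the `cancel-token` package (`chain` returns a combined token); exact names may differ in the library this entry comes from.

from cancel_token import CancelToken

parent = CancelToken('parent')
combined = CancelToken('child').chain(parent)

parent.trigger()
print(combined.triggered)                  # True -- inherited through the chain
print(combined.triggered_token is parent)  # True -- recursive lookup finds it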
373,196
def cli(ctx):
    click.secho("\nPyHardLinkBackup v%s\n" % PyHardLinkBackup.__version__,
                bg="blue", fg="white", bold=True)
PyHardLinkBackup
373,197
def _get_django_queryset(self):
    prefetches = []
    for field, fprefetch in self.prefetches.items():
        # Attribute name reconstructed from the usage on the next line.
        has_query = hasattr(fprefetch, 'query')
        qs = fprefetch.query.queryset if has_query else None
        prefetches.append(
            Prefetch(field, queryset=qs)
        )
    queryset = self.queryset
    if prefetches:
        queryset = queryset.prefetch_related(*prefetches)
    return queryset
Return Django QuerySet with prefetches properly configured.
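The underlying Django idiom this wraps, for reference (`Author`/`Book` are hypothetical models; `Prefetch` is the real Django API):

from django.db.models import Prefetch

queryset = Author.objects.prefetch_related(
    Prefetch('books', queryset=Book.objects.filter(published=True)),
    Prefetch('awards'),  # no custom queryset -> behaves like queryset=None above
)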
373,198
def main():
    # All argparse strings (flags, help texts, defaults) below were stripped
    # in extraction and are reconstructed from how `args` is used later;
    # treat the exact flag spellings and help texts as best-effort guesses.
    parser = argparse.ArgumentParser(
        description='Phenolog analysis: enrichment of one ontology against another',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-r', '--resource1', type=str, required=False,
                        help='Name of ontology1')
    parser.add_argument('-R', '--resource2', type=str, required=False,
                        help='Name of ontology2')
    parser.add_argument('-T', '--taxon', type=str, default='NCBITaxon:10090',
                        required=False, help='NCBITaxon ID')
    parser.add_argument('-s', '--search', type=str, default='',
                        required=False, help='Search type (e.g. regex)')
    parser.add_argument('-b', '--background', type=str, default=None,
                        required=False, help='Class to use as background')
    parser.add_argument('-p', '--pthreshold', type=float, default=0.05,
                        required=False, help='P-value threshold')
    parser.add_argument('-v', '--verbosity', default=0, action='count',
                        help='Increase output verbosity')
    parser.add_argument('ids', nargs='*')
    args = parser.parse_args()

    if args.verbosity >= 2:
        logging.basicConfig(level=logging.DEBUG)
    if args.verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    logging.info("Welcome!")

    ofactory = OntologyFactory()
    afactory = AssociationSetFactory()
    handle = args.resource1
    ont1 = ofactory.create(args.resource1)
    ont2 = ofactory.create(args.resource2)
    logging.info("onts: {} {}".format(ont1, ont2))

    searchp = args.search
    # Category literals reconstructed; originals were stripped.
    category = 'gene'
    aset1 = afactory.create(ontology=ont1, subject_category=category,
                            object_category='phenotype', taxon=args.taxon)
    aset2 = afactory.create(ontology=ont2, subject_category=category,
                            object_category='phenotype', taxon=args.taxon)

    bg_cls = None
    if args.background is not None:
        bg_ids = resolve(ont1, [args.background], searchp)
        if len(bg_ids) == 0:
            logging.error("Cannot resolve: {} using {} in {}".format(
                args.background, searchp, ont1))
            sys.exit(1)
        elif len(bg_ids) > 1:
            logging.error("Multiple matches: {} using {} MATCHES={}".format(
                args.background, searchp, bg_ids))
            sys.exit(1)
        else:
            [bg_cls] = bg_ids
            logging.info("Background: {}".format(bg_cls))

    for id in resolve(ont1, args.ids, searchp):
        sample = aset1.query([id], [])
        print("Gene set class:{} Gene set: {}".format(id, sample))
        bg = None
        if bg_cls is not None:
            bg = aset1.query([bg_cls], [])
            print("BACKGROUND SUBJECTS: {}".format(bg))
        rs = aset2.enrichment_test(sample, bg, threshold=args.pthreshold,
                                   labels=True)
        print("RESULTS: {} < {}".format(len(rs), args.pthreshold))
        for r in rs:
            print(str(r))
Phenologs
373,199
import warnings

import numpy as np

def timescales_from_eigenvalues(evals, tau=1):
    if not np.allclose(evals.imag, 0.0):
        # Warning message texts reconstructed; originals were stripped.
        warnings.warn('Using eigenvalues with non-zero imaginary part',
                      ImaginaryEigenValueWarning)
    ind_abs_one = np.isclose(np.abs(evals), 1.0, rtol=0.0, atol=1e-14)
    if sum(ind_abs_one) > 1:
        warnings.warn('Multiple eigenvalues with magnitude one',
                      SpectralWarning)
    ts = np.zeros(len(evals))
    ts[ind_abs_one] = np.inf
    ts[np.logical_not(ind_abs_one)] = \
        -1.0 * tau / np.log(np.abs(evals[np.logical_not(ind_abs_one)]))
    return ts
r"""Compute implied time scales from given eigenvalues Parameters ---------- evals : eigenvalues tau : lag time Returns ------- ts : ndarray The implied time scales to the given eigenvalues, in the same order.