code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def decode_body(cls, header, f):
    """Decode a `MqttPingresp` packet body from ``f``.

    Asserts that ``header.packet_type`` is ``pingresp``. A ping response
    carries no payload, so any remaining bytes are a protocol error.

    Parameters
    ----------
    header: MqttFixedHeader
    f: file
        Object with a read method.

    Raises
    ------
    DecodeError
        When there are extra bytes at the end of the packet.

    Returns
    -------
    int
        Number of bytes consumed from ``f`` (always 0).
    MqttPingresp
        Object extracted from ``f``.
    """
    assert header.packet_type == MqttControlPacketType.pingresp
    if header.remaining_len != 0:
        raise DecodeError('Extra bytes at end of packet.')
    num_bytes_consumed = 0
    return num_bytes_consumed, MqttPingresp()
Generates a `MqttPingresp` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pingresp`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPingresp Object extracted from ``f``.
def start_depth_socket(self, symbol, callback, depth=None):
    """Start a websocket for ``symbol`` market depth.

    Returns a diff stream by default; when ``depth`` is given (and not
    ``'1'``) a partial-book stream of that many levels is requested.

    :param symbol: required symbol string
    :param callback: callback function to handle messages
    :param depth: optional number of depth entries (str); if passed,
        a partial book is returned instead of a diff
    :returns: connection key string if successful, False otherwise
    """
    stream_name = '{}@depth'.format(symbol.lower())
    if depth and depth != '1':
        stream_name += depth
    return self._start_socket(stream_name, callback)
Start a websocket for symbol market depth returning either a diff or a partial book https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams :param symbol: required :type symbol: str :param callback: callback function to handle messages :type callback: function :param depth: optional Number of depth entries to return, default None. If passed returns a partial book instead of a diff :type depth: str :returns: connection key string if successful, False otherwise Partial Message Format .. code-block:: python { "lastUpdateId": 160, # Last update ID "bids": [ # Bids to be updated [ "0.0024", # price level to be updated "10", # quantity [] # ignore ] ], "asks": [ # Asks to be updated [ "0.0026", # price level to be updated "100", # quantity [] # ignore ] ] } Diff Message Format .. code-block:: python { "e": "depthUpdate", # Event type "E": 123456789, # Event time "s": "BNBBTC", # Symbol "U": 157, # First update ID in event "u": 160, # Final update ID in event "b": [ # Bids to be updated [ "0.0024", # price level to be updated "10", # quantity [] # ignore ] ], "a": [ # Asks to be updated [ "0.0026", # price level to be updated "100", # quantity [] # ignore ] ] }
def indentLine(self, block, autoIndent):
    """Compute the indent string for ``block``.

    Tries a sequence of increasingly generic indentation heuristics and
    returns the first non-None result. Falls back to the indent of the
    previous non-empty block when nothing matches.

    Fix: the original initialised ``indent = None`` and then immediately
    guarded the first heuristic with ``if indent is None`` — a redundant
    check that is removed here.
    """
    indent = self.tryMatchedAnchor(block, autoIndent)
    if indent is None:
        indent = self.tryCComment(block)
    # C++-style comment continuation only applies to explicit indents.
    if indent is None and not autoIndent:
        indent = self.tryCppComment(block)
    if indent is None:
        indent = self.trySwitchStatement(block)
    if indent is None:
        indent = self.tryAccessModifiers(block)
    if indent is None:
        indent = self.tryBrace(block)
    if indent is None:
        indent = self.tryCKeywords(block, block.text().lstrip().startswith('{'))
    if indent is None:
        indent = self.tryCondition(block)
    if indent is None:
        indent = self.tryStatement(block)
    if indent is not None:
        return indent
    dbg("Nothing matched")
    return self._prevNonEmptyBlockIndent(block)
Indent line. Return filler or null.
def users_list(self, *args):
    """Log the list of connected users, or a notice when none are connected."""
    if not self._users:
        self.log('No users connected')
    else:
        self.log(self._users, pretty=True)
Display a list of connected users
def stop(self, key):
    """Stop a concurrent operation tracked under ``key``.

    Fetches the concurrency limiter for ``key`` (the getter creates it if
    necessary), stops one operation on it, then cleans the limiter up
    (it is deleted when empty).
    """
    limiter = self._get_limiter(key)
    limiter.stop()
    self._cleanup_limiter(key)
Stop a concurrent operation. This gets the concurrency limiter for the given key (creating it if necessary) and stops a concurrent operation on it. If the concurrency limiter is empty, it is deleted.
def addNode(self, node):
    """Send an 'addnode' message for ``node`` to the NodeServer.

    :param node: node object exposing address, name, id, primary,
        drivers and hint attributes.
    """
    LOGGER.info('Adding node {}({})'.format(node.name, node.address))
    node_payload = {
        'address': node.address,
        'name': node.name,
        'node_def_id': node.id,
        'primary': node.primary,
        'drivers': node.drivers,
        'hint': node.hint,
    }
    self.send({'addnode': {'nodes': [node_payload]}})
Add a node to the NodeServer :param node: Dictionary of node settings. Keys: address, name, node_def_id, primary, and drivers are required.
def rowgroupmap(table, key, mapper, header=None, presorted=False,
                buffersize=None, tempdir=None, cache=True):
    """Group rows under ``key`` then apply ``mapper`` to yield zero or more
    output rows for each input group of rows.
    """
    view = RowGroupMapView(
        table,
        key,
        mapper,
        header=header,
        presorted=presorted,
        buffersize=buffersize,
        tempdir=tempdir,
        cache=cache,
    )
    return view
Group rows under the given key then apply `mapper` to yield zero or more output rows for each input group of rows.
def use_partial_data(self, sample_pct:float=0.01, seed:int=None)->'ItemList':
    "Use only a sample of `sample_pct` of the full dataset and an optional `seed`."
    if seed is not None:
        np.random.seed(seed)
    shuffled_idx = np.random.permutation(range_of(self))
    keep = int(sample_pct * len(self))
    return self[shuffled_idx[:keep]]
Use only a sample of `sample_pct` of the full dataset and an optional `seed`.
def get_git_postversion(addon_dir):
    """Return the addon version number, with a developmental increment when
    there were git commits in ``addon_dir`` after the last version change.

    If the last change to the addon corresponds to the manifest version, that
    version is used as-is. Otherwise a counter is incremented per commit and
    the result has the form ``<version>.99.devN``, N being the number of
    commits since the version change.

    Note: ``.99.devN`` is used because pip ignores ``.postN`` by design and
    ``x.y.z.devN`` sorts *before* ``x.y.z``.
    """
    addon_dir = os.path.realpath(addon_dir)
    last_version = read_manifest(addon_dir).get('version', '0.0.0')
    last_version_parsed = parse_version(last_version)
    # Not under git control: the manifest version is all we have.
    if not is_git_controlled(addon_dir):
        return last_version
    # Uncommitted changes count as one extra "commit".
    if get_git_uncommitted(addon_dir):
        uncommitted = True
        count = 1
    else:
        uncommitted = False
        count = 0
    last_sha = None
    git_root = get_git_root(addon_dir)
    # Walk the history newest-first until the manifest version changes
    # (or the manifest disappears).
    for sha in git_log_iterator(addon_dir):
        try:
            manifest = read_manifest_from_sha(sha, addon_dir, git_root)
        except NoManifestFound:
            break
        version = manifest.get('version', '0.0.0')
        version_parsed = parse_version(version)
        if version_parsed != last_version_parsed:
            break
        if last_sha is None:
            last_sha = sha
        else:
            count += 1
    if not count:
        return last_version
    if last_sha:
        return last_version + ".99.dev%s" % count
    if uncommitted:
        return last_version + ".dev1"
    return last_version
return the addon version number, with a developmental version increment if there were git commits in the addon_dir after the last version change. If the last change to the addon corresponds to the version number in the manifest it is used as is for the python package version. Otherwise a counter is incremented for each commit and resulting version number has the following form: [8|9].0.x.y.z.99.devN, N being the number of git commits since the version change. Note: we use .99.devN because: * pip ignores .postN by design (https://github.com/pypa/pip/issues/2872) * x.y.z.devN is anterior to x.y.z Note: we don't put the sha1 of the commit in the version number because this is not PEP 440 compliant and is therefore misinterpreted by pip.
def merge_sims(oldsims, newsims, clip=None):
    """Merge two precomputed similarity lists, truncating the result to the
    ``clip`` most similar items.

    Items are (id, score) pairs, merged in descending score order. Either
    input may be None.
    """
    if oldsims is None:
        merged = newsims or []
    elif newsims is None:
        merged = oldsims
    else:
        merged = sorted(oldsims + newsims,
                        key=lambda pair: pair[1], reverse=True)
    return merged if clip is None else merged[:clip]
Merge two precomputed similarity lists, truncating the result to `clip` most similar items.
def call_template_str(self, template):
    """Enforce the states in a template, passed as a string."""
    compiled = compile_template_str(
        template,
        self.rend,
        self.opts['renderer'],
        self.opts['renderer_blacklist'],
        self.opts['renderer_whitelist'])
    # An empty/falsy compile result is returned to the caller unchanged.
    if not compiled:
        return compiled
    rendered, errors = self.render_template(compiled, '<template-str>')
    if errors:
        return errors
    return self.call_high(rendered)
Enforce the states in a template, pass the template as a string
def apply(self, config, raise_on_unknown_key=True):
    """Apply additional configuration from a dictionary.

    Recursively merges ``config`` items onto the current configuration
    object; keys absent from the base config raise unless
    ``raise_on_unknown_key`` is False.
    """
    _recursive_merge(self._data, config, raise_on_unknown_key)
Apply additional configuration from a dictionary This will look for dictionary items that exist in the base_config any apply their values on the current configuration object
def event_list_tabs(counts, current_kind, page_number=1):
    """Build the context for the tabs linking to the event_list pages.

    ``counts`` maps each event kind to its number of events, e.g.
    ``{'all': 30, 'gig': 12, 'movie': 18}``. ``current_kind`` is the active
    kind ('gig', 'movie', ...), if any. ``page_number`` is the current page
    for that kind.
    """
    context = {
        'counts': counts,
        'current_kind': current_kind,
        'page_number': page_number,
    }
    context['event_kinds'] = Event.get_kinds()
    context['event_kinds_data'] = Event.get_kinds_data()
    return context
Displays the tabs to different event_list pages. `counts` is a dict of number of events for each kind, like: {'all': 30, 'gig': 12, 'movie': 18,} `current_kind` is the event kind that's active, if any. e.g. 'gig', 'movie', etc. `page_number` is the current page of this kind of events we're on.
def generate(env):
    """Add Builders and construction variables for dvipdf to an Environment."""
    # Module-level actions are created lazily, once per process.
    global PDFAction
    if PDFAction is None:
        PDFAction = SCons.Action.Action('$DVIPDFCOM', '$DVIPDFCOMSTR')
    global DVIPDFAction
    if DVIPDFAction is None:
        DVIPDFAction = SCons.Action.Action(DviPdfFunction, strfunction = DviPdfStrFunction)
    # Ensure the generic PDF builder exists before extending it for .dvi.
    from . import pdf
    pdf.generate(env)
    bld = env['BUILDERS']['PDF']
    bld.add_action('.dvi', DVIPDFAction)
    bld.add_emitter('.dvi', PDFEmitter)
    env['DVIPDF'] = 'dvipdf'
    env['DVIPDFFLAGS'] = SCons.Util.CLVar('')
    # dvipdf is run in the target directory so auxiliary files land there.
    env['DVIPDFCOM'] = 'cd ${TARGET.dir} && $DVIPDF $DVIPDFFLAGS ${SOURCE.file} ${TARGET.file}'
    env['PDFCOM'] = ['$DVIPDFCOM']
Add Builders and construction variables for dvipdf to an Environment.
def regroup(self, group_by=None):
    """Regroup items: re-add every stored item under ``group_by``
    (defaulting to the instance's current grouping key)."""
    key = group_by if group_by else self.group_by
    previous_groups = self.groups
    self.groups = {}
    for members in previous_groups.values():
        for item in members:
            self.add(item, key)
Regroup items.
def direct_horizontal_irradiance(self):
    """Returns the direct irradiance on a horizontal surface at each timestep.

    Note that this is different from the direct_normal_irradiance needed to
    construct a Wea, which is NORMAL and not HORIZONTAL: each normal value
    here is projected onto the horizontal plane using the solar altitude.
    """
    analysis_period = AnalysisPeriod(timestep=self.timestep, is_leap_year=self.is_leap_year)
    header_dhr = Header(data_type=DirectHorizontalIrradiance(), unit='W/m2',
                        analysis_period=analysis_period, metadata=self.metadata)
    direct_horiz = []
    sp = Sunpath.from_location(self.location)
    sp.is_leap_year = self.is_leap_year
    for dt, dnr in zip(self.datetimes, self.direct_normal_irradiance):
        sun = sp.calculate_sun_from_date_time(dt)
        # Project the normal irradiance onto the horizontal plane.
        direct_horiz.append(dnr * math.sin(math.radians(sun.altitude)))
    return HourlyContinuousCollection(header_dhr, direct_horiz)
Returns the direct irradiance on a horizontal surface at each timestep. Note that this is different from the direct_normal_irradiance needed to construct a Wea, which is NORMAL and not HORIZONTAL.
def _createValueObjects(self, valueList, varList, mapTable, indexMap, contaminant, replaceParamFile):
    """Populate GSSHAPY MTValue and MTIndex objects from parsed table rows.

    Each row in ``valueList`` yields one MTIndex plus one MTValue per
    variable/value pair, all linked back to ``mapTable`` and ``indexMap``.
    A 2-D ``values`` entry is interpreted as one value list per layer.
    """
    def assign_values_to_table(value_list, layer_id):
        # NOTE: reads mtIndex from the enclosing loop via closure, so it
        # must only be called after mtIndex is (re)bound for the row.
        for i, value in enumerate(value_list):
            # vrp resolves replacement parameters from the param file.
            value = vrp(value, replaceParamFile)
            mtValue = MTValue(variable=varList[i], value=float(value))
            mtValue.index = mtIndex
            mtValue.mapTable = mapTable
            mtValue.layer_id = layer_id
            if contaminant:
                mtValue.contaminant = contaminant
    for row in valueList:
        mtIndex = MTIndex(index=row['index'], description1=row['description1'], description2=row['description2'])
        mtIndex.indexMap = indexMap
        # A 2-D 'values' entry means one list of values per layer.
        if len(np.shape(row['values'])) == 2:
            for layer_id, values in enumerate(row['values']):
                assign_values_to_table(values, layer_id)
        else:
            assign_values_to_table(row['values'], 0)
Populate GSSHAPY MTValue and MTIndex Objects Method
def revoke_user_access(self, access_id):
    """Revoke a user's access given its ``access_id``.

    The id is typically obtained from the get_access_list structure.
    No return value; raises ValueError when the API call fails.
    """
    path = "/api/v3/publisher/user/access/revoke"
    payload = {
        'api_token': self.api_token,
        'access_id': access_id,
    }
    response = requests.get(self.base_url + path, data=payload)
    if response.status_code != 200:
        raise ValueError(path + ":" + response.reason)
Takes an access_id, probably obtained from the get_access_list structure, and revokes that access. No return value, but may raise ValueError.
def _write_subset_index_file(options, core_results): f_path = os.path.join(options['run_dir'], '_subset_index.csv') subset_strs = zip(*core_results)[0] index = np.arange(len(subset_strs)) + 1 df = pd.DataFrame({'subsets': subset_strs}, index=index) df.to_csv(f_path)
Write table giving index of subsets, giving number and subset string
def manipulate(self, stored_instance, component_instance):
    """Inject a logger into the component right after its instantiation.

    Called by iPOPO; this is the last chance to manipulate the component
    before the other handlers start.

    :param stored_instance: The iPOPO component StoredInstance
    :param component_instance: The component instance
    """
    self._logger = logging.getLogger(self._name)
    # Bind the logger to the configured field on the component.
    setattr(component_instance, self._field, self._logger)
Called by iPOPO right after the instantiation of the component. This is the last chance to manipulate the component before the other handlers start. :param stored_instance: The iPOPO component StoredInstance :param component_instance: The component instance
def initLogging():
    """Set logging defaults, then apply any ocrd_logging.py overrides found
    in the current directory, the home directory, or /etc."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s',
        datefmt='%H:%M:%S')
    logging.getLogger('').setLevel(logging.INFO)
    logging.getLogger('PIL').setLevel(logging.INFO)
    # Search order for the user/site override file.
    CONFIG_PATHS = [
        os.path.curdir,
        os.path.join(os.path.expanduser('~')),
        '/etc',
    ]
    for p in CONFIG_PATHS:
        config_file = os.path.join(p, 'ocrd_logging.py')
        if os.path.exists(config_file):
            logging.info("Loading logging configuration from '%s'", config_file)
            # SECURITY NOTE(review): executes arbitrary Python from the
            # config file — assumes it is trusted. Also, exec with locals()
            # inside a function cannot rebind this function's locals;
            # the config presumably only calls logging APIs — TODO confirm.
            with open(config_file) as f:
                code = compile(f.read(), config_file, 'exec')
                exec(code, globals(), locals())
Sets logging defaults
def best_item_from_list(item, options, fuzzy=90, fname_match=True, fuzzy_fragment=None, guess=False):
    """Return just the best-matching item from ``options``, or ``None``."""
    found = best_match_from_list(item, options, fuzzy, fname_match,
                                 fuzzy_fragment, guess)
    return found[0] if found else None
Returns just the best item, or ``None``
def find(self, instance_ids=None, filters=None):
    """Flatten a list of EC2 reservations to a list of instances.

    :param instance_ids: A list of instance ids to filter by
    :param filters: A dict of Filter.N values
    :return: A flattened list of filtered instances.
    """
    reservations = self.retry_on_ec2_error(
        self.ec2.get_all_instances,
        instance_ids=instance_ids,
        filters=filters)
    return [instance
            for reservation in reservations
            for instance in reservation.instances]
Flatten list of reservations to a list of instances. :param instance_ids: A list of instance ids to filter by :type instance_ids: list :param filters: A dict of Filter.N values defined in http://goo.gl/jYNej9 :type filters: dict :return: A flattened list of filtered instances. :rtype: list
def setEntry(self, entry=None):
    """Populate the dialog with the contents of an existing entry."""
    # Keep the busy indicator alive for the duration of this method.
    busy = Purr.BusyIndicator()
    self.entry = entry
    self.setEntryTitle(entry.title)
    comment_text = entry.comment.replace("\n", "\n\n").replace("<BR>", "\n")
    self.setEntryComment(comment_text)
    self.wdplv.clear()
    self.wdplv.fillDataProducts(entry.dps)
    self.setTimestamp(entry.timestamp)
    self.updated = False
Populates the dialog with contents of an existing entry.
def combine_types(types):
    """Given some types, return a combined and simplified type.

    For example, 'int' and 'List[int]' give Union[int, List[int]];
    'int' and 'int' give just 'int'.
    """
    simplified = simplify_types(types)
    return simplified[0] if len(simplified) == 1 else UnionType(simplified)
Given some types, return a combined and simplified type. For example, if given 'int' and 'List[int]', return Union[int, List[int]]. If given 'int' and 'int', return just 'int'.
def update_check(self, existing, new):
    """Decide whether a device state change should fire the callback.

    Theme-media playback never fires. Otherwise fires when either the old
    or the new state is playing, or on any other state transition.
    """
    old_state = existing.state
    raw = existing.session_raw
    old_theme = False
    if 'NowPlayingItem' in raw:
        old_theme = raw['NowPlayingItem'].get('IsThemeMedia', False)
    new_theme = False
    if 'NowPlayingItem' in new:
        if new['PlayState']['IsPaused']:
            new_state = STATE_PAUSED
        else:
            new_state = STATE_PLAYING
        new_theme = new['NowPlayingItem'].get('IsThemeMedia', False)
    else:
        new_state = STATE_IDLE
    if old_theme or new_theme:
        return False
    if STATE_PLAYING in (old_state, new_state):
        return True
    return old_state != new_state
Check device state to see if we need to fire the callback. True if either state is 'Playing' False if both states are: 'Paused', 'Idle', or 'Off' True on any state transition.
def query(
    self,
    url: Union[str, methods],
    data: Optional[MutableMapping] = None,
    headers: Optional[MutableMapping] = None,
    as_json: Optional[bool] = None,
) -> dict:
    """Query the slack API.

    Args:
        url: :class:`slack.methods` or url string
        data: JSON encodable MutableMapping
        headers: Custom headers
        as_json: Post JSON to the slack API
            (accepted for API compatibility; the request encoding is
            decided by prepare_request — TODO confirm)

    Returns:
        dictionary of slack API response data
    """
    prepared_url, body, prepared_headers = sansio.prepare_request(
        url=url,
        data=data,
        headers=headers,
        global_headers=self._headers,
        token=self._token,
    )
    return self._make_query(prepared_url, body, prepared_headers)
Query the slack API When using :class:`slack.methods` the request is made `as_json` if available Args: url: :class:`slack.methods` or url string data: JSON encodable MutableMapping headers: Custom headers as_json: Post JSON to the slack API Returns: dictionary of slack API response data
def off(self):
    """Turn all the output devices in this collection off."""
    for member in self:
        if isinstance(member, (OutputDevice, CompositeOutputDevice)):
            member.off()
Turn all the output devices off.
def get_option(option_name, section_name="main", default=_sentinel, cfg_file=cfg_file):
    """Return a specific option from a config file.

    Arguments:
        option_name -- Name of the option (example host_name)
        section_name -- Which section of the config (default: main)
        default -- Value returned via the parser's vars when the option
            is missing; when omitted, the section's defaults are used.

    Examples:
        >>> get_option("some option", default="default result")
        'default result'
    """
    defaults = get_defaults()
    if default is not _sentinel:
        my_defaults = {option_name: default}
    else:
        # Bug fix: previously looked up the literal string 'section_name'
        # instead of the section requested by the caller.
        my_defaults = defaults.get(section_name, {})
    parser = get_parser(cfg_file)
    return parser.get(section_name, option_name, vars=my_defaults)
Returns a specific option specific in a config file Arguments: option_name -- Name of the option (example host_name) section_name -- Which section of the config (default: name) examples: >>> get_option("some option", default="default result") 'default result'
def post(node_name, key, **kwargs):
    """Store a piece of data about a node on the server.

    Arguments:
        node_name -- node_name or token for the node this data belongs to
        key -- identifiable key, used later to retrieve the data
        kwargs -- the data to store

    Raises ValueError when no node matches ``node_name``.
    """
    node = nago.core.get_node(node_name)
    if not node:
        raise ValueError("Node named %s not found" % node_name)
    token = node.token
    # Bug fix: node_data[token] raised KeyError for a first-time token;
    # use get() so a missing entry starts out as an empty dict.
    node_data[token] = node_data.get(token) or {}
    node_data[token][key] = kwargs
    return "thanks!"
Give the server information about this node Arguments: node -- node_name or token for the node this data belongs to key -- identifiable key, that you use later to retrieve that piece of data kwargs -- the data you need to store
def appendPoint(self, position=None, type="line", smooth=False, name=None, identifier=None, point=None):
    """Append a point to the contour.

    When ``point`` is given, its attributes fill in any arguments that
    were not supplied explicitly.
    """
    if point is not None:
        if position is None:
            position = point.position
        type = point.type
        smooth = point.smooth
        if name is None:
            name = point.name
        # Bug fix: the condition was inverted ("is not None"), so the
        # point's identifier was only copied when the caller had already
        # supplied one — i.e. it was always lost.
        if identifier is None:
            identifier = point.identifier
    self.insertPoint(
        len(self.points),
        position=position,
        type=type,
        smooth=smooth,
        name=name,
        identifier=identifier
    )
Append a point to the contour.
def report_saved(report_stats):
    """Record the percent saved & print it (only in verbose mode)."""
    if not Settings.verbose:
        return
    truncated_filename = truncate_cwd(report_stats.final_filename)
    report = '{}: '.format(truncated_filename)
    total = new_percent_saved(report_stats)
    report += total if total else '0%'
    if Settings.test:
        report += ' could be saved.'
    # At higher verbosity, append the per-tool breakdown on its own line.
    if Settings.verbose > 1:
        tools_report = ', '.join(report_stats.report_list)
        if tools_report:
            report += '\n\t' + tools_report
    print(report)
Record the percent saved & print it.
def main():
    """Example application that watches for an event from a specific RF device.

    Useful with an RF receiver for internal sensors, which don't emit a
    FAULT when tripped while the panel is armed STAY, and for monitoring
    sensors that aren't configured. Requires an RF receiver installed and
    enabled in the panel.
    """
    try:
        device = AlarmDecoder(SerialDevice(interface=SERIAL_DEVICE))
        # Fire handle_rfx on every RFX (RF expander) message.
        device.on_rfx_message += handle_rfx
        with device.open(baudrate=BAUDRATE):
            # Idle loop; the device callbacks do the actual work.
            while True:
                time.sleep(1)
    except Exception as ex:
        print('Exception:', ex)
Example application that watches for an event from a specific RF device. This feature allows you to watch for events from RF devices if you have an RF receiver. This is useful in the case of internal sensors, which don't emit a FAULT if the sensor is tripped and the panel is armed STAY. It also will monitor sensors that aren't configured. NOTE: You must have an RF receiver installed and enabled in your panel for RFX messages to be seen.
def get_action_handler(self, controller_name, action_name):
    """Return the controller action as a callable.

    When the requested action isn't found, falls back to the controller's
    'not_found' action, then the Index controller's, and finally None.
    """
    candidates = (
        controller_name + '/' + action_name,
        controller_name + '/not_found',
        'index/not_found',
    )
    for candidate in candidates:
        if candidate in self._controllers:
            return self._controllers[candidate]
    return None
Return action of controller as callable. If requested controller isn't found - return 'not_found' action of requested controller or Index controller.
def get_expected_bindings(self):
    """Query the neutron DB for SG->switch interface bindings.

    Bindings are returned as a dict keyed by switch, each value a set of
    (interface, acl_name, direction) tuples covering both ingress and
    egress for every security group bound on that interface.
    """
    sg_bindings = db_lib.get_baremetal_sg_bindings()
    all_expected_bindings = collections.defaultdict(set)
    for sg_binding, port_binding in sg_bindings:
        sg_id = sg_binding['security_group_id']
        try:
            binding_profile = json.loads(port_binding.profile)
        except ValueError:
            # Malformed or empty profile: treat as having no switchports.
            binding_profile = {}
        switchports = self._get_switchports(binding_profile)
        for switch, intf in switchports:
            ingress_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION)
            egress_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION)
            all_expected_bindings[switch].add(
                (intf, ingress_name, a_const.INGRESS_DIRECTION))
            all_expected_bindings[switch].add(
                (intf, egress_name, a_const.EGRESS_DIRECTION))
    return all_expected_bindings
Query the neutron DB for SG->switch interface bindings Bindings are returned as a dict of bindings for each switch: {<switch1>: set([(intf1, acl_name, direction), (intf2, acl_name, direction)]), <switch2>: set([(intf1, acl_name, direction)]), ..., }
def buttons(self, master):
    """Add a standard button box (Next/OK/Cancel) with key bindings.

    Override if you do not want the standard buttons.
    """
    box = tk.Frame(master)
    specs = (
        ("Next", self.next_day, {}),
        ("OK", self.ok, {'default': tk.ACTIVE}),
        ("Cancel", self.cancel, {}),
    )
    for label, command, extra in specs:
        button = ttk.Button(box, text=label, width=10, command=command, **extra)
        button.pack(side=tk.LEFT, padx=5, pady=5)
    self.bind("n", self.next_day)
    self.bind("<Return>", self.ok)
    self.bind("<Escape>", self.cancel)
    box.pack()
Add a standard button box. Override if you do not want the standard buttons
def rm_missing_values_table(d):
    """Remove the missingValue key & data from each table column.

    :param dict d: Metadata (table)
    :return dict d: Metadata (table)
    """
    try:
        columns = d["columns"]
        for name, column in columns.items():
            columns[name] = rm_keys_from_dict(column, ["missingValue"])
    except Exception:
        # Best-effort: malformed metadata is returned untouched.
        pass
    return d
Loop for each table column and remove the missingValue key & data :param dict d: Metadata (table) :return dict d: Metadata (table)
def set_name_email(configurator, question, answer):
    """Compose the '"Full Name" <email@eg.com>' author string and store it."""
    full_name = configurator.variables['author.name']
    author_string = '"{0}" <{1}>'.format(full_name, answer)
    configurator.variables['author.name_email'] = author_string
    return answer
prepare "Full Name" <email@eg.com>" string
def register_custom_actions(parser: argparse.ArgumentParser) -> None:
    """Register custom argument action types on ``parser``."""
    for action_name, action_cls in (
        (None, _StoreRangeAction),
        ('store', _StoreRangeAction),
        ('append', _AppendRangeAction),
    ):
        parser.register('action', action_name, action_cls)
Register custom argument action types
def start_container(self, image, container_name: str, repo_path: Path):
    """Start a container named ``container_name`` from ``image`` and copy
    the repository plus the runner scripts into it.

    :type image: docker.models.images.Image
    :rtype: docker.models.container.Container
    """
    command = "bash -i"
    if self.inherit_image:
        # Inherited base images may not ship bash; fall back to plain sh.
        command = "sh -i"
    container = self.client.containers.run(image, command=command, detach=True, tty=True, name=container_name, working_dir=str((Path("/srv/data") / self.cwd).resolve()), auto_remove=True)
    container.exec_run(["mkdir", "-p", "/srv/scripts"])
    # Repository contents go under /srv (as a tar), runner scripts to /srv/scripts.
    container.put_archive("/srv", self.tar_files(repo_path))
    container.put_archive("/srv/scripts", self.tar_runner())
    return container
Starts a container with the image and name ``container_name`` and copies the repository into the container. :type image: docker.models.images.Image :rtype: docker.models.container.Container
def request_token(self):
    """Return (authorize_url, request_token, request_secret)."""
    logging.debug("Getting request token from %s:%d", self.server, self.port)
    token, secret = self._token("/oauth/requestToken")
    authorize_url = "{}/oauth/authorize?oauth_token={}".format(self.host, token)
    return authorize_url, token, secret
Returns url, request_token, request_secret
def roots(expr, types=(ops.PhysicalTable,)):
    """Yield every node of a particular type on which an expression depends.

    Parameters
    ----------
    expr : Expr
        The expression to analyze
    types : tuple(type), optional
        The node types to traverse; defaults to physical tables.

    Yields
    ------
    table : Expr
        Unique node types on which the expression depends
    """
    # Seed the stack with the expression's own root tables; reversed so the
    # stack-based search pops them in original order.
    stack = [
        arg.to_expr() for arg in reversed(expr.op().root_tables())
        if isinstance(arg, types)
    ]
    def extender(op):
        # Expand a node into the root tables of all its flattened
        # arguments, again reversed for in-order traversal.
        return reversed(
            list(
                itertools.chain.from_iterable(
                    arg.op().root_tables()
                    for arg in op.flat_args()
                    if isinstance(arg, types)
                )
            )
        )
    return _search_for_nodes(stack, extender, types)
Yield every node of a particular type on which an expression depends. Parameters ---------- expr : Expr The expression to analyze types : tuple(type), optional, default (:mod:`ibis.expr.operations.PhysicalTable`,) The node types to traverse Yields ------ table : Expr Unique node types on which an expression depends Notes ----- If your question is: "What nodes of type T does `expr` depend on?", then you've come to the right place. By default, we yield the physical tables that an expression depends on.
def get_checksum32(oqparam, hazard=False):
    """Build an unsigned 32 bit integer from the input files of a calculation.

    :param oqparam: an OqParam instance
    :param hazard: if True, consider only the hazard files
    :returns: the checksum
    """
    # Fold every input file into the running checksum.
    checksum = 0
    for fname in get_input_files(oqparam, hazard):
        checksum = _checksum(fname, checksum)
    if hazard:
        # For hazard-only checksums, also fold in the hazard-relevant
        # parameters so a parameter change invalidates the checksum.
        hazard_params = []
        for key, val in vars(oqparam).items():
            if key in ('rupture_mesh_spacing', 'complex_fault_mesh_spacing',
                       'width_of_mfd_bin', 'area_source_discretization',
                       'random_seed', 'ses_seed', 'truncation_level',
                       'maximum_distance', 'investigation_time',
                       'number_of_logic_tree_samples', 'imtls',
                       'ses_per_logic_tree_path', 'minimum_magnitude',
                       'prefilter_sources', 'sites', 'pointsource_distance',
                       'filter_distance'):
                hazard_params.append('%s = %s' % (key, val))
        data = '\n'.join(hazard_params).encode('utf8')
        # adler32 continues from the running checksum; mask to unsigned 32 bit.
        checksum = zlib.adler32(data, checksum) & 0xffffffff
    return checksum
Build an unsigned 32 bit integer from the input files of a calculation. :param oqparam: an OqParam instance :param hazard: if True, consider only the hazard files :returns: the checksum
def menu_weekly(self, building_id):
    """Get an array of menu objects corresponding to the weekly menu for the
    venue with ``building_id``.

    :param building_id: A string representing the id of a building, e.g. "593".

    >>> commons_week = din.menu_weekly("593")
    """
    din = DiningV2(self.bearer, self.token)
    response = {'result_data': {'Document': {}}}
    days = []
    # Fetch one day of menu at a time, for today plus the next six days.
    for i in range(7):
        date = str(datetime.date.today() + datetime.timedelta(days=i))
        v2_response = din.menu(building_id, date)
        # Prefer the canonical venue name when we know it; otherwise take
        # the name from the API response.
        if building_id in VENUE_NAMES:
            response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
        else:
            response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
        # %-m drops the leading zero from the month (POSIX strftime extension).
        formatted_date = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%-m/%d/%Y')
        days.append({"tblDayPart": get_meals(v2_response, building_id), "menudate": formatted_date})
    response["result_data"]["Document"]["tblMenu"] = days
    return normalize_weekly(response)
Get an array of menu objects corresponding to the weekly menu for the venue with building_id. :param building_id: A string representing the id of a building, e.g. "abc". >>> commons_week = din.menu_weekly("593")
def _get_source(link): if link.startswith("http://") or link.startswith("https://"): down = httpkie.Downloader() return down.download(link) if os.path.exists(link): with open(link) as f: return f.read() raise UserWarning("html: '%s' is neither URL or data!" % link)
Return source of the `link` whether it is filename or url. Args: link (str): Filename or URL. Returns: str: Content. Raises: UserWarning: When the `link` couldn't be resolved.
def _ReadSequenceDataTypeDefinition(
    self, definitions_registry, definition_values, definition_name,
    is_member=False):
    """Reads a sequence data type definition.

    Args:
        definitions_registry (DataTypeDefinitionsRegistry): data type
            definitions registry.
        definition_values (dict[str, object]): definition values.
        definition_name (str): name of the definition.
        is_member (Optional[bool]): True if the data type definition is a
            member data type definition.

    Returns:
        SequenceDefinition: sequence data type definition.

    Raises:
        DefinitionReaderError: if the definitions values are missing or if
            the format is incorrect.
    """
    if is_member:
        supported_values = (
            self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
    else:
        supported_values = (
            self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)
    return self._ReadElementSequenceDataTypeDefinition(
        definitions_registry, definition_values,
        data_types.SequenceDefinition, definition_name, supported_values)
Reads a sequence data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: SequenceDefinition: sequence data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
def _list_element_starts_with(items, needle): for item in items: if item.startswith(needle): return True return False
True if any of the list elements starts with needle
def _create(self):
    """Create the Whisper database file on disk."""
    db_dir = settings.SALMON_WHISPER_DB_PATH
    if not os.path.exists(db_dir):
        os.makedirs(db_dir)
    retentions = [whisper.parseRetentionDef(definition)
                  for definition in settings.ARCHIVES.split(",")]
    whisper.create(self.path, retentions,
                   xFilesFactor=settings.XFILEFACTOR,
                   aggregationMethod=settings.AGGREGATION_METHOD)
Create the Whisper file on disk
def field2type_and_format(self, field):
    """Return the dictionary of OpenAPI type and format for a field.

    Walks the field's MRO to find the most specific mapped class; emits a
    warning and defaults to a plain string when nothing matches.

    :param Field field: A marshmallow field.
    :rtype: dict
    """
    type_, fmt = "string", None
    for field_class in type(field).__mro__:
        if field_class in self.field_mapping:
            type_, fmt = self.field_mapping[field_class]
            break
    else:
        warnings.warn(
            "Field of type {} does not inherit from marshmallow.Field.".format(
                type(field)
            ),
            UserWarning,
        )
    result = {"type": type_}
    if fmt:
        result["format"] = fmt
    return result
Return the dictionary of OpenAPI type and format based on the field type :param Field field: A marshmallow field. :rtype: dict
def contains_entry(self, key, value):
    """Return whether the multimap contains an entry with the value.

    :param key: (object), the specified key.
    :param value: (object), the specified value.
    :return: (bool), ``true`` if this multimap contains the key-value tuple.
    """
    check_not_none(key, "key can't be None")
    check_not_none(value, "value can't be None")
    key_data = self._to_data(key)
    value_data = self._to_data(value)
    return self._encode_invoke_on_key(
        multi_map_contains_entry_codec, key_data,
        key=key_data, value=value_data, thread_id=thread_id())
Returns whether the multimap contains an entry with the value. :param key: (object), the specified key. :param value: (object), the specified value. :return: (bool), ``true`` if this multimap contains the key-value tuple.
def cylinder_inertia(mass, radius, height, transform=None):
    """Return the inertia tensor of a cylinder.

    Parameters
    ------------
    mass : float
        Mass of cylinder
    radius : float
        Radius of cylinder
    height : float
        Height of cylinder
    transform : (4,4) float
        Transformation of cylinder

    Returns
    ------------
    inertia : (3,3) float
        Inertia tensor
    """
    h2 = height ** 2
    r2 = radius ** 2
    # Principal moments: two lateral axes and one axial axis.
    lateral = ((mass * h2) / 12) + ((mass * r2) / 4)
    axial = (mass * r2) / 2
    inertia = np.eye(3) * np.array([lateral, lateral, axial])
    if transform is not None:
        inertia = transform_inertia(transform, inertia)
    return inertia
Return the inertia tensor of a cylinder. Parameters ------------ mass : float Mass of cylinder radius : float Radius of cylinder height : float Height of cylinder transform : (4,4) float Transformation of cylinder Returns ------------ inertia : (3,3) float Inertia tensor
def blk_coverage_1d(blk, size):
    """Return the part of a 1d array covered by whole blocks.

    :param blk: size of the 1d block
    :param size: size of the 1d image
    :return: a tuple of size covered and remaining size

    Example:

        >>> blk_coverage_1d(7, 100)
        (98, 2)
    """
    remainder = size % blk
    covered = size - remainder
    return covered, remainder
Return the part of a 1d array covered by a block. :param blk: size of the 1d block :param size: size of the 1d a image :return: a tuple of size covered and remaining size Example: >>> blk_coverage_1d(7, 100) (98, 2)
def load(self):
    """Load the shop's name and inventory from its Neopets page."""
    page = self.usr.getPage(
        "http://www.neopets.com/objects.phtml?type=shop&obj_type=" + self.id)
    self.name = page.find("td", "contentModuleHeader").text.strip()
    self.inventory = MainShopInventory(self.usr, self.id)
Loads the shop name and inventory
def as_binning(obj, copy: bool = False) -> BinningBase:
    """Ensure that an object is a binning.

    Parameters
    ----------
    obj : BinningBase or array_like
        Can be a binning, numpy-like bins or full physt bins
    copy : If true, ensure that the returned object is independent
    """
    if not isinstance(obj, BinningBase):
        return StaticBinning(make_bin_array(obj))
    return obj.copy() if copy else obj
Ensure that an object is a binning Parameters --------- obj : BinningBase or array_like Can be a binning, numpy-like bins or full physt bins copy : If true, ensure that the returned object is independent
def pprint(self):
    """Return tag key=value pairs, one per line, sorted by key."""
    return u"\n".join(
        u"%s=%s" % (key, value.pprint())
        for key, value in sorted(self.items()))
Return tag key=value pairs in a human-readable format.
def add_filter(self, ftype, func):
    """Register a new output filter.

    Whenever bottle hits a handler output matching `ftype`, `func` is
    applied to it. Replaces any existing filter registered for the same
    type.
    """
    if not isinstance(ftype, type):
        raise TypeError("Expected type object, got %s" % type(ftype))
    self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype]
    self.castfilter.append((ftype, func))
    # Bug fix: sorting (type, func) tuples directly raises TypeError on
    # Python 3 (types and functions are not orderable). Sort by the
    # type's name for a deterministic order instead.
    self.castfilter.sort(key=lambda pair: pair[0].__name__)
Register a new output filter. Whenever bottle hits a handler output matching `ftype`, `func` is applyed to it.
def command_max_burst_count(self, event=None):
    """Sync the max CPU burst op count widget with runtime_cfg.

    Invalid or non-positive entries fall back to the current config value,
    which is then written back into the widget variable.
    """
    try:
        requested = self.max_burst_count_var.get()
    except ValueError:
        requested = self.runtime_cfg.max_burst_count
    if requested < 1:
        requested = self.runtime_cfg.max_burst_count
    self.runtime_cfg.max_burst_count = requested
    self.max_burst_count_var.set(self.runtime_cfg.max_burst_count)
max CPU burst op count - self.runtime_cfg.max_burst_count
def _separate_hdxobjects(self, hdxobjects, hdxobjects_name, id_field, hdxobjectclass):
    """Move HDX objects from the internal dictionary into ``hdxobjects``.

    Objects already present in ``hdxobjects`` (matched on ``id_field``)
    have their metadata merged in place; the rest are appended as new
    ``hdxobjectclass`` instances. The source list in the internal
    dictionary is then deleted.

    Args:
        hdxobjects (List[T <= HDXObject]): list of HDX objects to which to
            add new objects or update existing ones
        hdxobjects_name (str): key in the internal dictionary holding the
            incoming list of HDX objects
        id_field (str): field used to match existing objects
        hdxobjectclass (type): type of the HDX Object to be added/updated

    Returns:
        None
    """
    new_hdxobjects = self.data.get(hdxobjects_name, list())
    if new_hdxobjects:
        hdxobject_names = set()
        for hdxobject in hdxobjects:
            hdxobject_name = hdxobject[id_field]
            hdxobject_names.add(hdxobject_name)
            # Merge updated metadata into the matching existing object.
            for new_hdxobject in new_hdxobjects:
                if hdxobject_name == new_hdxobject[id_field]:
                    merge_two_dictionaries(hdxobject, new_hdxobject)
                    break
        # Anything not matched above is appended as a brand-new object.
        for new_hdxobject in new_hdxobjects:
            if not new_hdxobject[id_field] in hdxobject_names:
                hdxobjects.append(hdxobjectclass(new_hdxobject, configuration=self.configuration))
        del self.data[hdxobjects_name]
Helper function to take a list of HDX objects contained in the internal dictionary and add them to a supplied list of HDX objects or update existing metadata if any objects already exist in the list. The list in the internal dictionary is then deleted. Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones hdxobjects_name (str): Name of key in internal dictionary from which to obtain list of HDX objects id_field (str): Field on which to match to determine if object already exists in list hdxobjectclass (type): Type of the HDX Object to be added/updated Returns: None
def get_trial_info(current_trial):
    """Get job information for the current trial as a dict."""
    # End times recorded as 'YYYY-mm-dd_HH-MM-SS' are normalised to use a
    # space separator; other values pass through unchanged.
    if current_trial.end_time and ("_" in current_trial.end_time):
        time_obj = datetime.datetime.strptime(current_trial.end_time, "%Y-%m-%d_%H-%M-%S")
        end_time = time_obj.strftime("%Y-%m-%d %H:%M:%S")
    else:
        end_time = current_trial.end_time
    # SECURITY NOTE(review): eval() on stored metric/param strings executes
    # arbitrary code if the store is ever tampered with — consider
    # ast.literal_eval. Left as-is to preserve behaviour.
    if current_trial.metrics:
        metrics = eval(current_trial.metrics)
    else:
        metrics = None
    trial_info = {
        "trial_id": current_trial.trial_id,
        "job_id": current_trial.job_id,
        "trial_status": current_trial.trial_status,
        "start_time": current_trial.start_time,
        "end_time": end_time,
        "params": eval(current_trial.params.encode("utf-8")),
        "metrics": metrics
    }
    return trial_info
Get job information for current trial.
def send(self, data, flags=0):
    """Send ``data`` to the socket.

    The socket must be connected to a remote socket. Returns a boolean
    indicating success; a false value typically means the socket or
    connection was closed.
    """
    return self.llc.send(self._tco, data, flags)
Send data to the socket. The socket must be connected to a remote socket. Returns a boolean value that indicates success or failure. A false value is typically an indication that the socket or connection was closed.
def l2_norm(params):
    """Flatten ``params`` into a vector v and return dot(v, v)
    (the squared l2 norm)."""
    vec, _ = flatten(params)
    return np.dot(vec, vec)
Computes l2 norm of params by flattening them into a vector.
async def get_user_groups(request):
    """Return the set of groups the requesting user has access to.

    The user id is resolved via ``get_auth`` and handed to the installed
    ACL callback. The result is augmented with ``Group.Everyone`` (always)
    and, for authenticated users, ``Group.AuthenticatedUser`` plus the
    user id itself.

    Args:
        request: aiohttp Request object.

    Returns:
        None when the ACL callback returns None; otherwise a set of groups.

    Raises:
        RuntimeError: if the acl_middleware is not installed.
    """
    acl_callback = request.get(GROUPS_KEY)
    if acl_callback is None:
        raise RuntimeError('acl_middleware not installed')
    user_id = await get_auth(request)
    groups = await acl_callback(user_id)
    if groups is None:
        return None
    if user_id is None:
        user_groups = ()
    else:
        user_groups = (Group.AuthenticatedUser, user_id)
    return set(itertools.chain(groups, (Group.Everyone,), user_groups))
Returns the groups that the user in this request has access to.

This function gets the user id from the auth.get_auth function, and passes it to the ACL callback function to get the groups.

Args:
    request: aiohttp Request object

Returns:
    If the ACL callback function returns None, this function returns None. Otherwise this function returns the sequence of group permissions provided by the callback, plus the Everyone group. If user_id is not None, the AuthenticatedUser group and the user_id are added to the groups returned by the function

Raises:
    RuntimeError: If the ACL middleware is not installed
def loadFile(self, fileName):
    """Display the file ``fileName``.

    Loads the file contents into the Scintilla widget and resets its undo
    stack; when the file does not exist, logs an informational message.
    """
    self.fileName = fileName
    self.file = QtCore.QFile(fileName)
    if self.file.exists():
        # Use a context manager so the handle is always closed
        # (the original leaked the object returned by open()).
        with open(fileName) as handle:
            self.qteScintilla.setText(handle.read())
        self.qteScintilla.qteUndoStack.reset()
    else:
        # Bug fix: the message previously interpolated self.qteAppletID()
        # instead of the name of the missing file.
        msg = "File <b>{}</b> does not exist".format(fileName)
        self.qteLogger.info(msg)
Display the file ``fileName``.
def move_file(
    src_fs,
    src_path,
    dst_fs,
    dst_path,
):
    """Move a file from one filesystem to another.

    Arguments:
        src_fs (FS or str): Source filesystem (instance or URL).
        src_path (str): Path to a file on ``src_fs``.
        dst_fs (FS or str): Destination filesystem (instance or URL).
        dst_path (str): Path to a file on ``dst_fs``.
    """
    with manage_fs(src_fs) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            if _src_fs is _dst_fs:
                # Same filesystem object: use the native (cheaper) move.
                _src_fs.move(src_path, dst_path, overwrite=True)
            else:
                # Different filesystems: lock both, copy, then delete source.
                with _src_fs.lock(), _dst_fs.lock():
                    copy_file(_src_fs, src_path, _dst_fs, dst_path)
                    _src_fs.remove(src_path)
Move a file from one filesystem to another.

Arguments:
    src_fs (FS or str): Source filesystem (instance or URL).
    src_path (str): Path to a file on ``src_fs``.
    dst_fs (FS or str): Destination filesystem (instance or URL).
    dst_path (str): Path to a file on ``dst_fs``.
def stop_experiment(args):
    """Stop the running experiment(s) identified by *args*.

    For each experiment id: kills the REST server process and any
    TensorBoard processes recorded in its config, then marks the
    experiment STOPPED and records the end time.
    """
    experiment_id_list = parse_ids(args)
    if experiment_id_list:
        experiment_config = Experiments()
        experiment_dict = experiment_config.get_all_experiments()
        for experiment_id in experiment_id_list:
            # Typo fix in user-facing message: 'Stoping' -> 'Stopping'.
            print_normal('Stopping experiment %s' % experiment_id)
            nni_config = Config(experiment_dict[experiment_id]['fileName'])
            # (removed unused read of 'restServerPort')
            rest_pid = nni_config.get_config('restServerPid')
            if rest_pid:
                kill_command(rest_pid)
            tensorboard_pid_list = nni_config.get_config('tensorboardPidList')
            if tensorboard_pid_list:
                for tensorboard_pid in tensorboard_pid_list:
                    try:
                        kill_command(tensorboard_pid)
                    except Exception as exception:
                        # Best-effort: report but keep killing the rest.
                        print_error(exception)
                nni_config.set_config('tensorboardPidList', [])
            print_normal('Stop experiment success!')
            experiment_config.update_experiment(experiment_id, 'status', 'STOPPED')
            time_now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            experiment_config.update_experiment(experiment_id, 'endTime', str(time_now))
Stop the experiment which is running
def export_process_to_csv(bpmn_diagram, directory, filename):
    """Root method of CSV export functionality.

    :param bpmn_diagram: an instance of BpmnDiagramGraph class,
    :param directory: a string object, which is a path of output directory,
    :param filename: a string object, which is a name of output file.
    :raises bpmn_exception.BpmnPythonError: if the diagram does not contain
        exactly one start event (a node with no incoming flows).
    """
    nodes = copy.deepcopy(bpmn_diagram.get_nodes())
    start_nodes = []
    export_elements = []

    # A start event is any node with no incoming sequence flows.
    for node in nodes:
        incoming_list = node[1].get(consts.Consts.incoming_flow)
        if len(incoming_list) == 0:
            start_nodes.append(node)
    if len(start_nodes) != 1:
        raise bpmn_exception.BpmnPythonError("Exporting to CSV format accepts only one start event")

    nodes_classification = utils.BpmnImportUtils.generate_nodes_clasification(bpmn_diagram)
    start_node = start_nodes.pop()
    BpmnDiagramGraphCsvExport.export_node(bpmn_diagram, export_elements, start_node, nodes_classification)

    try:
        os.makedirs(directory)
    except OSError as exception:
        # Directory already existing is fine; anything else is a real error.
        if exception.errno != errno.EEXIST:
            raise

    # Bug fix: join the path components properly (plain 'directory + filename'
    # broke when directory lacked a trailing separator) and use a context
    # manager so the file is closed even if writing raises.
    with open(os.path.join(directory, filename), "w") as file_object:
        file_object.write("Order,Activity,Condition,Who,Subprocess,Terminated\n")
        BpmnDiagramGraphCsvExport.write_export_node_to_file(file_object, export_elements)
Root method of CSV export functionality. :param bpmn_diagram: an instance of BpmnDiagramGraph class, :param directory: a string object, which is a path of output directory, :param filename: a string object, which is a name of output file.
def cover(self, pageid):
    """Get a cover image URL given a page id.

    :param str pageid: the pageid for the light novel to get a cover for
    :return str: the image url
    """
    # First query: resolve the page's lead image name.
    r = requests.get(self.api, params={'action': 'query',
                                       'prop': 'pageimages',
                                       'pageids': pageid,
                                       'format': 'json'},
                     headers=self.header)
    jsd = r.json()
    image = "File:" + jsd['query']['pages'][str(pageid)]['pageimage']
    # Second query: resolve that image title to a direct URL.
    r = requests.get(self.api, params={'action': 'query',
                                       'prop': 'imageinfo',
                                       'iiprop': 'url',
                                       'titles': image,
                                       'format': 'json'},
                     headers=self.header)
    jsd = r.json()
    # The 'pages' dict is keyed by an id we don't know up front,
    # so take whatever the first key is.
    return jsd['query']['pages'][list(jsd['query']['pages'].keys())[0]]['imageinfo'][0]['url']
Get a cover image given a page id. :param str pageid: The pageid for the light novel you want a cover image for :return str: the image url
def multimask_images(images: Iterable[SpatialImage],
                     masks: Sequence[np.ndarray], image_type: type = None
                     ) -> Iterable[Sequence[np.ndarray]]:
    """Lazily apply every mask in *masks* to each image in *images*.

    Parameters
    ----------
    images: Images to mask.
    masks: Masks to apply.
    image_type: Type to cast images to.

    Yields
    ------
    Sequence[np.ndarray]
        For each input image, the list of masked images (one per mask).
    """
    for img in images:
        masked = []
        for mask in masks:
            masked.append(mask_image(img, mask, image_type))
        yield masked
Mask images with multiple masks. Parameters ---------- images: Images to mask. masks: Masks to apply. image_type: Type to cast images to. Yields ------ Sequence[np.ndarray] For each mask, a masked image.
def load_results_from_table_definition(table_definition, table_definition_file, options):
    """Load all results in files that are listed in the given table-definition file.

    @return: a list of RunSetResult objects (all parallel loads are resolved
        before returning).
    """
    default_columns = extract_columns_from_table_definition_file(table_definition, table_definition_file)
    columns_relevant_for_diff = _get_columns_relevant_for_diff(default_columns)

    results = []
    for tag in table_definition:
        if tag.tag == 'result':
            # A per-result column spec overrides the table-wide default.
            columns = extract_columns_from_table_definition_file(tag, table_definition_file) or default_columns
            run_set_id = tag.get('id')
            for resultsFile in get_file_list_from_result_tag(tag, table_definition_file):
                results.append(parallel.submit(
                    load_result, resultsFile, options, run_set_id, columns,
                    columns_relevant_for_diff))
        elif tag.tag == 'union':
            results.append(parallel.submit(
                handle_union_tag, tag, table_definition_file, options,
                default_columns, columns_relevant_for_diff))
    # Block until every submitted load has finished.
    return [future.result() for future in results]
Load all results in files that are listed in the given table-definition file. @return: a list of RunSetResult objects
def validate(
    schema: GraphQLSchema,
    document_ast: DocumentNode,
    rules: Sequence[RuleType] = None,
    type_info: TypeInfo = None,
) -> List[GraphQLError]:
    """Implements the "Validation" section of the GraphQL spec.

    Runs synchronously and returns a list of encountered errors, or an
    empty list when the document is valid. ``rules`` defaults to the
    spec-defined ``specified_rules``; ``type_info`` is created from the
    schema when not supplied.

    Raises:
        TypeError: on a missing/invalid document node, a non-TypeInfo
            ``type_info``, or ``rules`` that is not a list/tuple.
    """
    if not document_ast or not isinstance(document_ast, DocumentNode):
        raise TypeError("You must provide a document node.")
    # If the schema itself is invalid, document validation is meaningless.
    assert_valid_schema(schema)
    if type_info is None:
        type_info = TypeInfo(schema)
    elif not isinstance(type_info, TypeInfo):
        raise TypeError(f"Not a TypeInfo object: {inspect(type_info)}")
    if rules is None:
        rules = specified_rules
    elif not isinstance(rules, (list, tuple)):
        raise TypeError("Rules must be passed as a list/tuple.")
    context = ValidationContext(schema, document_ast, type_info)
    # All rule visitors run in a single AST traversal, with TypeInfo tracking
    # the current type context for them.
    visitors = [rule(context) for rule in rules]
    visit(document_ast, TypeInfoVisitor(type_info, ParallelVisitor(visitors)))
    return context.errors
Implements the "Validation" section of the spec. Validation runs synchronously, returning a list of encountered errors, or an empty list if no errors were encountered and the document is valid. A list of specific validation rules may be provided. If not provided, the default list of rules defined by the GraphQL specification will be used. Each validation rule is a ValidationRule object which is a visitor object that holds a ValidationContext (see the language/visitor API). Visitor methods are expected to return GraphQLErrors, or lists of GraphQLErrors when invalid. Optionally a custom TypeInfo instance may be provided. If not provided, one will be created from the provided schema.
def hide(self, bAsync = True):
    """Make the window invisible.

    @see: L{show}
    @type bAsync: bool
    @param bAsync: Perform the request asynchronously when True.
    @raise WindowsError: An error occurred while processing this request.
    """
    handle = self.get_handle()
    if bAsync:
        win32.ShowWindowAsync(handle, win32.SW_HIDE)
    else:
        win32.ShowWindow(handle, win32.SW_HIDE)
Make the window invisible.
@see: L{show}
@type bAsync: bool
@param bAsync: Perform the request asynchronously.
@raise WindowsError: An error occurred while processing this request.
def intersection(line1, line2):
    """Return the coordinates of a point of intersection given two lines.

    Returns None when the lines are parallel but not collinear; returns an
    arbitrary point of intersection when the lines are collinear.

    Parameters: line1 and line2: lines given by 4 coordinates (x0,y0,x1,y1).
    """
    x1, y1, x2, y2 = line1
    u1, v1, u2, v2 = line2
    a = x2 - x1
    b = u1 - u2
    c = y2 - y1
    d = v1 - v2
    e = u1 - x1
    f = v1 - y1
    denom = float(a * d - b * c)
    if not _near(denom, 0):
        # General case: solve for the parameter along line1.
        t = (e * d - b * f) / denom
        return x1 + t * (x2 - x1), y1 + t * (y2 - y1)
    # Parallel lines: collinear only when the offset ratios agree.
    if b == 0 or d == 0:
        return None
    if _near(e / b, f / d):
        return x1, y1
    return None
Return the coordinates of a point of intersection given two lines.
Return None if the lines are parallel, but non-collinear.
Return an arbitrary point of intersection if the lines are collinear.

Parameters: line1 and line2: lines given by 4 coordinates (x0,y0,x1,y1).
def read_csv_to_html_table(csvFile, hasHeader='N'):
    """Read a CSV file and convert it to an HTML table.

    :param csvFile: path of the CSV file to read.
    :param hasHeader: 'Y' renders the first row with <TH> cells;
        any other value renders every row with <TD> cells.
    :return: the HTML markup for the table as a string.
    """
    import csv

    txt = '<table class="as-table as-table-zebra as-table-horizontal">'
    with open(csvFile, "r") as f:
        # Bug fix: use the csv module so quoted fields containing commas
        # are parsed correctly (the original str.split(',') broke on them,
        # and also left the trailing newline inside the last cell).
        for row_num, cols in enumerate(csv.reader(f), start=1):
            if hasHeader == 'Y' and row_num == 1:
                td_begin, td_end = '<TH>', '</TH>'
            else:
                td_begin, td_end = '<TD>', '</TD>'
            txt += "<TR>"
            for col in cols:
                txt += td_begin + col.strip('"') + td_end
            txt += "</TR>\n"
    txt += "</TABLE>\n\n"
    return txt
reads a CSV file and converts it to HTML
def get_preparation_data(name):
    """Return info about the parent process needed by a child to unpickle
    a process object.

    Monkey-patch of the corresponding multiprocessing helper.
    """
    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        log_to_stderr=_log_to_stderr,
        orig_dir=process.ORIGINAL_DIR,
        authkey=process.current_process().authkey,
    )
    if _logger is not None:
        d['log_level'] = _logger.getEffectiveLevel()
    if not WINEXE:
        # Locate the __main__ script so the child can re-import it.
        main_path = getattr(sys.modules['__main__'], '__file__', None)
        if not main_path and sys.argv[0] not in ('', '-c'):
            main_path = sys.argv[0]
        if main_path is not None:
            # Resolve relative paths against the original working directory.
            if (not os.path.isabs(main_path) and
                    process.ORIGINAL_DIR is not None):
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            # Frozen executables cannot be re-imported as the main module.
            if not main_path.endswith('.exe'):
                d['main_path'] = os.path.normpath(main_path)
    return d
Return info about parent needed by child to unpickle process object. Monkey-patch from
def get_description(self, description_type='Abstract'):
    """Get DataCite description.

    Searches ``self.xml['descriptions']['description']`` (a dict or list of
    dicts) for an entry keyed by ``description_type``. For a dict with a
    single entry that does not match, that sole value is returned.

    :param description_type: description key to look for (default 'Abstract').
    :return: the description text, or None when not found.
    """
    if 'descriptions' in self.xml:
        if isinstance(self.xml['descriptions']['description'], list):
            for description in self.xml['descriptions']['description']:
                if description_type in description:
                    return description[description_type]
        elif isinstance(self.xml['descriptions']['description'], dict):
            description = self.xml['descriptions']['description']
            if description_type in description:
                return description[description_type]
            elif len(description) == 1:
                # Bug fix: dict.values() is a view in Python 3 and does not
                # support indexing; next(iter(...)) works on both 2 and 3.
                return next(iter(description.values()))
    return None
Get DataCite description.
def __sort_analyses(sentence):
    """Sort the analyses of every word in *sentence* into a canonical order.

    Morphological analysers list analyses in arbitrary order, so for
    consistency each word's analyses are ordered by the concatenation of
    root, part-of-speech tag, form and clitic. Mutates the words in place
    and returns the sentence.
    """
    for word in sentence:
        if ANALYSIS not in word:
            raise Exception( '(!) Error: no analysis found from word: '+str(word) )
        word[ANALYSIS] = sorted(
            word[ANALYSIS],
            key=lambda a: "_".join([a[ROOT], a[POSTAG], a[FORM], a[CLITIC]]))
    return sentence
Sorts analysis of all the words in the sentence. This is required for consistency, because by default, analyses are listed in arbitrary order;
def copy(self, name=None):
    r"""Create and return a deep copy of the current project.

    A deep copy means new, unique versions of all contained objects with
    identical data and properties; the copy is registered in the workspace.

    Parameters
    ----------
    name : string
        The name to give to the new project. If not supplied, a name is
        automatically generated.

    Returns
    -------
    A new Project object containing copies of all objects.
    """
    proj = deepcopy(self)
    ws[name if name is not None else ws._gen_name()] = proj
    return proj
r""" Creates a deep copy of the current project A deep copy means that new, unique versions of all the objects are created but with identical data and properties. Parameters ---------- name : string The name to give to the new project. If not supplied, a name is automatically generated. Returns ------- A new Project object containing copies of all objects
def interval_lengths( bits ):
    """Yield the length of each contiguous run of set bits in *bits*,
    in order of appearance."""
    end = 0
    while True:
        start = bits.next_set( end )
        if start == bits.size:
            # No more set bits.
            return
        end = bits.next_clear( start )
        yield end - start
Get the length distribution of all contiguous runs of set bits from the given bit set, yielded in order of appearance.
def remove_multi(self, kvs, quiet=None):
    """Remove multiple items from the cluster.

    :param kvs: Iterable of keys to delete from the cluster. If you wish to
        specify a CAS for each item, then you may pass a dictionary of keys
        mapping to cas, like ``remove_multi({k1: cas1, k2: cas2})``.
    :param quiet: Whether an exception should be raised if one or more
        items were not found.
    :return: A :class:`~.MultiResult` containing
        :class:`~.OperationResult` values.
    """
    return _Base.remove_multi(self, kvs, quiet=quiet)
Remove multiple items from the cluster :param kvs: Iterable of keys to delete from the cluster. If you wish to specify a CAS for each item, then you may pass a dictionary of keys mapping to cas, like `remove_multi({k1:cas1, k2:cas2}`) :param quiet: Whether an exception should be raised if one or more items were not found :return: A :class:`~.MultiResult` containing :class:`~.OperationResult` values.
def clip(self, lower=None, upper=None):
    """Trim values at the given thresholds using the pandas clip function.

    Exports the internal data as a DataFrame, clips it between ``lower``
    and ``upper``, and loads the result back in place.
    """
    clipped = self.export_df().clip(lower=lower, upper=upper)
    self.load_df(clipped)
Trim values at input thresholds using pandas function
def solve_mbar_for_all_states(u_kn, N_k, f_k, solver_protocol):
    """Solve for free energies of states with samples, then calculate for empty states.

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies of each state
    solver_protocol : tuple(dict()), optional, default=None
        Sequence of dictionaries of steps in solver protocol for final
        stage of refinement.

    Returns
    -------
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The free energies of states, shifted so that f_k[0] == 0.
    """
    states_with_samples = np.where(N_k > 0)[0]
    if len(states_with_samples) == 1:
        # A single sampled state is its own reference; nothing to solve.
        f_k_nonzero = np.array([0.0])
    else:
        f_k_nonzero, all_results = solve_mbar(u_kn[states_with_samples], N_k[states_with_samples], f_k[states_with_samples], solver_protocol=solver_protocol)
    f_k[states_with_samples] = f_k_nonzero
    # One self-consistent pass fills in free energies for unsampled states.
    f_k = self_consistent_update(u_kn, N_k, f_k)
    # Convention: report free energies relative to the first state.
    f_k -= f_k[0]
    return f_k
Solve for free energies of states with samples, then calculate for empty states. Parameters ---------- u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float' The reduced potential energies, i.e. -log unnormalized probabilities N_k : np.ndarray, shape=(n_states), dtype='int' The number of samples in each state f_k : np.ndarray, shape=(n_states), dtype='float' The reduced free energies of each state solver_protocol: tuple(dict()), optional, default=None Sequence of dictionaries of steps in solver protocol for final stage of refinement. Returns ------- f_k : np.ndarray, shape=(n_states), dtype='float' The free energies of states
def get(cls, **kwargs):
    """Get a copy of the type from the cache and reconstruct it.

    On a cache hit the stored payload is deserialized; on a miss a fresh
    instance is created and initialised via ``from_miss``.
    """
    data = cls._get(**kwargs)
    if data is not None:
        return cls.deserialize(data)
    instance = cls()
    instance.from_miss(**kwargs)
    return instance
Get a copy of the type from the cache and reconstruct it.
def set_style(network_id, ndex_cred=None, template_id=None):
    """Set the style of the network to a given template network's style.

    Parameters
    ----------
    network_id : str
        The UUID of the NDEx network whose style is to be changed.
    ndex_cred : dict
        A dictionary of NDEx credentials.
    template_id : Optional[str]
        The UUID of the NDEx network whose style is used; a built-in
        default template is used when not given.
    """
    if not template_id:
        # Default style template on public NDEx.
        template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf"
    server = 'http://public.ndexbio.org'
    username, password = get_default_ndex_cred(ndex_cred)
    # Download the target network, apply the template style locally...
    source_network = ndex2.create_nice_cx_from_server(username=username,
                                                      password=password,
                                                      uuid=network_id,
                                                      server=server)
    source_network.apply_template(server, template_id)
    # ...then push the restyled network back to the same UUID in place.
    source_network.update_to(network_id, server=server, username=username,
                             password=password)
Set the style of the network to a given template network's style Parameters ---------- network_id : str The UUID of the NDEx network whose style is to be changed. ndex_cred : dict A dictionary of NDEx credentials. template_id : Optional[str] The UUID of the NDEx network whose style is used on the network specified in the first argument.
def dependents_of_addresses(self, addresses):
    """Given an iterable of addresses, return a set containing those
    addresses together with all of their direct (and implicit) dependents."""
    result = OrderedSet(addresses)
    for address in addresses:
        result.update(self._dependent_address_map[address])
        result.update(self._implicit_dependent_address_map[address])
    return result
Given an iterable of addresses, yield all of those addresses dependents.
def _add_intermol_molecule_type(intermol_system, parent):
    """Create an InterMol molecule type for *parent* and add its bonds.

    Side effect: assigns 1-based ``index`` attributes to parent's particles;
    the InterMol bonds reference these indices.
    """
    from intermol.moleculetype import MoleculeType
    from intermol.forces.bond import Bond as InterMolBond

    molecule_type = MoleculeType(name=parent.name)
    intermol_system.add_molecule_type(molecule_type)

    # InterMol atom indices are 1-based.
    for index, parent_atom in enumerate(parent.particles()):
        parent_atom.index = index + 1

    for atom1, atom2 in parent.bonds():
        intermol_bond = InterMolBond(atom1.index, atom2.index)
        molecule_type.bonds.add(intermol_bond)
Create a molecule type for the parent and add bonds.
def get_package(self):
    """Create the analogous package.

    Returns:
        `Package` object.

    Raises:
        PackageMetadataError: if the definition file requires a newer rez
            than the one currently running.
    """
    package_data = self._get_data()
    package_data = package_schema.validate(package_data)

    # Check that this version of rez is able to read this definition.
    if "requires_rez_version" in package_data:
        ver = package_data.pop("requires_rez_version")
        if _rez_Version < ver:
            raise PackageMetadataError(
                "Failed reading package definition file: rez version >= %s "
                "needed (current version is %s)" % (ver, _rez_Version))

    # Build a one-package in-memory repository, then pull the package
    # resource back out of it.
    version_str = package_data.get("version") or "_NO_VERSION"
    repo_data = {self.name: {version_str: package_data}}
    repo = create_memory_package_repository(repo_data)
    family_resource = repo.get_package_family(self.name)
    it = repo.iter_packages(family_resource)
    # NOTE(review): it.next() is Python-2-only syntax; next(it) would be
    # required on Python 3 — left unchanged as this codebase targets py2.
    package_resource = it.next()

    package = self.package_cls(package_resource)
    package.validate_data()
    return package
Create the analogous package. Returns: `Package` object.
def raise_right_error(response):
    """Raise the appropriate error for a bad response from the Clef API.

    Does nothing for a 200 response. Maps 500/403/400/404 status codes to
    specific exception classes, falling back to the generic error types
    when the server's message is not recognised.
    """
    if response.status_code == 200:
        return
    if response.status_code == 500:
        raise ServerError('Clef servers are down.')
    if response.status_code == 403:
        message = response.json().get('error')
        # Bug fix: use .get() — direct indexing raised KeyError for any
        # unmapped message, hiding the real API error.
        error_class = MESSAGE_TO_ERROR_MAP.get(message)
        if error_class is InvalidOAuthTokenError:
            message = 'Something went wrong at Clef. Unable to retrieve user information with this token.'
        if error_class is not None:
            raise error_class(message)
        raise APIError(message)
    if response.status_code == 400:
        message = response.json().get('error')
        # Bug fix: .get() makes the fallback below reachable; the original's
        # MESSAGE_TO_ERROR_MAP[message] raised KeyError before the check.
        error_class = MESSAGE_TO_ERROR_MAP.get(message)
        if error_class:
            raise error_class(message)
        else:
            raise InvalidLogoutTokenError(message)
    if response.status_code == 404:
        raise NotFoundError('Unable to retrieve the page. Are you sure the Clef API endpoint is configured right?')
    raise APIError
Raise appropriate error when bad response received.
def resources():
    """Upload a new resource for an individual (Flask view).

    Saves the uploaded file into the configured upload directory, registers
    it as a resource on the individual, then redirects back to the referrer.
    """
    ind_id = request.form['ind_id']
    upload_dir = os.path.abspath(app.config['UPLOAD_DIR'])
    req_file = request.files['file']
    # secure_filename guards against path traversal in user-supplied names.
    filename = secure_filename(req_file.filename)
    file_path = os.path.join(upload_dir, filename)
    # Fall back to the file name when no display name was provided.
    name = request.form['name'] or filename
    req_file.save(file_path)
    ind_obj = app.db.individual(ind_id)
    app.db.add_resource(name, file_path, ind_obj)
    return redirect(request.referrer)
Upload a new resource for an individual.
def generate_getter(value):
    """Generate a read-only property that tests the instance against *value*
    via its ``is_`` method."""
    @property
    @wraps(is_)
    def prop(self):
        # Delegate the actual check to the instance's is_ method.
        return self.is_(value)
    return prop
Generate getter for given value.
def get_service_module(service_path):
    """Add custom paths to sys and import the service module.

    The module imported is ``<name>.<name>_service`` where ``<name>`` is
    the basename of ``service_path``.

    :param service_path: Path to service folder
    :return: the imported service module
    """
    paths = [
        os.path.dirname(__file__),
        os.path.realpath(os.path.join(service_path, "..")),
        os.path.realpath(os.path.join(service_path)),
        os.path.realpath(os.path.join(service_path, DEPS_DIR)),
    ]
    for path in paths:
        path = os.path.realpath(path)
        logger.debug("adding %s to path", path)
        # insert(0, ...) in loop order means the LAST entry of `paths`
        # ends up first on sys.path (highest import precedence).
        sys.path.insert(0, path)

    service_name = os.path.basename(service_path)
    module = ".".join([service_name, service_name + "_service"])
    logger.debug("importing %s", module)
    return importlib.import_module(module)
Add custom paths to sys and import service module. :param service_path: Path to service folder
def postURL(self, url, headers=None, body=None):
    """Request a URL using the HTTP method POST.

    :param url: the URL to request.
    :param headers: optional dict of HTTP headers. Defaults to a fresh
        empty dict per call (the original used a mutable default argument,
        which is shared across calls and a classic Python pitfall).
    :param body: optional request body.
    :return: whatever ``_load_resource`` returns for the POST request.
    """
    if headers is None:
        headers = {}
    return self._load_resource("POST", url, headers, body)
Request a URL using the HTTP method POST.
def add_service_subnet(self, context_id, subnet_id):
    """Adds a service subnet to a tunnel context.

    :param int context_id: The id-value representing the context instance.
    :param int subnet_id: The id-value representing the service subnet.
    :return bool: True if service subnet addition was successful.
    """
    return self.context.addServiceSubnetToNetworkTunnel(subnet_id, id=context_id)
Adds a service subnet to a tunnel context. :param int context_id: The id-value representing the context instance. :param int subnet_id: The id-value representing the service subnet. :return bool: True if service subnet addition was successful.
def setdefault(self, key, *args):
    """Lowercase *key*, then delegate to dict.setdefault and return the value."""
    assert isinstance(key, basestring)
    lowered = key.lower()
    return dict.setdefault(self, lowered, *args)
Set lowercase key value and return.
def with_fields(self, *fields):
    """Returns list of characters with information for certain fields.

    Parameters
    ----------
    *fields : list of str
        fields for which information should be available (non-NULL)

    Returns
    -------
    :class:`sqlalchemy.orm.query.Query` : list of matches
    """
    Unihan = self.sql.base.classes.Unihan
    query = self.sql.session.query(Unihan)
    for field in fields:
        # NOTE(review): this builds a bare Column(field) rather than
        # referencing the mapped attribute (getattr(Unihan, field));
        # presumably SQLAlchemy renders it as the literal column name —
        # verify it binds to the intended table.
        query = query.filter(Column(field).isnot(None))
    return query
Returns list of characters with information for certain fields. Parameters ---------- *fields : list of str fields for which information should be available Returns ------- :class:`sqlalchemy.orm.query.Query` : list of matches
def hide_tool(self, context_name, tool_name):
    """Hide a tool so that it is not exposed in the suite.

    Args:
        context_name (str): Context containing the tool.
        tool_name (str): Name of tool to hide.
    """
    context_data = self._context(context_name)
    hidden = context_data["hidden_tools"]
    if tool_name in hidden:
        # Already hidden — nothing to do.
        return
    self._validate_tool(context_name, tool_name)
    hidden.add(tool_name)
    self._flush_tools()
Hide a tool so that it is not exposed in the suite. Args: context_name (str): Context containing the tool. tool_name (str): Name of tool to hide.
def lerp(vec1, vec2, time):
    """Linearly interpolate from vec1 to vec2 based on *time*, with time
    clamped to the range [0, 1].

    Raises TypeError unless both vectors are Vector2 instances.
    """
    if not (isinstance(vec1, Vector2) and isinstance(vec2, Vector2)):
        raise TypeError("Objects must be of type Vector2")
    t = min(max(time, 0), 1)
    return Vector2(vec1[0] + t * (vec2[0] - vec1[0]),
                   vec1[1] + t * (vec2[1] - vec1[1]))
Lerp between vec1 to vec2 based on time. Time is clamped between 0 and 1.
def plot(self, format='segments', bits=None, **kwargs):
    """Plot the data for this `StateVector`.

    Parameters
    ----------
    format : `str`, optional, default: ``'segments'``
        The type of plot to make, either 'segments' to plot the
        SegmentList for each bit, or 'timeseries' to plot the raw data.
    bits : `list`, optional
        A list of bit indices or bit names; ignored unless ``format`` is
        ``'segments'``.
    **kwargs
        Other keyword arguments passed to the underlying plotting call.

    Returns
    -------
    plot : `~gwpy.plot.Plot`
        output plot object

    Raises
    ------
    ValueError
        If ``format`` is not ``'timeseries'`` or ``'segments'``.
    """
    if format == 'timeseries':
        # Fall back to the plain TimeSeries rendering of the raw vector.
        return super(StateVector, self).plot(**kwargs)
    if format == 'segments':
        from ..plot import Plot
        kwargs.setdefault('xscale', 'auto-gps')
        # One segment axis per bit flag.
        return Plot(*self.to_dqflags(bits=bits).values(),
                    projection='segments', **kwargs)
    raise ValueError("'format' argument must be one of: 'timeseries' or "
                     "'segments'")
Plot the data for this `StateVector` Parameters ---------- format : `str`, optional, default: ``'segments'`` The type of plot to make, either 'segments' to plot the SegmentList for each bit, or 'timeseries' to plot the raw data for this `StateVector` bits : `list`, optional A list of bit indices or bit names, defaults to `~StateVector.bits`. This argument is ignored if ``format`` is not ``'segments'`` **kwargs Other keyword arguments to be passed to either `~gwpy.plot.SegmentAxes.plot` or `~gwpy.plot.Axes.plot`, depending on ``format``. Returns ------- plot : `~gwpy.plot.Plot` output plot object See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_flag for documentation of keyword arguments used in rendering each statevector flag.
def get(self):
    """Return the stored value, or None once the deadline has passed.

    Expiry is destructive: the stored value is cleared the first time it
    is read after the deadline.
    """
    expired = self.timer() > self.deadline
    if expired:
        self.value = None
    return self.value
Returns existing value, or None if deadline has expired.
def patch_stackless():
    """Patch the stackless module so that new tasklets are properly tracked
    in the debugger.

    Installs the debugger's schedule callback while preserving (and
    chaining to) any callback the application had registered, and swaps in
    debug-aware tasklet entry points (``__call__``/``setup``/``run`` are
    defined elsewhere in this module).
    """
    global _application_set_schedule_callback
    # Install the debugger's scheduler hook, remembering any callback the
    # application had already registered so it can still be invoked.
    _application_set_schedule_callback = stackless.set_schedule_callback(_schedule_callback)

    def set_schedule_callback(callable):
        # Applications calling set_schedule_callback now update our saved
        # callback instead of displacing the debugger's hook.
        global _application_set_schedule_callback
        old = _application_set_schedule_callback
        _application_set_schedule_callback = callable
        return old

    def get_schedule_callback():
        global _application_set_schedule_callback
        return _application_set_schedule_callback

    # Preserve the original docstrings on the replacement functions.
    set_schedule_callback.__doc__ = stackless.set_schedule_callback.__doc__
    if hasattr(stackless, "get_schedule_callback"):
        get_schedule_callback.__doc__ = stackless.get_schedule_callback.__doc__
    stackless.set_schedule_callback = set_schedule_callback
    stackless.get_schedule_callback = get_schedule_callback

    # Stackless versions without tasklet.trace_function need the patched
    # tasklet entry points to get tracing installed.
    if not hasattr(stackless.tasklet, "trace_function"):
        __call__.__doc__ = stackless.tasklet.__call__.__doc__
        stackless.tasklet.__call__ = __call__
        setup.__doc__ = stackless.tasklet.setup.__doc__
        stackless.tasklet.setup = setup
        run.__doc__ = stackless.run.__doc__
        stackless.run = run
This function should be called to patch the stackless module so that new tasklets are properly tracked in the debugger.
def __step1(self):
    """Step 1 of the Munkres algorithm.

    For each row of the cost matrix, find the smallest element and
    subtract it from every element in its row (guaranteeing at least one
    zero per row), then go to Step 2.

    Returns:
        int: the number of the next step (2).
    """
    # (removed unused local alias 'C = self.C' from the original)
    n = self.n
    for i in range(n):
        minval = min(self.C[i])
        for j in range(n):
            self.C[i][j] -= minval
    return 2
For each row of the matrix, find the smallest element and subtract it from every element in its row. Go to Step 2.