code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def mark_bool_flags_as_mutual_exclusive(flag_names, required=False, flag_values=_flagvalues.FLAGS):
    """Ensure that at most (or exactly) one of the named boolean flags is True.

    Args:
      flag_names: [str], names of the flags; each must be a boolean flag.
      required: bool. If True, exactly one flag must be True; otherwise at
        most one may be True and all-False is also valid.
      flag_values: flags.FlagValues instance where the flags are defined.

    Raises:
      _exceptions.ValidationError: if any named flag is not boolean.
    """
    # Reject non-boolean flags up front so misuse fails at registration time.
    for name in flag_names:
        if not flag_values[name].boolean:
            raise _exceptions.ValidationError(
                'Flag --{} is not Boolean, which is required for flags used in '
                'mark_bool_flags_as_mutual_exclusive.'.format(name))

    def _check_exclusion(flags_dict):
        # Count how many of the watched flags are truthy.
        true_count = sum(bool(v) for v in flags_dict.values())
        if true_count == 1 or (true_count == 0 and not required):
            return True
        raise _exceptions.ValidationError(
            '{} one of ({}) must be True.'.format(
                'Exactly' if required else 'At most', ', '.join(flag_names)))

    register_multi_flags_validator(
        flag_names, _check_exclusion, flag_values=flag_values)
Ensures that only one flag among flag_names is True. Args: flag_names: [str], names of the flags. required: bool. If true, exactly one flag must be True. Otherwise, at most one flag can be True, and it is valid for all flags to be False. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined.
def convert(data):
    """Recursively convert unicode strings to UTF-8 byte strings.

    Handles a standalone unicode string, or unicode strings nested inside a
    mapping or iterable.  NOTE(review): Python 2 only -- relies on `unicode`,
    `dict.iteritems` and the pre-3.10 `collections.Mapping`/`Iterable` ABCs.

    :param data: unicode/str, mapping, iterable, or any other object.
    :return: the same structure with unicode strings encoded as UTF-8;
        non-string leaves are returned unchanged.
    """
    if isinstance(data, unicode):
        # A bare unicode string: encode it.
        return data.encode('utf-8')
    elif isinstance(data, str):
        # Already a byte string in Python 2.
        return data
    elif isinstance(data, collections.Mapping):
        # Convert each (key, value) pair; the pair tuples themselves are
        # handled by the Iterable branch when convert() recurses.
        return dict(map(convert, data.iteritems()))
    elif isinstance(data, collections.Iterable):
        # Rebuild the same container type from converted elements.
        return type(data)(map(convert, data))
    else:
        return data
Convert a standalone unicode string, or unicode strings nested in a mapping or iterable, into byte strings.
def import_string_code_as_module(code):
    """Run arbitrary passed code as a freshly created module and return it.

    The module is named after the SHA-256 of the source, so distinct code
    strings get distinct module names, and it is registered in
    ``sys.modules`` under that name.

    Args:
        code (str): Python source code to execute as a module.

    Returns:
        module: the newly created module object.

    Raises:
        exceptions.UserError: if executing the code raises any exception.
    """
    # types.ModuleType replaces the deprecated imp.new_module (the imp
    # module was removed in Python 3.12).
    import types

    sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()
    module = types.ModuleType(sha256)
    try:
        # Builtin exec works as a function call on both Python 2 and 3,
        # so the six-style exec_ shim is unnecessary.
        exec(code, module.__dict__)
    except Exception as e:
        raise exceptions.UserError('User code exception', exception_message=str(e))
    sys.modules[sha256] = module
    return module
Used to run arbitrary passed code as a module Args: code (string): Python code to import as module Returns: module: Python module
def html_content(self):
    """Render this entry's markdown content as safe HTML.

    Code blocks are syntax-highlighted, and media URLs are expanded into
    rich oEmbed objects such as video players or images.
    """
    extensions = [
        CodeHiliteExtension(linenums=False, css_class='highlight'),
        ExtraExtension(),
    ]
    rendered = markdown(self.content, extensions=extensions)
    embedded = parse_html(
        rendered,
        oembed_providers,
        urlize_all=True,
        maxwidth=app.config['SITE_WIDTH'])
    return Markup(embedded)
Generate HTML representation of the markdown-formatted blog entry, and also convert any media URLs into rich media objects such as video players or images.
def _exception_for(self, code): if code in self.errors: return self.errors[code] elif 500 <= code < 599: return exceptions.RemoteServerError else: return exceptions.UnknownError
Return the exception class suitable for the specified HTTP status code. Raises: UnknownError: The HTTP status code is not one of the known codes.
def freeze():
    """Combine all dependencies for the Agent's static environment."""
    echo_waiting('Verifying collected packages...')
    catalog, errors = make_catalog()
    if errors:
        # Report every problem before giving up.
        for err in errors:
            echo_failure(err)
        abort()

    static_file = get_agent_requirements()
    echo_info('Static file: {}'.format(static_file))

    # Snapshot before/after so the diff can be displayed.
    before = list(read_packages(static_file))
    catalog.write_packages(static_file)
    after = list(read_packages(static_file))
    display_package_changes(before, after)
Combine all dependencies for the Agent's static environment.
def get_model_filepath(self, infodict):
    """Return the path to a homology model described by ``infodict``.

    Args:
        infodict (dict): Information about a single model, as produced by
            ``get_models``.

    Returns:
        str: Path to the model PDB file, or None if the file is missing.
    """
    u = infodict['uniprot_ac']
    original_filename = '{}_{}_{}_{}'.format(
        infodict['from'], infodict['to'],
        infodict['template'], infodict['coordinate_id'])
    # Models are sharded into nested directories by UniProt AC prefix.
    file_path = op.join(
        self.metadata_dir, u[:2], u[2:4], u[4:6], 'swissmodel',
        '{}.pdb'.format(original_filename))
    if not op.exists(file_path):
        log.warning('{}: no file {} found for model'.format(u, file_path))
        return None
    return file_path
Get the path to the homology model using information from the index dictionary for a single model. Example: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries. Use one of those dictionaries as input to this function to get the filepath to the model itself. Args: infodict (dict): Information about a model from get_models Returns: str: Path to homology model
def get_atoms(self, inc_alt_states=False):
    """Return all atoms in the `Monomer`.

    Parameters
    ----------
    inc_alt_states : bool, optional
        If `True`, atoms for alternate states are returned instead,
        chained in sorted state order.
    """
    if not inc_alt_states:
        return self.atoms.values()
    # Chain the atom dicts of every state, iterating states in sorted order.
    per_state_atoms = [pair[1].values() for pair in sorted(list(self.states.items()))]
    return itertools.chain(*per_state_atoms)
Returns all atoms in the `Monomer`. Parameters ---------- inc_alt_states : bool, optional If `True`, will return `Atoms` for alternate states.
def list_buckets(self, instance):
    """List the buckets for an instance.

    :param str instance: A Yamcs instance name.
    :rtype: ~collections.Iterable[.Bucket]
    """
    response = self._client.get_proto(path='/buckets/' + instance)
    message = rest_pb2.ListBucketsResponse()
    message.ParseFromString(response.content)
    wrapped = [Bucket(proto, instance, self)
               for proto in getattr(message, 'bucket')]
    return iter(wrapped)
List the buckets for an instance. :param str instance: A Yamcs instance name. :rtype: ~collections.Iterable[.Bucket]
def __DeclareMessageAlias(self, schema, alias_for):
    """Declare ``schema`` as an alias for the ``alias_for`` message."""
    class_name = self.__names.ClassName(schema['id'])
    descriptor = extended_descriptor.ExtendedMessageDescriptor()
    descriptor.name = class_name
    descriptor.alias_for = alias_for
    self.__DeclareDescriptor(class_name)
    # Aliases are emitted in terms of the shared extra_types helpers.
    self.__AddImport('from %s import extra_types' % self.__base_files_package)
    self.__RegisterDescriptor(descriptor)
Declare schema as an alias for alias_for.
def _get_doc_by_line_offset(self, doc_id):
    """Load a document from XML using line-offset information.

    Much slower than ``_get_doc_by_raw_offset`` but works everywhere.
    """
    meta = self._get_meta()
    bounds = meta[str(doc_id)].bounds
    return xml_utils.load_chunk(self.filename, bounds, slow=True)
Load document from xml using line offset information. This is much slower than _get_doc_by_raw_offset but should work everywhere.
def safe_setattr(obj, name, value):
    """Attempt ``setattr(obj, name, value)``, swallowing AttributeError.

    :return: True on success, False when the attribute could not be set.
    """
    try:
        setattr(obj, name, value)
    except AttributeError:
        return False
    return True
Attempt to setattr but catch AttributeErrors.
def set_deployment_run_id(self):
    """Set ``self.deployment_run_id`` from deployment properties.

    Leaves the attribute untouched when the property is missing or not
    an integer.

    :return: None
    """
    log = logging.getLogger(self.cls_logger + '.set_deployment_run_id')
    raw_value = self.get_value('cons3rt.deploymentRun.id')
    if not raw_value:
        log.debug('Deployment run ID not found in deployment properties')
        return
    try:
        run_id = int(raw_value)
    except ValueError:
        log.debug('Deployment run ID found was unable to convert to an int: {d}'.format(d=raw_value))
        return
    self.deployment_run_id = run_id
    log.info('Found deployment run ID: {i}'.format(i=str(self.deployment_run_id)))
Sets the deployment run ID from deployment properties :return: None
def debug_video_writer_factory(output_dir):
    """Create a VideoWriter for debug videos.

    Falls back to writing individual frames when ffmpeg is disabled.
    """
    if FLAGS.disable_ffmpeg:
        return common_video.IndividualFrameWriter(output_dir)
    video_path = os.path.join(output_dir, "video.avi")
    return common_video.WholeVideoWriter(
        fps=10, output_path=video_path, file_format="avi")
Creates a VideoWriter for debug videos.
def revoke(self, token, pipe=None):
    """Revoke the key associated with the given revocation token.

    When `pipe` is None the revocation executes immediately and
    :class:`RevokeError` is raised if the token does not exist.  When a
    Redis pipeline is supplied, the delete is only queued on it: the
    caller must execute the pipeline and check the delete's result
    (the last result) itself.

    :param token: the revocation token.
    :param pipe: an optional Redis pipeline.
    """
    # Use the caller's pipeline if given, otherwise a private one.
    p = self.redis.pipeline() if pipe is None else pipe
    formatted_token = self.format_token(token)
    try:
        # WATCH the token so a concurrent modification aborts the
        # transaction with WatchError.
        p.watch(formatted_token)
        key = p.get(formatted_token)
        formatted_key = self.format_key(key)
        # Begin the transactional section and delete both entries.
        p.multi()
        p.delete(formatted_key, formatted_token)
        if pipe is None:
            # Execute now; the last result is the DEL count (0 => missing).
            if not p.execute()[-1]:
                raise RevokeError(token, 'token not found')
    except WatchError:
        # Propagate watch conflicts to the caller unchanged.
        raise
    finally:
        if pipe is None:
            # Only reset a pipeline we created ourselves.
            p.reset()
\ Revokes the key associated with the given revocation token. If the token does not exist, a :class:`RevokeError <shorten.RevokeError>` is thrown. Otherwise `None` is returned. If `pipe` is given, then a :class:`RevokeError <shorten.RevokeError>` will not be thrown if the key does not exist. The n-th from last result should be checked like so: :: pipe = redis.Pipeline() store.revoke(token, pipe=pipe) results = pipe.execute() if not results[-1]: raise RevokeError(token) :param pipe: a Redis pipeline. If `None`, the token will be revoked immediately. Otherwise the results must be extracted from the pipeline results (see above).
def get_all():
    '''
    Return all installed services.

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    service = _cmd()
    listing = __salt__['cmd.run']('{0} ls all'.format(service))
    return sorted(listing.splitlines())
Return all installed services. CLI Example: .. code-block:: bash salt '*' service.get_all
def sam_pair_to_insert(s1, s2):
    """Return the insert size for a pair of SAM records.

    Only "innies" (a mapped forward/reverse pair on the same reference)
    yield a size; any other configuration returns None.
    """
    not_proper_pair = (s1.is_unmapped or s2.is_unmapped
                       or s1.tid != s2.tid
                       or s1.is_reverse == s2.is_reverse)
    if not_proper_pair:
        return None
    # Orient the pair: `start` is the forward read's start, `end` is the
    # reverse read's last aligned base.
    if s1.is_reverse:
        start, end = s2.reference_start, s1.reference_end - 1
    else:
        start, end = s1.reference_start, s2.reference_end - 1
    return end - start + 1 if start < end else None
Returns insert size from pair of sam records, as long as their orientation is "innies". Otherwise returns None.
def save(self, out_path):
    """Save an ascii (JSON) representation of this simulation trace.

    Args:
        out_path (str): The output path to save this simulation trace.
    """
    out = {
        'selectors': [str(x) for x in self.selectors],
        'trace': [{'stream': str(DataStream.FromEncoded(x.stream)),
                   'time': x.raw_time,
                   'value': x.value,
                   'reading_id': x.reading_id}
                  for x in self]
    }

    # Open in text mode: json.dump writes str, which a binary-mode ("wb")
    # file object rejects on Python 3.  A `with` block also guarantees the
    # file is closed even if serialization fails.
    with open(out_path, "w") as outfile:
        json.dump(out, outfile, indent=4)
Save an ascii representation of this simulation trace. Args: out_path (str): The output path to save this simulation trace.
def dumps_bytes(obj):
    """Serialize ``obj`` to JSON formatted ``bytes``.

    ``dumps`` may return text; encode it as ASCII so callers always get
    bytes.  The original tested ``isinstance(b, unicode)``, which raises
    NameError on Python 3; checking "not bytes" is equivalent on Python 2
    (where str is bytes) and correct on Python 3.
    """
    b = dumps(obj)
    if not isinstance(b, bytes):
        b = b.encode("ascii")
    return b
Serialize ``obj`` to JSON formatted ``bytes``.
def deconstruct(self, including_private: bool=False) -> bytes:
    """Return the state of this FinTSClient instance as an opaque datablob.

    Do not use this object after calling this method.  Set
    `including_private` to True only when the storage is sufficiently
    secure, as it includes account numbers and names.
    """
    payload = self._deconstruct_v1(including_private=including_private)
    # Version 1 of the blob format, wrapped with the magic header.
    return compress_datablob(DATA_BLOB_MAGIC, 1, payload)
Return state of this FinTSClient instance as an opaque datablob. You should not use this object after calling this method. Information about the connection is implicitly retrieved from the bank and cached in the FinTSClient. This includes: system identifier, bank parameter data, user parameter data. It's not strictly required to retain this information across sessions, but beneficial. If possible, an API user SHOULD use this method to serialize the client instance before destroying it, and provide the serialized data next time an instance is constructed. Parameter `including_private` should be set to True, if the storage is sufficiently secure (with regards to confidentiality) to include private data, specifically, account numbers and names. Most often this is the case. Note: No connection information is stored in the datablob, neither is the PIN.
def proj_units_to_meters(proj_str):
    """Convert projection parameters expressed in kilometers to meters.

    Numeric parameters 'a', 'b' and 'h' smaller than 6e6 are assumed to be
    kilometers and are scaled by 1000; a '+units=km' token is dropped so
    the projection defaults to meters.
    """
    converted = []
    for token in proj_str.split():
        key, val = token.split('=')
        key = key.strip('+')
        if key in ['a', 'b', 'h']:
            val = float(val)
            # Earth-sized axes can't be this small in meters.
            if val < 6e6:
                val *= 1000.
            val = '%.3f' % val
        if key == 'units' and val == 'km':
            continue
        converted.append('+%s=%s' % (key, val))
    return ' '.join(converted)
Convert projection units from kilometers to meters.
def buildWorkbenchWithLauncher():
    """Build a workbench displaying a launcher with the default tools.

    Returns the (workbench, launcher) pair.
    """
    workbench = ui.Workbench()
    default_tools = [exercises.SearchTool()]
    launcher = ui.Launcher(workbench, default_tools)
    workbench.display(launcher)
    return workbench, launcher
Builds a workbench. The workbench has a launcher with all of the default tools. The launcher will be displayed on the workbench.
def simulate(self):
    """Simulate a stream of types.

    Draws a random number of items (0-9) from this iterable and simulates
    each one, returning the results as a list.
    """
    count = random.choice(range(10))
    return [item.simulate() for item in itertools.islice(self, count)]
Simulates a stream of types.
def register_base_assets(self):
    """Register assets needed by Abilian.

    Kept as a separate method so applications can redefine it at will.
    """
    from abilian.web import assets as bundles

    for kind, bundle in (("css", bundles.LESS),
                         ("js-top", bundles.TOP_JS),
                         ("js", bundles.JS)):
        self.register_asset(kind, bundle)
    self.register_i18n_js(*bundles.JS_I18N)
Register assets needed by Abilian. This is done in a separate method in order to allow applications to redefine it at will.
def get_script_str(self, reset=True):
    """Return the script as a single string.

    Resets the editor afterwards when `reset` is True.
    """
    script = "\n".join(self._lines)
    if reset:
        self.reset()
    return script
Returns a string with the script and reset the editor if reset is True
def _load_lsm_data(self, data_var, conversion_factor=1, calc_4d_method=None, calc_4d_dim=None, time_step=None):
    """Extract LSM data for one variable from the netCDF folder.

    :param data_var: name of the variable to extract.
    :param conversion_factor: multiplier applied to the values in place.
    :param calc_4d_method: optional reduction method for 4D variables.
    :param calc_4d_dim: dimension the 4D reduction is applied over.
    :param time_step: datetime for label-based time selection, or an
        integer index; None keeps all time steps.
    :return: the (xarray) data array, with NaNs replaced by 0 and values
        scaled by ``conversion_factor``.
    """
    data = self.xd.lsm.getvar(data_var,
                              yslice=self.yslice,
                              xslice=self.xslice,
                              calc_4d_method=calc_4d_method,
                              calc_4d_dim=calc_4d_dim)
    if isinstance(time_step, datetime):
        # Label-based selection for a concrete timestamp.
        data = data.loc[{self.lsm_time_dim: [pd.to_datetime(time_step)]}]
    elif time_step is not None:
        # Positional (integer index) selection.
        data = data[{self.lsm_time_dim: [time_step]}]
    # Fill missing values, then scale in place.
    data = data.fillna(0)
    data.values *= conversion_factor
    return data
This extracts the LSM data from a folder of netcdf files
def parse(self, data, charset=None):
    """Parse the data.

    Derived classes should usually override ``_parse_data()`` rather than
    this method.

    :param charset: charset of the data; defaults to ``self.charset``.
    """
    effective_charset = charset if charset else self.charset
    return self._parse_data(data, effective_charset)
Parse the data. It is usually a better idea to override ``_parse_data()`` than this method in derived classes. :param charset: the charset of the data. Uses datamapper's default (``self.charset``) if not given. :returns:
def get_item_type(self, ttype):
    """Get a particular item type.

    :param ttype: string naming the desired type
    :rtype: None or the item object
    """
    for entry in self.map_item:
        if TYPE_MAP_ITEM[entry.get_type()] == ttype:
            return entry.get_item()
    return None
Get a particular item type :param ttype: a string which represents the desired type :rtype: None or the item object
def remote_command(function, self, *args, **kwargs):
    """Invoke a remote control-board command with richer error reporting.

    Catches `RuntimeError` raised by firmware commands and re-raises a
    more specific `FirmwareError` (carrying command and return codes), or
    a `RuntimeError` with the command name substituted into the message.
    Unrecognized errors are re-raised unchanged.

    Note: the original used Python 2 ``except RuntimeError, exception``
    syntax, which is a SyntaxError on Python 3; ``as`` works on both.
    """
    try:
        return function(self, *args, **kwargs)
    except RuntimeError as exception:
        error_message = str(exception)
        match = CRE_REMOTE_ERROR.match(error_message)
        if match:
            command_code = int(match.group('command_int'))
            return_code = int(match.group('return_code_int'))
            raise FirmwareError(command_code, return_code)
        match = CRE_REMOTE_COMMAND_ERROR.match(error_message)
        if match:
            command_code = int(match.group('command_int'))
            command_name = NAMES_BY_COMMAND_CODE[command_code]
            raise RuntimeError(CRE_REMOTE_COMMAND_ERROR.sub(command_name, error_message))
        raise
Catch `RuntimeError` exceptions raised by remote control board firmware commands and re-raise as more specific `FirmwareError` exception type, which includes command code and return code.
def _use_remote_connection(self, kwargs): kwargs['host'] = kwargs.get('host') kwargs['username'] = kwargs.get('username') kwargs['password'] = kwargs.get('password') if kwargs['host'] is None or \ kwargs['username'] is None or \ kwargs['password'] is None: return False else: return True
Determine if connection is local or remote
def resolve_type_spec(self, name, lineno):
    """Find and link the TypeSpec with the given name.

    Dotted names are resolved through included scopes; raises
    ThriftCompilerError when the type cannot be found.
    """
    if name in self.type_specs:
        return self.type_specs[name].link(self)

    # "include.Type" names are resolved in the included scope.
    if '.' in name:
        include_name, component = name.split('.', 1)
        if include_name in self.included_scopes:
            scope = self.included_scopes[include_name]
            return scope.resolve_type_spec(component, lineno)

    raise ThriftCompilerError(
        'Unknown type "%s" referenced at line %d%s' % (
            name, lineno, self.__in_path()
        )
    )
Finds and links the TypeSpec with the given name.
def IsImage(self, filename):
    """Return True if the filename has an image extension."""
    guessed, _ = mimetypes.guess_type(filename)
    return bool(guessed) and guessed.startswith("image/")
Returns true if the filename has an image extension.
def install(self, xmlpath):
    """Install the repository at the given XML path as an additional
    repo to monitor pull requests for.

    Warns and aborts when the file does not exist; does nothing when a
    repository of the same name is already installed.
    """
    from os import path
    fullpath = path.abspath(path.expanduser(xmlpath))
    if not path.isfile(fullpath):
        warn("The file {} does not exist; install aborted.".format(fullpath))
        return
    repo = RepositorySettings(self, fullpath)
    repo_key = repo.name.lower()
    if repo_key not in self.repositories:
        self.installed.append(fullpath)
        self._save_installed()
        self.archive[repo_key] = {}
        self._save_archive()
        self.repositories[repo_key] = repo
Installs the repository at the specified XML path as an additional repo to monitor pull requests for.
def __sepApp(self, IDs, aspList): sep, app = self.dyn.immediateAspects(self.obj.id, aspList) if sep is None or app is None: return False else: sepCondition = sep['id'] in IDs appCondition = app['id'] in IDs return sepCondition == appCondition == True
Returns true if the object last and next movement are separations and applications to objects in list IDs. It only considers aspects in aspList. This function is static since it does not test if the next application will be indeed perfected. It considers only a snapshot of the chart and not its astronomical movement.
def _create_content_element(self, content, data_property_value):
    """Create a span element that shows ``content``.

    :param content: text content of the element.
    :param data_property_value: value of the custom attribute used to
        identify the fix.
    :return: the new element.
    """
    span = self.html_parser.create_element('span')
    # Mark the element as an isolator and record the speak-as value.
    attributes = {
        AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT: 'true',
        AccessibleCSSImplementation.DATA_SPEAK_AS: data_property_value,
    }
    for attr_name, attr_value in attributes.items():
        span.set_attribute(attr_name, attr_value)
    span.append_text(content)
    return span
Create a element to show the content. :param content: The text content of element. :type content: str :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str :return: The element to show the content. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
def user(self, username=None):
    """Return a User resource representing a registered portal user.

    Defaults to the currently signed-in user when no username is given.
    """
    if username is None:
        username = self.__getUsername()
    quoted = urlparse.quote(username)
    return User(url=self.root + "/%s" % quoted,
                securityHandler=self._securityHandler,
                proxy_url=self._proxy_url,
                proxy_port=self._proxy_port,
                initialize=False)
A user resource that represents a registered user in the portal.
def validate(self, raw_data, **kwargs):
    """Validate a datetime value; ``raw_data`` is returned unchanged.

    The parsed value is stored on ``self.converted``.  Raises
    ValidationException when parsing fails.
    """
    super(DateTimeField, self).validate(raw_data, **kwargs)
    try:
        if isinstance(raw_data, datetime.datetime):
            converted = raw_data
        elif self.serial_format is None:
            # Free-form parsing when no explicit format is configured.
            converted = parse(raw_data)
        else:
            converted = datetime.datetime.strptime(raw_data, self.serial_format)
    except (ParseError, ValueError):
        msg = self.messages['parse'] % dict(cls=self.__class__.__name__,
                                            data=raw_data,
                                            format=self.serial_format)
        raise ValidationException(msg, raw_data)
    self.converted = converted
    return raw_data
The raw_data is returned unchanged.
def new_event(self, event_data: str) -> None:
    """Parse an incoming event and dispatch it when it has an operation."""
    event = self.parse_event_xml(event_data)
    if EVENT_OPERATION not in event:
        return
    self.manage_event(event)
New event to process.
def autoencoder_ordered_text_small():
    """Ordered discrete autoencoder model for text, small version."""
    hparams = autoencoder_ordered_text()
    overrides = dict(
        bottleneck_bits=32,
        num_hidden_layers=3,
        hidden_size=64,
        max_hidden_size=512,
        bottleneck_noise=0.0,
        autoregressive_mode="conv5",
        sample_height=4,
    )
    for field, value in overrides.items():
        setattr(hparams, field, value)
    return hparams
Ordered discrete autoencoder model for text, small version.
def _d2f(self, x):
    """Evaluate the cost Hessian.

    Builds the second derivative of cost w.r.t. Pg for generators with
    polynomial costs and assembles a diagonal sparse Hessian over the
    full optimisation vector.

    NOTE(review): d2f_dQg2 is allocated but never filled, so reactive
    power cost curvature is implicitly zero -- confirm that is intended.
    """
    # Per-generator second derivatives w.r.t. active / reactive output.
    d2f_dPg2 = lil_matrix((self._ng, 1))
    d2f_dQg2 = lil_matrix((self._ng, 1))
    # Only generators with polynomial costs contribute curvature.
    for i in self._ipol:
        p_cost = list(self._gn[i].p_cost)
        # Evaluate the 2nd derivative of the cost polynomial at the
        # current operating point; base-MVA scaling appears squared for a
        # second derivative.
        d2f_dPg2[i, 0] = polyval(polyder(p_cost, 2), self._Pg.v0[i] * self._base_mva) * self._base_mva**2
    # Indices of the Pg and Qg variables in the full optimisation vector.
    i = r_[range(self._Pg.i1, self._Pg.iN + 1), range(self._Qg.i1, self._Qg.iN + 1)]
    # Scatter the stacked diagonal entries into an nxyz-by-nxyz matrix.
    d2f = csr_matrix((vstack([d2f_dPg2, d2f_dQg2]).toarray().flatten(), (i, i)), shape=(self._nxyz, self._nxyz))
    return d2f
Evaluates the cost Hessian.
def os_release(package, base='essex', reset_cache=False):
    """Return the OpenStack release codename from a cached global.

    With ``reset_cache`` the cached value is discarded and re-determined.
    Falls back to ``base`` when the codename cannot be derived from an
    installed package or the installation source.
    """
    global _os_rel
    if reset_cache:
        reset_os_release()
    if not _os_rel:
        _os_rel = (get_os_codename_package(package, fatal=False) or
                   get_os_codename_install_source(config('openstack-origin')) or
                   base)
    return _os_rel
Returns OpenStack release codename from a cached global. If reset_cache then unset the cached os_release version and return the freshly determined version. If the codename can not be determined from either an installed package or the installation source, the earliest release supported by the charm should be returned.
async def _trim_old_connections(
        new_name: str, con_type: CONNECTION_TYPES) -> Tuple[bool, str]:
    """Delete all connections of ``con_type`` except ``new_name``.

    :return: (overall_ok, ';'-joined messages from failed removals).
    """
    existing = await connections(for_type=con_type)
    stale = [c['name'] for c in existing if c['name'] != new_name]
    overall_ok = True
    failure_messages = []
    for conn_name in stale:
        removed_ok, message = await remove(name=conn_name)
        overall_ok = overall_ok and removed_ok
        if removed_ok:
            log.debug("Removed old wifi connection {}".format(conn_name))
        else:
            log.warning("Could not remove wifi connection {}: {}"
                        .format(conn_name, message))
            failure_messages.append(message)
    return overall_ok, ';'.join(failure_messages)
Delete all connections of con_type but the one specified.
def _stream_output(process): exit_code = None while exit_code is None: stdout = process.stdout.readline().decode("utf-8") sys.stdout.write(stdout) exit_code = process.poll() if exit_code != 0: raise RuntimeError("Process exited with code: %s" % exit_code) return exit_code
Stream the output of a process to stdout This function takes an existing process that will be polled for output. Only stdout will be polled and sent to sys.stdout. Args: process(subprocess.Popen): a process that has been started with stdout=PIPE and stderr=STDOUT Returns (int): process exit code
def process_temperature_sensors(helper, session):
    """Process the temperature sensors.

    Walks the probe name/state/reading OIDs, updates the plugin status
    for each probe, and adds a Celsius metric where a reading exists.
    """
    names = helper.walk_snmp_values(
        session, helper,
        DEVICE_TEMPERATURE_OIDS['oid_temperature_probe_location'],
        "temperature sensors")
    states = helper.walk_snmp_values(
        session, helper,
        DEVICE_TEMPERATURE_OIDS['oid_temperature_probe_status'],
        "temperature sensors")
    readings = helper.walk_snmp_values(
        session, helper,
        DEVICE_TEMPERATURE_OIDS['oid_temperature_probe_reading'],
        "temperature sensors")

    for i, state in enumerate(states):
        helper.update_status(
            helper, probe_check(names[i], state, "Temperature sensor"))
        # Some probes report no reading; guard against a shorter list.
        if i < len(readings):
            # Readings are in tenths of a degree Celsius.
            helper.add_metric(label=names[i] + " -Celsius-",
                              value=float(readings[i]) / 10)
process the temperature sensors
def pop(self):
    """Pop the top frame from the stack and return the new stack.

    The call stacks are immutable lists: popping produces a copy of the
    tail, re-registers it as the state's 'callstack' plugin, and records
    the action in the state history.
    """
    if self.next is None:
        raise SimEmptyCallStackError("Cannot pop a frame from an empty call stack.")
    # Copy the tail so the returned stack is independent of this one.
    new_list = self.next.copy({})

    if self.state is not None:
        # Keep the owning state in sync with the new stack object and log
        # the pop (with the return site) in the state's history.
        self.state.register_plugin('callstack', new_list)
        self.state.history.recent_stack_actions.append(CallStackAction(
            hash(new_list), len(new_list), 'pop', ret_site_addr=self.ret_addr
        ))

    return new_list
Pop the top frame from the stack. Return the new stack.
def _set_data(self, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None):
    """Set mesh data.

    Parameters
    ----------
    coors : array
        Coordinates of mesh nodes.
    ngroups : array or None
        Node groups; zeros are used when None.
    conns : list of arrays
        Mesh element connectivities, one array per element group.
    mat_ids : list of arrays
        Material ids, one array per element group.
    descs : list of strings
        Element type for each element group.
    nodal_bcs : dict of arrays, optional
        Nodes defining regions for boundary conditions.
    """
    self.coors = nm.ascontiguousarray(coors)
    if ngroups is None:
        # Default every node to group 0.
        self.ngroups = nm.zeros((self.coors.shape[0],), dtype=nm.int32)
    else:
        self.ngroups = nm.ascontiguousarray(ngroups)
    self.conns = [nm.asarray(conn, dtype=nm.int32) for conn in conns]
    self.mat_ids = [nm.asarray(mid, dtype=nm.int32) for mid in mat_ids]
    self.descs = descs
    self.nodal_bcs = get_default(nodal_bcs, {})
Set mesh data. Parameters ---------- coors : array Coordinates of mesh nodes. ngroups : array Node groups. conns : list of arrays The array of mesh elements (connectivities) for each element group. mat_ids : list of arrays The array of material ids for each element group. descs: list of strings The element type for each element group. nodal_bcs : dict of arrays, optional The nodes defining regions for boundary conditions referred to by the dict keys in problem description files.
def _make_ndarray_function(handle, name, func_name):
    """Create an NDArray function from the FunctionHandle."""
    code, doc_str = _generate_ndarray_function_code(handle, name, func_name)
    namespace = {}
    # Execute the generated source and pick the function out of the
    # resulting local namespace.
    exec(code, None, namespace)
    generated = namespace[func_name]
    generated.__name__ = func_name
    generated.__doc__ = doc_str
    generated.__module__ = 'mxnet.ndarray'
    return generated
Create a NDArray function from the FunctionHandle.
def get_classes(modName):
    """Return a list of the names of all classes defined in a module.

    :param modName: the module's key in ``sys.modules``.
    """
    module = sys.modules[modName]
    return [name for name, member in inspect.getmembers(module)
            if inspect.isclass(member)]
return a list of all classes in a module.
def project_stored_info_type_path(cls, project, stored_info_type):
    """Return a fully-qualified project_stored_info_type string."""
    template = "projects/{project}/storedInfoTypes/{stored_info_type}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        stored_info_type=stored_info_type,
    )
Return a fully-qualified project_stored_info_type string.
def PortPathMatcher(cls, port_path):
    """Return a device matcher for the given port path.

    Accepts either a pre-split list of ints or a string that is split on
    the sysfs separators.
    """
    if isinstance(port_path, str):
        port_path = [int(piece) for piece in SYSFS_PORT_SPLIT_RE.split(port_path)]

    def _matches(device):
        return device.port_path == port_path

    return _matches
Returns a device matcher for the given port path.
def one_hot(x:Collection[int], c:int):
    "One-hot encode `x` with `c` classes."
    encoded = np.zeros((c,), np.float32)
    # listify normalizes x to a list of indices before fancy indexing.
    encoded[listify(x)] = 1.
    return encoded
One-hot encode `x` with `c` classes.
def get_cookie(self, name):
    """Get a single cookie by name; return None when not found.

    :Usage:
        ::

            driver.get_cookie('my_cookie')
    """
    if not self.w3c:
        # Legacy protocol: scan the full cookie list for the first match.
        matches = [c for c in self.get_cookies() if c['name'] == name]
        return matches[0] if matches else None
    try:
        return self.execute(Command.GET_COOKIE, {'name': name})['value']
    except NoSuchCookieException:
        return None
Get a single cookie by name. Returns the cookie if found, None if not. :Usage: :: driver.get_cookie('my_cookie')
def saveToClipboard(sheet, rows, filetype=None):
    'copy rows from sheet to system clipboard'
    chosen_type = filetype or options.save_filetype
    vs = copy(sheet)
    vs.rows = rows
    status('copying rows to clipboard')
    clipboard().save(vs, chosen_type)
copy rows from sheet to system clipboard
def wait_for_service_tasks_all_changed(
        service_name, old_task_ids, task_predicate=None, timeout_sec=120):
    """Return once ALL of ``old_task_ids`` have been replaced by new tasks.

    :param service_name: the service name
    :param old_task_ids: original task ids from get_service_task_ids
    :param task_predicate: optional filter used when searching for tasks
    :param timeout_sec: how long to wait, in seconds
    :return: the duration waited in seconds
    """
    def _all_replaced():
        return tasks_all_replaced_predicate(
            service_name, old_task_ids, task_predicate)

    return time_wait(_all_replaced, timeout_seconds=timeout_sec)
Returns once ALL of old_task_ids have been replaced with new tasks :param service_name: the service name :type service_name: str :param old_task_ids: list of original task ids as returned by get_service_task_ids :type old_task_ids: [str] :param task_predicate: filter to use when searching for tasks :type task_predicate: func :param timeout_sec: duration to wait :type timeout_sec: int :return: the duration waited in seconds :rtype: int
def get_freq(self):
    """Find the frequency string describing the inferred frequency of
    self.values.

    Returns
    -------
    str or None
        None when the index is not monotonic/unique or no consistent
        frequency can be inferred.
    """
    # Frequency inference requires a monotonic index with unique values.
    if not self.is_monotonic or not self.index._is_unique:
        return None

    delta = self.deltas[0]
    # Daily-or-coarser rules are handled separately.
    if _is_multiple(delta, _ONE_DAY):
        return self._infer_daily_rule()

    # Business hour: the characteristic gap patterns (in hours).
    if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
        return 'BH'
    # Sub-daily rules need a single unique nanosecond delta.
    elif not self.is_unique_asi8:
        return None

    delta = self.deltas_asi8[0]
    # Walk down the unit ladder from hours to nanoseconds, attaching a
    # multiplier when the delta is a multiple of the unit.
    if _is_multiple(delta, _ONE_HOUR):
        return _maybe_add_count('H', delta / _ONE_HOUR)
    elif _is_multiple(delta, _ONE_MINUTE):
        return _maybe_add_count('T', delta / _ONE_MINUTE)
    elif _is_multiple(delta, _ONE_SECOND):
        return _maybe_add_count('S', delta / _ONE_SECOND)
    elif _is_multiple(delta, _ONE_MILLI):
        return _maybe_add_count('L', delta / _ONE_MILLI)
    elif _is_multiple(delta, _ONE_MICRO):
        return _maybe_add_count('U', delta / _ONE_MICRO)
    else:
        # Fallback: express the delta in nanoseconds.
        return _maybe_add_count('N', delta)
Find the appropriate frequency string to describe the inferred frequency of self.values Returns ------- str or None
def _get_segment(self, start, request_size, check_response=True):
    """Get a segment of the file from Google Storage.

    Args:
        start: start offset of the segment (inclusive); must lie within
            the file.
        request_size: number of bytes to request in a single urlfetch;
            may extend past the logical end of the file.
        check_response: True to validate the GCS response before the
            future resolves; False to defer validation to the caller.

    Yields:
        If check_response is True, the file segment
        [start, start + request_size).  Otherwise a (segment, checker)
        tuple: the caller must invoke the checker closure before
        consuming the unverified segment.

    Raises:
        ValueError: if the file has changed while reading (etag mismatch).
    """
    end = start + request_size - 1
    content_range = '%d-%d' % (start, end)
    headers = {'Range': 'bytes=' + content_range}
    # This is an ndb tasklet: yield waits on the async GCS request.
    status, resp_headers, content = yield self._api.get_object_async(
        self._path, headers=headers)
    def _checker():
        # 200 = whole object, 206 = partial content for a range request.
        errors.check_status(status, [200, 206], self._path, headers,
                            resp_headers, body=content)
        self._check_etag(resp_headers.get('etag'))
    if check_response:
        _checker()
        # ndb tasklets return values by raising ndb.Return.
        raise ndb.Return(content)
    raise ndb.Return(content, _checker)
Get a segment of the file from Google Storage. Args: start: start offset of the segment. Inclusive. Have to be within the range of the file. request_size: number of bytes to request. Have to be small enough for a single urlfetch request. May go over the logical range of the file. check_response: True to check the validity of GCS response automatically before the future returns. False otherwise. See Yields section. Yields: If check_response is True, the segment [start, start + request_size) of the file. Otherwise, a tuple. The first element is the unverified file segment. The second element is a closure that checks response. Caller should first invoke the closure before consuing the file segment. Raises: ValueError: if the file has changed while reading.
def _send_outgoing_route(self, outgoing_route):
    """Construct an Update message from ``outgoing_route`` and send it.

    Out-filters are applied first; blocked prefixes are recorded in
    Adj-RIB-out (as blocked) but not sent.  Non-withdraw, non-refresh
    routes are also remembered in the table manager.
    """
    path = outgoing_route.path
    # Apply export policy; `block` tells us whether to suppress the send.
    block, blocked_cause = self._apply_out_filter(path)
    nlri_str = outgoing_route.path.nlri.formatted_nlri_str
    sent_route = SentRoute(outgoing_route.path, self, block)
    # Record what we (attempted to) advertise and notify listeners.
    self._adj_rib_out[nlri_str] = sent_route
    self._signal_bus.adj_rib_out_changed(self, sent_route)
    if not block:
        update_msg = self._construct_update(outgoing_route)
        self._protocol.send(update_msg)
        self.state.incr(PeerCounterNames.SENT_UPDATES)
    else:
        LOG.debug('prefix : %s is not sent by filter : %s', path.nlri, blocked_cause)
    # Withdrawals and route-refresh resends are not remembered.
    if (not outgoing_route.path.is_withdraw and not outgoing_route.for_route_refresh):
        tm = self._core_service.table_manager
        tm.remember_sent_route(sent_route)
Constructs `Update` message from given `outgoing_route` and sends it to peer. Also, checks if any policies prevent sending this message. Populates Adj-RIB-out with corresponding `SentRoute`.
def as_record(self, cls, content_type, strdata):
    """Return a record parsed from its serialized string representation.

    >>> s = teststore()
    >>> s.as_record('tstoretest', 'application/json',
    ...             '{"id": "1", "name": "Toto"}')
    {u'id': u'1', u'name': u'Toto'}
    """
    self.validate_record_type(cls)
    parsed = self.deserialize(content_type, strdata)
    return self.post_process_record(cls, parsed)
Returns a record from serialized string representation. >>> s = teststore() >>> s.as_record('tstoretest', 'application/json', ... '{"id": "1", "name": "Toto"}') {u'id': u'1', u'name': u'Toto'}
def bool_from_exists_clause(session: Session, exists_clause: Exists) -> bool:
    """Convert an EXISTS clause to a boolean answer across dialects.

    SQL Server needs ``SELECT 1 WHERE EXISTS (...)``; other dialects
    (e.g. MySQL) use ``SELECT EXISTS (...)``.
    """
    if session.get_bind().dialect.name == SqlaDialectName.MSSQL:
        # SELECT 1 WHERE EXISTS (...): yields 1 or no rows.
        scalar = session.query(literal(True)).filter(exists_clause).scalar()
    else:
        # SELECT EXISTS (...): yields 1 or 0.
        scalar = session.query(exists_clause).scalar()
    return bool(scalar)
Database dialects are not consistent in how ``EXISTS`` clauses can be converted to a boolean answer. This function manages the inconsistencies. See: - https://bitbucket.org/zzzeek/sqlalchemy/issues/3212/misleading-documentation-for-queryexists - http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.exists Specifically, we want this: *SQL Server* .. code-block:: sql SELECT 1 WHERE EXISTS (SELECT 1 FROM table WHERE ...) -- ... giving 1 or None (no rows) -- ... fine for SQL Server, but invalid for MySQL (no FROM clause) *Others, including MySQL* .. code-block:: sql SELECT EXISTS (SELECT 1 FROM table WHERE ...) -- ... giving 1 or 0 -- ... fine for MySQL, but invalid syntax for SQL Server
def seek(self, offset, whence=os.SEEK_SET):
    """Set the wrapped file's current position.

    :param offset: the offset to set
    :param whence: see :meth:`file.seek()`; defaults to os.SEEK_SET.
    """
    # Pure delegation to the wrapped file object.
    self.wrapped.seek(offset, whence)
Sets the file's current position. :param offset: the offset to set :type offset: :class:`numbers.Integral` :param whence: see the docs of :meth:`file.seek()`. default is :const:`os.SEEK_SET`
def upload_napp(self, metadata, package):
    """Upload the NApp from the current directory to the napps server.

    Clears the stored token and exits the process when the server does
    not answer 201 Created.
    """
    endpoint = os.path.join(self._config.get('napps', 'api'), 'napps', '')
    metadata['token'] = self._config.get('auth', 'token')
    response = self.make_request(endpoint, json=metadata, package=package,
                                 method="POST")
    if response.status_code != 201:
        KytosConfig().clear_token()
        LOG.error("%s: %s", response.status_code, response.reason)
        sys.exit(1)
    username = metadata.get('username', metadata.get('author'))
    name = metadata.get('name')
    print("SUCCESS: NApp {}/{} uploaded.".format(username, name))
Upload the napp from the current directory to the napps server.
def add_ctx_property(self, name, fn, cached=True):
    """Install a context property.

    ``fn`` is a factory whose return value becomes available as property
    `name` on Context objects passing through this mapper; the result is
    cached unless ``cached`` is False.

    Raises InvalidArgumentError when the name is already registered.
    """
    existing_names = [entry[0] for entry in self._ctx_properties]
    if name in existing_names:
        raise InvalidArgumentError("A context property name '%s' already exists." % name)
    self._ctx_properties.append([name, (fn, cached)])
Install a context property. A context property is a factory function whos return value will be available as a property named `name` on `Context` objects passing through this mapper. The result will be cached unless `cached` is False. The factory function will be called without arguments, or with the context object if it requests an argument named 'ctx'.
def call_command(self, name, *arguments, **options):
    """Find and invoke the named Django management command.

    Excludes this command's own class from lookup and merges the
    command's default options with the given ``options``.
    """
    command, defaults = get_command_and_defaults(
        name,
        exclude_packages=self.get_exclude_packages(),
        exclude_command_class=self.__class__)
    if command is None:
        raise management.CommandError(
            "Unknown command: {name:s}".format(name=name))
    merged = {**defaults, **options}
    return command.execute(*arguments, **merged)
Finds the given Django management command and default options, excluding this command, and calls it with the given arguments and override options.
def open(cls, filename):
    """Return a handle-like object for streaming from S3.

    Falls back to an unvalidated bucket lookup when bucket-level access
    is forbidden (HTTP 403).
    """
    import boto

    file_info = cls.parse_remote(filename)
    connection = cls.connect(filename)
    try:
        bucket = connection.get_bucket(file_info.bucket)
    except boto.exception.S3ResponseError as error:
        if error.status != 403:
            raise
        # Key-level access may still work even without bucket listing.
        bucket = connection.get_bucket(file_info.bucket, validate=False)
    key = bucket.get_key(file_info.key)
    if key is None:
        raise ValueError("Did not find S3 key: %s" % filename)
    return S3Handle(key)
Return a handle like object for streaming from S3.
def calc_geo_dist_vincenty(network, node_source, node_target):
    """Return the branch length between two nodes in meters.

    Uses the Vincenty geodesic distance scaled by the configured branch
    detour factor; a zero distance is clamped to 1 m.
    """
    detour_factor = network.config['grid_connection']['branch_detour_factor']
    geodesic_m = vincenty((node_source.geom.y, node_source.geom.x),
                          (node_target.geom.y, node_target.geom.x)).m
    branch_length = detour_factor * geodesic_m
    if branch_length == 0:
        # Identical coordinates: avoid zero-length branches.
        branch_length = 1
        logger.debug('Geo distance is zero, check objects\' positions. '
                     'Distance is set to 1m')
    return branch_length
Calculates the geodesic distance between node_source and node_target incorporating the detour factor in config. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object node_source : :class:`~.grid.components.Component` Node to connect (e.g. :class:`~.grid.components.Generator`) node_target : :class:`~.grid.components.Component` Target node (e.g. :class:`~.grid.components.BranchTee`) Returns ------- :obj:`float` Distance in m
def select_workers_to_close(scheduler, n_to_close): workers = list(scheduler.workers.values()) assert n_to_close <= len(workers) key = lambda ws: ws.metrics['memory'] to_close = set(sorted(scheduler.idle, key=key)[:n_to_close]) if len(to_close) < n_to_close: rest = sorted(workers, key=key, reverse=True) while len(to_close) < n_to_close: to_close.add(rest.pop()) return [ws.address for ws in to_close]
Select n workers to close from scheduler
def _get_mean_deep_soil(self, mag, rake, rrup, is_reverse, imt):
    """Calculate and return the mean intensity for deep soil sites
    (equation from table 4).

    ``rake`` is unused here; faulting style enters via ``is_reverse``.
    """
    coeffs = self.COEFFS_SOIL_IMT_INDEPENDENT
    # Near-field saturation coefficients depend on the magnitude range.
    if mag <= self.NEAR_FIELD_SATURATION_MAG:
        c4, c5 = coeffs['c4lowmag'], coeffs['c5lowmag']
    else:
        c4, c5 = coeffs['c4himag'], coeffs['c5himag']
    C = self.COEFFS_SOIL[imt]
    if is_reverse:
        c1, c6 = coeffs['c1r'], C['c6r']
    else:
        c1, c6 = coeffs['c1ss'], C['c6ss']
    # Magnitude saturates at 8.5.
    if mag > 8.5:
        mag = 8.5
    near_field_term = c4 * numpy.exp(c5 * mag)
    return (c1 + coeffs['c2'] * mag + c6
            + C['c7'] * ((8.5 - mag) ** 2.5)
            - coeffs['c3'] * numpy.log(rrup + near_field_term))
Calculate and return the mean intensity for deep soil sites. Implements an equation from table 4.
def quality_to_bitmap(quality):
    """Return the 12-dim bitmap for a given chord quality name.

    Raises:
        InvalidChordException: if ``quality`` is not a known shorthand.
    """
    try:
        bitmap = QUALITIES[quality]
    except KeyError:
        raise InvalidChordException(
            "Unsupported chord quality shorthand: '%s' "
            "Did you mean to reduce extended chords?" % quality)
    return np.array(bitmap)
Return the bitmap for a given quality. Parameters ---------- quality : str Chord quality name. Returns ------- bitmap : np.ndarray Bitmap representation of this quality (12-dim).
def collect_lockfile_dependencies(lockfile_data):
    """Convert the lockfile mapping of name -> installed version into
    the dependencies schema."""
    return {
        name: {
            'source': 'example-package-manager',
            'installed': {'name': version},
        }
        for name, version in lockfile_data.items()
    }
Convert the lockfile format to the dependencies schema
def DEFINE(parser, name, default, help, flag_values=_flagvalues.FLAGS,
           serializer=None, module_name=None, **args):
    """Registers a generic Flag object.

    Auxiliary function: clients should use the specialized DEFINE_<type>
    functions instead.

    Args:
        parser: ArgumentParser used to parse the flag arguments.
        name: str, the flag name.
        default: the default value of the flag.
        help: str, the help message (shadows the builtin by API design).
        flag_values: FlagValues instance with which the flag is registered.
        serializer: ArgumentSerializer for the flag, or None.
        module_name: str, name of the declaring module, or None to infer.
        **args: extra keyword args forwarded to Flag.__init__.
    """
    DEFINE_flag(_flag.Flag(parser, serializer, name, default, help, **args),
                flag_values, module_name)
Registers a generic Flag object. NOTE: in the docstrings of all DEFINE* functions, "registers" is short for "creates a new flag and registers it". Auxiliary function: clients should use the specialized DEFINE_<type> function instead. Args: parser: ArgumentParser, used to parse the flag arguments. name: str, the flag name. default: The default value of the flag. help: str, the help message. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. serializer: ArgumentSerializer, the flag serializer instance. module_name: str, the name of the Python module declaring this flag. If not provided, it will be computed using the stack trace of this call. **args: dict, the extra keyword args that are passed to Flag __init__.
def _resolve_model(obj):
    """Resolve ``obj`` to a Django model class.

    ``obj`` may be a model class itself or an 'appname.ModelName' string.

    Raises:
        ImproperlyConfigured: if the string resolves to no model.
        ValueError: if ``obj`` is neither form.
    """
    if inspect.isclass(obj) and issubclass(obj, models.Model):
        return obj
    if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
        app_name, model_name = obj.split('.')
        model = apps.get_model(app_name, model_name)
        if model is None:
            msg = "Django did not return a model for {0}.{1}"
            raise ImproperlyConfigured(msg.format(app_name, model_name))
        return model
    raise ValueError("{0} is not a Django model".format(obj))
Resolve supplied `obj` to a Django model class. `obj` must be a Django model class itself, or a string representation of one. Useful in situations like GH #1225 where Django may not have resolved a string-based reference to a model in another model's foreign key definition. String representations should have the format: 'appname.ModelName'
def stop(config, container, timeout=10, *args, **kwargs):
    """Stop a running container.

    :type container: string
    :param container: The container id to stop
    :type timeout: int
    :param timeout: Wait for a timeout to let the container exit
        gracefully before killing it
    :rtype: bool
    :returns: True once the container is stopped (or was not running /
        does not exist); falls back to kill() if it will not stop.
    """
    client = _get_client(config)
    try:
        dcontainer = _get_container_infos(config, container)['Id']
        if not is_running(config, dcontainer):
            return True
        client.stop(dcontainer, timeout=timeout)
        if not is_running(config, dcontainer):
            # py2/py3-compatible print call (was a py2 print statement).
            print("Container stopped.")
            return True
        # Poll for up to ~10 s, then force-kill as a last resort.
        for _ in range(101):
            if not is_running(config, dcontainer):
                return True
            time.sleep(0.1)
        return kill(config, container)
    except Exception:
        # Best-effort semantics preserved: a missing container is not an
        # error for stop().
        utils.warning("Container not existing")
        return True
Stop a running container :type container: string :param container: The container id to stop :type timeout: int :param timeout: Wait for a timeout to let the container exit gracefully before killing it :rtype: boolean :returns: True once the container is stopped or not running; falls back to kill() if it will not stop within the timeout
def calc_ethsw_port(self, port_num, port_def):
    """Split an Ethernet-switch port definition, append the port entry
    to this node and register its link.

    :param port_num: port number
    :type port_num: str or int
    :param str port_def: space-separated port definition; 4 fields mean
        type/vlan/device/port, otherwise the 3rd field is an NIO
        # NOTE(review): format inferred from the split below -- confirm
    """
    port_def = port_def.split(' ')
    if len(port_def) == 4:
        # type, vlan, destination device, destination port
        destination = {'device': port_def[2],
                       'port': port_def[3]}
    else:
        # destination is a NIO pseudo-device
        destination = {'device': 'NIO',
                       'port': port_def[2]}
    port = {'id': self.port_id,
            'name': str(port_num),
            'port_number': int(port_num),
            'type': port_def[0],
            'vlan': int(port_def[1])}
    self.node['ports'].append(port)
    self.calc_link(self.node['id'], self.port_id, port['name'],
                   destination)
    # Port ids are allocated sequentially per node.
    self.port_id += 1
Split and create the port entry for an Ethernet Switch :param port_num: port number :type port_num: str or int :param str port_def: port definition
async def serialize(self, native=False):
    """Return a serialized form of the model, honouring per-field
    projection rules and ``@serialize``-decorated methods.

    :param native: if True, serialize to Python-native types rather
        than primitive form.
    """
    data = {}
    for field_name, field in self._fields.items():
        raw_data = self._data.get(field_name)
        if field._projection != None:
            field_data = await field.serialize(raw_data, native)
            # Falsy serialized values are dropped from the output.
            if field_data:
                data[field_name] = field_data
        elif field._projection == True:
            # NOTE(review): this branch is only reached when _projection
            # is None, so `_projection == True` can never hold unless
            # _projection overrides equality -- looks like dead code or
            # a latent bug; confirm intended projection semantics.
            data[field_name] = None
    for name, func in self._serialize_methods.items():
        data[name] = await func(self)
    return data
Returns a serialized from of the model taking into account projection rules and ``@serialize`` decorated methods. :param native: Deternines if data is serialized to Python native types or primitive form. Defaults to ``False``
def get_restart_power_failure():
    """Return True when 'restart on power failure' is enabled.

    CLI Example:

    .. code-block:: bash

        salt '*' power.get_restart_power_failure
    """
    result = salt.utils.mac_utils.execute_return_result(
        'systemsetup -getrestartpowerfailure')
    setting = salt.utils.mac_utils.parse_return(result)
    return salt.utils.mac_utils.validate_enabled(setting) == 'on'
Report whether 'restart on power failure' is on or off, if supported :return: True if "restart on power failure" is enabled, otherwise False :rtype: bool CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure
def path_to_attr(path):
    """Transform a dotted-name tuple such as ('__builtin__', 'my',
    'constant') into a nested ast.Attribute expression rooted at an
    ast.Name whose identifier is mangled.
    """
    node = ast.Name(mangle(path[0]), ast.Load(), None)
    for attr in path[1:]:
        node = ast.Attribute(node, attr, ast.Load())
    return node
Transform path to ast.Attribute. >>> import gast as ast >>> path = ('__builtin__', 'my', 'constant') >>> value = path_to_attr(path) >>> ref = ast.Attribute( ... value=ast.Attribute(value=ast.Name(id="__builtin__", ... ctx=ast.Load(), ... annotation=None), ... attr="my", ctx=ast.Load()), ... attr="constant", ctx=ast.Load()) >>> ast.dump(ref) == ast.dump(value) True
def service_status(self, short_name):
    """Return the current status of a service.

    Args:
        short_name (string): short name of the service to query.

    Returns:
        dict: seconds since the last heartbeat plus the numeric and
        string forms of the service state.

    Raises:
        ArgumentError: if the service name is unknown.
    """
    if short_name not in self.services:
        raise ArgumentError("Unknown service name", short_name=short_name)
    state = self.services[short_name]['state']
    return {
        'heartbeat_age': monotonic() - state.last_heartbeat,
        'numeric_status': state.state,
        'string_status': state.string_state,
    }
Get the current status of a service. Returns information about the service such as the length since the last heartbeat, any status messages that have been posted about the service and whether the heartbeat should be considered out of the ordinary. Args: short_name (string): The short name of the service to query Returns: dict: A dictionary with the status of the service
def srandmember(self, key, count=None, *, encoding=_NOTSET):
    """Get one or multiple random members from a set.

    When ``count`` is None a single member is requested; otherwise the
    count is passed through to SRANDMEMBER.
    """
    args = [key]
    # Explicit conditional instead of `count is not None and
    # args.append(count)` -- the short-circuit `and` abused for its side
    # effect was equivalent but obscure.
    if count is not None:
        args.append(count)
    return self.execute(b'SRANDMEMBER', *args, encoding=encoding)
Get one or multiple random members from a set.
def assert_valid_path(self, path):
    """Ensure ``path`` is a string naming an existing file.

    @type path: str
    @param path: path to check
    """
    if not isinstance(path, str):
        raise NotFoundResourceException(
            "Resource passed to load() method must be a file path")
    if os.path.isfile(path):
        return
    raise NotFoundResourceException(
        'File "{0}" does not exist'.format(path))
Ensures that the path represents an existing file @type path: str @param path: path to check
def optimize_auto(self, max_iters=10000, verbose=True):
    """Optimize the model parameters through a pre-defined protocol:
    a short warm-up run (10% of max_iters) with the inducing inputs and
    kernels fixed, followed by a full joint run.

    :param int max_iters: the maximum number of iterations.
    :param boolean verbose: print the progress of optimization or not.
    """
    # Phase 1: fix kernel hyperparameters and inducing/latent inputs so
    # only the remaining parameters move during the warm-up.
    self.Z.fix(warning=False)
    self.kern.fix(warning=False)
    self.kern_row.fix(warning=False)
    self.Zr.fix(warning=False)
    self.Xr.fix(warning=False)
    self.optimize(max_iters=int(0.1 * max_iters), messages=verbose)
    # Phase 2: release everything and optimize jointly.
    self.unfix()
    self.optimize(max_iters=max_iters, messages=verbose)
Optimize the model parameters through a pre-defined protocol. :param int max_iters: the maximum number of iterations. :param boolean verbose: print the progress of optimization or not.
def set_dtreat_indch(self, indch=None):
    """Store the desired channel index array.

    None selects all channels; otherwise a 1d array is required and is
    normalized via _format_ind. Marks the derived data as stale.
    """
    if indch is not None:
        arr = np.asarray(indch)
        assert arr.ndim == 1
        indch = _format_ind(arr, n=self._ddataRef['nch'])
    self._dtreat['indch'] = indch
    self._ddata['uptodate'] = False
Store the desired index array for the channels If None => all channels Must be a 1d array
def discard_incoming_messages(self):
    """Discard all incoming messages for the duration of the context
    manager.

    Messages already queued are dropped immediately; the previous value
    of the discard flag is restored on exit, so usages can nest.
    """
    self.inbox.clear()
    previous = self._discard_incoming_messages
    self._discard_incoming_messages = True
    try:
        yield
    finally:
        # Restore (rather than reset) so nested managers compose.
        self._discard_incoming_messages = previous
Discard all incoming messages for the time of the context manager.
def choice_explanation(value: str, choices: Iterable[Tuple[str, str]]) -> str:
    """Return the label paired with ``value`` in a Django choices
    tuple-list, or '' when no choice matches."""
    return next((label for key, label in choices if key == value), '')
Returns the explanation associated with a Django choice tuple-list.
def to_decimal(number, points=None):
    """Coerce a numeric value through Decimal to float, optionally
    rounding to ``points`` decimal places. Non-numeric input is
    returned unchanged.
    """
    if not is_number(number):
        return number
    value = float(decimal.Decimal(number * 1.))
    return round(value, points) if is_number(points) else value
Convert a numeric value to a float (via Decimal), optionally rounded to the given number of decimal places; non-numeric input is returned unchanged
def find_all(self, locator):
    """Find wrapper: find all elements matching ``locator`` within this
    element.

    @type locator: webdriverwrapper.support.locator.Locator
    @param locator: locator used in search
    @rtype: list
    @return: a list of WebElementWrappers
    """
    return self.driver_wrapper.find(locator, True, self.element)
Find wrapper, finds all elements @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used in search @rtype: list @return: A list of WebElementWrappers
def new_encoded_stream(args, stream):
    """Return a stream writer: the stream itself, or an ASCII-safe
    wrapper when --ascii-print was requested."""
    if not args.ascii_print:
        return stream
    return wpull.util.ASCIIStreamWriter(stream)
Return a stream writer.
def count_dataset(train=False, dev=False, test=False, train_rows=10000,
                  dev_rows=1000, test_rows=1000, seq_max_length=10):
    """Load the Count dataset: rows of space-separated random digits
    paired with the sequence length (as a string label).

    Rows for every split are always generated -- keeping the random
    stream identical regardless of which splits are requested -- but
    only the requested splits are returned. Returns a single Dataset
    when exactly one split is requested, otherwise a tuple of Datasets.
    """
    splits = []
    for requested, n_rows in [(train, train_rows), (dev, dev_rows),
                              (test, test_rows)]:
        rows = []
        for _ in range(n_rows):
            length = random.randint(1, seq_max_length)
            digits = [str(random.randint(0, 9)) for _ in range(length)]
            rows.append({'numbers': ' '.join(digits),
                         'count': str(length)})
        if requested:
            splits.append(Dataset(rows))
    if len(splits) == 1:
        return splits[0]
    return tuple(splits)
Load the Count dataset. The Count dataset is a simple task of counting the number of integers in a sequence. This dataset is useful for testing implementations of sequence to label models. Args: train (bool, optional): If to load the training split of the dataset. dev (bool, optional): If to load the development split of the dataset. test (bool, optional): If to load the test split of the dataset. train_rows (int, optional): Number of training rows to generate. dev_rows (int, optional): Number of development rows to generate. test_rows (int, optional): Number of test rows to generate. seq_max_length (int, optional): Maximum sequence length. Returns: :class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`: Returns between one and all dataset splits (train, dev and test) depending on if their respective boolean argument is ``True``. Example: >>> import random >>> random.seed(321) >>> >>> from torchnlp.datasets import count_dataset >>> train = count_dataset(train=True) >>> train[0:2] [{'numbers': '6 2 5 8 7', 'count': '5'}, {'numbers': '3 9 7 6 6 7', 'count': '6'}]
def has_metric_plateaued(steps, values, num_steps=100, delta=0.1,
                         decrease=True):
    """Check whether a metric has plateaued, i.e. has not moved by at
    least ``delta`` in the expected direction over the last
    ``num_steps`` global steps.

    Args:
        steps: list of global steps for values.
        values: list of metric values.
        num_steps: number of steps the metric must have been flat for.
        delta: movement threshold.
        decrease: check for a decrease (True) or an increase (False).

    Returns:
        bool: True when the metric has plateaued.
    """
    assert num_steps > 0
    if len(steps) < 2:
        return False
    old_enough = [s for s in steps if s <= (steps[-1] - num_steps)]
    if not old_enough:
        # Not enough history yet to judge a plateau.
        return False
    start_idx = len(old_enough) - 1
    baseline = values[start_idx]
    for val in values[start_idx:]:
        moved = (baseline - val) if decrease else (val - baseline)
        if moved >= delta:
            return False
    return True
Check if metric has plateaued. A metric has plateaued if the value has not increased/decreased (depending on `decrease`) by `delta` for at least `num_steps`. Args: steps: list<int> list of global steps for values. values: list<float> list of metric values. num_steps: int, number of steps the metric has to have been plateaued for. delta: float, how much the metric should have changed by over num_steps. decrease: bool, whether to check if the metric has decreased by delta or increased by delta. Returns: bool, whether the metric has plateaued.
def revoke_access(src, dst='any', port=None, proto=None):
    """Revoke access to an address or subnet by deleting the matching
    allow rule.

    :param src: address (e.g. 192.168.1.234) or subnet
        (e.g. 192.168.1.0/24).
    :param dst: destination of the connection; set when the machine has
        multiple IPs and only one should stop accepting.
    :param port: destination port
    :param proto: protocol (tcp or udp)
    """
    return modify_access(src, dst=dst, port=port, proto=proto,
                         action='delete')
Revoke access to an address or subnet :param src: address (e.g. 192.168.1.234) or subnet (e.g. 192.168.1.0/24). :param dst: destiny of the connection, if the machine has multiple IPs and connections to only one of those have to accepted this is the field has to be set. :param port: destiny port :param proto: protocol (tcp or udp)
def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
                          results_dictionary):
    """Update ``results_dictionary`` in place with a parsed scalar RHS
    (e.g. "s=5" or "arr[0]=5").

    Args:
        name: name of the variable in the assignment ("s" or "arr").
        parse_fn: function that parses the raw value string.
        var_type: type of the named variable (used in error messages).
        m_dict: dict from regex parsing; m_dict['val'] is the RHS string,
            m_dict['index'] the list index string (None/empty for plain
            scalars).
        values: full expression being parsed (used in error messages).
        results_dictionary: dict being populated for the caller.

    Raises:
        ValueError: if the name (or name[index]) was already assigned.
    """
    try:
        parsed_value = parse_fn(m_dict['val'])
    except ValueError:
        _parse_fail(name, var_type, m_dict['val'], values)
    if not m_dict['index']:
        # Plain scalar assignment: the name may only be bound once.
        if name in results_dictionary:
            _reuse_fail(name, values)
        results_dictionary[name] = parsed_value
    else:
        if name in results_dictionary:
            # Name already bound: it must already be the index->value
            # dict, otherwise it was previously assigned as a scalar.
            if not isinstance(results_dictionary.get(name), dict):
                _reuse_fail(name, values)
        else:
            results_dictionary[name] = {}
        index = int(m_dict['index'])
        # Each index may only be assigned once.
        if index in results_dictionary[name]:
            _reuse_fail('{}[{}]'.format(name, index), values)
        results_dictionary[name][index] = parsed_value
Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used.
def read_nmr_efg(self):
    """Parse the NMR electric field gradient (quadrupolar) table from
    OUTCAR and store the parsed rows on ``self`` under the "efg"
    attribute via ``read_table_pattern``.

    Each row holds Cq (MHz), eta and the nuclear quadrupole moment Q
    (mb) for one ion, in OUTCAR atom order.
    """
    # Matches the full header of the "NMR quadrupolar parameters" table.
    header_pattern = r'^\s+NMR quadrupolar parameters\s+$\n' \
                     r'^\s+Cq : quadrupolar parameter\s+Cq=e[*]Q[*]V_zz/h$\n' \
                     r'^\s+eta: asymmetry parameters\s+\(V_yy - V_xx\)/ V_zz$\n' \
                     r'^\s+Q : nuclear electric quadrupole moment in mb \(millibarn\)$\n' \
                     r'^-{50,}$\n' \
                     r'^\s+ion\s+Cq\(MHz\)\s+eta\s+Q \(mb\)\s+$\n' \
                     r'^-{50,}\s*$\n'
    # One line per ion: index, Cq, eta, quadrupole moment.
    row_pattern = r'\d+\s+(?P<cq>[-]?\d+\.\d+)\s+(?P<eta>[-]?\d+\.\d+)\s+' \
                  r'(?P<nuclear_quadrupole_moment>[-]?\d+\.\d+)'
    footer_pattern = r'-{50,}\s*$'
    self.read_table_pattern(header_pattern, row_pattern, footer_pattern,
                            postprocess=float, last_one_only=True,
                            attribute_name="efg")
Parse the NMR Electric Field Gradient interpretted values. Returns: Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR. Each dict key/value pair corresponds to a component of the tensors.
def _get_struct_shapewithstyle(self, shape_number):
    """Read a SHAPEWITHSTYLE record from the source stream.

    :param shape_number: shape version; controls how the fill/line
        style arrays and shape records are parsed.
    """
    obj = _make_object("ShapeWithStyle")
    obj.FillStyles = self._get_struct_fillstylearray(shape_number)
    obj.LineStyles = self._get_struct_linestylearray(shape_number)
    bc = BitConsumer(self._src)
    # Bit widths that size the style indices in the shape records below.
    obj.NumFillBits = n_fill_bits = bc.u_get(4)
    obj.NumlineBits = n_line_bits = bc.u_get(4)
    obj.ShapeRecords = self._get_shaperecords(
        n_fill_bits, n_line_bits, shape_number)
    return obj
Get the values for the SHAPEWITHSTYLE record.
def _split(string, splitters): part = '' for character in string: if character in splitters: yield part part = '' else: part += character yield part
Splits a string into parts at multiple characters
def populateFromRow(self, quantificationSetRecord):
    """Populate this RnaQuantificationSet's instance variables from the
    given DB row, open its sqlite backend and load its RNA quants.
    """
    self._dbFilePath = quantificationSetRecord.dataurl
    self.setAttributesJson(quantificationSetRecord.attributes)
    self._db = SqliteRnaBackend(self._dbFilePath)
    self.addRnaQuants()
Populates the instance variables of this RnaQuantificationSet from the specified DB row.
def set_home(self, new_home):
    """Set the user's home and invalidate the position cache.

    :param new_home: a Position, a location tuple, or anything accepted
        as an ``antenna`` argument by Position.
    """
    # isinstance instead of `type(x) is T`: same behavior for exact
    # types, and subclasses of Position/tuple are now handled sensibly.
    if isinstance(new_home, Position):
        self.home = new_home
    elif isinstance(new_home, tuple):
        self.home = Position(location=new_home)
    else:
        self.home = Position(antenna=new_home)
    self.reset_cache()
Sets the user's home. The argument can be a Position object or a tuple containing location data.
def tokenize_paragraphs(self):
    """Apply paragraph tokenization to this Text instance, creating the
    ``paragraphs`` layer of {'start', 'end'} spans. Returns self for
    chaining.
    """
    spans = self.__paragraph_tokenizer.span_tokenize(self.text)
    self[PARAGRAPHS] = [{'start': start, 'end': end}
                        for start, end in spans]
    return self
Apply paragraph tokenization to this Text instance. Creates ``paragraphs`` layer.
def is_build_needed(self, data_sink, data_src):
    """Return True when ``data_src`` needs to be rebuilt, given that a
    rebuild of ``data_sink`` has been requested: either the source was
    never built, or it was built before the sink.
    """
    src_task = self._gettask(data_src)
    if src_task.last_build_time == 0:
        return True
    return (src_task.last_build_time
            < self._gettask(data_sink).last_build_time)
returns true if data_src needs to be rebuilt, given that data_sink has had a rebuild requested.
def make_invalid_op(name):
    """Return a binary method that always raises a TypeError.

    Parameters
    ----------
    name : str
        Name given to the generated method (and used in the message).

    Returns
    -------
    invalid_op : function
    """
    def invalid_op(self, other=None):
        message = ("cannot perform {name} with this index type: "
                   "{typ}")
        raise TypeError(message.format(name=name,
                                       typ=type(self).__name__))
    invalid_op.__name__ = name
    return invalid_op
Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function
def play_NoteContainer(self, notecontainer):
    """Convert a mingus.containers.NoteContainer to the equivalent MIDI
    events and add it to the track_data.

    The first note keeps the pending delta time; the remaining notes
    are emitted with a delta of 0 so they sound simultaneously.
    Note.channel and Note.velocity can be set as well.
    """
    # Plain loops instead of list comprehensions used only for their
    # side effects (the built lists were discarded).
    if len(notecontainer) <= 1:
        for note in notecontainer:
            self.play_Note(note)
    else:
        self.play_Note(notecontainer[0])
        self.set_deltatime(0)
        for note in notecontainer[1:]:
            self.play_Note(note)
Convert a mingus.containers.NoteContainer to the equivalent MIDI events and add it to the track_data. Note.channel and Note.velocity can be set as well.
def get_siblings_score(self, top_node):
    """Return the base sibling score for ``top_node``.

    Long articles can have many paragraphs, so rather than scoring
    against the total text we normalize: the base becomes the average
    stopword count across the boilerplate-free <p> children, falling
    back to 100000 when no paragraph qualifies.
    """
    base = 100000
    paragraphs_number = 0
    paragraphs_score = 0
    nodes_to_check = self.parser.getElementsByTag(top_node, tag='p')
    for node in nodes_to_check:
        text_node = self.parser.getText(node)
        word_stats = self.stopwords_class(
            language=self.get_language()).get_stopword_count(text_node)
        high_link_density = self.is_highlink_density(node)
        # Only paragraphs with real prose (>2 stopwords) and low link
        # density count toward the average.
        if word_stats.get_stopword_count() > 2 and not high_link_density:
            paragraphs_number += 1
            paragraphs_score += word_stats.get_stopword_count()
    if paragraphs_number > 0:
        # NOTE(review): integer division under Python 2 truncates --
        # presumably acceptable for a score; confirm.
        base = paragraphs_score / paragraphs_number
    return base
We could have long articles with many paragraphs, so scoring against the total text score of those paragraphs would be unfair. Instead, we normalize the score using the average score of the paragraphs within the top node. For example, if the total score of 10 paragraphs was 1000 and each paragraph averaged 100, then 100 should be our base.