def stop(context):
    config = context.obj["config"]
    pidfile = select(config, "application.pidfile", DEFAULT_PIDFILE)
    daemon_stop(pidfile)
Stop application server.
def post(self, request, key):
    email = request.POST.get('email')
    user_id = request.POST.get('user')
    if not email:
        return http.HttpResponseBadRequest()
    try:
        EmailAddressValidation.objects.create(address=email,
                                              user_id=user_id)
    except IntegrityError:
        return http.HttpResponse(status=409)
    return http.HttpResponse(status=201)
Create a new email address that will wait for validation
def keypair_field_data(request, include_empty_option=False):
    keypair_list = []
    try:
        keypairs = api.nova.keypair_list(request)
        keypair_list = [(kp.name, kp.name) for kp in keypairs]
    except Exception:
        exceptions.handle(request, _('Unable to retrieve key pairs.'))

    if not keypair_list:
        if include_empty_option:
            return [("", _("No key pairs available")), ]
        return []
    if include_empty_option:
        return [("", _("Select a key pair")), ] + keypair_list
    return keypair_list
Returns a list of tuples of all keypairs.

Generates a list of keypairs available to the user (request) and returns a list of (id, name) tuples.

:param request: django http request object
:param include_empty_option: flag to include an empty tuple at the front of the list
:return: list of (id, name) tuples
def _merge_bee(self, bee):
    random_dimension = randint(0, len(self._value_ranges) - 1)
    second_bee = randint(0, self._num_employers - 1)
    while (bee.id == self._employers[second_bee].id):
        second_bee = randint(0, self._num_employers - 1)
    new_bee = deepcopy(bee)
    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
        new_bee.values[random_dimension],
        self._employers[second_bee].values[random_dimension],
        self._value_ranges[random_dimension]
    )
    fitness_score = new_bee.get_score(self._fitness_fxn(
        new_bee.values, **self._args
    ))
    return (fitness_score, new_bee.values, new_bee.error)
Shifts a random value for a supplied bee in accordance with another random bee's value

Args:
    bee (EmployerBee): supplied bee to merge

Returns:
    tuple: (score of new position, values of new position, fitness function return value of new position)
def breakpoints_by_caller(bed_files):
    merged = concat(bed_files)
    if not merged:
        return []
    grouped_start = merged.groupby(
        g=[1, 2, 2], c=4, o=["distinct"]).filter(
            lambda r: r.end > r.start).saveas()
    grouped_end = merged.groupby(
        g=[1, 3, 3], c=4, o=["distinct"]).filter(
            lambda r: r.end > r.start).saveas()
    together = concat([grouped_start, grouped_end])
    if together:
        final = together.expand(c=4)
        final = final.sort()
        return final
given a list of BED files of the form
chrom start end caller
return a BedTool of breakpoints as each line with the fourth column
the caller with evidence for the breakpoint

chr1 1 10 caller1 -> chr1 1 1 caller1
chr1 1 20 caller2    chr1 1 1 caller2
                     chr1 10 10 caller1
                     chr1 20 20 caller2
def skip():
    if not settings.platformCompatible():
        return False
    (output, error) = subprocess.Popen(
        ["osascript", "-e", SKIP], stdout=subprocess.PIPE).communicate()
Tell iTunes to skip a song
def _overlay_for_saml_metadata(self, config, co_name):
    for co in self.config[self.KEY_CO]:
        if co[self.KEY_ENCODEABLE_NAME] == co_name:
            break

    key = self.KEY_ORGANIZATION
    if key in co:
        if key not in config:
            config[key] = {}
        for org_key in self.KEY_ORGANIZATION_KEYS:
            if org_key in co[key]:
                config[key][org_key] = co[key][org_key]

    key = self.KEY_CONTACT_PERSON
    if key in co:
        config[key] = co[key]

    return config
Overlay configuration details like organization and contact person from the front end configuration onto the IdP configuration to support SAML metadata generation.

:type config: satosa.satosa_config.SATOSAConfig
:type co_name: str
:rtype: satosa.satosa_config.SATOSAConfig

:param config: satosa proxy config
:param co_name: CO name
:return: config with updated details for SAML metadata
def _approximate_unkown_bond_lengths(self):
    dataset = self.lengths[BOND_SINGLE]
    for n1 in periodic.iter_numbers():
        for n2 in periodic.iter_numbers():
            if n1 <= n2:
                pair = frozenset([n1, n2])
                atom1 = periodic[n1]
                atom2 = periodic[n2]
                if (pair not in dataset) and \
                   (atom1.covalent_radius is not None) and \
                   (atom2.covalent_radius is not None):
                    dataset[pair] = (atom1.covalent_radius + atom2.covalent_radius)
Completes the bond length database with approximations based on covalent radii
def getRoutes(self):
    routes = []
    try:
        out = subprocess.Popen([routeCmd, "-n"],
                               stdout=subprocess.PIPE).communicate()[0]
    except:
        raise Exception('Execution of command %s failed.' % routeCmd)
    lines = out.splitlines()
    if len(lines) > 1:
        headers = [col.lower() for col in lines[1].split()]
        for line in lines[2:]:
            routes.append(dict(zip(headers, line.split())))
    return routes
Get routing table.

@return: List of routes.
def iso8601interval(value, argument='argument'):
    try:
        start, end = _parse_interval(value)
        if end is None:
            end = _expand_datetime(start, value)
        start, end = _normalize_interval(start, end, value)
    except ValueError:
        raise ValueError(
            "Invalid {arg}: {value}. {arg} must be a valid ISO8601 "
            "date/time interval.".format(arg=argument, value=value),
        )
    return start, end
Parses ISO 8601-formatted datetime intervals into tuples of datetimes.

Accepts both a single date(time) or a full interval using either start/end or start/duration notation, with the following behavior:

- Intervals are defined as inclusive start, exclusive end
- Single datetimes are translated into the interval spanning the largest resolution not specified in the input value, up to the day.
- The smallest accepted resolution is 1 second.
- All timezones are accepted as values; returned datetimes are localized to UTC. Naive inputs and date inputs are assumed UTC.

Examples::

    "2013-01-01" -> datetime(2013, 1, 1), datetime(2013, 1, 2)
    "2013-01-01T12" -> datetime(2013, 1, 1, 12), datetime(2013, 1, 1, 13)
    "2013-01-01/2013-02-28" -> datetime(2013, 1, 1), datetime(2013, 2, 28)
    "2013-01-01/P3D" -> datetime(2013, 1, 1), datetime(2013, 1, 4)
    "2013-01-01T12:00/PT30M" -> datetime(2013, 1, 1, 12), datetime(2013, 1, 1, 12, 30)
    "2013-01-01T06:00/2013-01-01T12:00" -> datetime(2013, 1, 1, 6), datetime(2013, 1, 1, 12)

:param str value: The ISO8601 date time as a string
:return: Two UTC datetimes, the start and the end of the specified interval
:rtype: A tuple (datetime, datetime)
:raises: ValueError, if the interval is invalid.
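A minimal usage sketch; the commented result values follow the docstring's UTC-localization rule and assume the function is importable as shown:

start, end = iso8601interval("2013-01-01/P3D")
# start -> 2013-01-01 00:00:00+00:00, end -> 2013-01-04 00:00:00+00:00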
def transform_pip_requirement_set(self, requirement_set):
    filtered_requirements = []
    for requirement in requirement_set.requirements.values():
        if requirement.satisfied_by:
            continue
        if requirement.constraint:
            continue
        filtered_requirements.append(requirement)
        self.reported_requirements.append(requirement)
    return sorted([Requirement(self.config, r) for r in filtered_requirements],
                  key=lambda r: r.name.lower())
Transform pip's requirement set into one that `pip-accel` can work with.

:param requirement_set: The :class:`pip.req.RequirementSet` object reported by pip.
:returns: A list of :class:`pip_accel.req.Requirement` objects.

This function converts the :class:`pip.req.RequirementSet` object reported by pip into a list of :class:`pip_accel.req.Requirement` objects.
def clear_instance_cache(cls, func):
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        if not args:
            raise ValueError('`self` is not available.')
        else:
            the_self = args[0]
            cls.clear_self_cache(the_self)
        return func(*args, **kwargs)
    return func_wrapper
clear the instance cache

Decorate a method of a class, the first parameter is supposed to be `self`. It clears all items cached by the `instance_cache` decorator.

:param func: function to decorate
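A sketch of the intended pairing, assuming a hypothetical `Cache` class that exposes both this decorator and the companion `instance_cache` decorator mentioned above:

class Api(object):
    @Cache.instance_cache          # hypothetical companion decorator: caches per instance
    def list_volumes(self):
        return self._expensive_query()

    @Cache.clear_instance_cache    # calling refresh() drops this instance's cached items
    def refresh(self):
        pass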
def _HostPrefix(client_id):
    if not client_id:
        return ""

    hostname = None
    if data_store.RelationalDBEnabled():
        client_snapshot = data_store.REL_DB.ReadClientSnapshot(client_id)
        if client_snapshot:
            hostname = client_snapshot.knowledge_base.fqdn
    else:
        client_fd = aff4.FACTORY.Open(client_id, mode="rw")
        hostname = client_fd.Get(client_fd.Schema.FQDN) or ""

    if hostname:
        return "%s: " % hostname
    else:
        return ""
Build a host prefix for a notification message based on a client id.
def _get_appoptics(options):
    conn = appoptics_metrics.connect(
        options.get('api_token'),
        sanitizer=appoptics_metrics.sanitize_metric_name,
        hostname=options.get('api_url'))
    log.info("Connected to appoptics.")
    return conn
Return an appoptics connection object.
def get_output_fields(self):
    emit_fields = list(i.lower() for i in re.sub(
        r"[^_A-Z]+", ' ', self.format_item(None)).split())
    result = []
    for name in emit_fields[:]:
        if name not in engine.FieldDefinition.FIELDS:
            self.LOG.warn("Omitted unknown name '%s' from statistics"
                          " and output format sorting" % name)
        else:
            result.append(name)
    return result
Get field names from output template.
def _check_algorithm_values(item):
    problems = []
    for k, v in item.get("algorithm", {}).items():
        if v is True and k not in ALG_ALLOW_BOOLEANS:
            problems.append("%s set as true" % k)
        elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE):
            problems.append("%s set as false" % k)
    if len(problems) > 0:
        raise ValueError("Incorrect settings in 'algorithm' section for %s:\n%s"
                         "\nSee configuration documentation for supported options:\n%s\n"
                         % (item["description"], "\n".join(problems), ALG_DOC_URL))
Check for misplaced inputs in the algorithms.

- Identify incorrect boolean values where a choice is required.
def set(self, hue):
    x = hue / 360. * self.winfo_width()
    self.coords('cursor', x, 0, x, self.winfo_height())
    self._variable.set(hue)
Set cursor position on the color corresponding to the hue value.
def read_time(self, content):
    if get_class_name(content) in self.content_type_supported:
        # Exit early if a read time was already computed for this content.
        if hasattr(content, 'readtime'):
            return None

        default_lang_conf = self.lang_settings['default']
        lang_conf = self.lang_settings.get(content.lang, default_lang_conf)
        avg_reading_wpm = lang_conf['wpm']

        num_words = len(content._content.split())
        minutes = num_words // avg_reading_wpm
        seconds = int((num_words / avg_reading_wpm * 60) - (minutes * 60))

        minutes_str = self.pluralize(
            minutes,
            lang_conf['min_singular'],
            lang_conf['min_plural']
        )
        seconds_str = self.pluralize(
            seconds,
            lang_conf['sec_singular'],
            lang_conf['sec_plural']
        )

        content.readtime = minutes
        content.readtime_string = minutes_str
        content.readtime_with_seconds = (minutes, seconds,)
        content.readtime_string_with_seconds = "{}, {}".format(
            minutes_str, seconds_str)
Core function used to generate the read_time for content.

Parameters:
    :param content: Instance of pelican.content.Content

Returns:
    None
def to_unicode_string(string):
    if string is None:
        return None
    if is_unicode_string(string):
        return string
    if PY2:
        return unicode(string, encoding="utf-8")
    return string.decode(encoding="utf-8")
Return a Unicode string out of the given string.

On Python 2, it calls ``unicode`` with ``utf-8`` encoding. On Python 3, it just returns the given string.

Return ``None`` if ``string`` is ``None``.

:param str string: the string to convert to Unicode
:rtype: (Unicode) str
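A short check of the three branches on Python 3, where PY2 is False:

assert to_unicode_string(None) is None                       # None passes through
assert to_unicode_string("déjà") == "déjà"                   # already Unicode
assert to_unicode_string("déjà".encode("utf-8")) == "déjà"   # bytes are decoded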
def request_video_count(blink):
    url = "{}/api/v2/videos/count".format(blink.urls.base_url)
    return http_get(blink, url)
Request total video count.
def repackage_var(h):
    if IS_TORCH_04:
        return h.detach() if type(h) == torch.Tensor else tuple(repackage_var(v) for v in h)
    else:
        return Variable(h.data) if type(h) == Variable else tuple(repackage_var(v) for v in h)
Wraps h in new Variables, to detach them from their history.
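The usual truncated-BPTT pattern this helper supports, as a sketch (assumes PyTorch 0.4+, where the call reduces to .detach(); the loop bound is a placeholder):

import torch

hidden = torch.zeros(1, 8, 32)          # e.g. (layers, batch, hidden_size)
for _ in range(3):                      # placeholder batch loop
    hidden = repackage_var(hidden)      # cut the autograd graph between batches
    # ... run the model forward from `hidden`, backprop within this batch only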
def limit(self, limit):
    if limit is None:
        raise ValueError("Invalid value for `limit`, must not be `None`")
    if limit > 200:
        raise ValueError("Invalid value for `limit`, must be a value less than or equal to `200`")
    if limit < 1:
        raise ValueError("Invalid value for `limit`, must be a value greater than or equal to `1`")

    self._limit = limit
Sets the limit of this ListEmployeeWagesRequest.

Maximum number of Employee Wages to return per page. Can range between 1 and 200. The default is the maximum at 200.

:param limit: The limit of this ListEmployeeWagesRequest.
:type: int
def close(self, cancelled=False):
    self._on_close(cancelled)
    self._scene.remove_effect(self)
Close this temporary pop-up.

:param cancelled: Whether the pop-up was cancelled (e.g. by pressing Esc).
def push_pv(self, tokens):
    logger.debug("Pushing PV data: %s" % tokens)
    bus = self.case.buses[tokens["bus_no"] - 1]
    g = Generator(bus)
    g.p = tokens["p"]
    g.q_max = tokens["q_max"]
    g.q_min = tokens["q_min"]
    self.case.generators.append(g)
Creates a Generator object, populates it with data, finds its Bus and adds it to the case.
def save(self, file, contents, name=None, overwrite=False):
    if name is None:
        name = self.format_from_extension(op.splitext(file)[1])
    file_format = self.file_type(name)
    if file_format == 'text':
        _write_text(file, contents)
    elif file_format == 'json':
        _write_json(file, contents)
    else:
        write_function = self._formats[name].get('save', None)
        if write_function is None:
            raise IOError("The format must declare a file type or "
                          "load/save functions.")
        if op.exists(file) and not overwrite:
            print("The file already exists, please use overwrite=True.")
            return
        write_function(file, contents)
Save contents into a file. The format name can be specified explicitly or inferred from the file extension.
def re_normalize_flux(self, kwargs_ps, norm_factor):
    for i, model in enumerate(self.point_source_type_list):
        if model == 'UNLENSED':
            kwargs_ps[i]['point_amp'] *= norm_factor
        elif model in ['LENSED_POSITION', 'SOURCE_POSITION']:
            if self._fixed_magnification_list[i] is True:
                kwargs_ps[i]['source_amp'] *= norm_factor
            else:
                kwargs_ps[i]['point_amp'] *= norm_factor
    return kwargs_ps
renormalizes the point source amplitude keywords by a factor

:param kwargs_ps: list of point source keyword argument dicts
:param norm_factor: factor by which the amplitudes are rescaled
:return: updated kwargs_ps
def vecs_to_datmesh(x, y):
    x, y = meshgrid(x, y)
    out = zeros(x.shape + (2,), dtype=float)
    out[:, :, 0] = x
    out[:, :, 1] = y
    return out
Converts input arguments x and y to a 2d meshgrid, suitable for calling Means, Covariances and Realizations.
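A quick shape check with plain NumPy inputs (the function itself relies on numpy's meshgrid and zeros being in scope):

import numpy as np

grid = vecs_to_datmesh(np.linspace(0.0, 1.0, 3), np.linspace(0.0, 1.0, 2))
print(grid.shape)  # (2, 3, 2): y rows, x columns, (x, y) pairs on the last axis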
def get_success_url(self):
    slugs = '+'.join(self.metric_slugs)
    url = reverse('redis_metric_aggregate_detail', args=[slugs])
    return url.replace("%2B", "+")
Reverses the ``redis_metric_aggregate_detail`` URL using ``self.metric_slugs`` as an argument.
def _symlink_or_copy_grabix(in_file, out_file, data):
    if cwlutils.is_cwl_run(data):
        if utils.file_exists(in_file + ".gbi"):
            out_file = in_file
        else:
            utils.copy_plus(in_file, out_file)
    else:
        utils.symlink_plus(in_file, out_file)
    return out_file
We cannot symlink in CWL, but may be able to use inputs or copy
def _format_count_file(count_file, data):
    COUNT_COLUMN = 5
    out_file = os.path.splitext(count_file)[0] + ".fixed.counts"
    if file_exists(out_file):
        return out_file
    df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=1)
    df_sub = df.ix[:, COUNT_COLUMN]
    with file_transaction(data, out_file) as tx_out_file:
        df_sub.to_csv(tx_out_file, sep="\t", index_label="id", header=False)
    return out_file
this cuts the count file produced from featureCounts down to a two column file of gene ids and number of reads mapping to each gene
def filter_by_transcript_expression(
        self,
        transcript_expression_dict,
        min_expression_value=0.0):
    return self.filter_above_threshold(
        key_fn=lambda effect: effect.transcript_id,
        value_dict=transcript_expression_dict,
        threshold=min_expression_value)
Filters effects to those which have an associated transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value.

Parameters
----------
transcript_expression_dict : dict
    Dictionary mapping Ensembl transcript IDs to expression estimates
    (either FPKM or TPM)

min_expression_value : float
    Threshold above which we'll keep an effect in the result collection
def back_bfs(self, start, end=None):
    return [node for node, step in self._iterbfs(start, end, forward=False)]
Returns a list of nodes in some backward BFS order. Starting from the start node the breadth first search proceeds along incoming edges.
def drop_dims(self, drop_dims):
    if utils.is_scalar(drop_dims):
        drop_dims = [drop_dims]

    missing_dimensions = [d for d in drop_dims if d not in self.dims]
    if missing_dimensions:
        raise ValueError('Dataset does not contain the dimensions: %s'
                         % missing_dimensions)

    drop_vars = set(k for k, v in self._variables.items()
                    for d in v.dims if d in drop_dims)
    variables = OrderedDict((k, v) for k, v in self._variables.items()
                            if k not in drop_vars)
    coord_names = set(k for k in self._coord_names if k in variables)
    return self._replace_with_new_dims(variables, coord_names)
Drop dimensions and associated variables from this dataset.

Parameters
----------
drop_dims : str or list
    Dimension or dimensions to drop.

Returns
-------
obj : Dataset
    The dataset without the given dimensions (or any variables
    containing those dimensions)
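A small usage sketch against the public xarray API this method backs:

import numpy as np
import xarray as xr

ds = xr.Dataset({
    "temp": (("time", "x"), np.zeros((4, 3))),
    "mask": (("x",), np.ones(3)),
})
print(list(ds.drop_dims("time").data_vars))  # ['mask']; "temp" used "time" and is dropped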
def save_sequence_rule_enabler(self, sequence_rule_enabler_form, *args, **kwargs):
    if sequence_rule_enabler_form.is_for_update():
        return self.update_sequence_rule_enabler(sequence_rule_enabler_form, *args, **kwargs)
    else:
        return self.create_sequence_rule_enabler(sequence_rule_enabler_form, *args, **kwargs)
Pass through to provider SequenceRuleEnablerAdminSession.update_sequence_rule_enabler
def _check_periodic(periodic):
    periodic = np.array(periodic)
    if len(periodic.shape) == 2:
        assert periodic.shape[0] == periodic.shape[1], \
            'periodic should be a square matrix or a flat array'
        return np.diag(periodic)
    elif len(periodic.shape) == 1:
        return periodic
    else:
        raise ValueError("periodic argument can be either a 3x3 matrix or a shape 3 array.")
Validate periodic input
def get_ip_Minv_B(self):
    if not isinstance(self.M, utils.IdentityLinearOperator):
        if isinstance(self.Minv, utils.IdentityLinearOperator):
            raise utils.ArgumentError(
                'Minv has to be provided for the evaluation of the inner '
                'product that is implicitly defined by M.')
        if isinstance(self.ip_B, utils.LinearOperator):
            return self.Minv*self.ip_B
        else:
            return lambda x, y: self.ip_B(x, self.Minv*y)
    return self.ip_B
Returns the inner product that is implicitly used with the positive definite preconditioner ``M``.
def replace_route_table_association(association_id, route_table_id,
                                    region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        association_id = conn.replace_route_table_association_with_assoc(
            association_id, route_table_id)
        log.info('Route table %s was reassociated with association id %s',
                 route_table_id, association_id)
        return {'replaced': True, 'association_id': association_id}
    except BotoServerError as e:
        return {'replaced': False, 'error': __utils__['boto.get_error'](e)}
Replaces a route table association.

CLI Example:

.. code-block:: bash

    salt myminion boto_vpc.replace_route_table_association 'rtbassoc-d8ccddba' 'rtb-1f382e7d'
def super_terms(self):
    if self.doc and self.doc.super_terms:
        return self.doc.super_terms

    return {k.lower(): v['inheritsfrom'].lower()
            for k, v in self._declared_terms.items()
            if 'inheritsfrom' in v}
Return a dictionary mapping term names to their super terms
def delete(self, uri):
    try:
        self.connect(uri, method='DELETE')
        return True
    except urllib.error.HTTPError:
        return False
Method deletes a Fedora Object in the repository

Args:
    uri(str): URI of Fedora Object
def _do_watch_progress(filename, sock, handler):
    connection, client_address = sock.accept()
    data = b''
    try:
        while True:
            more_data = connection.recv(16)
            if not more_data:
                break
            data += more_data
            lines = data.split(b'\n')
            for line in lines[:-1]:
                line = line.decode()
                parts = line.split('=')
                key = parts[0] if len(parts) > 0 else None
                value = parts[1] if len(parts) > 1 else None
                handler(key, value)
            data = lines[-1]
    finally:
        connection.close()
Function to run in a separate gevent greenlet to read progress events from a unix-domain socket.
def _assemble_and_send_validation_request(self):
    return self.client.service.validateShipment(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        RequestedShipment=self.RequestedShipment)
Fires off the Fedex shipment validation request.

@warning: NEVER CALL THIS METHOD DIRECTLY. CALL
    send_validation_request(), WHICH RESIDES ON FedexBaseService
    AND IS INHERITED.
def query_file(self, path, fetchall=False, **params):
    if not os.path.exists(path):
        raise IOError("File '{}' not found!".format(path))
    if os.path.isdir(path):
        raise IOError("'{}' is a directory!".format(path))
    with open(path) as f:
        query = f.read()
    return self.query(query=query, fetchall=fetchall, **params)
Like Connection.query, but takes a filename to load a query from.
def _report_lint_error(error, file_path):
    line = error[1].line
    code = error[0]
    description = error[1].description
    sys.stdout.write("{0}:{1} [{2}] {3}\n".format(
        file_path, line, code, description))
Report a linter error.
def get_minimum_span(low, high, span):
    if is_number(low) and low == high:
        if isinstance(low, np.datetime64):
            span = span * np.timedelta64(1, 's')
        low, high = low - span, high + span
    return low, high
If low and high values are equal, ensures they are separated by the defined span.
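A worked check of the numeric branch:

low, high = get_minimum_span(5.0, 5.0, 0.5)
print(low, high)  # 4.5 5.5 -- equal bounds are padded out by `span` on each side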
def checkMarkovInputs(self):
    StateCount = self.MrkvArray[0].shape[0]

    assert self.Rfree.shape == (StateCount,), 'Rfree not the right shape!'

    for MrkvArray_t in self.MrkvArray:
        assert MrkvArray_t.shape == (StateCount, StateCount), 'MrkvArray not the right shape!'
    for LivPrb_t in self.LivPrb:
        assert LivPrb_t.shape == (StateCount,), 'Array in LivPrb is not the right shape!'
    for PermGroFac_t in self.PermGroFac:
        assert PermGroFac_t.shape == (StateCount,), 'Array in PermGroFac is not the right shape!'
    for IncomeDstn_t in self.IncomeDstn:
        assert len(IncomeDstn_t) == StateCount, 'List in IncomeDstn is not the right length!'
Many parameters used by MarkovConsumerType are arrays. Make sure those arrays are the right shape.

Parameters
----------
None

Returns
-------
None
def add_history(self, line):
    if not hasattr(line, "get_line_text"):
        line = lineobj.ReadLineTextBuffer(line)
    if not line.get_line_text():
        pass
    elif len(self.history) > 0 and self.history[-1].get_line_text() == line.get_line_text():
        pass
    else:
        self.history.append(line)
    self.history_cursor = len(self.history)
Append a line to the history buffer, as if it was the last line typed.
def fill_rect(self, rect):
    check_int_err(lib.SDL_RenderFillRect(self._ptr, rect._ptr))
Fill a rectangle on the current rendering target with the drawing color.

Args:
    rect (Rect): The destination rectangle, or None to fill the entire rendering target.

Raises:
    SDLError: If an error is encountered.
def insert(python_data: LdapObject, database: Optional[Database] = None) -> LdapObject:
    assert isinstance(python_data, LdapObject)

    table: LdapObjectClass = type(python_data)

    empty_data = table()
    changes = changeset(empty_data, python_data.to_dict())
    return save(changes, database)
Insert a new python_data object in the database.
def usingCurl():
    fetcher = getDefaultFetcher()
    if isinstance(fetcher, ExceptionWrappingFetcher):
        fetcher = fetcher.fetcher
    return isinstance(fetcher, CurlHTTPFetcher)
Whether the currently set HTTP fetcher is a Curl HTTP fetcher.
def command_gen(self, *names):
    if not names:
        sys.exit('Please provide generator names')
    for name in names:
        name, count = name, 0
        if ':' in name:
            name, count = name.split(':', 1)
            count = int(count)
        create = self.generators[name]
        print('Generating `{0}` count={1}'.format(name, count))
        create(self.session, count)
    self.session.commit()
Runs generator functions.

Run `docs` generator function::

    ./manage.py sqla:gen docs

Run `docs` generator function with `count=10`::

    ./manage.py sqla:gen docs:10
def add_parser(self, *args, **kwargs):
    command_name = args[0]
    new_kwargs = kwargs.copy()
    new_kwargs['configman_subparsers_option'] = self._configman_option
    new_kwargs['subparser_name'] = command_name
    subparsers = self._configman_option.foreign_data.argparse.subparsers
    a_subparser = super(ConfigmanSubParsersAction, self).add_parser(
        *args, **new_kwargs
    )
    subparsers[command_name] = DotDict({
        "args": args,
        "kwargs": new_kwargs,
        "subparser": a_subparser
    })
    return a_subparser
each time a subparser action is used to create a new parser object we must save the original args & kwargs. In a later phase of configman, we'll need to reproduce the subparsers exactly without resorting to copying. We save the args & kwargs in the 'foreign_data' section of the configman option that corresponds with the subparser action.
def add_field(self, name, label, field_type, *args, **kwargs):
    if name in self._dyn_fields:
        raise AttributeError('Field already added to the form.')
    else:
        self._dyn_fields[name] = {'label': label, 'type': field_type,
                                  'args': args, 'kwargs': kwargs}
Add the field to the internal configuration dictionary.
def start(self, on_exit_callback=None):
    for service in self.services.keys():
        self.services[service] = self.services[service]()
    self.server.start(on_exit_callback)
Start the Engel application by initializing all registered services and starting an Autobahn IOLoop.

:param on_exit_callback: Callback triggered on application exit
def revision(self, message):
    alembic.command.revision(self.alembic_config(), message=message)
Create a new revision file

:param message:
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
    with self.create_producer() as producer:
        for rec in record_id_iterator:
            producer.publish(dict(
                id=str(rec),
                op=op_type,
                index=index,
                doc_type=doc_type
            ))
Index record in Elasticsearch asynchronously.

:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``, ``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
def create_initial(self, address_values):
    with self._lock:
        for add, val in address_values:
            self._state[add] = _ContextFuture(address=add, result=val)
Create futures from inputs with the current value for that address at the start of that context.

Args:
    address_values (list of tuple): The tuple is string, bytes of the address and value.
def asdict(self):
    return {
        "methods": {m.name: m.asdict() for m in self.methods},
        "protocols": self.protocols,
        "notifications": {n.name: n.asdict() for n in self.notifications},
    }
Return dict presentation of this service. Useful for dumping the device information into JSON.
def _setup_master(self):
    self.broker = mitogen.master.Broker(install_watcher=False)
    self.router = mitogen.master.Router(
        broker=self.broker,
        max_message_size=4096 * 1048576,
    )
    self._setup_responder(self.router.responder)
    mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown)
    mitogen.core.listen(self.broker, 'exit', self.on_broker_exit)
    self.listener = mitogen.unix.Listener(
        router=self.router,
        path=self.unix_listener_path,
        backlog=C.DEFAULT_FORKS,
    )
    self._enable_router_debug()
    self._enable_stack_dumps()
Construct a Router, Broker, and mitogen.unix listener
def combine_last_two_dimensions(x):
    x_shape = common_layers.shape_list(x)
    a, b = x_shape[-2:]
    return tf.reshape(x, x_shape[:-2] + [a * b])
Reshape x so that the last two dimensions become one.

Args:
    x: a Tensor with shape [..., a, b]

Returns:
    a Tensor with shape [..., ab]
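A quick shape check (assumes TensorFlow 1.x, as in tensor2tensor, with common_layers in scope):

import tensorflow as tf

x = tf.zeros([2, 3, 4, 5])
y = combine_last_two_dimensions(x)
print(y.shape)  # (2, 3, 20)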
def _build_con_add_cmd(ssid: str,
                       security_type: SECURITY_TYPES,
                       psk: Optional[str],
                       hidden: bool,
                       eap_args: Optional[Dict[str, Any]]) -> List[str]:
    configure_cmd = ['connection', 'add',
                     'save', 'yes',
                     'autoconnect', 'yes',
                     'ifname', 'wlan0',
                     'type', 'wifi',
                     'con-name', ssid,
                     'wifi.ssid', ssid]
    if hidden:
        configure_cmd += ['wifi.hidden', 'true']
    if security_type == SECURITY_TYPES.WPA_PSK:
        configure_cmd += ['wifi-sec.key-mgmt', security_type.value]
        if psk is None:
            raise ValueError('wpa-psk security type requires psk')
        configure_cmd += ['wifi-sec.psk', psk]
    elif security_type == SECURITY_TYPES.WPA_EAP:
        if eap_args is None:
            raise ValueError('wpa-eap security type requires eap_args')
        configure_cmd += _add_eap_args(eap_args)
    elif security_type == SECURITY_TYPES.NONE:
        pass
    else:
        raise ValueError("Bad security_type {}".format(security_type))
    return configure_cmd
Build the nmcli connection add command to configure the new network. The parameters are the same as configure but without the defaults; this should be called only by configure.
def relevant_part(self, original, pos, sep=' '):
    start = original.rfind(sep, 0, pos) + 1
    end = original.find(sep, pos - 1)
    if end == -1:
        end = len(original)
    return original[start:end], start, end, pos - start
calculates the subword in a `sep`-split list of substrings of `original` that `pos` is in
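A worked example, assuming a hypothetical instance `completer` exposing the method: with the default separator, position 7 falls inside "world":

print(completer.relevant_part("hello world", 7))  # ('world', 6, 11, 1)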
def pad_position_l(self, i):
    if i >= self.n_pads_l:
        raise ModelError("pad index out-of-bounds")
    return (self.length - self.pad_length) / (self.n_pads_l - 1) * i + self.pad_length / 2
Determines the position of the ith pad in the length direction. Assumes equally spaced pads.

:param i: ith number of pad in length direction (0-indexed)
:return:
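A worked example with hypothetical dimensions length=10, pad_length=1, n_pads_l=5:

# (10 - 1) / (5 - 1) * i + 1 / 2  ->  0.5, 2.75, 5.0, 7.25, 9.5  for i = 0..4,
# so the outermost pad centres sit half a pad length from each end.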
def _handle_command(self, connection, sender, target, command, payload):
    try:
        handler = getattr(self, "cmd_{0}".format(command))
    except AttributeError:
        self.safe_send(connection, target, "Unknown command: %s", command)
    else:
        try:
            logging.info("! Handling command: %s", command)
            handler(connection, sender, target, payload)
        except Exception as ex:
            logging.exception("Error calling command handler: %s", ex)
Handles a command, if any
def lifetimes(self, dates, include_start_date, country_codes):
    if isinstance(country_codes, string_types):
        raise TypeError(
            "Got string {!r} instead of an iterable of strings in "
            "AssetFinder.lifetimes.".format(country_codes),
        )

    country_codes = frozenset(country_codes)
    lifetimes = self._asset_lifetimes.get(country_codes)
    if lifetimes is None:
        self._asset_lifetimes[country_codes] = lifetimes = (
            self._compute_asset_lifetimes(country_codes)
        )

    raw_dates = as_column(dates.asi8)
    if include_start_date:
        mask = lifetimes.start <= raw_dates
    else:
        mask = lifetimes.start < raw_dates
    mask &= (raw_dates <= lifetimes.end)

    return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
Compute a DataFrame representing asset lifetimes for the specified date range.

Parameters
----------
dates : pd.DatetimeIndex
    The dates for which to compute lifetimes.
include_start_date : bool
    Whether or not to count the asset as alive on its start_date.

    This is useful in a backtesting context where `lifetimes` is being
    used to signify "do I have data for this asset as of the morning of
    this date?" For many financial metrics, (e.g. daily close), data
    isn't available for an asset until the end of the asset's first day.
country_codes : iterable[str]
    The country codes to get lifetimes for.

Returns
-------
lifetimes : pd.DataFrame
    A frame of dtype bool with `dates` as index and an Int64Index of
    assets as columns. The value at `lifetimes.loc[date, asset]` will
    be True iff `asset` existed on `date`. If `include_start_date` is
    False, then lifetimes.loc[date, asset] will be false when
    date == asset.start_date.

See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
def add_external_reference(self, ext_ref):
    ext_refs_node = self.node.find('externalReferences')
    if ext_refs_node is None:
        ext_refs_obj = CexternalReferences()
        self.node.append(ext_refs_obj.get_node())
    else:
        ext_refs_obj = CexternalReferences(ext_refs_node)
    ext_refs_obj.add_external_reference(ext_ref)
Adds an external reference object to the markable

@type ext_ref: L{CexternalReference}
@param ext_ref: an external reference object
def _check_array(self, X):
    if isinstance(X, da.Array):
        if X.ndim == 2 and X.numblocks[1] > 1:
            logger.debug("auto-rechunking 'X'")
            if not np.isnan(X.chunks[0]).any():
                X = X.rechunk({0: "auto", 1: -1})
            else:
                X = X.rechunk({1: -1})
    return X
Validate an array for post-fit tasks.

Parameters
----------
X : Union[Array, DataFrame]

Returns
-------
same type as 'X'

Notes
-----
The following checks are applied.

- Ensure that the array is blocked only along the samples.
def new_product(self, name):
    n = self._product_cls(self, name, summary_cls=self._summary_cls)
    self.graph.add_node(n)
    self.products.append(n)
    return n
Create a new product.

Args:
    name: name of the new product.

Returns:
    A new product instance.
def delete(network):
    try:
        network.destroy()
    except libvirt.libvirtError as error:
        raise RuntimeError("Unable to destroy network: {}".format(error))
libvirt network cleanup.

@raise: libvirt.libvirtError.
def pull(path, service_names=None):
    project = __load_project(path)
    if isinstance(project, dict):
        return project
    else:
        try:
            project.pull(service_names)
        except Exception as inst:
            return __handle_except(inst)
    return __standardize_result(True,
                                'Pulling containers images via docker-compose succeeded',
                                None, None)
Pull image for containers in the docker-compose file, service_names is a python list, if omitted pull all images

path
    Path where the docker-compose file is stored on the server
service_names
    If specified will pull only the image for the specified services

CLI Example:

.. code-block:: bash

    salt myminion dockercompose.pull /path/where/docker-compose/stored
    salt myminion dockercompose.pull /path/where/docker-compose/stored '[janus]'
def load_from_file(module_path):
    from imp import load_module, PY_SOURCE

    imported = None
    if module_path:
        with open(module_path, 'r') as openfile:
            imported = load_module('mod', openfile, module_path,
                                   ('imported', 'r', PY_SOURCE))
    return imported
Load a python module from its absolute filesystem path

Borrowed from django-cms
def _get_thumbnail_url(image):
    lhs, rhs = splitext(image.url)
    lhs += THUMB_EXT
    thumb_url = f'{lhs}{rhs}'
    return thumb_url
Given a large image, return the thumbnail url
def number_of_states(dtrajs):
    nmax = 0
    for dtraj in dtrajs:
        nmax = max(nmax, np.max(dtraj))
    return nmax + 1
r""" Determine the number of states from a set of discrete trajectories Parameters ---------- dtrajs : list of int-arrays discrete trajectories
def community(self):
    return _community.Community(url=self._url + "/community",
                                securityHandler=self._securityHandler,
                                proxy_url=self._proxy_url,
                                proxy_port=self._proxy_port)
The portal community root covers user and group resources and operations.
def wrap_options(self, data, renderer_context):
    request = renderer_context.get("request", None)
    method = request and getattr(request, 'method')
    if method != 'OPTIONS':
        raise WrapperNotApplicable("Request method must be OPTIONS")

    wrapper = self.dict_class()
    wrapper["meta"] = data
    return wrapper
Wrap OPTIONS data as JSON API meta value
def TRUE(classical_reg):
    warn("`TRUE a` has been deprecated. Use `MOVE a 1` instead.")
    if isinstance(classical_reg, int):
        classical_reg = Addr(classical_reg)
    return MOVE(classical_reg, 1)
Produce a TRUE instruction.

:param classical_reg: A classical register to modify.
:return: An instruction object representing the equivalent MOVE.
def connect(self, event_handler):
    context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    if not self.options['verify']:
        context.verify_mode = ssl.CERT_NONE

    scheme = 'wss://'
    if self.options['scheme'] != 'https':
        scheme = 'ws://'
        context = None

    url = '{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'.format(
        scheme=scheme,
        url=self.options['url'],
        port=str(self.options['port']),
        basepath=self.options['basepath']
    )

    websocket = yield from websockets.connect(
        url,
        ssl=context,
    )

    yield from self._authenticate_websocket(websocket, event_handler)
    yield from self._start_loop(websocket, event_handler)
Connect to the websocket and authenticate it. When the authentication has finished, start the loop listening for messages, sending a ping to the server to keep the connection alive.

:param event_handler: Every websocket event will be passed there. Takes one argument.
:type event_handler: Function(message)
:return:
def run_job(args):
    jm = setup(args)
    job_id = int(os.environ['JOB_ID'])
    array_id = int(os.environ['SGE_TASK_ID']) if os.environ['SGE_TASK_ID'] != 'undefined' else None
    jm.run_job(job_id, array_id)
Starts the wrapper script to execute a job, interpreting the JOB_ID and SGE_TASK_ID keywords that are set by the grid or by us.
def watch_statuses(self, observer, batch_ids):
    with self._lock:
        statuses = self.get_statuses(batch_ids)
        if self._has_no_pendings(statuses):
            observer.notify_batches_finished(statuses)
        else:
            self._observers[observer] = statuses
Allows a component to register to be notified when a set of batches is no longer PENDING. Expects to be able to call the "notify_batches_finished" method on the registered component, sending the statuses of the batches.

Args:
    observer (object): Must implement "notify_batches_finished" method
    batch_ids (list of str): The ids of the batches to watch
def results_from_cli(opts, load_samples=True, **kwargs):
    # lists for files and samples from all input files
    fp_all = []
    samples_all = []

    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]

    for input_file in input_files:
        logging.info("Reading input file %s", input_file)
        fp = loadfile(input_file, "r")

        if load_samples:
            logging.info("Loading samples")
            # check if extra parameters are needed for a non-sampling parameter
            file_parameters, ts = _transforms.get_common_cbc_transforms(
                opts.parameters, fp.variable_params)
            samples = fp.samples_from_cli(opts, parameters=file_parameters,
                                          **kwargs)
            logging.info("Using {} samples".format(samples.size))
            # add parameters not included in the file
            samples = _transforms.apply_transforms(samples, ts)
        else:
            samples = None

        if len(input_files) > 1:
            fp_all.append(fp)
            samples_all.append(samples)
        else:
            fp_all = fp
            samples_all = samples

    return fp_all, opts.parameters, opts.parameters_labels, samples_all
Loads an inference result file along with any labels associated with it from the command line options.

Parameters
----------
opts : ArgumentParser options
    The options from the command line.
load_samples : bool, optional
    Load the samples from the file.
\**kwargs :
    Any other keyword arguments that are passed to read samples using samples_from_cli

Returns
-------
fp_all : (list of) BaseInferenceFile type
    The result file as an hdf file. If more than one input file, then it returns a list.
parameters : list of str
    List of the parameters to use, parsed from the parameters option.
labels : dict
    Dictionary of labels to associate with the parameters.
samples_all : (list of) FieldArray(s) or None
    If load_samples, the samples as a FieldArray; otherwise, None. If more than one input file, then it returns a list.
def set_cookie_prefix(self, cookie_prefix=None):
    if cookie_prefix is None:
        self.cookie_prefix = "%06d_" % int(random.random() * 1000000)
    else:
        self.cookie_prefix = cookie_prefix
Set a random cookie prefix unless one is specified. In order to run multiple demonstration auth services on the same server we need to have different cookie names for each auth domain. Unless cookie_prefix is set, generate a random one.
def get_linked_properties(cli_ctx, app, resource_group, read_properties=None, write_properties=None):
    roles = {
        "ReadTelemetry": "api",
        "WriteAnnotations": "annotations",
        "AuthenticateSDKControlChannel": "agentconfig"
    }

    sub_id = get_subscription_id(cli_ctx)
    tmpl = '/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/components/{}'.format(
        sub_id, resource_group, app
    )
    linked_read_properties, linked_write_properties = [], []

    if isinstance(read_properties, list):
        propLen = len(read_properties)
        linked_read_properties = ['{}/{}'.format(tmpl, roles[read_properties[i]])
                                  for i in range(propLen)]
    else:
        linked_read_properties = ['{}/{}'.format(tmpl, roles[read_properties])]
    if isinstance(write_properties, list):
        propLen = len(write_properties)
        linked_write_properties = ['{}/{}'.format(tmpl, roles[write_properties[i]])
                                   for i in range(propLen)]
    else:
        linked_write_properties = ['{}/{}'.format(tmpl, roles[write_properties])]
    return linked_read_properties, linked_write_properties
Maps user-facing role names to strings used to identify them on resources.
def new_log_filepath(self):
    lastlog_filename = os.path.join(self.dataflash_dir, 'LASTLOG.TXT')
    if os.path.exists(lastlog_filename) and os.stat(lastlog_filename).st_size != 0:
        fh = open(lastlog_filename, 'rb')
        log_cnt = int(fh.read()) + 1
        fh.close()
    else:
        log_cnt = 1

    self.lastlog_file = open(lastlog_filename, 'w+b')
    self.lastlog_file.write(log_cnt.__str__())
    self.lastlog_file.close()

    return os.path.join(self.dataflash_dir, '%u.BIN' % (log_cnt,))
returns a filepath to a log which does not currently exist and is suitable for DF logging
def get_victoria_day(self, year):
    may_24th = date(year, 5, 24)
    shift = may_24th.weekday() or 7
    victoria_day = may_24th - timedelta(days=shift)
    return (victoria_day, "Victoria Day")
Return Victoria Day for Edinburgh. Set to the Monday strictly before May 24th. It means that if May 24th is a Monday, it's shifted to the week before.
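A worked check of the shift rule with plain datetime arithmetic:

from datetime import date, timedelta

may_24th = date(2021, 5, 24)             # a Monday, so weekday() == 0
shift = may_24th.weekday() or 7          # the `or 7` keeps the shift strictly before
print(may_24th - timedelta(days=shift))  # 2021-05-17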
def OnMerge(self, event):
    with undo.group(_("Merge cells")):
        self.grid.actions.merge_selected_cells(self.grid.selection)
    self.grid.ForceRefresh()
    self.grid.update_attribute_toolbar()
Merge cells event handler
def setOverlayDualAnalogTransform(self, ulOverlay, eWhich, fRadius):
    fn = self.function_table.setOverlayDualAnalogTransform
    pvCenter = HmdVector2_t()
    result = fn(ulOverlay, eWhich, byref(pvCenter), fRadius)
    return result, pvCenter
Sets the analog input to Dual Analog coordinate scale for the specified overlay.
def asDict( self ):
    if PY_3:
        item_fn = self.items
    else:
        item_fn = self.iteritems

    def toItem(obj):
        if isinstance(obj, ParseResults):
            if obj.haskeys():
                return obj.asDict()
            else:
                return [toItem(v) for v in obj]
        else:
            return obj

    return dict((k, toItem(v)) for k, v in item_fn())
Returns the named parse results as a nested dictionary.

Example::

    integer = Word(nums)
    date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

    result = date_str.parseString('12/31/1999')
    print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})

    result_dict = result.asDict()
    print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

    # even though a ParseResults supports dict-like access, sometime you just need to have a dict
    import json
    print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
    print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
def status(self):
    try:
        pf = file(self.pidfile, 'r')
        pid = int(pf.read().strip())
        pf.close()
    except IOError:
        pid = None

    if not pid:
        return False

    try:
        return os.path.exists("/proc/{0}".format(pid))
    except OSError:
        return False
Check if the daemon is currently running. Requires procfs, so it will only work on POSIX-compliant OSes.
def run_bafRegress(filenames, out_prefix, extract_filename, freq_filename,
                   options):
    # The command to run
    command = [
        "bafRegress.py",
        "estimate",
        "--freqfile", freq_filename,
        "--freqcol", "2,5",
        "--extract", extract_filename,
        "--colsample", options.colsample,
        "--colmarker", options.colmarker,
        "--colbaf", options.colbaf,
        "--colab1", options.colab1,
        "--colab2", options.colab2,
    ]
    command.extend(filenames)

    output = None
    try:
        output = subprocess.check_output(command,
                                         stderr=subprocess.STDOUT, shell=False)
    except subprocess.CalledProcessError as exc:
        raise ProgramError("bafRegress.py: couldn't run "
                           "bafRegress.py\n{}".format(exc.output))

    # Saving the output
    try:
        with open(out_prefix + ".bafRegress", "w") as o_file:
            o_file.write(output)
    except IOError:
        raise ProgramError("{}: cannot write file".format(
            out_prefix + ".bafRegress",
        ))
Runs the bafRegress function.

:param filenames: the set of all sample files.
:param out_prefix: the output prefix.
:param extract_filename: the name of the markers to extract.
:param freq_filename: the name of the file containing the frequency.
:param options: the other options.

:type filenames: set
:type out_prefix: str
:type extract_filename: str
:type freq_filename: str
:type options: argparse.Namespace
def next_frame_savp():
    hparams = sv2p_params.next_frame_sv2p()
    hparams.add_hparam("z_dim", 8)
    hparams.add_hparam("num_discriminator_filters", 32)
    hparams.add_hparam("use_vae", True)
    hparams.add_hparam("use_gan", False)
    hparams.add_hparam("use_spectral_norm", True)
    hparams.add_hparam("gan_loss", "cross_entropy")
    hparams.add_hparam("gan_loss_multiplier", 0.01)
    hparams.add_hparam("gan_vae_loss_multiplier", 0.01)
    hparams.add_hparam("gan_optimization", "joint")
    hparams.bottom = {
        "inputs": modalities.video_raw_bottom,
        "targets": modalities.video_raw_targets_bottom,
    }
    hparams.loss = {
        "targets": modalities.video_l1_raw_loss,
    }
    hparams.top = {
        "targets": modalities.video_raw_top,
    }
    hparams.latent_loss_multiplier_schedule = "linear"
    hparams.upsample_method = "bilinear_upsample_conv"
    hparams.internal_loss = False
    hparams.reward_prediction = False
    hparams.anneal_end = 100000
    hparams.num_iterations_1st_stage = 0
    hparams.num_iterations_2nd_stage = 50000
    return hparams
SAVP model hparams.
def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs):
    if isinstance(fname, xr.Dataset):
        return fname
    if not isstring(fname):
        try:  # test whether fname is indexable (i.e. a list of paths)
            fname[0]
        except TypeError:
            pass
        else:
            if store_mod is not None and store_cls is not None:
                if isstring(store_mod):
                    store_mod = repeat(store_mod)
                if isstring(store_cls):
                    store_cls = repeat(store_cls)
                fname = [_open_store(sm, sc, f)
                         for sm, sc, f in zip(store_mod, store_cls, fname)]
                kwargs['engine'] = None
                kwargs['lock'] = False
            return open_mfdataset(fname, **kwargs)
    if store_mod is not None and store_cls is not None:
        fname = _open_store(store_mod, store_cls, fname)
    return open_dataset(fname, **kwargs)
Open a dataset and return it
def _ParsePage(self, parser_mediator, file_offset, page_data):
    page_header_map = self._GetDataTypeMap('binarycookies_page_header')

    try:
        page_header = self._ReadStructureFromByteStream(
            page_data, file_offset, page_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to map page header data at offset: 0x{0:08x} with error: '
            '{1!s}').format(file_offset, exception))

    for record_offset in page_header.offsets:
        if parser_mediator.abort:
            break
        self._ParseRecord(parser_mediator, page_data, record_offset)
Parses a page.

Args:
    parser_mediator (ParserMediator): parser mediator.
    file_offset (int): offset of the data relative from the start of the file-like object.
    page_data (bytes): page data.

Raises:
    ParseError: when the page cannot be parsed.
def make_action(self, fn, schema_parser, meta):
    validate_input = validate_output = None
    if "$input" in meta:
        with MarkKey("$input"):
            validate_input = schema_parser.parse(meta["$input"])
    if "$output" in meta:
        with MarkKey("$output"):
            validate_output = schema_parser.parse(meta["$output"])

    def action(data):
        if validate_input:
            try:
                data = validate_input(data)
            except Invalid as ex:
                return abort(400, "InvalidData", str(ex))
            if isinstance(data, dict):
                rv = fn(**data)
            else:
                rv = fn(data)
        else:
            rv = fn()
        rv, status, headers = unpack(rv)
        if validate_output:
            try:
                rv = validate_output(rv)
            except Invalid as ex:
                return abort(500, "ServerError", str(ex))
        return rv, status, headers

    return action
Make resource's method an action

Validate input, output by schema in meta.
If no input schema, call fn without params.
If no output schema, will not validate return value.

Args:
    fn: resource's method
    schema_parser: for parsing schema in meta
    meta: meta data of the action
def _tag_ec2(self, conn, role):
    tags = {'Role': role}
    conn.create_tags([self.instance_id], tags)
tag the current EC2 instance with a cluster role
def set_plain_text_font(self, font, color_scheme=None):
    self.plain_text.set_font(font, color_scheme=color_scheme)
Set plain text mode font
def ngram_count(self, ngram):
    query = "SELECT count FROM _{0}_gram".format(len(ngram))
    query += self._build_where_clause(ngram)
    query += ";"

    result = self.execute_sql(query)

    return self._extract_first_integer(result)
Gets the count for a given ngram from the database.

Parameters
----------
ngram : iterable of str
    A list, set or tuple of strings.

Returns
-------
count : int
    The count of the ngram.
def detectIphoneOrIpod(self):
    return UAgentInfo.deviceIphone in self.__userAgent \
        or UAgentInfo.deviceIpod in self.__userAgent
Return detection of an iPhone or iPod Touch

Detects if the current device is an iPhone or iPod Touch.
def _encode_regex(name, value, dummy0, dummy1):
    flags = value.flags
    if flags == 0:
        return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00"
    elif flags == re.UNICODE:
        return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00"
    else:
        sflags = b""
        if flags & re.IGNORECASE:
            sflags += b"i"
        if flags & re.LOCALE:
            sflags += b"l"
        if flags & re.MULTILINE:
            sflags += b"m"
        if flags & re.DOTALL:
            sflags += b"s"
        if flags & re.UNICODE:
            sflags += b"u"
        if flags & re.VERBOSE:
            sflags += b"x"
        sflags += b"\x00"
        return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags
Encode a python regex or bson.regex.Regex.
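A worked example of the flag mapping alone (a sketch of the else branch, not the full BSON element layout):

import re

flags = re.IGNORECASE | re.MULTILINE
sflags = b""
for flag, char in [(re.IGNORECASE, b"i"), (re.LOCALE, b"l"), (re.MULTILINE, b"m"),
                   (re.DOTALL, b"s"), (re.UNICODE, b"u"), (re.VERBOSE, b"x")]:
    if flags & flag:
        sflags += char
print(sflags)  # b'im' -- appended (null-terminated) after the pattern C-string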
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
    response = None
    try:
        response = requests.get(
            urls.get_imageseries(self._giid),
            headers={
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Cookie': 'vid={}'.format(self._vid)},
            params={
                "numberOfImageSeries": int(number_of_imageseries),
                "offset": int(offset),
                "fromDate": "",
                "toDate": "",
                "onlyNotViewed": "",
                "_": self._giid})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
Get smartcam image series

Args:
    number_of_imageseries (int): number of image series to get
    offset (int): skip offset amount of image series
def json_requested():
    best = request.accept_mimetypes.best_match(
        ['application/json', 'text/html'])
    return (best == 'application/json' and
            request.accept_mimetypes[best] >
            request.accept_mimetypes['text/html'])
Check if json is the preferred output format for the request.
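A hypothetical Flask view using the helper (assumes an existing `app` and templates; a sketch, not the library's own usage):

from flask import jsonify, render_template

@app.route("/items")
def items():
    if json_requested():
        return jsonify(items=[])
    return render_template("items.html")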
def to_ubyte_array(barray):
    bs = (ctypes.c_ubyte * len(barray))()
    pack_into('%ds' % len(barray), bs, 0, barray)
    return bs
Returns a c_ubyte_array filled with the given data of a bytearray or bytes