code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def mark_deactivated(self, request, queryset):
    """Admin action: mark the selected cages as deactivated.

    Sets ``Active=False`` and ``End`` to today's date on every cage in
    *queryset*, then reports the number of rows changed to the admin user.
    """
    count = queryset.update(Active=False, End=datetime.date.today())
    noun = "1 cage was" if count == 1 else "%s cages were" % count
    self.message_user(request, "%s successfully marked as deactivated." % noun)
An admin action for marking several cages as inactive. This action sets the selected cages as Active=False and End=today. This admin action also shows as its output the number of cages deactivated.
def add(self, user, status=None, symmetrical=False):
    """Create a relationship from ``self.instance`` to *user*.

    *status* defaults to the "following" status.  When *symmetrical* is
    true, the reverse relationship is created as well and a tuple of both
    relationship objects is returned; otherwise the single relationship
    object is returned.
    """
    if not status:
        status = RelationshipStatus.objects.following()
    relationship, created = Relationship.objects.get_or_create(
        from_user=self.instance,
        to_user=user,
        status=status,
        site=Site.objects.get_current(),
    )
    if not symmetrical:
        return relationship
    # Recurse once from the other side (symmetrical=False stops recursion).
    return (relationship, user.relationships.add(self.instance, status, False))
Add a relationship from one user to another with the given status, which defaults to "following". Adding a relationship is by default asymmetrical (akin to following someone on twitter). Specify a symmetrical relationship (akin to being friends on facebook) by passing in :param:`symmetrical` = True .. note:: If :param:`symmetrical` is set, the function will return a tuple containing the two relationship objects created
def check(state_engine, nameop, block_id, checked_ops):
    """Verify a NAMESPACE_READY operation.

    Valid only when the namespace was revealed by the same sender and is
    not yet registered.  On success, copies the reveal's public key and
    address into *nameop* and returns True; otherwise returns False.
    """
    ns_id = nameop['namespace_id']
    sender = nameop['sender']

    # Guard clauses: reveal must exist, belong to sender, and not be ready.
    if not state_engine.is_namespace_revealed(ns_id):
        log.warning("Namespace '%s' is not revealed" % ns_id)
        return False

    reveal = state_engine.get_namespace_reveal(ns_id)
    if reveal['recipient'] != sender:
        log.warning("Namespace '%s' is not owned by '%s' (but by %s)" % (ns_id, sender, reveal['recipient']))
        return False

    if state_engine.is_namespace_ready(ns_id):
        log.warning("Namespace '%s' is already registered" % ns_id)
        return False

    nameop['sender_pubkey'] = reveal['sender_pubkey']
    nameop['address'] = reveal['address']
    return True
Verify the validity of a NAMESPACE_READY operation. It is only valid if it has been imported by the same sender as the corresponding NAMESPACE_REVEAL, and the namespace is still in the process of being imported.
def run(self):
    """Connect to the cast device and poll the socket until stopped."""
    try:
        self.initialize_connection()
    except ChromecastConnectionError:
        # Could not connect at all: report and bail out of the thread.
        self._report_connection_status(
            ConnectionStatus(CONNECTION_STATUS_DISCONNECTED,
                             NetworkAddress(self.host, self.port)))
        return

    self.heartbeat_controller.reset()
    self._force_recon = False
    logging.debug("Thread started...")

    # Poll until asked to stop or run_once signals termination (returns 1).
    while not self.stop.is_set() and self.run_once() != 1:
        pass
    self._cleanup()
Connect to the cast and start polling the socket.
def linkorcopy(self, src, dst):
    """Hard-link *src* to *dst* when ``self.linkfiles`` is set, else copy.

    Pre-flight checks: warn if *dst* is a directory, remove an existing
    *dst* file, and create *dst*'s parent directory when missing.
    """
    if os.path.isdir(dst):
        log.warn('linkorcopy given a directory as destination. '
                 'Use caution.')
        log.debug('src: %s dst: %s', src, dst)
    elif os.path.exists(dst):
        os.unlink(dst)
    elif not os.path.exists(os.path.dirname(dst)):
        os.makedirs(os.path.dirname(dst))

    if not self.linkfiles:
        log.debug('Copying: %s -> %s', src, dst)
        shutil.copy2(src, dst)
    else:
        log.debug('Linking: %s -> %s', src, dst)
        os.link(src, dst)
hardlink src file to dst if possible, otherwise copy.
def _prepare_tokens_for_encode(tokens):
    """Prepare tokens for encoding.

    A token followed by a single-space token gets "_" appended and the
    space token itself is dropped.  An ``_UNDERSCORE_REPLACEMENT`` token
    is first split into two tokens.

    Args:
        tokens: `list<str>`, tokens to prepare.

    Returns:
        `list<str>` prepared tokens.
    """
    out = []

    def _prepare_token(t, next_t):
        drop_next = False
        t = _escape(t)
        if next_t == " ":
            t += "_"
            drop_next = True
        return t, drop_next

    followers = tokens[1:] + [None]
    drop = False
    for tok, nxt in zip(tokens, followers):
        if drop:
            # Previous token consumed this single-space token.
            drop = False
            continue
        if tok == _UNDERSCORE_REPLACEMENT:
            left = _UNDERSCORE_REPLACEMENT[:2]
            right = _UNDERSCORE_REPLACEMENT[2:]
            left, _ = _prepare_token(left, None)
            right, _ = _prepare_token(right, nxt)
            out.append(left)
            out.append(right)
            continue
        tok, drop = _prepare_token(tok, nxt)
        out.append(tok)
    return out
Prepare tokens for encoding. Tokens followed by a single space have "_" appended and the single space token is dropped. If a token is _UNDERSCORE_REPLACEMENT, it is broken up into 2 tokens. Args: tokens: `list<str>`, tokens to prepare. Returns: `list<str>` prepared tokens.
def fetch_url(url, dest, parent_to_remove_before_fetch):
    """Download *url* to *dest*, recreating the parent directory first.

    The directory *parent_to_remove_before_fetch* is wiped (if present)
    and recreated before the download begins.
    """
    logger.debug('Downloading file {} from {}', dest, url)
    try:
        shutil.rmtree(parent_to_remove_before_fetch)
    except FileNotFoundError:
        pass
    os.makedirs(parent_to_remove_before_fetch)

    resp = requests.get(url, stream=True)
    with open(dest, 'wb') as out:
        # Stream in 32 KiB chunks to bound memory use.
        for block in resp.iter_content(chunk_size=32 * 1024):
            out.write(block)
Helper function to fetch a file from a URL.
def cache_finite_samples(f):
    """Decorator caching the audio samples produced by generator *f*.

    Results are memoized per ``(FRAME_RATE, args)``; each call returns a
    fresh generator over the cached sample list.
    """
    cache = {}

    def wrap(*args):
        key = FRAME_RATE, args
        if key not in cache:
            cache[key] = list(f(*args))
        return (sample for sample in cache[key])
    return wrap
Decorator to cache audio samples produced by the wrapped generator.
def recipe_details(recipe_id, lang="en"):
    """Return details about a single recipe.

    :param recipe_id: the recipe to query for.
    :param lang: the language to display the texts in.
    """
    params = dict(recipe_id=recipe_id, lang=lang)
    cache_name = "recipe_details.%(recipe_id)s.%(lang)s.json" % params
    return get_cached("recipe_details.json", cache_name, params=params)
This resource returns details about a single recipe. :param recipe_id: The recipe to query for. :param lang: The language to display the texts in. The response is an object with the following properties: recipe_id (number): The recipe id. type (string): The type of the produced item. output_item_id (string): The item id of the produced item. output_item_count (string): The amount of items produced. min_rating (string): The minimum rating of the recipe. time_to_craft_ms (string): The time it takes to craft the item. disciplines (list): A list of crafting disciplines that can use the recipe. flags (list): Additional recipe flags. Known flags: ``AutoLearned``: Set for recipes that don't have to be discovered. ``LearnedFromItem``: Set for recipes that need a recipe sheet. ingredients (list): A list of objects describing the ingredients for this recipe. Each object contains the following properties: item_id (string): The item id of the ingredient. count (string): The amount of ingredients required.
def refresh(self, id_or_uri, timeout=-1):
    """Reclaim the top-of-rack switches in a logical switch.

    Args:
        id_or_uri: Logical Switch ID or URI.
        timeout: Timeout in seconds. Waits for task completion by default;
            the timeout does not abort the operation, only stops waiting.

    Returns:
        dict: The Logical Switch.
    """
    refresh_uri = "{}{}".format(self._client.build_uri(id_or_uri), "/refresh")
    return self._client.update_with_zero_body(refresh_uri, timeout=timeout)
The Refresh action reclaims the top-of-rack switches in a logical switch. Args: id_or_uri: Can be either the Logical Switch ID or URI timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: The Logical Switch
def conv2d_trans(ni: int, nf: int, ks: int = 2, stride: int = 2, padding: int = 0, bias=False) -> nn.ConvTranspose2d:
    "Create `nn.ConvTranspose2d` layer."
    opts = dict(kernel_size=ks, stride=stride, padding=padding, bias=bias)
    return nn.ConvTranspose2d(ni, nf, **opts)
Create `nn.ConvTranspose2d` layer.
def is_effective(self):
    """Test whether the current date falls within the start and end dates.

    return: (boolean) - ``true`` if this is effective, ``false`` otherwise
    """
    now = DateTime.utcnow()
    # Chained comparison short-circuits exactly like the and-form.
    return self.get_start_date() <= now <= self.get_end_date()
Tests if the current date is within the start and end dates, inclusive. return: (boolean) - ``true`` if this is effective, ``false`` otherwise *compliance: mandatory -- This method must be implemented.*
def chdir(self, path):
    """Change the current directory to *path* (normalized and joined)."""
    normalized = self._normalize_path(path)
    self.cwd = self._join_chunks(normalized)
Changes the current directory to the given path
def cli(yamlfile, directory, out, classname, format):
    """Generate graphviz representations of the biolink model."""
    generator = DotGenerator(yamlfile, format)
    generator.serialize(classname=classname, dirname=directory, filename=out)
Generate graphviz representations of the biolink model
def profile_(profile, names, vm_overrides=None, opts=None, **kwargs):
    """Spin up an instance using the Salt Cloud client.

    CLI Example:

    .. code-block:: bash

        salt minionname cloud.profile my-gce-config myinstance
    """
    client = _get_client()
    if isinstance(opts, dict):
        client.opts.update(opts)
    return client.profile(profile, names, vm_overrides=vm_overrides, **kwargs)
Spin up an instance using Salt Cloud CLI Example: .. code-block:: bash salt minionname cloud.profile my-gce-config myinstance
def detectPalmOS(self):
    """Detect whether the current browser is on a PalmOS device.

    WebOS devices also match the Palm markers, so they are excluded.
    """
    markers = (UAgentInfo.devicePalm,
               UAgentInfo.engineBlazer,
               UAgentInfo.engineXiino)
    if any(marker in self.__userAgent for marker in markers):
        return not self.detectPalmWebOS()
    return False
Return detection of a PalmOS device Detects if the current browser is on a PalmOS device.
def stop(self, force=False, wait=False):
    """Terminate all VMs in this cluster and delete its repository.

    :param bool force: remove cluster from storage even if not all
        nodes could be stopped.
    :param bool wait: passed through to ``_stop_all_nodes``.
    """
    log.debug("Stopping cluster `%s` ...", self.name)
    failed = self._stop_all_nodes(wait)
    if not failed:
        self._delete_saved_data()
        return
    if force:
        # Nodes remain, but the caller asked us to forget the cluster anyway.
        self._delete_saved_data()
        log.warning(
            "Not all cluster nodes have been terminated."
            " However, as requested, data about the cluster"
            " has been removed from local storage.")
    else:
        self.repository.save_or_update(self)
        log.warning(
            "Not all cluster nodes have been terminated."
            " Fix errors above and re-run `elasticluster stop %s`",
            self.name)
Terminate all VMs in this cluster and delete its repository. :param bool force: remove cluster from storage even if not all nodes could be stopped.
def _objs_opts(objs, all=None, **opts): if objs: t = objs elif all in (False, None): t = () elif all is True: t = tuple(_values(sys.modules)) + ( globals(), stack(sys.getrecursionlimit())[2:]) else: raise ValueError('invalid option: %s=%r' % ('all', all)) return t, opts
Return given or 'all' objects and the remaining options.
def _ParseCommentRecord(self, structure): comment = structure[1] if comment.startswith('Version'): _, _, self._version = comment.partition(':') elif comment.startswith('Software'): _, _, self._software = comment.partition(':') elif comment.startswith('Time'): _, _, time_format = comment.partition(':') if 'local' in time_format.lower(): self._use_local_timezone = True
Parse a comment and store appropriate attributes. Args: structure (pyparsing.ParseResults): parsed log line.
def _get_args(self, executable, *args): args = list(args) args.insert(0, executable) if self.username: args.append("--username={}".format(self.username)) if self.host: args.append("--host={}".format(self.host)) if self.port: args.append("--port={}".format(self.port)) args.append(self.dbname) return args
compile all the executable and the arguments, combining with common arguments to create a full batch of command args
def _sanitize_input_structure(input_structure):
    """Copy *input_structure*, strip magnetic info and make it primitive.

    Args:
        input_structure: Structure

    Returns:
        Structure
    """
    struct = input_structure.copy()
    struct.remove_spin()
    struct = struct.get_primitive_structure(use_site_props=False)
    if "magmom" in struct.site_properties:
        struct.remove_site_property("magmom")
    return struct
Sanitize our input structure by removing magnetic information and making primitive. Args: input_structure: Structure Returns: Structure
def match_one_pattern(pattern: str, s: str, *args: Optional[Callable], **flags):
    """Find all occurrences of *pattern* in *s*.

    If a single wrapper callable is given, each matched string is wrapped
    before being returned.  Returns None (after printing a notice) when no
    match is found.

    :param pattern: a pattern, can be a string or a regular expression
    :param s: a string
    :param args: at most 1 wrapper callable
    :param flags: the same flags as ``re.findall``'s
    """
    found: Optional[List[str]] = re.findall(pattern, s, **flags)
    if not found:
        print("Pattern \"{0}\" not found in string {1}!".format(pattern, s))
        return None
    if len(args) > 1:
        raise TypeError(
            'Multiple wrappers are given! Only one should be given!')
    if args:
        (wrapper,) = args
        return [wrapper(item) for item in found]
    return found
Find a pattern in a certain string. If found and a wrapper is given, then return the wrapped matched-string; if no wrapper is given, return the pure matched string. If no match is found, return None. :param pattern: a pattern, can be a string or a regular expression :param s: a string :param args: at most 1 argument can be given :param flags: the same flags as ``re.findall``'s :return: .. doctest:: >>> p = "\d+" >>> s = "abc 123 def 456" >>> match_one_pattern(p, s) ['123', '456'] >>> match_one_pattern(p, s, int) [123, 456] >>> match_one_pattern(p, "abc 123 def") ['123'] >>> print(match_one_pattern('s', 'abc')) Pattern "s" not found in string abc! None >>> match_one_pattern('s', 'Ssa', flags=re.IGNORECASE) ['S', 's']
def check_action_type(self, value):
    """Set the CheckActionType, validating that *value* is an ActionType
    (or None)."""
    if value is not None and not isinstance(value, ActionType):
        raise AttributeError("Invalid check action %s" % value)
    self._check_action_type = value
Set the value for the CheckActionType, validating input
def influx_count_(self, measurement):
    """Count the number of rows for a measurement.

    Runs ``select count(*)`` against InfluxDB and returns the row count
    as an int; on any failure, delegates to self.err and returns None.
    """
    try:
        q = "select count(*) from " + measurement
        self.start("Querying: count ...")
        datalen = self.influx_cli.query(q)
        self.end("Finished querying")
        # NOTE(review): datalen[measurement].keys()[0] assumes the query
        # result behaves like a pandas DataFrame (an Index supports [0]);
        # on a plain dict this would raise on Python 3 -- confirm the
        # influx client's return type.
        numrows = int(datalen[measurement][datalen[measurement].keys()[0]])
        return numrows
    except Exception as e:
        self.err(e, self.influx_count_, "Can not count rows for measurement")
Count the number of rows for a measurement
def run(self):
    """Main thread function to maintain connection and receive remote status."""
    _LOGGER.info("Started")
    while True:
        self._maybe_reconnect()
        line = ''
        try:
            t = self._telnet
            if t is not None:
                line = t.read_until(b"\n")
        except EOFError:
            # Remote closed the connection: drop it under the lock, then
            # go straight back to the reconnect attempt at loop top.
            try:
                self._lock.acquire()
                self._disconnect_locked()
                # 'continue' still runs the finally: release below.
                continue
            finally:
                self._lock.release()
        self._recv_cb(line.decode('ascii').rstrip())
Main thread function to maintain connection and receive remote status.
def from_frame(klass, frame, connection):
    """Create a new TaskStateChange event from a Stompest Frame."""
    event = frame.headers['new']
    payload = json.loads(frame.body)
    task = Task.fromDict(payload['info'])
    task.connection = connection
    return klass(task, event)
Create a new TaskStateChange event from a Stompest Frame.
def from_path(cls, path):
    """Instantiate a project class from a given path.

    :param path: app folder path containing the source code
    :raises errors.InvalidPathError: if *path* does not exist
    :return: a project instance
    """
    if not os.path.exists(path):
        raise errors.InvalidPathError(path)
    return cls(path=path)
Instantiates a project class from a given path. :param path: app folder path source code Returns A project instance.
def match_global_phase(a: np.ndarray, b: np.ndarray
                       ) -> Tuple[np.ndarray, np.ndarray]:
    """Phase the given matrices so they agree on the phase of one entry.

    The position of b's largest-magnitude entry is used to compute the
    phase correction, maximizing precision.  Mismatched shapes are
    returned unchanged.

    Args:
        a: A numpy array.
        b: Another numpy array.

    Returns:
        A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.
    """
    if a.shape != b.shape:
        return a, b

    # Anchor on the index where |b| is largest.
    k = max(np.ndindex(*a.shape), key=lambda t: abs(b[t]))

    def dephase(v):
        r = np.real(v)
        i = np.imag(v)
        # Pure-real / pure-imaginary values are handled exactly to avoid
        # arctan2 rounding noise.
        if i == 0:
            return 1 if r >= 0 else -1
        if r == 0:
            return -1j if i >= 0 else 1j
        return np.exp(-1j * np.arctan2(i, r))

    return a * dephase(a[k]), b * dephase(b[k])
Phases the given matrices so that they agree on the phase of one entry. To maximize precision, the position with the largest entry from one of the matrices is used when attempting to compute the phase difference between the two matrices. Args: a: A numpy array. b: Another numpy array. Returns: A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.
def cleanup(self, keep=5):
    """Remove all but the *keep* most recent releases.

    The release currently pointed to by the 'current' symlink is never
    deleted.  Performs one network operation per deleted release.

    :param int keep: Number of old releases to keep around
    """
    releases = self.get_releases()
    current = self.get_current_release()
    for release in releases[keep:]:
        if release == current:
            continue
        self._runner.run("rm -rf '{0}'".format(os.path.join(self._releases, release)))
Remove all but the ``keep`` most recent releases. If any of the candidates for deletion are pointed to by the 'current' symlink, they will not be deleted. This method performs N + 2 network operations where N is the number of old releases that are cleaned up. :param int keep: Number of old releases to keep around
def get_minimum_size(self, data):
    """Return the minimum size of the wrapped element after rotation."""
    size = self.element.get_minimum_size(data)
    if self.angle not in (RotateLM.NORMAL, RotateLM.UPSIDE_DOWN):
        # Quarter-turn rotations swap width and height.
        return datatypes.Point(size.y, size.x)
    return size
Returns the rotated minimum size.
def build_plane_arrays(x, y, qlist):
    """Build 2-D arrays out of data taken in the same plane.

    Returns (xv, yv, qplanes) where xv/yv are the unique coordinate
    vectors and each plane is a (len(yv), len(xv)) array.  When *qlist*
    is a single array rather than a list, a single plane is returned.
    """
    return_list = type(qlist) is list
    if not return_list:
        qlist = [qlist]
    xv = x[np.where(y == y[0])[0]]
    yv = y[np.where(x == x[0])[0]]
    planes = [np.zeros((len(yv), len(xv))) for _ in qlist]
    for j, q in enumerate(qlist):
        for n in range(len(yv)):
            rows = np.where(y == yv[n])[0]
            planes[j][n, :] = q[rows]
    if return_list:
        return xv, yv, planes
    return xv, yv, planes[0]
Build a 2-D array out of data taken in the same plane, for contour plotting.
def a_return_and_reconnect(ctx):
    """Send a carriage return and reconnect the device.  Always True."""
    controller = ctx.ctrl
    controller.send("\r")
    ctx.device.connect(controller)
    return True
Send new line and reconnect.
def dict_of_lists_add(dictionary, key, value):
    """Append *value* to the list stored under *key*, creating it if needed.

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to list in dictionary

    Returns:
        None
    """
    # setdefault avoids the get-then-reassign round trip of the original.
    dictionary.setdefault(key, list()).append(value)
Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None
def transmit_ack_bpdu(self):
    """Send a Topology Change Ack BPDU out this port."""
    # Flag bits 7 and 0 set -- presumably TC-Ack + TC per 802.1D; confirm.
    flags = 0b10000001
    bpdu = self._generate_config_bpdu(flags)
    self.ofctl.send_packet_out(self.ofport.port_no, bpdu)
Send Topology Change Ack BPDU.
def get_value(value_proto):
    """Return the Python equivalent of *value_proto*.

    Native scalar fields are returned directly, timestamps are converted
    via from_timestamp, arrays are converted recursively, and anything
    else yields None.
    """
    field = value_proto.WhichOneof('value_type')
    if field in __native_value_types:
        return getattr(value_proto, field)
    if field == 'timestamp_value':
        return from_timestamp(value_proto.timestamp_value)
    if field == 'array_value':
        return [get_value(v) for v in value_proto.array_value.values]
    return None
Gets the python object equivalent for the given value proto. Args: value_proto: datastore.Value proto message. Returns: the corresponding python object value. timestamps are converted to datetime, and datastore.Value is returned for blob_key_value.
def _gen_packet_setpower(self, sequence, power, fade):
    """Generate a "setpower" packet: 16-bit level plus 32-bit fade time."""
    on_off = Power.BULB_OFF if power == 0 else Power.BULB_ON
    payload = bytearray(pack("<H", on_off))
    payload.extend(pack("<I", fade))
    return self._gen_packet(sequence, PayloadType.SETPOWER2, payload)
Generate "setpower" packet payload.
def findInNodeRegByHA(self, remoteHa):
    """Return the registry name whose HA matches *remoteHa*, else None.

    Raises RuntimeError when more than one entry shares the HA.
    """
    matches = [name for name, ha in self.registry.items()
               if self.sameAddr(ha, remoteHa)]
    if len(matches) > 1:
        raise RuntimeError("more than one node registry entry with the "
                           "same ha {}: {}".format(remoteHa, matches))
    return matches[0] if matches else None
Returns the name of the remote by HA if found in the node registry, else returns None
def executemany(self, command, params=None, max_attempts=5):
    """Execute a batched SQL statement, reconnecting and retrying on error.

    Args:
        command: SQL statement with placeholders.
        params: sequence of parameter tuples, one per execution.
        max_attempts: number of tries before giving up.

    Returns:
        True on success, False when all attempts fail (previously the
        exhausted path fell off the end and returned None).
    """
    for _ in range(max_attempts):
        try:
            self._cursor.executemany(command, params)
            self._commit()
            return True
        except Exception:
            # Assume a dropped connection; re-establish and retry.
            self.reconnect()
    return False
Execute multiple SQL queries without returning a result.
def push(h, x):
    """Push a new value *x* into heap *h* and restore the heap invariant."""
    h.push(x)
    # Sift the freshly appended leaf (index size-1) up to its position.
    up(h, h.size()-1)
Push a new value into heap.
def alias(requestContext, seriesList, newName):
    """Rename a metric (or every series in a wildcard list) in the legend.

    Example::

        &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
    """
    try:
        seriesList.name = newName
    except AttributeError:
        # Plain containers reject attribute assignment: rename each member.
        for s in seriesList:
            s.name = newName
    return seriesList
Takes one metric or a wildcard seriesList and a string in quotes. Prints the string instead of the metric name in the legend. Example:: &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
def is_date(v) -> (bool, date):
    """Check whether *v* is a date or an ISO-8601-style date/time string.

    Accepts any precision from YYYY down to YYYY-MM-DDTHH:MM:SS.

    Args:
        v: the value to test.

    Returns:
        (True, parsed date/datetime) on success, (False, v) otherwise.
    """
    if isinstance(v, date):
        return True, v
    reg = r'^([0-9]{4})(?:-(0[1-9]|1[0-2])(?:-(0[1-9]|[1-2][0-9]|3[0-1])(?:T' \
          r'([0-5][0-9])(?::([0-5][0-9])(?::([0-5][0-9]))?)?)?)?)?$'
    try:
        match = re.match(reg, v)
    except TypeError:
        # Non-string input cannot be a date string.  (The original used a
        # bare except here, which also swallowed KeyboardInterrupt etc.)
        return False, v
    if match:
        groups = match.groups()
        patterns = ['%Y', '%m', '%d', '%H', '%M', '%S']
        try:
            d = datetime.strptime(
                '-'.join(x for x in groups if x),
                '-'.join(patterns[i] for i in range(len(patterns))
                         if groups[i]))
        except ValueError:
            return False, v
        return True, d
    return False, v
Check whether v is a date or an ISO-8601-style date/time string. Args: v: the value to test. Returns: a (bool, date) tuple: (True, parsed value) on success, (False, v) otherwise.
def load(self, response):
    """Populate ``self._models`` from a collection GET *response*.

    A dict response maps keys to embedded model payloads (no href); a
    list response holds items that each carry their own 'href'.
    """
    self._models = []
    if isinstance(response, dict):
        entries = ((response[key], '') for key in response.keys())
    else:
        entries = ((item, item.get('href')) for item in response)
    for payload, href in entries:
        model = self.model_class(self, href=href)
        model.load(payload)
        self._models.append(model)
Parse the GET response for the collection. This operates as a lazy-loader, meaning that the data are only downloaded from the server if there are not already loaded. Collection items are loaded sequentially. In some rare cases, a collection can have an asynchronous request triggered. For those cases, we handle it here.
def _cmp_by_origin(path1, path2):
    """Select the better path by origin attribute.

    IGP is preferred over EGP; EGP over Incomplete.  Returns None when
    both paths tie on origin.
    """
    def get_origin_pref(origin):
        if origin.value == BGP_ATTR_ORIGIN_IGP:
            return 3
        if origin.value == BGP_ATTR_ORIGIN_EGP:
            return 2
        if origin.value == BGP_ATTR_ORIGIN_INCOMPLETE:
            return 1
        LOG.error('Invalid origin value encountered %s.', origin)
        return 0

    origin1 = path1.get_pattr(BGP_ATTR_TYPE_ORIGIN)
    origin2 = path2.get_pattr(BGP_ATTR_TYPE_ORIGIN)
    assert origin1 is not None and origin2 is not None
    if origin1.value == origin2.value:
        return None
    pref1 = get_origin_pref(origin1)
    pref2 = get_origin_pref(origin2)
    if pref1 == pref2:
        return None
    return path1 if pref1 > pref2 else path2
Select the best path based on origin attribute. IGP is preferred over EGP; EGP is preferred over Incomplete. If both paths have same origin, we return None.
def pre_process_json(obj):
    """Recursively convert *obj* into JSON-serializable primitives.

    dicts and lists are processed element-wise; objects exposing
    ``todict`` use it; otherwise we fall back to ``__dict__`` and
    finally ``str``.
    """
    # isinstance (rather than an exact type() check) also covers dict/list
    # subclasses such as OrderedDict and defaultdict.
    if isinstance(obj, dict):
        return {key: pre_process_json(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [pre_process_json(item) for item in obj]
    if hasattr(obj, 'todict'):
        return dict(obj.todict())
    try:
        json.dumps(obj)
    except TypeError:
        try:
            json.dumps(obj.__dict__)
        except (TypeError, AttributeError):
            # AttributeError: objects without __dict__ (e.g. sets)
            # previously crashed here; stringify them instead.
            return str(obj)
        return obj.__dict__
    return obj
Preprocess items in a dictionary or list and prepare them to be json serialized.
def preparse(self, context):
    """Parse the early subset of command-line arguments.

    Reads ``context.argv`` with ``context.early_parser`` and stores the
    resulting argparse namespace on ``context.early_args``; unknown
    arguments are ignored at this stage.
    """
    parsed, _unknown = context.early_parser.parse_known_args(context.argv)
    context.early_args = parsed
Parse a portion of command line arguments with the early parser. This method relies on ``context.argv`` and ``context.early_parser`` and produces ``context.early_args``. The ``context.early_args`` object is the return value from argparse. It is the dict/object like namespace object.
def get_trades(self, max_id=None, count=None, instrument=None, ids=None):
    """Get a list of open trades.

    :param max_id: return trades with id <= max_id (pagination).
    :param count: maximum number of open trades to return.
    :param instrument: restrict to a single instrument.
    :param ids: explicit list of trade ids (excludes other params).
    :return: the API response, or False on request/assertion failure.
    """
    url = "{0}/{1}/accounts/{2}/trades".format(
        self.domain, self.API_VERSION, self.account_id
    )
    params = {
        "maxId": int(max_id) if max_id and max_id > 0 else None,
        "count": int(count) if count and count > 0 else None,
        "instrument": instrument,
        "ids": ','.join(ids) if ids else None
    }
    try:
        # Name-mangled access to the base Client's private __call.
        return self._Client__call(uri=url, params=params, method="get")
    except (RequestException, AssertionError):
        return False
Get a list of open trades Parameters ---------- max_id : int The server will return trades with id less than or equal to this, in descending order (for pagination) count : int Maximum number of open trades to return. Default: 50 Max value: 500 instrument : str Retrieve open trades for a specific instrument only Default: all ids : list A list of trades to retrieve. Maximum number of ids: 50. No other parameter may be specified with the ids parameter. See more: http://developer.oanda.com/rest-live/trades/#getListOpenTrades
def transform(data_frame, **kwargs):
    """Return a normalized copy of *data_frame*.

    Keyword Args:
        norm (float): target norm value, default 1.0.
        axis (int): 0 (default) normalizes each row, 1 each column.
        method (str): norm computation method; defaults to 'vector' for
            rows and 'first' for columns.
        labels (DataFrame): optional labels normalized alongside the data
            (only supported for axis=0).

    Returns:
        The normalized DataFrame, plus normalized labels when provided.
    """
    norm = kwargs.get('norm', 1.0)
    axis = kwargs.get('axis', 0)
    if axis == 0:
        # One norm per row.
        norm_vector = _get_norms_of_rows(data_frame, kwargs.get('method', 'vector'))
    else:
        # One norm per column.
        norm_vector = _get_norms_of_cols(data_frame, kwargs.get('method', 'first'))

    if 'labels' in kwargs:
        if axis == 0:
            return data_frame.apply(lambda col: col * norm / norm_vector, axis=0), \
                kwargs['labels'].apply(lambda col: col * norm / norm_vector, axis=0)
        else:
            raise ValueError("label normalization incompatible with normalization by column")
    else:
        if axis == 0:
            return data_frame.apply(lambda col: col * norm / norm_vector, axis=0)
        else:
            return data_frame.apply(lambda row: row * norm / norm_vector, axis=1)
Return a transformed DataFrame. Transform data_frame along the given axis. By default, each row will be normalized (axis=0). Parameters ----------- data_frame : DataFrame Data to be normalized. axis : int, optional 0 (default) to normalize each row, 1 to normalize each column. method : str, optional Valid methods are: - "vector" : Default for normalization by row (axis=0). Normalize along axis as a vector with norm `norm` - "last" : Linear normalization setting last value along the axis to `norm` - "first" : Default for normalization of columns (axis=1). Linear normalization setting first value along the given axis to `norm` - "mean" : Normalize so that the mean of each vector along the given axis is `norm` norm : float, optional Target value of normalization, defaults to 1.0. labels : DataFrame, optional Labels may be passed as keyword argument, in which case the label values will also be normalized and returned. Returns ----------- df : DataFrame Normalized data. labels : DataFrame, optional Normalized labels, if provided as input. Notes ----------- If labels are real-valued, they should also be normalized. .. Having row_norms as a numpy array should be benchmarked against using a DataFrame: http://stackoverflow.com/questions/12525722/normalize-data-in-pandas Note: This isn't a bottleneck. Using a feature set with 13k rows and 256 data_frame ('ge' from 1962 until now), the normalization was immediate.
def get_new_author(self, api_author):
    """Instantiate a new Author from API data.

    :param api_author: the api data for the Author
    :return: the new Author
    """
    extra = self.api_object_data("author", api_author)
    return Author(site_id=self.site_id, wp_id=api_author["ID"], **extra)
Instantiate a new Author from api data. :param api_author: the api data for the Author :return: the new Author
def update_question_group(self, id, quiz_id, course_id, quiz_groups_name=None, quiz_groups_pick_count=None, quiz_groups_question_points=None):
    """Update a quiz question group's name, pick count or points."""
    path = {"course_id": course_id, "quiz_id": quiz_id, "id": id}
    data = {}
    params = {}
    optional_fields = (
        ("quiz_groups[name]", quiz_groups_name),
        ("quiz_groups[pick_count]", quiz_groups_pick_count),
        ("quiz_groups[question_points]", quiz_groups_question_points),
    )
    for field, value in optional_fields:
        if value is not None:
            data[field] = value
    self.logger.debug("PUT /api/v1/courses/{course_id}/quizzes/{quiz_id}/groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/groups/{id}".format(**path), data=data, params=params, no_data=True)
Update a question group's name, pick count, or question points for a quiz.
def get_link_name(self, tag, attrs, attr):
    """Derive a human-readable name for a link from *attrs*.

    Anchors use upcoming text (falling back to 'title'); images use
    'alt' (falling back to 'title'); everything else is unnamed.
    """
    if tag == 'a' and attr == 'href':
        raw = self.parser.peek(MAX_NAMELEN)
        raw = raw.decode(self.parser.encoding, "ignore")
        name = linkname.href_name(raw)
        return name if name else attrs.get_true('title', u'')
    if tag == 'img':
        name = attrs.get_true('alt', u'')
        return name if name else attrs.get_true('title', u'')
    return u""
Parse attrs for link name. Return name of link.
def decode_response(data):
    """Decode an SSDP response into a case-insensitive header dict.

    Args:
        data (bytes): The encoded response.

    Returns:
        Case-insensitive dictionary of header name to header value pairs
        extracted from the response (status line excluded).
    """
    headers = CaseInsensitiveDict()
    for raw_line in data.decode('utf-8').splitlines()[1:]:
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        # partition on the first colon; lines without one get an empty value.
        name, _, value = raw_line.partition(':')
        headers[name.strip()] = value.strip()
    return headers
Decodes the data from a SSDP response. Args: data (bytes): The encoded response. Returns: dict of string -> string: Case-insensitive dictionary of header name to header value pairs extracted from the response.
def _get_setup(self, result):
    """Process the setup payload returned by the server.

    Populates the device map, location and gateway info from *result*.
    """
    self.__devices = {}
    if ('setup' not in result
            or 'devices' not in result['setup']):
        raise Exception(
            "Did not find device definition.")
    setup = result['setup']
    for device_data in setup['devices']:
        device = Device(self, device_data)
        self.__devices[device.url] = device
    self.__location = setup['location']
    self.__gateway = setup['gateways']
Internal method which process the results from the server.
def hook(self, function, dependencies=None):
    """Register *function* as a hook, deferring until dependencies load.

    Args:
        function (func): function called when the event fires.
        dependencies (str or iterable): modules whose hooks must be called
            before this one.

    Raises:
        TypeError: if *dependencies* is not a string, iterable or None.
    """
    if not isinstance(dependencies, (Iterable, type(None), str)):
        raise TypeError("Invalid list of dependencies provided!")
    # Remember the declared dependencies on the function itself.
    if not hasattr(function, "__deps__"):
        function.__deps__ = dependencies
    if self.isloaded(function.__deps__):
        self.append(function)
    else:
        self._later.append(function)
    # Re-check deferred hooks.  Iterate over a snapshot: the original
    # removed items from self._later while iterating it, which skips
    # elements after each removal.
    for ext in list(self._later):
        if self.isloaded(ext.__deps__):
            self._later.remove(ext)
            self.hook(ext)
Tries to load a hook. Args: function (func): Function that will be called when the event fires. Kwargs: dependencies (str): String or Iterable with modules whose hooks should be called before this one. Raises: :class:TypeError Note that the dependencies are module-wide: if `parent.foo` and `parent.bar` are both subscribed to the `example` event and `child` lists `parent` as a dependency, **both** `foo` and `bar` must be called in order for the dependency to be resolved.
def add_query_to_url(url, extra_query):
    """Return *url* with *extra_query* merged into its query string.

    *extra_query* may be a dict or a key/value pair list as produced by
    :func:`urllib.parse.parse_qsl` and :func:`urllib.parse.parse_qs`.
    """
    parts = urllib.parse.urlsplit(url)
    query = urllib.parse.parse_qsl(parts.query)
    if isinstance(extra_query, dict):
        for key, value in extra_query.items():
            # Expand list/tuple values into repeated key=value pairs.
            values = value if isinstance(value, (tuple, list)) else [value]
            for single in values:
                query.append((key, single))
    else:
        query.extend(extra_query)
    rebuilt = urllib.parse.SplitResult(
        parts.scheme,
        parts.netloc,
        parts.path,
        urllib.parse.urlencode(query),
        parts.fragment,
    )
    return rebuilt.geturl()
Adds an extra query to URL, returning the new URL. Extra query may be a dict or a list as returned by :func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`.
def get_translation_dicts(self):
    """Return (keysym_to_string, string_to_keysym) translation dicts.

    Loads the extra keysym groups first so their symbols are available
    in Xlib.XK's namespace.
    """
    keysym_to_string = {}
    string_to_keysym = {}
    for group in ('latin2', 'latin3', 'latin4', 'greek'):
        Xlib.XK.load_keysym_group(group)
    for attr, keysym in Xlib.XK.__dict__.items():
        if attr.startswith('XK_'):
            name = attr[3:]
            string_to_keysym[name] = keysym
            keysym_to_string[keysym] = name
    return keysym_to_string, string_to_keysym
Returns dictionaries for the translation of keysyms to strings and from strings to keysyms.
def list_delete(self, id):
    """Delete a list.

    Args:
        id: list ID, or an API entity from which the ID is unpacked.
    """
    # Normalize: accept either a bare ID or an API entity dict/object.
    id = self.__unpack_id(id)
    self.__api_request('DELETE', '/api/v1/lists/{0}'.format(id))
Delete a list.
def plot_connectivity_surrogate(self, measure_name, repeats=100, fig=None):
    """Plot the connectivity measure under the no-connectivity null.

    Repeatedly samples connectivity from phase-randomized data and plots
    the 95th percentile across samples.

    Parameters
    ----------
    measure_name : str
        Name of the connectivity measure to calculate.
    repeats : int, optional
        How many surrogate samples to take.
    fig : {None, Figure object}, optional
        Where to plot. If **None**, a new figure is created.

    Returns
    -------
    fig : Figure object
    """
    surrogate = self.get_surrogate_connectivity(measure_name, repeats)
    self._prepare_plots(True, False)
    upper = np.percentile(surrogate, 95, axis=0)
    return self.plotting.plot_connectivity_spectrum(
        [upper], self.fs_, freq_range=self.plot_f_range, fig=fig)
Plot spectral connectivity measure under the assumption of no actual connectivity. Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity distribution if there was no causal structure in the data. Parameters ---------- measure_name : str Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures. repeats : int, optional How many surrogate samples to take. fig : {None, Figure object}, optional Where to plot. If set to **None**, a new figure is created. Otherwise plot into the provided figure object. Returns ------- fig : Figure object Instance of the figure in which the plot was drawn.
def safe_makedirs(fdir):
    """Create directory *fdir* (and parents) if it does not already exist.

    Tolerates the directory being created concurrently between the check
    and the makedirs call.

    :param fdir: Directory path to make.
    :return: True
    """
    if not os.path.isdir(fdir):
        try:
            os.makedirs(fdir)
        except OSError as e:
            # Py3 fix: the original tested `'Cannot create...' in e`
            # (a TypeError on Python 3) and referenced WindowsError,
            # which only exists on Windows.  errno is portable.
            if e.errno == errno.EEXIST:
                log.debug('relevant dir already exists')
            else:
                raise
    return True
Make an arbitrary directory. This is safe to call for Python 2 users. :param fdir: Directory path to make. :return:
def _get_object_as_soft(self): soft = ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string(), self._get_columns_as_string(), self._get_table_as_string()] return "\n".join(soft)
Get the object as SOFT formated string.
def delete_blob(call=None, kwargs=None):
    """Delete a blob from a container.

    Requires 'container' and 'blob' entries in *kwargs*.
    """
    if kwargs is None:
        kwargs = {}
    required = (('container', 'A container must be specified'),
                ('blob', 'A blob must be specified'))
    for key, message in required:
        if key not in kwargs:
            raise SaltCloudSystemExit(message)
    service = _get_block_blob_service(kwargs)
    service.delete_blob(kwargs['container'], kwargs['blob'])
    return True
Delete a blob from a container.
def refresh_session(self):
    """Re-authenticate using the refresh token when available.

    Falls back to username/password login when no refresh token exists.

    :return: the authentication response, or None if neither credential
        is available.
    """
    if self._refresh_token:
        return self._authenticate(
            grant_type='refresh_token',
            refresh_token=self._refresh_token,
            client_id=self._client_id,
            client_secret=self._client_secret
        )
    if self._username and self._password:
        return self.login(self._username, self._password)
    return None
Re-authenticate using the refresh token if available. Otherwise log in using the username and password if it was used to authenticate initially. :return: The authentication response or `None` if not available
def is_oriented(self):
    """Return whether the box's 4x4 transform includes any rotation."""
    transform = self.primitive.transform
    if not util.is_shape(transform, (4, 4)):
        return False
    # Rotated iff the upper-left 3x3 block differs from identity.
    return not np.allclose(transform[0:3, 0:3], np.eye(3))
Returns whether or not the current box is rotated at all.
def _legacy_api_registration_check(self):
    """Check registration status through the legacy API.

    :return: False on connection or parse failure, None when the machine
        was never registered, True when currently registered, otherwise
        the unregistration timestamp reported by the API.
    """
    logger.debug('Checking registration status...')
    machine_id = generate_machine_id()
    try:
        url = self.api_url + '/v1/systems/' + machine_id
        net_logger.info("GET %s", url)
        res = self.session.get(url, timeout=self.config.http_timeout)
    except requests.ConnectionError:
        logger.error('Connection timed out. Running connection test...')
        self.test_connection()
        return False
    try:
        # Decode the payload once and reuse it (the original parsed the
        # same response body twice).
        body = json.loads(res.content)
        unreg_status = body.get('unregistered_at', 'undefined')
        self.config.account_number = body.get('account_number', 'undefined')
    except ValueError:
        return False
    if unreg_status == 'undefined':
        # API has no record for this machine: never registered.
        return None
    elif unreg_status is None:
        return True
    else:
        return unreg_status
Check registration status through API
def connect_full_direct(self, config):
    """Create a fully-connected genome, including direct input-output
    connections."""
    pairs = self.compute_full_connections(config, True)
    for in_id, out_id in pairs:
        conn = self.create_connection(config, in_id, out_id)
        self.connections[conn.key] = conn
Create a fully-connected genome, including direct input-output connections.
def get_results(self):
    """Return alignment details per strand.

    :return: dict keyed by 'template', 'complement' and '2d'; entries
        with no summary data are ``{'status': 'no data'}``.
    """
    summary = self.handle.get_summary_data(self.group_name)
    results = {}
    for strand in ('template', 'complement', '2d'):
        summary_key = 'genome_mapping_{}'.format(strand)
        if summary_key in summary:
            results[strand] = self._get_results(summary[summary_key])
        else:
            results[strand] = {'status': 'no data'}
    return results
Get details about the alignments that have been performed. :return: A dict of dicts. The keys of the top level are 'template', 'complement' and '2d'. Each of these dicts contains the following fields: * status: Can be 'no data', 'no match found', or 'match found'. * direction: Can be 'forward', 'reverse'. * ref_name: Name of reference. * ref_span: Section of reference aligned to, as a tuple (start, end). * seq_span: Section of the called sequence that aligned, as a tuple (start, end). * seq_len: Total length of the called sequence. * num_aligned: Number of bases that aligned to bases in the reference. * num_correct: Number of aligned bases that match the reference. * num_deletions: Number of bases in the aligned section of the reference that are not aligned to bases in the called sequence. * num_insertions: Number of bases in the aligned section of the called sequence that are not aligned to bases in the reference. * identity: The fraction of aligned bases that are correct (num_correct / num_aligned). * accuracy: The overall basecall accuracy, according to the alignment. (num_correct / (num_aligned + num_deletions + num_insertions)). Note that if the status field is not 'match found', then all the other fields will be absent.
def is_ip_addr_list(value, min=None, max=None):
    """Check that *value* is a list of IP addresses, optionally bounding
    the member count; each member is validated with is_ip_addr."""
    members = is_list(value, min, max)
    return [is_ip_addr(member) for member in members]
Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor = Validator() >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "a" is unacceptable.
def transform(v1, v2): theta = angle(v1,v2) x = N.cross(v1,v2) x = x / N.linalg.norm(x) A = N.array([ [0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]]) R = N.exp(A*theta) return R
Create an affine transformation matrix that maps vector 1 onto vector 2 https://math.stackexchange.com/questions/293116/rotating-one-3d-vector-to-another
def _connect_secureish(*args, **kwargs):
    """Connect to S3 using the safest options boto supports.

    Encryption is always enabled; certificate validation is enabled on
    boto >= 2.6.0, where the ``validate_certs`` option first appeared.
    """
    boto_version = tuple(int(part) for part in boto.__version__.split('.'))
    if boto_version >= (2, 6, 0):
        kwargs['validate_certs'] = True
    kwargs['is_secure'] = True
    auth_region_name = kwargs.pop('auth_region_name', None)
    conn = connection.S3Connection(*args, **kwargs)
    if auth_region_name:
        conn.auth_region_name = auth_region_name
    return conn
Connect using the safest available options. This turns on encryption (works in all supported boto versions) and certificate validation (in the subset of supported boto versions that can handle certificate validation, namely, those after 2.6.0). Versions below 2.6 don't support the validate_certs option to S3Connection, and enable it via configuration option just seems to cause an error.
def set_in_bounds(self, obj, val):
    """Set to *val*, cropping non-callable values to the legal bounds.

    Callables are stored as-is; all objects are accepted and no
    exceptions are raised. See crop_to_bounds for cropping details.
    """
    if callable(val):
        bounded_val = val
    else:
        bounded_val = self.crop_to_bounds(val)
    super(Number, self).__set__(obj, bounded_val)
Set to the given value, but cropped to be within the legal bounds. All objects are accepted, and no exceptions will be raised. See crop_to_bounds for details on how cropping is done.
def insert_chain(cur, chain, encoded_data=None):
    """Insert a chain into the cache (idempotent via INSERT OR IGNORE).

    Args:
        cur (sqlite3.Cursor): cursor used to execute the insert.
        chain (iterable): collection of index-labeled nodes.
        encoded_data (dict, optional): populated with the serialized
            values so the encoding can be reused across calls.
    """
    if encoded_data is None:
        encoded_data = {}
    if 'nodes' not in encoded_data:
        encoded_data['nodes'] = json.dumps(sorted(chain), separators=(',', ':'))
    if 'chain_length' not in encoded_data:
        encoded_data['chain_length'] = len(chain)
    query = ("INSERT OR IGNORE INTO chain(chain_length, nodes) "
             "VALUES (:chain_length, :nodes);")
    cur.execute(query, encoded_data)
Insert a chain into the cache. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. chain (iterable): A collection of nodes. Chains in embedding act as one node. encoded_data (dict, optional): If a dictionary is provided, it will be populated with the serialized data. This is useful for preventing encoding the same information many times. Notes: This function assumes that the nodes in chain are index-labeled.
def _get_external_id(account_info): if all(k in account_info for k in ('external_id', 'external_method')): return dict(id=account_info['external_id'], method=account_info['external_method']) return None
Get external id from account info.
def _jamo_to_hangul_char(lead, vowel, tail=0):
    """Compose the Hangul syllable character for the given jamo.

    *tail* defaults to 0 (no final consonant).
    """
    lead_index = ord(lead) - _JAMO_LEAD_OFFSET
    vowel_index = ord(vowel) - _JAMO_VOWEL_OFFSET
    tail_index = (ord(tail) - _JAMO_TAIL_OFFSET) if tail else 0
    # Standard Unicode Hangul composition arithmetic.
    code_point = (_JAMO_OFFSET
                  + (lead_index - 1) * 588
                  + (vowel_index - 1) * 28
                  + tail_index)
    return chr(code_point)
Return the Hangul character for the given jamo characters.
def _substitute(self, expr, mapping):
    """Substitute expressions with other expressions, recursively.

    Parameters
    ----------
    expr : ibis.expr.types.Expr
    mapping : Mapping[ibis.expr.operations.Node, ibis.expr.types.Expr]

    Returns
    -------
    ibis.expr.types.Expr
        The mapped replacement, a rebuilt expression with substituted
        arguments, or *expr* unchanged when no substitution applies.
    """
    node = expr.op()
    try:
        # Direct hit: this node is mapped to a replacement expression.
        return mapping[node]
    except KeyError:
        # Blocking nodes stop the recursion.
        if node.blocks():
            return expr

        # Recurse into expression-valued arguments only.
        new_args = list(node.args)
        unchanged = True
        for i, arg in enumerate(new_args):
            if isinstance(arg, ir.Expr):
                new_arg = self.substitute(arg, mapping)
                # Identity comparison: any new object means a change.
                unchanged = unchanged and new_arg is arg
                new_args[i] = new_arg
        if unchanged:
            return expr

        # Rebuild the node; fall back to the original when the
        # substituted arguments no longer type-check.
        try:
            new_node = type(node)(*new_args)
        except IbisTypeError:
            return expr

        # Preserve the expression's name when it has one.
        try:
            name = expr.get_name()
        except ExpressionError:
            name = None
        return expr._factory(new_node, name=name)
Substitute expressions with other expressions. Parameters ---------- expr : ibis.expr.types.Expr mapping : Mapping[ibis.expr.operations.Node, ibis.expr.types.Expr] Returns ------- ibis.expr.types.Expr
def _streaming_request_iterable(self, config, requests): yield self.types.StreamingRecognizeRequest(streaming_config=config) for request in requests: yield request
A generator that yields the config followed by the requests. Args: config (~.speech_v1.types.StreamingRecognitionConfig): The configuration to use for the stream. requests (Iterable[~.speech_v1.types.StreamingRecognizeRequest]): The input objects. Returns: Iterable[~.speech_v1.types.StreamingRecognizeRequest]): The correctly formatted input for :meth:`~.speech_v1.SpeechClient.streaming_recognize`.
def format_datetime(cls, timestamp):
    """Return *timestamp* rendered with ``cls.DATETIME_FORMAT``.

    :param timestamp: a datetime object; must be truthy.
    :raises DateTimeFormatterException: when *timestamp* is missing/falsy.
    """
    if not timestamp:
        # Fixed error message: the argument is a datetime object, not a
        # string, and the original text ("must a valid string") was
        # ungrammatical.
        raise DateTimeFormatterException(
            'timestamp must be a valid datetime object, got {}'.format(timestamp))
    return timestamp.strftime(cls.DATETIME_FORMAT)
Creates a string representing the date and time information provided by the given `timestamp` object.
def _speak_as_literal_punctuation(self, element): self._speak_as( element, self._get_regular_expression_of_symbols(), 'literal-punctuation', self._operation_speak_as_literal_punctuation )
Speak the punctuation for elements only. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def read32(self, offset):
    """Read a 32-bit value at *offset* bytes from the MMIO base address.

    Args:
        offset (int, long): offset from base physical address, in bytes.

    Returns:
        int: the 32-bit value read.

    Raises:
        TypeError: if *offset* is not an integer.
        ValueError: if *offset* is out of bounds.
    """
    # ``long`` keeps Python 2 compatibility, matching the rest of the file.
    if not isinstance(offset, (int, long)):
        raise TypeError("Invalid offset type, should be integer.")
    adjusted = self._adjust_offset(offset)
    self._validate_offset(adjusted, 4)
    raw = self.mapping[adjusted:adjusted + 4]
    # "=L": native byte order, standard 4-byte unsigned long.
    return struct.unpack("=L", raw)[0]
Read 32-bits from the specified `offset` in bytes, relative to the base physical address of the MMIO region. Args: offset (int, long): offset from base physical address, in bytes. Returns: int: 32-bit value read. Raises: TypeError: if `offset` type is invalid. ValueError: if `offset` is out of bounds.
def help(self, stream):
    """Show the help for a given stream."""
    if stream not in self.streams:
        log.error("Stream '{}' not found in the database.".format(stream))
    # NOTE(review): execution falls through even when the stream is
    # missing (original behavior); the lookup below will then raise.
    matching = self._stream_df[self._stream_df['STREAM'] == stream]
    params = matching.values[0]
    self._print_stream_parameters(params)
Show the help for a given stream.
def disconnect_pools(self):
    """Disconnect every connection in the internal pools, then empty them.

    Runs under the instance lock so no pool is mutated mid-teardown.
    """
    with self._lock:
        pools = self._pools
        # Python 2 dict API (itervalues), consistent with the file.
        for pool in pools.itervalues():
            pool.disconnect()
        pools.clear()
Disconnects all connections from the internal pools.
def to_string(address, leading_dot=False):
    """Return the dotted string form of a WFQDN.

    :param address: WFQDN to convert.
    :param leading_dot: when True a dot is appended to the result.
        NOTE(review): despite the parameter name, the dot is appended at
        the *end* (original behavior, preserved here).
    :return: str
    """
    if not isinstance(address, WFQDN):
        raise TypeError('Invalid type for FQDN address')
    dotted = '.'.join(address._labels)
    if leading_dot:
        dotted += '.'
    return dotted
Return doted-written address by the given WFQDN object :param address: address to convert :param leading_dot: whether this function place leading dot to the result or not :return: str
def base36encode(number):
    """Convert an integer to its base-36 string representation."""
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    base = len(digits)
    prefix = ''
    if number < 0:
        prefix = '-'
        number = -number
    # Single digit (including zero): no loop needed.
    if number < base:
        return prefix + digits[number]
    encoded = ''
    while number:
        number, rem = divmod(number, base)
        encoded = digits[rem] + encoded
    return prefix + encoded
Converts an integer into a base36 string.
def get_country_name(self, callsign, timestamp=timestamp_now):
    """Return the country name where *callsign* is located at *timestamp*.

    Delegates to get_all() and extracts the country field; raises
    KeyError when no country is found for the callsign.
    """
    return self.get_all(callsign, timestamp)[const.COUNTRY]
Returns the country name where the callsign is located Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: str: name of the Country Raises: KeyError: No Country found for callsign Note: Don't rely on the country name when working with several instances of py:class:`Callinfo`. Clublog and Country-files.org use slightly different names for countries. Example: - Country-files.com: "Fed. Rep. of Germany" - Clublog: "FEDERAL REPUBLIC OF GERMANY"
def _update_chime_status(self, message=None, status=None):
    """Update the cached chime state from *message* (or explicit *status*).

    :param message: message whose ``chime_on`` bit overrides *status*
    :type message: :py:class:`~alarmdecoder.messages.Message`
    :param status: chime status used when no Message is given
    :type status: bool
    :returns: the current chime status, or None when no status could be
        determined from the inputs
    """
    chime_status = status
    if isinstance(message, Message):
        # Message bits take precedence over the explicit status argument.
        chime_status = message.chime_on
    if chime_status is None:
        return
    if chime_status != self._chime_status:
        # Swap in the new status while keeping the old one for the check.
        self._chime_status, old_status = chime_status, self._chime_status
        if old_status is not None:
            # Only fire the callback after the first real status is known.
            self.on_chime_changed(status=self._chime_status)
    return self._chime_status
Uses the provided message to update the Chime state. :param message: message to use to update :type message: :py:class:`~alarmdecoder.messages.Message` :param status: chime status, overrides message bits. :type status: bool :returns: bool indicating the new status
def rollback(awsclient, function_name, alias_name=ALIAS_NAME, version=None):
    """Point *alias_name* of a lambda function at *version*, or at the
    previous version when none is given.

    :param awsclient:
    :param function_name:
    :param alias_name:
    :param version:
    :return: exit_code (0 on success, 1 when no previous version exists)
    """
    if version:
        log.info('rolling back to version {}'.format(version))
    else:
        log.info('rolling back to previous version')
        version = _get_previous_version(awsclient, function_name, alias_name)
        # '0' is the sentinel for "no previous version found".
        if version == '0':
            log.error('unable to find previous version of lambda function')
            return 1
    log.info('new version is %s' % str(version))
    _update_alias(awsclient, function_name, version, alias_name)
    return 0
Rollback a lambda function to a given version. :param awsclient: :param function_name: :param alias_name: :param version: :return: exit_code
def on_hello(self, message):
    """Handle the gateway HELLO event: identify, then start heartbeating.

    Args:
        message (dict): full HELLO payload from the Discord websocket
            connection; ``message['d']['heartbeat_interval']`` supplies
            the server-requested heartbeat interval.
    """
    logger.info("Got a hello")
    self.identify(self.token)
    self.heartbeat_thread = Heartbeat(self.ws, message['d']['heartbeat_interval'])
    self.heartbeat_thread.start()
    return
Runs on a hello event from websocket connection Args: message (dict): Full message from Discord websocket connection
def set_field(self, field, idx, value):
    """Set the field ``field`` of elements ``idx`` to ``value``.

    Scalars are promoted to single-element lists. Field validity is only
    asserted per model inside the loop, not pre-checked for all models.

    :param field: field name
    :param idx: element idx (scalar or list)
    :param value: value of fields to set (scalar or list)
    :return: None
    """
    if isinstance(idx, (int, float, str)):
        idx = [idx]
    if isinstance(value, (int, float)):
        value = [value]
    # Resolve the owning model name for each element index.
    models = [self._idx_model[i] for i in idx]
    for i, m, v in zip(idx, models, value):
        assert hasattr(self.system.__dict__[m], field)
        # NOTE(review): passes the full ``idx`` list to get_uid rather
        # than the per-iteration ``i`` — verify this is intended.
        uid = self.system.__dict__[m].get_uid(idx)
        self.system.__dict__[m].__dict__[field][uid] = v
Set the field ``field`` of elements ``idx`` to ``value``. This function does not check if the field is valid for all models. :param field: field name :param idx: element idx :param value: value of fields to set :return: None
def to_default_timezone_datetime(self, value):
    """Convert *value* to a datetime localized to the default timezone."""
    utc_value = self.to_utc_datetime(value)
    default_tz = timezone.get_default_timezone()
    return timezone.localtime(utc_value, default_tz)
convert to default timezone datetime
def set_results(self, results: str):
    """Set the default result type for this SASdata object; it stays in
    effect until changed.

    :param results: result format: 'Pandas', 'HTML' or 'TEXT'. 'HTML'
        (case-insensitive) enables HTML rendering.
    :return: None
    """
    self.HTML = 1 if results.upper() == "HTML" else 0
    self.results = results
This method set the results attribute for the SASdata object; it stays in effect till changed results - set the default result type for this SASdata object. 'Pandas' or 'HTML' or 'TEXT'. :param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives :return: None
def explain_permutation_importance(estimator, vec=None, top=_TOP,
                                   target_names=None,  # ignored
                                   targets=None,  # ignored
                                   feature_names=None,
                                   feature_re=None,
                                   feature_filter=None,
                                   ):
    """Return an explanation of PermutationImportance.

    See :func:`eli5.explain_weights` for description of ``top``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
    ``target_names`` and ``targets`` parameters are ignored.

    ``vec`` is a vectorizer instance used to transform raw features to the
    input of the estimator; you can pass it instead of ``feature_names``.
    """
    is_regression = isinstance(estimator.wrapped_estimator_, RegressorMixin)
    description = DESCRIPTION_SCORE_DECREASE + estimator.caveats_
    return get_feature_importance_explanation(
        estimator, vec,
        estimator.feature_importances_,
        coef_std=estimator.feature_importances_std_,
        feature_names=feature_names,
        feature_filter=feature_filter,
        feature_re=feature_re,
        top=top,
        description=description,
        is_regression=is_regression,
    )
Return an explanation of PermutationImportance. See :func:`eli5.explain_weights` for description of ``top``, ``feature_names``, ``feature_re`` and ``feature_filter`` parameters. ``target_names`` and ``targets`` parameters are ignored. ``vec`` is a vectorizer instance used to transform raw features to the input of the estimator (e.g. a fitted CountVectorizer instance); you can pass it instead of ``feature_names``.
def play_async(cls, file_path, on_done=None):
    """Play an audio file on a background thread.

    :param file_path: path of the file to play.
    :param on_done: callback invoked when playback completes.
    """
    worker = threading.Thread(target=AudioPlayer.play,
                              args=(file_path, on_done))
    worker.start()
Play an audio file asynchronously. :param file_path: the path to the file to play. :param on_done: callback when audio playback completes.
def countSeries(requestContext, *seriesLists):
    """Draw a horizontal line representing the number of nodes found in
    the seriesList.

    Example::

        &target=countSeries(carbon.agents.*.*)
    """
    if not seriesLists or not any(seriesLists):
        # Nothing to count: render a constant zero line instead.
        series = constantLine(requestContext, 0).pop()
        series.pathExpression = "countSeries()"
        return [series]
    seriesList, start, end, step = normalize(seriesLists)
    name = "countSeries(%s)" % formatPathExpressions(seriesList)
    # Count how many series contribute a datapoint at each step.
    values = (int(len(row)) for row in zip_longest(*seriesList))
    series = TimeSeries(name, start, end, step, values)
    series.pathExpression = name
    return [series]
Draws a horizontal line representing the number of nodes found in the seriesList. Example:: &target=countSeries(carbon.agents.*.*)
def non_quoted_string_regex(self, strict=True):
    """Temporarily swap ``self.regex`` for one that matches bare
    (unquoted) strings over this plugin's charset, restoring the
    original on exit.

    Public, because detect_secrets.core.audit needs to reference it.

    :type strict: bool
    :param strict: when True, the pattern must match the entire string.
    """
    saved = self.regex
    pattern = r'([{}]+)'.format(re.escape(self.charset))
    if strict:
        pattern = r'^{}$'.format(pattern)
    self.regex = re.compile(pattern)
    try:
        yield
    finally:
        self.regex = saved
For certain file formats, strings need not necessarily follow the normal convention of being denoted by single or double quotes. In these cases, we modify the regex accordingly. Public, because detect_secrets.core.audit needs to reference it. :type strict: bool :param strict: if True, the regex will match the entire string.
def add_dicts(d1, d2):
    """Merge two dicts of addable values.

    A None dict yields the other; a None value on one side yields the
    other side's value; otherwise values are added.
    """
    if d1 is None:
        return d2
    if d2 is None:
        return d1
    merged = {}
    for key in set(d1) | set(d2):
        v1, v2 = d1.get(key), d2.get(key)
        if v1 is None:
            merged[key] = v2
        elif v2 is None:
            merged[key] = v1
        else:
            merged[key] = v1 + v2
    return merged
Merge two dicts of addable values
def flux_minimization(model, fixed, solver, weights={}):
    """Minimize the L1-norm of all fluxes while keeping some fixed.

    Args:
        model: MetabolicModel to solve.
        fixed: dict mapping reaction id to an additional lower bound.
        solver: LP solver instance to use.
        weights: dict of weights on the L1-norm terms (accepted for
            interface compatibility; not referenced in this body).

    Returns:
        An iterator of (reaction id, flux) pairs.
    """
    problem = FluxBalanceProblem(model, solver)
    for reaction_id, lower_bound in iteritems(fixed):
        flux_var = problem.get_flux_var(reaction_id)
        problem.prob.add_linear_constraints(flux_var >= lower_bound)
    problem.minimize_l1()
    return ((reaction_id, problem.get_flux(reaction_id))
            for reaction_id in model.reactions)
Minimize flux of all reactions while keeping certain fluxes fixed. The fixed reactions are given in a dictionary as reaction id to value mapping. The weighted L1-norm of the fluxes is minimized. Args: model: MetabolicModel to solve. fixed: dict of additional lower bounds on reaction fluxes. solver: LP solver instance to use. weights: dict of weights on the L1-norm terms. Returns: An iterator of reaction ID and reaction flux pairs.
def from_soup(self, author, soup):
    """Factory: build a Contact from the icon links found in *soup*.

    :param author: unused; kept for interface compatibility.
    :param soup: BeautifulSoup fragment containing the contact icons.
    :return: Contact(email, facebook, twitter, link)
    """
    def _href_for(icon_class):
        # One find() per icon instead of the original's duplicated
        # lookups; '' when the icon is absent.
        icon = soup.find('span', class_=icon_class)
        if icon is None:
            return ''
        return icon.findParent('a').get('href')

    # 'mailto:addr' -> 'addr'; split is a no-op for the '' default.
    email = _href_for('icon icon-mail').split(':')[-1]
    facebook = _href_for('icon icon-facebook')
    twitter = _href_for('icon icon-twitter-3')
    link = _href_for('icon icon-link')
    return Contact(email, facebook, twitter, link)
Factory Pattern. Fetches contact data from given soup and builds the object
def _compensate_humidity(self, adc_h):
    """Compensate a raw humidity ADC reading to relative humidity.

    Formula from the Bosch BME280 datasheet, section 8.1 "Compensation
    formulas in double precision floating point"
    (BST-BME280-DS001-10, rev 1.1, May 2015). Uses ``self._temp_fine``
    (set by the temperature compensation, per the datasheet) and the six
    humidity calibration values in ``self._calibration_h``.
    """
    var_h = self._temp_fine - 76800.0
    if var_h == 0:
        # Guard the datasheet formula's division-free but degenerate case.
        return 0
    var_h = ((adc_h - (self._calibration_h[3] * 64.0 + self._calibration_h[4] / 16384.0 * var_h)) *
             (self._calibration_h[1] / 65536.0 * (1.0 + self._calibration_h[5] / 67108864.0 * var_h *
              (1.0 + self._calibration_h[2] / 67108864.0 * var_h))))
    var_h *= 1.0 - self._calibration_h[0] * var_h / 524288.0
    # Clamp to the physically meaningful 0..100 %RH range.
    if var_h > 100.0:
        var_h = 100.0
    elif var_h < 0.0:
        var_h = 0.0
    return var_h
Compensate humidity. Formula from datasheet Bosch BME280 Environmental sensor. 8.1 Compensation formulas in double precision floating point Edition BST-BME280-DS001-10 | Revision 1.1 | May 2015.
def maybe_a(generator):
    """Class factory: produce a generator class yielding either None or an
    arbitrary value from *generator* (a closure over that generator)."""
    class MaybeAGenerator(ArbitraryInterface):

        @classmethod
        def arbitrary(cls):
            # Either None or an arbitrary value of the wrapped generator.
            return arbitrary(one_of(None, generator))

    MaybeAGenerator.__name__ = 'maybe_a({})'.format(generator.__name__)
    return MaybeAGenerator
Generates either an arbitrary value of the specified generator or None. This is a class factory, it makes a class which is a closure around the specified generator.
def virtualfields(self):
    """Return a tuple of the names of virtual fields in self."""
    if self._virtualfields is None:
        return tuple()
    return tuple(self._virtualfields)
Returns a tuple listing the names of virtual fields in self.
def one_of_keyword_only(*valid_keywords):
    """Decorator factory enforcing exactly one of *valid_keywords* as a
    keyword-only argument.

    The decorated function receives ``(key, value)`` appended to its
    positional args. Raises TypeError when zero, several, or unknown
    keywords are passed.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            missing = object()  # sentinel: distinguishes "absent" from None
            provided = {}
            for keyword in valid_keywords:
                candidate = kwargs.pop(keyword, missing)
                if candidate is not missing:
                    provided[keyword] = candidate
            if kwargs:
                raise TypeError('Unexpected arguments: {}'.format(kwargs))
            if not provided:
                raise TypeError('Must provide one of {} as keyword argument'.format(
                    ', '.join(valid_keywords)))
            if len(provided) > 1:
                raise TypeError('Must provide only one of {} as keyword argument. Received {}'.format(
                    ', '.join(valid_keywords), provided))
            # popitem() yields the single (key, value) pair as a tuple.
            return func(*(args + provided.popitem()))
        return wrapper
    return decorator
Decorator to help make one-and-only-one keyword-only argument functions more reusable Notes: Decorated function should take 2 arguments, the first for the key, the second the value Examples: :: @one_of_keyword_only('a', 'b', 'c') def func(key, value): if key == 'a': ... elif key == 'b': ... else: # key = 'c' ... ... func(a=1) func(b=2) func(c=3) try: func(d=4) except TypeError: ... try: func(a=1, b=2) except TypeError: ... Args: *valid_keywords (str): All allowed keyword argument names Raises: TypeError: On decorated call, if 0 or 2+ arguments are provided or kwargs contains a key not in valid_keywords
def dcmdottoang_vel(R, Rdot):
    """Recover angular velocities from a rotation matrix and its derivative.

    Returns (w, Omega): w is the angular velocity in the inertial frame,
    Omega the angular velocity in the body frame (both via vee_map).
    """
    inertial = vee_map(Rdot.dot(R.T))
    body = vee_map(R.T.dot(Rdot))
    return (inertial, body)
Convert a rotation matrix to angular velocity w - angular velocity in inertial frame Omega - angular velocity in body frame