code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def subtract_days(self, days: int) -> datetime: self.value = self.value - relativedelta(days=days) return self.value
Subtracts days from the given value
def switch_state(context, ain): context.obj.login() actor = context.obj.get_actor_by_ain(ain) if actor: click.echo("State for {} is: {}".format(ain,'ON' if actor.get_state() else 'OFF')) else: click.echo("Actor not found: {}".format(ain))
Get an actor's power state
def joinMeiUyir(mei_char, uyir_char): if not mei_char: return uyir_char if not uyir_char: return mei_char if not isinstance(mei_char, PYTHON3 and str or unicode): raise ValueError(u"Passed input mei character '%s' must be unicode, not just string" % mei_char) if not isinstance(uyir_char, PYTHON3 and str or unicode) and uyir_char != None: raise ValueError(u"Passed input uyir character '%s' must be unicode, not just string" % uyir_char) if mei_char not in grantha_mei_letters: raise ValueError(u"Passed input character '%s' is not a tamil mei character" % mei_char) if uyir_char not in uyir_letters: raise ValueError(u"Passed input character '%s' is not a tamil uyir character" % uyir_char) if uyir_char: uyiridx = uyir_letters.index(uyir_char) else: return mei_char meiidx = grantha_mei_letters.index(mei_char) uyirmeiidx = meiidx*12 + uyiridx return grantha_uyirmei_letters[uyirmeiidx]
This function joins a mei character and an uyir character, and returns the compound uyirmei unicode character. Inputs: mei_char : It must be a unicode tamil mei char. uyir_char : It must be a unicode tamil uyir char. Written By : Arulalan.T Date : 22.09.2014
def _get_secret_with_context( filename, secret, plugin_settings, lines_of_context=5, force=False, ): snippet = CodeSnippetHighlighter().get_code_snippet( filename, secret['line_number'], lines_of_context=lines_of_context, ) try: raw_secret_value = get_raw_secret_value( snippet.target_line, secret, plugin_settings, filename, ) snippet.highlight_line(raw_secret_value) except SecretNotFoundOnSpecifiedLineError: if not force: raise snippet.target_line = colorize( snippet.target_line, AnsiColor.BOLD, ) return snippet.add_line_numbers()
Displays the secret, with surrounding lines of code for better context. :type filename: str :param filename: filename where secret resides in :type secret: dict, PotentialSecret.json() format :param secret: the secret listed in baseline :type plugin_settings: list :param plugin_settings: plugins used to create baseline. :type lines_of_context: int :param lines_of_context: number of lines displayed before and after secret. :type force: bool :param force: if True, will print the lines of code even if it doesn't find the secret expected :raises: SecretNotFoundOnSpecifiedLineError
def _get_registry_auth(registry_url, config_path): username = None password = None try: docker_config = json.load(open(config_path)) except ValueError: return username, password if docker_config.get('auths'): docker_config = docker_config['auths'] auth_key = docker_config.get(registry_url, {}).get('auth', None) if auth_key: username, password = base64.b64decode(auth_key).split(':', 1) return username, password
Retrieve from the config file the current authentication for a given URL, and return the username, password
def source_range_slices(start, end, nr_var_dict): return OrderedDict((k, slice(s,e,1)) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing slices for each source variable type.
def _get_stddevs(self, coeffs, stddev_types, num_sites): stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES stddevs.append(coeffs['sigma'] + np.zeros(num_sites)) return np.array(stddevs)
Return total sigma as reported in Table 2, p. 1202.
def state(self, value): if value.upper() == ON: return self.SOAPAction('SetSocketSettings', 'SetSocketSettingsResult', self.controlParameters("1", "true")) elif value.upper() == OFF: return self.SOAPAction('SetSocketSettings', 'SetSocketSettingsResult', self.controlParameters("1", "false")) else: raise TypeError("State %s is not valid." % str(value))
Set device state. :type value: str :param value: Future state (either ON or OFF)
def _fast_write(self, outfile, value): outfile.truncate(0) outfile.write(str(int(value))) outfile.flush()
Function for fast writing to motor files.
def retrieveAcknowledge(): a = TpPd(pd=0x3) b = MessageType(mesType=0x1d) packet = a / b return packet
RETRIEVE ACKNOWLEDGE Section 9.3.21
def uuid_constructor(loader, node): value = loader.construct_scalar(node) return uuid.UUID(value)
Construct a uuid.UUID object from a scalar YAML node. Tests: >>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader) >>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}") {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
def _check(user, topic): if topic['export_control']: product = v1_utils.verify_existence_and_get(topic['product_id'], models.PRODUCTS) return (user.is_in_team(product['team_id']) or product['team_id'] in user.parent_teams_ids) return False
If the topic has its export_control set to True then all the teams under the product team can access the topic's resources. :param user: :param topic: :return: True if check is ok, False otherwise
def status(self): if not self.created: return None self.inner().reload() return self.inner().status
Get the container's current status from Docker. If the container does not exist (before creation and after removal), the status is ``None``.
def _create_pax_generic_header(cls, pax_headers, type=tarfile.XHDTYPE): records = [] for keyword, value in pax_headers.iteritems(): try: keyword = keyword.encode("utf8") except Exception: pass try: value = value.encode("utf8") except Exception: pass l = len(keyword) + len(value) + 3 n = p = 0 while True: n = l + len(str(p)) if n == p: break p = n records.append("%d %s=%s\n" % (p, keyword, value)) records = "".join(records) info = {} info["name"] = "././@PaxHeader" info["type"] = type info["size"] = len(records) info["magic"] = tarfile.POSIX_MAGIC return cls._create_header(info, tarfile.USTAR_FORMAT) + \ cls._create_payload(records)
Return a POSIX.1-2001 extended or global header sequence that contains a list of keyword, value pairs. The values must be unicode objects.
def _best_res_pixels(self): factor = 2 * (AbstractMOC.HPY_MAX_NORDER - self.max_order) pix_l = [] for iv in self._interval_set._intervals: for val in range(iv[0] >> factor, iv[1] >> factor): pix_l.append(val) return np.asarray(pix_l)
Returns a numpy array of all the HEALPix indexes contained in the MOC at its max order. Returns ------- result : `~numpy.ndarray` The array of HEALPix at ``max_order``
def colors(self, value): if isinstance(value, str): from .palettes import PALETTES if value not in PALETTES: raise YellowbrickValueError( "'{}' is not a registered color palette".format(value) ) self._colors = copy(PALETTES[value]) elif isinstance(value, list): self._colors = value else: self._colors = list(value)
Converts color strings into a color listing.
def dr( self, cell_lengths ): half_cell_lengths = cell_lengths / 2.0 this_dr = self.final_site.r - self.initial_site.r for i in range( 3 ): if this_dr[ i ] > half_cell_lengths[ i ]: this_dr[ i ] -= cell_lengths[ i ] if this_dr[ i ] < -half_cell_lengths[ i ]: this_dr[ i ] += cell_lengths[ i ] return this_dr
Particle displacement vector for this jump Args: cell_lengths (np.array(x,y,z)): Cell lengths for the orthogonal simulation cell. Returns (np.array(x,y,z)): dr
def ask_bool(question: str, default: bool = True) -> bool: default_q = "Y/n" if default else "y/N" answer = input("{0} [{1}]: ".format(question, default_q)) lower = answer.lower() if not lower: return default return lower == "y"
Asks a yes/no question
def collections(self): iterator = self._firestore_api.list_collection_ids( self._database_string, metadata=self._rpc_metadata ) iterator.client = self iterator.item_to_value = _item_to_collection_ref return iterator
List top-level collections of the client's database. Returns: Sequence[~.firestore_v1beta1.collection.CollectionReference]: iterator of subcollections of the current document.
def get_parent_ids(self): id_list = [] from ..id.objects import IdList for parent_node in self._my_map['parentNodes']: id_list.append(str(parent_node.ident)) return IdList(id_list)
Gets the parents of this node. return: (osid.id.IdList) - the parents of this node *compliance: mandatory -- This method must be implemented.*
def ping(self): self.last_ping = time.time() try: self.send_message({MESSAGE_TYPE: TYPE_PING}) except NotConnected: self._socket_client.logger.error("Chromecast is disconnected. " + "Cannot ping until reconnected.")
Send a ping message.
def _is_nmrstar(string): if (string[0:5] == u"data_" and u"save_" in string) or (string[0:5] == b"data_" and b"save_" in string): return string return False
Test if input string is in NMR-STAR format. :param string: Input string. :type string: :py:class:`str` or :py:class:`bytes` :return: Input string if in NMR-STAR format or False otherwise. :rtype: :py:class:`str` or :py:obj:`False`
def pdf(self, mu): if self.transform is not None: mu = self.transform(mu) return ss.expon.pdf(mu, self.lmd0)
PDF for Exponential prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
def get_data_for_notifications(self, contact, notif, host_ref): if not host_ref: return [self, contact, notif] return [host_ref, self, contact, notif]
Get data for a notification :param contact: The contact to return :type contact: :param notif: the notification to return :type notif: :return: list containing the service, the host and the given parameters :rtype: list
def owner(self, owner): if owner is None: raise ValueError("Invalid value for `owner`, must not be `None`") if owner is not None and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', owner): raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`") self._owner = owner
Sets the owner of this LinkedDatasetCreateOrUpdateRequest. User name and unique identifier of the creator of the dataset. :param owner: The owner of this LinkedDatasetCreateOrUpdateRequest. :type: str
def visit_classdef(self, node, parent, newstyle=None): node, doc = self._get_doc(node) newnode = nodes.ClassDef(node.name, doc, node.lineno, node.col_offset, parent) metaclass = None if PY3: for keyword in node.keywords: if keyword.arg == "metaclass": metaclass = self.visit(keyword, newnode).value break if node.decorator_list: decorators = self.visit_decorators(node, newnode) else: decorators = None newnode.postinit( [self.visit(child, newnode) for child in node.bases], [self.visit(child, newnode) for child in node.body], decorators, newstyle, metaclass, [ self.visit(kwd, newnode) for kwd in node.keywords if kwd.arg != "metaclass" ] if PY3 else [], ) return newnode
visit a ClassDef node to become astroid
def _get_config_files(): config_paths = [] if os.environ.get('FEDMSG_CONFIG'): config_location = os.environ['FEDMSG_CONFIG'] else: config_location = '/etc/fedmsg.d' if os.path.isfile(config_location): config_paths.append(config_location) elif os.path.isdir(config_location): possible_config_files = [os.path.join(config_location, p) for p in os.listdir(config_location) if p.endswith('.py')] for p in possible_config_files: if os.path.isfile(p): config_paths.append(p) if not config_paths: _log.info('No configuration files found in %s', config_location) return config_paths
Load the list of file paths for fedmsg configuration files. Returns: list: List of files containing fedmsg configuration.
def _get_cache_plus_key(self): key = getattr(self, '_cache_key', self.key_from_query()) return self._cache.cache, key
Return a cache region plus key.
def tomorrow(date=None): if not date: return _date + datetime.timedelta(days=1) else: current_date = parse(date) return current_date + datetime.timedelta(days=1)
tomorrow is another day
def add_account_to_group(self, account, group): lgroup: OpenldapGroup = self._get_group(group.name) person: OpenldapAccount = self._get_account(account.username) changes = changeset(lgroup, {}) changes = lgroup.add_member(changes, person) save(changes, database=self._database)
Add account to group.
def calc_observable_fraction(self,distance_modulus): observable_fraction = self.isochrone.observableFraction(self.mask,distance_modulus) if not observable_fraction.sum() > 0: msg = "No observable fraction" msg += ("\n"+str(self.source.params)) logger.error(msg) raise ValueError(msg) return observable_fraction
Calculated observable fraction within each pixel of the target region.
def _guess_vc(self): if self.vc_ver <= 14.0: return default = r'VC\Tools\MSVC' guess_vc = os.path.join(self.VSInstallDir, default) try: vc_exact_ver = os.listdir(guess_vc)[-1] return os.path.join(guess_vc, vc_exact_ver) except (OSError, IOError, IndexError): pass
Locate Visual C for 2017
def create_client(addr, port): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect_ex((addr, port)) spin = Spin(sock) Client(spin) spin.add_map(CONNECT, install_basic_handles) spin.add_map(CONNECT_ERR, lambda con, err: lose(con)) return spin
Set up a TCP client and installs the basic handles Stdin, Stdout. def send_data(client): client.dump('GET / HTTP/1.1\r\n') xmap(client, LOAD, iostd.put) client = create_client('www.google.com.br', 80) xmap(client, CONNECT, send_data)
def rustcall(func, *args): lib.semaphore_err_clear() rv = func(*args) err = lib.semaphore_err_get_last_code() if not err: return rv msg = lib.semaphore_err_get_last_message() cls = exceptions_by_code.get(err, SemaphoreError) exc = cls(decode_str(msg)) backtrace = decode_str(lib.semaphore_err_get_backtrace()) if backtrace: exc.rust_info = backtrace raise exc
Calls rust method and does some error handling.
def generate_json_docs(module, pretty_print=False, user=None): indent = None separators = (',', ':') if pretty_print: indent = 4 separators = (',', ': ') module_doc_dict = generate_doc_dict(module, user) json_str = json.dumps(module_doc_dict, indent=indent, separators=separators) return json_str
Return a JSON string format of a Pale module's documentation. This string can either be printed out, written to a file, or piped to some other tool. This method is a shorthand for calling `generate_doc_dict` and passing it into a json serializer. The user argument is optional. If included, it expects the user to be an object with an "is_admin" boolean attribute. Any endpoint protected with a "@requires_permission" decorator will require user.is_admin == True to display documentation on that endpoint.
def error(self, error): if self.direction not in ['x', 'y', 'z'] and error is not None: raise ValueError("error only accepted for x, y, z dimensions") if isinstance(error, u.Quantity): error = error.to(self.unit).value self._error = error
set the error
def prettify(amount, separator=','): orig = str(amount) new = re.sub("^(-?\d+)(\d{3})", "\g<1>{0}\g<2>".format(separator), str(amount)) if orig == new: return new else: return prettify(new)
Separate with predefined separator.
def expand(self): if self.slurm: self._introspect_slurm_cluster() self.network.nodes = self._expand_nodes(self.network.nodes) self._expand_tags()
Perform node expansion of network section.
def timezone(self, value): self._timezone = (value if isinstance(value, datetime.tzinfo) else tz.gettz(value))
Set the timezone.
def get_device_model(self) -> str: output, _ = self._execute( '-s', self.device_sn, 'shell', 'getprop', 'ro.product.model') return output.strip()
Show device model.
def validate(self): has_more_nodes = True visited = set() to_visit = [self] index = 0 while has_more_nodes: has_more_nodes = False next_nodes = [] for node in to_visit: if node is None: next_nodes.extend((None, None)) else: if node in visited: raise NodeReferenceError( 'cyclic node reference at index {}'.format(index)) if not isinstance(node, Node): raise NodeTypeError( 'invalid node instance at index {}'.format(index)) if not isinstance(node.value, numbers.Number): raise NodeValueError( 'invalid node value at index {}'.format(index)) if node.left is not None or node.right is not None: has_more_nodes = True visited.add(node) next_nodes.extend((node.left, node.right)) index += 1 to_visit = next_nodes
Check if the binary tree is malformed. :raise binarytree.exceptions.NodeReferenceError: If there is a cyclic reference to a node in the binary tree. :raise binarytree.exceptions.NodeTypeError: If a node is not an instance of :class:`binarytree.Node`. :raise binarytree.exceptions.NodeValueError: If a node value is not a number (e.g. int, float). **Example**: .. doctest:: >>> from binarytree import Node >>> >>> root = Node(1) >>> root.left = Node(2) >>> root.right = root # Cyclic reference to root >>> >>> root.validate() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... NodeReferenceError: cyclic node reference at index 0
async def parse_get_revoc_reg_response(get_revoc_reg_response: str) -> (str, str, int): logger = logging.getLogger(__name__) logger.debug("parse_get_revoc_reg_response: >>> get_revoc_reg_response: %r", get_revoc_reg_response) if not hasattr(parse_get_revoc_reg_response, "cb"): logger.debug("parse_get_revoc_reg_response: Creating callback") parse_get_revoc_reg_response.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p, c_uint64)) c_get_revoc_reg_response = c_char_p(get_revoc_reg_response.encode('utf-8')) (revoc_reg_def_id, revoc_reg_json, timestamp) = await do_call('indy_parse_get_revoc_reg_response', c_get_revoc_reg_response, parse_get_revoc_reg_response.cb) res = (revoc_reg_def_id.decode(), revoc_reg_json.decode(), timestamp) logger.debug("parse_get_revoc_reg_response: <<< res: %r", res) return res
Parse a GET_REVOC_REG response to get Revocation Registry in the format compatible with Anoncreds API. :param get_revoc_reg_response: response of GET_REVOC_REG request. :return: Revocation Registry Definition Id, Revocation Registry json and Timestamp. { "value": Registry-specific data { "accum": string - current accumulator value. }, "ver": string - version revocation registry json }
def _write(self, session, openFile, replaceParamFile): openFile.write('GRIDPIPEFILE\n') openFile.write('PIPECELLS %s\n' % self.pipeCells) for cell in self.gridPipeCells: openFile.write('CELLIJ %s %s\n' % (cell.cellI, cell.cellJ)) openFile.write('NUMPIPES %s\n' % cell.numPipes) for node in cell.gridPipeNodes: openFile.write('SPIPE %s %s %.6f\n' % ( node.linkNumber, node.nodeNumber, node.fractPipeLength))
Grid Pipe File Write to File Method
def swap_yaml_string(file_path, swaps): original_file = file_to_string(file_path) new_file = original_file changed = False for item in swaps: match = re.compile(r'(?<={0}: )(["\']?)(.*)\1'.format(item[0]), re.MULTILINE) new_file = re.sub(match, item[1], new_file) if new_file != original_file: changed = True string_to_file(file_path, new_file) return (new_file, changed)
Swap a string in a yaml file without touching the existing formatting.
def CompressStream(in_stream, length=None, compresslevel=2, chunksize=16777216): in_read = 0 in_exhausted = False out_stream = StreamingBuffer() with gzip.GzipFile(mode='wb', fileobj=out_stream, compresslevel=compresslevel) as compress_stream: while not length or out_stream.length < length: data = in_stream.read(chunksize) data_length = len(data) compress_stream.write(data) in_read += data_length if data_length < chunksize: in_exhausted = True break return out_stream, in_read, in_exhausted
Compresses an input stream into a file-like buffer. This reads from the input stream until either we've stored at least length compressed bytes, or the input stream has been exhausted. This supports streams of unknown size. Args: in_stream: The input stream to read from. length: The target number of compressed bytes to buffer in the output stream. If length is none, the input stream will be compressed until it's exhausted. The actual length of the output buffer can vary from the target. If the input stream is exhausted, the output buffer may be smaller than expected. If the data is incompressible, the amount by which the maximum length can be exceeded can be calculated to be: chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17 This accounts for additional header data gzip adds. For the default 16MiB chunksize, this results in the max size of the output buffer being: length + 16Mib + 5142 bytes compresslevel: Optional, defaults to 2. The desired compression level. chunksize: Optional, defaults to 16MiB. The chunk size used when reading data from the input stream to write into the output buffer. Returns: A file-like output buffer of compressed bytes, the number of bytes read from the input stream, and a flag denoting if the input stream was exhausted.
def _range(self, xloc, cache): uloc = numpy.zeros((2, len(self))) for dist in evaluation.sorted_dependencies(self, reverse=True): if dist not in self.inverse_map: continue idx = self.inverse_map[dist] xloc_ = xloc[idx].reshape(1, -1) uloc[:, idx] = evaluation.evaluate_bound( dist, xloc_, cache=cache).flatten() return uloc
Special handle for finding bounds on constrained dists. Example: >>> d0 = chaospy.Uniform() >>> dist = chaospy.J(d0, d0+chaospy.Uniform()) >>> print(dist.range()) [[0. 0.] [1. 2.]]
def burst_range_spectrum(psd, snr=8, energy=1e-2): a = (constants.G * energy * constants.M_sun * 0.4 / (pi**2 * constants.c))**(1/2.) dspec = psd ** (-1/2.) * a / (snr * psd.frequencies) rspec = dspec.to('Mpc') if rspec.f0.value == 0.0: rspec[0] = 0.0 return rspec
Calculate the frequency-dependent GW burst range from a strain PSD Parameters ---------- psd : `~gwpy.frequencyseries.FrequencySeries` the instrumental power-spectral-density data snr : `float`, optional the signal-to-noise ratio for which to calculate range, default: `8` energy : `float`, optional the relative energy output of the GW burst, default: `0.01` (GRB-like burst) Returns ------- rangespec : `~gwpy.frequencyseries.FrequencySeries` the burst range `FrequencySeries` [Mpc (default)]
def thumb(self, size=BIGTHUMB): if not self.is_image(): return None if not size in (self.BIGTHUMB, self.MEDIUMTHUMB, self.SMALLTHUMB, self.XLTHUMB): raise JFSError('Invalid thumbnail size: %s for image %s' % (size, self.path)) return self.jfs.raw(url=self.path, params={'mode':'thumb', 'ts':size})
Get a thumbnail as string or None if the file isn't an image size would be one of JFSFile.BIGTHUMB, .MEDIUMTHUMB, .SMALLTHUMB or .XLTHUMB
def getpeptides(self, chain): all_from_chain = [o for o in pybel.ob.OBResidueIter( self.proteincomplex.OBMol) if o.GetChain() == chain] if len(all_from_chain) == 0: return None else: non_water = [o for o in all_from_chain if not o.GetResidueProperty(9)] ligand = self.extract_ligand(non_water) return ligand
If peptide ligand chains are defined via the command line options, try to extract the underlying ligand formed by all residues in the given chain without water
def select(self, names): return PolicyCollection( [p for p in self.policies if p.name in names], self.options)
return the named subset of policies
def getMonitorByName(self, monitorFriendlyName): url = self.baseUrl url += "getMonitors?apiKey=%s" % self.apiKey url += "&noJsonCallback=1&format=json" success, response = self.requestApi(url) if success: monitors = response.get('monitors').get('monitor') for i in range(len(monitors)): monitor = monitors[i] if monitor.get('friendlyname') == monitorFriendlyName: status = monitor.get('status') alltimeuptimeratio = monitor.get('alltimeuptimeratio') return status, alltimeuptimeratio return None, None
Returns monitor status and alltimeuptimeratio for a MonitorFriendlyName.
def add(self, *args, **kwargs): for arg in args: if isinstance(arg, str): self._operations.append(self._from_str(arg)) else: self._operations.append(arg) if kwargs: self._operations.append(kwargs)
Add new mapping from args and kwargs >>> om = OperationIdMapping() >>> om.add( ... OperationIdMapping(), ... 'aiohttp_apiset.swagger.operations', # any module ... getPets='mymod.handler', ... getPet='mymod.get_pet', ... ) >>> om['getPets'] 'mymod.handler' :param args: str, Mapping, module or obj :param kwargs: operationId='handler' or operationId=handler
def add_string(self, s): self.add_int(len(s)) self.packet.write(s) return self
Add a string to the stream. @param s: string to add @type s: str
async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any: async for delay in poll(pole_delay): info = await self.result_info() if info: result = info.result if info.success: return result else: raise result if timeout is not None and delay > timeout: raise asyncio.TimeoutError()
Get the result of the job, including waiting if it's not yet available. If the job raised an exception, it will be raised here. :param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever :param pole_delay: how often to poll redis for the job result
def disaggregate_wind(self, method='equal'): self.data_disagg.wind = melodist.disaggregate_wind(self.data_daily.wind, method=method, **self.statistics.wind)
Disaggregate wind speed. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Mean daily wind speed is duplicated for the 24 hours of the day. (Default) ``cosine`` Distributes daily mean wind speed using a cosine function derived from hourly observations. ``random`` Draws random numbers to distribute wind speed (usually not conserving the daily average).
def normalizeIdentifier(value): if value is None: return value if not isinstance(value, basestring): raise TypeError("Identifiers must be strings, not %s." % type(value).__name__) if len(value) == 0: raise ValueError("The identifier string is empty.") if len(value) > 100: raise ValueError("The identifier string has a length (%d) greater " "than the maximum allowed (100)." % len(value)) for c in value: v = ord(c) if v < 0x20 or v > 0x7E: raise ValueError("The identifier string ('%s') contains a " "character out size of the range 0x20 - 0x7E." % value) return unicode(value)
Normalizes identifier. * **value** must be an :ref:`type-string` or `None`. * **value** must not be longer than 100 characters. * **value** must not contain a character outside the range of 0x20 - 0x7E. * Returned value is an unencoded ``unicode`` string.
def register(self, cls): doc_type = cls.search_objects.mapping.doc_type self.all_models[doc_type] = cls base_class = cls.get_base_class() if base_class not in self.families: self.families[base_class] = {} self.families[base_class][doc_type] = cls if cls.search_objects.mapping.index not in self.indexes: self.indexes[cls.search_objects.mapping.index] = [] self.indexes[cls.search_objects.mapping.index].append(cls)
Adds a new PolymorphicIndexable to the registry.
def taf(data: TafData, units: Units) -> str: try: month = data.start_time.dt.strftime(r'%B') day = ordinal(data.start_time.dt.day) ret = f"Starting on {month} {day} - " except AttributeError: ret = '' return ret + '. '.join([taf_line(line, units) for line in data.forecast])
Convert TafData into a string for text-to-speech
def remove(text, what, count=None, strip=False): return replace(text, what, '', count=count, strip=strip)
Like ``replace``, where ``new`` replacement is an empty string.
def param_set(name, value, retries=3): name = name.upper() return mpstate.mav_param.mavset(mpstate.master(), name, value, retries=retries)
set a parameter
def _dns_lookup(name, rdtype, nameservers=None): rrset = dns.rrset.from_text(name, 0, 1, rdtype) try: resolver = dns.resolver.Resolver() resolver.lifetime = 1 if nameservers: resolver.nameservers = nameservers rrset = resolver.query(name, rdtype) for rdata in rrset: LOGGER.debug('DNS Lookup => %s %s %s %s', rrset.name.to_text(), dns.rdataclass.to_text(rrset.rdclass), dns.rdatatype.to_text(rrset.rdtype), rdata.to_text()) except dns.exception.DNSException as error: LOGGER.debug('DNS Lookup => %s', error) return rrset
Looks on specified or default system domain nameservers to resolve record type & name and returns record set. The record set is empty if no propagated record found.
def list_azure_containers(config_fpath): config_content = _get_config_dict_from_file(config_fpath) az_container_names = [] for key in config_content.keys(): if key.startswith(AZURE_KEY_PREFIX): name = key[len(AZURE_KEY_PREFIX):] az_container_names.append(name) return sorted(az_container_names)
List the azure storage containers in the config file. :param config_fpath: path to the dtool config file :returns: the list of azure storage container names
def get_random_url(ltd="com"): url = [ "https://", RandomInputHelper.get_random_value(8, [string.ascii_lowercase]), ".", ltd ] return "".join(url)
Get a random url with the given ltd. Args: ltd (str): The ltd to use (e.g. com). Returns: str: The random url.
def _convert_to_lexicon(self, record): name = record['Name'] if self.domain not in name: name = "{}.{}".format(name, self.domain) processed_record = { 'type': record['Type'], 'name': '{0}.{1}'.format(record['Name'], self.domain), 'ttl': record['TTL'], 'content': record['Address'], 'id': record['HostId'] } return processed_record
converts from namecheap raw record format to lexicon format record
def strip_column_names(cols, keep_paren_contents=True): new_cols = [ _strip_column_name(col, keep_paren_contents=keep_paren_contents) for col in cols] if len(new_cols) != len(set(new_cols)): warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.' warn_str += ' Reverting column names to the original.' warnings.warn(warn_str, Warning) print('Warning: strip_column_names would introduce duplicate names. Please fix & try again.') return dict(zip(cols, cols)) return dict(zip(cols, new_cols))
Utility script for renaming pandas columns to patsy-friendly names. Revised names have been: - stripped of all punctuation and whitespace (converted to text or `_`) - converted to lower case Takes a list of column names, returns a dict mapping names to revised names. If there are any concerns with the conversion, this will print a warning & return original column names. Parameters ---------- cols (list): list of strings containing column names keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns ------- dict mapping col_names -> new_col_names Example ------- > df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']), 'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']), } > df = pd.DataFrame(df) > df = df.rename(columns = strip_column_names(df.columns)) ## observe, by comparison > df2 = df.rename(columns = strip_column_names(df.columns, keep_paren_contents=False))
def parse(log_file): with io.open(os.path.expanduser(log_file), encoding="utf-8") as input_file: for line in input_file: if "Starting import of XUnit results" in line: obj = XUnitParser break elif "Starting import of test cases" in line: obj = TestcasesParser break elif "Starting import of requirements" in line: obj = RequirementsParser break else: raise Dump2PolarionException( "No valid data found in the log file '{}'".format(log_file) ) return obj(input_file, log_file).parse()
Parse log file.
def dispatch_sockets(self, timeout=None):
    """Dispatch ready sockets: accept on the listener, reap closed peers."""
    for ready in self.select_sockets(timeout=timeout):
        if ready is self.listener:
            conn, _addr = ready.accept()
            self.connected(conn)
            continue
        try:
            # Probe the socket; a connection reset just means the peer
            # went away and is handled as a normal disconnect.
            ready.recv(1)
        except socket.error as exc:
            if exc.errno != ECONNRESET:
                raise
        self.disconnected(ready)
Dispatches incoming sockets.
def translate(self, package, into=None):
    """Translate a local binary package into a local binary distribution.

    Returns None when the package has the wrong type or is incompatible
    with the supported tags; raises ValueError for remote packages.
    """
    if not package.local:
        raise ValueError('BinaryTranslator cannot translate remote packages.')
    if not isinstance(package, self._package_type):
        return None
    if not package.compatible(self._supported_tags):
        TRACER.log('Target package %s is not compatible with %s' % (
            package, self._supported_tags))
        return None
    destination = into or safe_mkdtemp()
    target_path = os.path.join(destination, package.filename)
    safe_copy(package.local_path, target_path)
    return DistributionHelper.distribution_from_path(target_path)
From a binary package, translate to a local binary distribution.
def change_status(request, page_id):
    """Switch a page's status from a POSTed value.

    Requires the 'pages.change_page' permission and a POST request;
    otherwise raises Http404.
    """
    allowed = request.user.has_perm('pages.change_page')
    if not (allowed and request.method == 'POST'):
        raise Http404
    page = Page.objects.get(pk=page_id)
    page.status = int(request.POST['status'])
    page.invalidate()
    page.save()
    return HttpResponse(str(page.status))
Switch the status of a page.
def get_consumer_cfg(config, only, qty):
    """Get the consumers config, optionally filtered.

    :param config: The config object whose ``application.Consumers``
        section holds the consumer definitions
    :type config: helper.config.Config
    :param str only: When set, filter to run only this consumer
    :param int qty: When set (together with ``only``), override that
        consumer's ``qty`` value
    :rtype: dict
    """
    consumers = dict(config.application.Consumers or {})
    if only:
        consumers = {key: value for key, value in consumers.items()
                     if key == only}
        # Only override qty when a consumer was actually selected and
        # exists; previously `consumers[only]['qty']` raised KeyError
        # when `only` was unset or not present in the config.
        if qty and only in consumers:
            consumers[only]['qty'] = qty
    return consumers
Get the consumers config, possibly filtering the config if only or qty is set. :param config: The consumers config section :type config: helper.config.Config :param str only: When set, filter to run only this consumer :param int qty: When set, set the consumer qty to this value :rtype: dict
def _add_action(self, notification, action, label, callback, *args):
    """Attach an action button to a mount notification.

    Note: only some libnotify services honor action buttons.
    """
    on_click = run_bg(lambda *_: callback(*args))
    try:
        # Newer libnotify signature (single user-data argument).
        notification.add_action(action, label, on_click, None)
    except TypeError:
        # Older signature expects an extra free_func argument.
        notification.add_action(action, label, on_click, None, None)
    notification.connect('closed', self._notifications.remove)
    self._notifications.append(notification)
Show an action button button in mount notifications. Note, this only works with some libnotify services.
def match(self, name):
    """Check whether *name* matches this rule.

    Args:
        name (str): name to check.

    Returns:
        Truthy when the name matches.  Most methods return a bool; the
        REGEX method returns the ``re`` match object (or None).
    """
    checks = {
        Ex.Method.PREFIX: lambda: name.startswith(self.value),
        Ex.Method.SUFFIX: lambda: name.endswith(self.value),
        Ex.Method.CONTAINS: lambda: self.value in name,
        Ex.Method.EXACT: lambda: self.value == name,
        Ex.Method.REGEX: lambda: re.search(self.value, name),
    }
    check = checks.get(self.method)
    return check() if check is not None else False
Check if given name matches this rule.

Args:
    name (str): name to check.

Returns:
    Truthy value when the name matches: ``True``/``False`` for most
    methods, while the REGEX method returns the ``re`` match object
    (or None), which is truthy exactly on a match.
def _process_file_continue_ftp_response(self, response: FTPResponse):
    """Process a restarted (REST) content response.

    Reopens the target file for appending when both the request and the
    response carry a restart offset; otherwise signals that the transfer
    cannot be continued.
    """
    restartable = response.request.restart_value and response.restart_value
    if not restartable:
        self._raise_cannot_continue_error()
    else:
        # Append mode so the restarted transfer extends the existing file.
        self.open_file(self._filename, response, mode='ab+')
Process a restarted content response.
def uninstall(self):
    """Delete the NApp's installed code, if present.

    Symlinked installs are unlinked; real directories are removed.
    """
    if not self.is_installed():
        return
    target = self.installed_dir()
    if target.is_symlink():
        target.unlink()
    else:
        shutil.rmtree(str(target))
Delete code inside NApp directory, if existent.
def preassemble(self, filters=None, grounding_map=None):
    """Preassemble the Statements collected in the model.

    Runs INDRA's grounding mapping, sequence mapping and preassembly,
    applies the requested filters, and stores the result in
    ``self.assembled_stmts``.

    Parameters
    ----------
    filters : Optional[list[str]]
        Filter options applied while choosing statements.  Handled here:
        'grounding' (keep only grounded statements) and 'human_only'
        (keep only human proteins); any further options are delegated to
        ``self._relevance_filter``.
    grounding_map : Optional[dict]
        A user-supplied grounding map forwarded to ``ac.map_grounding``.
    """
    stmts = self.get_statements()
    # Drop hypothesis statements before any mapping.
    stmts = ac.filter_no_hypothesis(stmts)
    if grounding_map is not None:
        stmts = ac.map_grounding(stmts, grounding_map=grounding_map)
    else:
        stmts = ac.map_grounding(stmts)
    if filters and ('grounding' in filters):
        stmts = ac.filter_grounded_only(stmts)
    stmts = ac.map_sequence(stmts)
    if filters and 'human_only' in filters:
        stmts = ac.filter_human_only(stmts)
    # return_toplevel=False keeps supporting statements, not just the
    # top-level unique ones.
    stmts = ac.run_preassembly(stmts, return_toplevel=False)
    stmts = self._relevance_filter(stmts, filters)
    self.assembled_stmts = stmts
Preassemble the Statements collected in the model. Use INDRA's GroundingMapper, Preassembler and BeliefEngine on the IncrementalModel and save the unique statements and the top level statements in class attributes. Currently the following filter options are implemented: - grounding: require that all Agents in statements are grounded - human_only: require that all proteins are human proteins - prior_one: require that at least one Agent is in the prior model - prior_all: require that all Agents are in the prior model Parameters ---------- filters : Optional[list[str]] A list of filter options to apply when choosing the statements. See description above for more details. Default: None grounding_map : Optional[dict] A user supplied grounding map which maps a string to a dictionary of database IDs (in the format used by Agents' db_refs).
def write_report(self, session, filename):
    """Write the current report to *filename* in JSON format.

    Reports the outcome (no report / write error) on the shell session.
    """
    if self.__report:
        try:
            with open(filename, "w+") as out_file:
                out_file.write(self.to_json(self.__report))
        except IOError as ex:
            session.write_line("Error writing to file: {0}", ex)
    else:
        session.write_line("No report to write down")
Writes the report in JSON format to the given file
def _create_record(self, rtype, name, content):
    """Create a DNS record; a no-op if the record already exists.

    :param str rtype: type of record
    :param str name: name of record
    :param mixed content: value of record
    :return bool: success status
    :raises Exception: on any API error other than "already exists"
    """
    record = {
        'domain': self._domain,
        'type': rtype.upper(),
        'name': self._full_name(name),
        'content': content,
    }
    ttl = self._get_lexicon_option('ttl')
    if ttl:
        record['ttl'] = ttl
    # Auth fields are merged last, as in the original call order.
    record.update(self._auth)
    response = self._api.nameserver.createRecord(record)
    # Code 2302 means the record already exists -- treated as success.
    self._validate_response(
        response=response,
        message='Failed to create record',
        exclude_code=2302)
    return True
create a record does nothing if the record already exists :param str rtype: type of record :param str name: name of record :param mixed content: value of record :return bool: success status :raises Exception: on error
def reduce_memory_demand(self):
    """Try to lower the two-digit ``gwmem`` input variable.

    Returns True if a reduction was applied, False otherwise.
    """
    current = int(self.get_inpvar("gwmem", default=11))
    tens, units = divmod(current, 10)
    if units == 1:
        # e.g. 21 -> 20: drop the unit digit first.
        self.set_vars(gwmem="%.2d" % (10 * tens))
        return True
    if tens == 1:
        # e.g. 1x -> 00: last possible reduction step.
        self.set_vars(gwmem="%.2d" % 00)
        return True
    return False
Method that can be called by the scheduler to decrease the memory demand of a specific task. Returns True in case of success, False in case of Failure.
def _get_conn(ret=None):
    """Return a sqlite3 database connection from the configured options.

    Raises Exception when the database path or timeout option is missing.
    """
    options = _get_options(ret)
    database = options.get('database')
    timeout = options.get('timeout')
    if not database:
        raise Exception(
            'sqlite3 config option "sqlite3.database" is missing')
    if not timeout:
        raise Exception(
            'sqlite3 config option "sqlite3.timeout" is missing')
    log.debug('Connecting the sqlite3 database: %s timeout: %s',
              database, timeout)
    return sqlite3.connect(database, timeout=float(timeout))
Return a sqlite3 database connection
def connect(self, fedora_url, data=None, method='Get'):
    """Connect to a REST endpoint of the Fedora Commons repository.

    Args:
        fedora_url (str): absolute URL, or a path joined onto
            ``self.base_url``
        data (dict): optional request body
        method (str): HTTP method name, defaults to 'Get'

    Returns:
        the urllib response object

    Raises:
        urllib.error.URLError: re-raised after printing diagnostics
    """
    if data is None:
        data = {}
    if not fedora_url.startswith("http"):
        fedora_url = urllib.parse.urljoin(self.base_url, fedora_url)
    request = urllib.request.Request(fedora_url, method=method)
    request.add_header('Accept', 'text/turtle')
    request.add_header('Content-Type', 'text/turtle')
    if len(data) > 0:
        request.data = data
    try:
        return urllib.request.urlopen(request)
    except urllib.error.URLError as err:
        if hasattr(err, 'reason'):
            print("failed to reach server at {} with {} method".format(
                fedora_url, request.method))
            print("Reason: ", err.reason)
            print("Data: ", data)
        elif hasattr(err, 'code'):
            print("Server error {}".format(err.code))
        raise err
Method attempts to connect to REST servers of the Fedora Commons repository using optional data parameter. Args: fedora_url(string): Fedora URL data(dict): Data to through to REST endpoint method(str): REST Method, defaults to GET Returns: result(string): Response string from Fedora
def _granule_identifier_to_xml_name(granule_identifier): changed_item_type = re.sub("_MSI_", "_MTD_", granule_identifier) split_by_underscores = changed_item_type.split("_") del split_by_underscores[-1] cleaned = str() for i in split_by_underscores: cleaned += (i + "_") out_xml = cleaned[:-1] + ".xml" return out_xml
Very ugly way to convert the granule identifier. e.g. From Granule Identifier: S2A_OPER_MSI_L1C_TL_SGS__20150817T131818_A000792_T28QBG_N01.03 To Granule Metadata XML name: S2A_OPER_MTD_L1C_TL_SGS__20150817T131818_A000792_T28QBG.xml
def normalize_genotypes(genotypes):
    """Standardize genotypes to zero mean and unit variance (NaN-aware).

    Args:
        genotypes (Genotypes): container whose ``genotypes`` attribute
            holds the raw genotype array.

    Returns:
        numpy.array: the normalized genotypes.
    """
    values = genotypes.genotypes
    centered = values - np.nanmean(values)
    return centered / np.nanstd(values)
Normalize the genotypes. Args: genotypes (Genotypes): The genotypes to normalize. Returns: numpy.array: The normalized genotypes.
def irafconvert(iraffilename):
    """Convert an IRAF-style file name to its Unix equivalent.

    Handles both ``$directory/file`` (leading ``$``, resolved from the
    environment) and ``directory$file`` (resolved via ``CONVERTDICT``).
    http/ftp URLs and names without ``$`` are returned unchanged (paths
    are normalized first).

    Parameters
    ----------
    iraffilename : str
        Filename in IRAF format.

    Returns
    -------
    unixfilename : str
        Filename in Unix format.

    Raises
    ------
    AttributeError
        If input is not a string.
    KeyError
        If the environment variable or conversion-table entry is missing.
    """
    convertdict = CONVERTDICT
    if not iraffilename.lower().startswith(('http', 'ftp')):
        iraffilename = os.path.normpath(iraffilename)
    if iraffilename.startswith('$'):
        # "$dir/file" form: resolve $dir from the environment.
        pat = re.compile(r'\$(\w*)')
        match = re.match(pat, iraffilename)
        dirname = match.group(1)
        unixdir = os.environ[dirname]
        # +1 skips the path separator that follows the matched "$dir".
        basename = iraffilename[match.end() + 1:]
        unixfilename = os.path.join(unixdir, basename)
        return unixfilename
    elif '$' in iraffilename:
        # "dir$file" form: resolve dir via the conversion table.
        irafdir, basename = iraffilename.split('$')
        if irafdir == 'synphot':
            # synphot data ships with the package rather than via a
            # directory mapping.
            return get_data_filename(os.path.basename(basename))
        unixdir = convertdict[irafdir]
        unixfilename = os.path.join(unixdir, basename)
        return unixfilename
    else:
        return iraffilename
Convert the IRAF file name to its Unix equivalent. Input can be in ``directory$file`` or ``$directory/file`` format. If ``'$'`` is not found in the input string, it is returned as-is. Parameters ---------- iraffilename : str Filename in IRAF format. Returns ------- unixfilename : str Filename in Unix format. Raises ------ AttributeError Input is not a string.
def get_body_encoding(self):
    """Return the content-transfer-encoding used for body encoding.

    Returns the string 'quoted-printable' or 'base64' for the QP and
    BASE64 encodings; otherwise returns the ``encode_7or8bit`` function,
    which the caller invokes with the Message being encoded (it sets the
    Content-Transfer-Encoding header itself).  SHORTEST is not a valid
    body encoding here.
    """
    assert self.body_encoding != SHORTEST
    if self.body_encoding == QP:
        return 'quoted-printable'
    if self.body_encoding == BASE64:
        return 'base64'
    return encode_7or8bit
Return the content-transfer-encoding used for body encoding. This is either the string `quoted-printable' or `base64' depending on the encoding used, or it is a function in which case you should call the function with a single argument, the Message object being encoded. The function should then set the Content-Transfer-Encoding header itself to whatever is appropriate. Returns "quoted-printable" if self.body_encoding is QP. Returns "base64" if self.body_encoding is BASE64. Returns conversion function otherwise.
def not0(a):
    """Return a matrix like *a* with every zero entry replaced by 1."""
    return matrix([1 if entry == 0 else entry for entry in a], a.size)
Return a copy of ``a`` in which every zero entry is replaced by 1;
nonzero entries are returned unchanged.
def attention_bias_prepend_inputs_full_attention(padding):
    """Create a bias tensor for prepend_mode="prepend_inputs_full_attention".

    Allows full connectivity within the "inputs" part of each sequence
    and masked (causal) connectivity within the targets part.

    Args:
        padding: a float `Tensor` with shape [batch, length]; ones mark
            padding positions.  In each row, a single padding position
            separates the input part from the target part.

    Returns:
        a `Tensor` with shape [batch, 1, length, length].
    """
    # Positions at or after the padding marker belong to the target part.
    in_target = tf.cumsum(padding, axis=1, exclusive=True)
    # Running position index inside the target part (0 for all inputs).
    target_pos = tf.cumsum(in_target, axis=1)
    # A query may not attend to a target position later than its own.
    illegal_connections = tf.greater(
        tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2))
    bias = tf.to_float(illegal_connections) * -1e9
    # Add the heads dimension expected by the attention ops.
    bias = tf.expand_dims(bias, 1)
    return bias
Create a bias tensor for prepend_mode="prepend_inputs_full_attention". See prepend_inputs in common_hparams.py. Produces a bias tensor to be used in self-attention. This bias tensor allows for full connectivity in the "inputs" part of the sequence and masked connectivity in the targets part. Args: padding: a float `Tensor` with shape [batch, length] with ones in positions corresponding to padding. In each row, a single padding position separates the input part from the target part. Returns: a `Tensor` with shape [batch, 1, length, length].
def xpathRegisterNs(self, prefix, ns_uri):
    """Register namespace @prefix -> @ns_uri; unregisters it when
    @ns_uri is None."""
    return libxml2mod.xmlXPathRegisterNs(self._o, prefix, ns_uri)
Register a new namespace. If @ns_uri is None it unregisters the namespace
def get_tab_tip(self, filename, is_modified=None, is_readonly=None):
    """Return the tab tooltip text for *filename*."""
    template = self.__modified_readonly_title(
        u"%s — %s", is_modified, is_readonly)
    is_temp = (self.tempfile_path is not None
               and filename == encoding.to_unicode_from_fs(self.tempfile_path))
    if is_temp:
        # The scratch buffer gets a generic label instead of a basename.
        temp_file_str = to_text_string(_("Temporary file"))
        return template % (temp_file_str, self.tempfile_path)
    return template % (osp.basename(filename), osp.dirname(filename))
Return tab menu title
def _make_tuple(self, env):
    """Instantiate the runtime Tuple for this TupleNode and attach its
    schema."""
    tup = runtime.Tuple(self, env, dict2tuple)
    tup.attach_schema(schema_spec_from_tuple(tup))
    return tup
Instantiate the Tuple based on this TupleNode.
def getDirSizeRecursively(dirPath):
    """Return the cumulative on-disk size, in bytes, of *dirPath*.

    Shells out to ``du -s`` with BLOCKSIZE=512 (rather than
    --block-size=1, which macOS's du does not support) and converts the
    block count back to bytes.  Raises subprocess.CalledProcessError if
    du cannot access part of the tree.

    :param str dirPath: A valid path to a directory or file.
    :return: Total size, in bytes.
    """
    env = dict(os.environ, BLOCKSIZE='512')
    output = subprocess.check_output(['du', '-s', dirPath], env=env)
    blocks = int(output.decode('utf-8').split()[0])
    return blocks * 512
This method will return the cumulative number of bytes occupied by the files on disk in the directory and its subdirectories. This method will raise a 'subprocess.CalledProcessError' if it is unable to access a folder or file because of insufficient permissions. Therefore this method should only be called on the jobStore, and will alert the user if some portion is inaccessible. Everything in the jobStore should have appropriate permissions as there is no way to read the filesize without permissions. The environment variable 'BLOCKSIZE'='512' is set instead of the much cleaner --block-size=1 because Apple can't handle it. :param str dirPath: A valid path to a directory or file. :return: Total size, in bytes, of the file or directory at dirPath.
def delete(self, *args, **kwargs):
    """Remove the image file from disk, then delete the record."""
    path = self.file.file.name
    try:
        os.remove(path)
    except (OSError, IOError):
        # File already gone or unreadable -- nothing to clean up.
        pass
    super(Image, self).delete(*args, **kwargs)
delete image when an image record is deleted
def write_empty(self, size):
    """Append *size* zero bytes to the file.

    The file position must already be at end of file.  A non-positive
    size is a no-op.
    """
    if size >= 1:
        # Seek forward size-1 bytes and write a single NUL; the gap is
        # zero-filled (sparse where the filesystem supports it).
        self._fh.seek(size - 1, 1)
        self._fh.write(b'\x00')
Append size bytes to file. Position must be at end of file.
def fetch(self, only_ref=False):
    """Fetch this object from NIOS, by ``_ref`` when known, otherwise by
    search fields, and update the local object with the returned fields.

    :param only_ref: search path only -- request no extra return fields
        and update just the reference on the local object.
    :returns: True on a successful fetch, False otherwise.
    """
    if self.ref:
        # Direct lookup by object reference.
        reply = self.connector.get_object(
            self.ref, return_fields=self.return_fields)
        if reply:
            self.update_from_dict(reply)
            return True
    # Fall back to searching by the object's updatable fields.
    search_dict = self.to_dict(search_fields='update')
    return_fields = [] if only_ref else self.return_fields
    reply = self.connector.get_object(self.infoblox_type,
                                      search_dict,
                                      return_fields=return_fields)
    if reply:
        # Search returns a list of matches; use the first one.
        self.update_from_dict(reply[0], only_ref=only_ref)
        return True
    return False
Fetch object from NIOS by _ref or searchfields Update existent object with fields returned from NIOS Return True on successful object fetch
def extern_store_bool(self, context_handle, b):
    """Given a context and _Bool, return a new Handle representing it."""
    context = self._ffi.from_handle(context_handle)
    return context.to_value(b)
Given a context and _Bool, return a new Handle to represent the _Bool.
def unixtimestamp(datetime):
    """Return the unix timestamp of *datetime*.

    Naive datetimes are assumed to be UTC; aware ones are normalized
    to UTC first.
    """
    # NOTE: the parameter shadows the datetime class; utcfromtimestamp()
    # is reached through the instance, resolving to the class method.
    epoch = UTC.localize(datetime.utcfromtimestamp(0))
    if datetime.tzinfo:
        aware = UTC.normalize(datetime)
    else:
        aware = UTC.localize(datetime)
    return total_seconds(aware - epoch)
Get unix time stamp from that given datetime. If datetime is not tzaware then it's assumed that it is UTC
def snapshot_name_to_id(name, snap_name, strict=False, runas=None):
    """Convert a snapshot name to a snapshot ID.

    Raises SaltInvocationError if no snapshot has the given name, or if
    several do and ``strict`` is True.  When several match and ``strict``
    is False, the full list of IDs is returned (after logging a warning).

    :param str name: Name/ID of VM whose snapshots are inspected
    :param str snap_name: Name of the snapshot
    :param bool strict: Raise an exception if multiple snapshot IDs are found
    :param str runas: The user that the prlctl command will be run as
    """
    name = salt.utils.data.decode(name)
    snap_name = salt.utils.data.decode(snap_name)
    info = prlctl('snapshot-list', name, runas=runas)
    snap_ids = _find_guids(info)
    # Collect every snapshot ID whose resolved name matches.
    named_ids = []
    for snap_id in snap_ids:
        if snapshot_id_to_name(name, snap_id, runas=runas) == snap_name:
            named_ids.append(snap_id)
    if not named_ids:
        raise SaltInvocationError(
            'No snapshots for VM "{0}" have name "{1}"'.format(name, snap_name)
        )
    elif len(named_ids) == 1:
        return named_ids[0]
    else:
        multi_msg = ('Multiple snapshots for VM "{0}" have name '
                     '"{1}"'.format(name, snap_name))
        if strict:
            raise SaltInvocationError(multi_msg)
        else:
            log.warning(multi_msg)
            return named_ids
Attempt to convert a snapshot name to a snapshot ID. If no snapshot with
the given name exists, a ``SaltInvocationError`` is raised. If multiple
snapshots share the same name, a list of IDs will be returned unless
``strict`` is set, in which case an exception is raised.

:param str name: Name/ID of VM whose snapshots are inspected

:param str snap_name: Name of the snapshot

:param bool strict: Raise an exception if multiple snapshot IDs are found

:param str runas: The user that the prlctl command will be run as

CLI Example:

.. code-block:: bash

    salt '*' parallels.snapshot_name_to_id macvm original runas=macdev
def sg_lookup(tensor, opt):
    r"""Look up embeddings of `tensor` in the embedding matrix `opt.emb`.

    (Fixes a stray bare ``r`` left on the ``def`` line by a severed
    r-docstring prefix, which made the function a syntax error.)

    Args:
        tensor: A tensor of integer ids (automatically given by chain).
        opt:
            emb: A 2-D `Tensor`. An embedding matrix (mandatory).
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor` of looked-up embeddings.
    """
    assert opt.emb is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)
r"""Looks up the `tensor`, which is the embedding matrix. Args: tensor: A tensor ( automatically given by chain ) opt: emb: A 2-D `Tensor`. An embedding matrix. name: If provided, replace current tensor's name. Returns: A `Tensor`.
def remove_supervisor_app(app_name):
    """Remove a Supervisor app configuration and reload Supervisor."""
    conf_path = u'/etc/supervisor/conf.d/%s.conf' % app_name
    if not files.exists(conf_path):
        return
    sudo(u'rm %s' % conf_path)
    supervisor_command(u'update')
Remove Supervisor app configuration.
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy the contents of *src* into *dst*.

    The destination directory is created if missing.  Top-level
    subdirectories are copied with :func:`shutil.copytree` (so matching
    subdirectories of *dst* must not already exist); plain files are
    copied with :func:`shutil.copy2`.

    Args
    ----
    src (string): the full path of source directory
    dst (string): the full path of destination directory
    symlinks (boolean): passed through to shutil.copytree for
        subdirectories
    ignore (list): passed through to shutil.copytree for subdirectories

    Raises
    ------
    FolderExistsError: if any entry fails to copy -- typically because a
        destination subdirectory already exists.
    """
    if not os.path.exists(dst):
        os.mkdir(dst)
    try:
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                shutil.copytree(s, d, symlinks, ignore)
            else:
                shutil.copy2(s, d)
    except Exception as e:
        # Chain the underlying error so the real cause (which may not be
        # an existing folder) is not masked by the blanket exception.
        raise FolderExistsError("Folder already exists in %s" % dst) from e
Function recursively copies from directory to directory. Args ---- src (string): the full path of source directory dst (string): the full path of destination directory symlinks (boolean): the switch for tracking symlinks ignore (list): the ignore list
def bin_b64_type(arg): try: arg = base64.standard_b64decode(arg) except (binascii.Error, TypeError): raise argparse.ArgumentTypeError("{0} is invalid base64 data".format(repr(arg))) return arg
An argparse type representing binary data encoded in base64.