def create_tipo_equipamento(self):
    """Get an instance of tipo_equipamento services facade."""
    return TipoEquipamento(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
def default_interface(ifconfig=None, route_output=None):
    """
    Return just the default interface device dictionary.

    :param ifconfig: For mocking actual command output
    :param route_output: For mocking actual command output
    """
    global Parser
    return Parser(ifconfig=ifconfig)._default_interface(route_output=route_output)
def update(self, data, ed):
    """Processes the given ciphertext/plaintext

    Inputs:
        data: raw string of any length
        ed:   'e' for encryption, 'd' for decryption

    Output:
        processed raw string block(s), if any

    When the supplied data is not a multiple of the blocksize of the
    cipher, then the remaining input data will be cached. The next time
    the update function is called with some data, the new data will be
    concatenated to the cache and then cache+data will be processed and
    full blocks will be outputted.
    """
    if ed == 'e':
        encrypted_blocks = b''
        self.cache += data
        if len(self.cache) < self.blocksize:
            return b''
        for i in range(0, len(self.cache) - self.blocksize + 1, self.blocksize):
            self.IV = self.codebook.encrypt(strxor(self.cache[i:i + self.blocksize], self.IV))
            encrypted_blocks += self.IV
        self.cache = self.cache[i + self.blocksize:]
        return encrypted_blocks
    else:
        decrypted_blocks = b''
        self.cache += data
        if len(self.cache) < self.blocksize:
            return b''
        for i in range(0, len(self.cache) - self.blocksize + 1, self.blocksize):
            plaintext = strxor(self.IV, self.codebook.decrypt(self.cache[i:i + self.blocksize]))
            self.IV = self.cache[i:i + self.blocksize]
            decrypted_blocks += plaintext
        self.cache = self.cache[i + self.blocksize:]
        return decrypted_blocks
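The update method above relies on a strxor helper for the CBC chaining step. A minimal pure-Python sketch of such a helper, assuming byte-wise XOR of two equal-length byte strings (real crypto libraries typically ship an optimized version):

def strxor(a, b):
    """Byte-wise XOR of two equal-length byte strings."""
    assert len(a) == len(b), "strxor operands must have equal length"
    return bytes(x ^ y for x, y in zip(a, b))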
def is_vhost_alive(self, vhost):
    """
    Declares a test queue, then publishes and consumes a message.
    Intended for use by monitoring tools.

    :param vhost: The vhost name to check
    :type vhost: str
    """
    return self._api_get('/api/aliveness-test/{0}'.format(
        urllib.parse.quote_plus(vhost)
    ))
def write(self, pkt):
    """
    Writes a Packet or bytes to a pcap file.

    :param pkt: Packet(s) to write (one record for each Packet),
                or raw bytes to write (as one record).
    :type pkt: iterable[Packet], Packet or bytes
    """
    if isinstance(pkt, bytes):
        if not self.header_present:
            self._write_header(pkt)
        self._write_packet(pkt)
    else:
        pkt = pkt.__iter__()
        for p in pkt:
            if not self.header_present:
                self._write_header(p)
            self._write_packet(p)
def all_low_level_calls(self):
    """ recursive version of low_level calls """
    if self._all_low_level_calls is None:
        self._all_low_level_calls = self._explore_functions(lambda x: x.low_level_calls)
    return self._all_low_level_calls
def sample(self, hash, limit=None, offset=None):
    '''Return an object representing the sample identified by the input
    hash, or an empty object if that sample is not found'''
    uri = self._uris['sample'].format(hash)
    params = {'limit': limit, 'offset': offset}
    return self.get_parse(uri, params)
def _write_image_description(self):
    """Write metadata to ImageDescription tag."""
    if (not self._datashape or self._datashape[0] == 1 or
            self._descriptionoffset <= 0):
        return

    colormapped = self._colormap is not None
    if self._imagej:
        isrgb = self._shape[-1] in (3, 4)
        description = imagej_description(
            self._datashape, isrgb, colormapped, **self._metadata)
    else:
        description = json_description(self._datashape, **self._metadata)

    # rewrite description and its length to file
    description = description.encode('utf-8')
    description = description[:self._descriptionlen - 1]
    pos = self._fh.tell()
    self._fh.seek(self._descriptionoffset)
    self._fh.write(description)
    self._fh.seek(self._descriptionlenoffset)
    self._fh.write(struct.pack(self._byteorder + self._offsetformat,
                               len(description) + 1))
    self._fh.seek(pos)
    self._descriptionoffset = 0
    self._descriptionlenoffset = 0
    self._descriptionlen = 0
def GetTimeOfDay(self):
    """Retrieves the time of day represented by the date and time values.

    Returns:
      tuple[int, int, int]: hours, minutes, seconds or (None, None, None)
          if the date and time values do not represent a time of day.
    """
    normalized_timestamp = self._GetNormalizedTimestamp()
    if normalized_timestamp is None:
        return None, None, None

    _, hours, minutes, seconds = self._GetTimeValues(normalized_timestamp)
    return hours, minutes, seconds
def cubic_interpolate(x, y, precision=250, **kwargs):
    """
    Interpolate x, y using a cubic algorithm
    https://en.wikipedia.org/wiki/Spline_interpolation
    """
    n = len(x) - 1
    # Spline equation is a + bx + cx² + dx³
    # ie: Spline part i equation is a[i] + b[i]x + c[i]x² + d[i]x³
    a = y
    b = [0] * (n + 1)
    c = [0] * (n + 1)
    d = [0] * (n + 1)
    m = [0] * (n + 1)
    z = [0] * (n + 1)

    h = [x2 - x1 for x1, x2 in zip(x, x[1:])]
    k = [a2 - a1 for a1, a2 in zip(a, a[1:])]
    g = [k[i] / h[i] if h[i] else 1 for i in range(n)]

    for i in range(1, n):
        j = i - 1
        l = 1 / (2 * (x[i + 1] - x[j]) - h[j] * m[j]) if x[i + 1] - x[j] else 0
        m[i] = h[i] * l
        z[i] = (3 * (g[i] - g[j]) - h[j] * z[j]) * l

    for j in reversed(range(n)):
        if h[j] == 0:
            continue
        c[j] = z[j] - (m[j] * c[j + 1])
        b[j] = g[j] - (h[j] * (c[j + 1] + 2 * c[j])) / 3
        d[j] = (c[j + 1] - c[j]) / (3 * h[j])

    for i in range(n + 1):
        yield x[i], a[i]
        if i == n or h[i] == 0:
            continue
        for s in range(1, precision):
            X = s * h[i] / precision
            X2 = X * X
            X3 = X2 * X
            yield x[i] + X, a[i] + b[i] * X + c[i] * X2 + d[i] * X3
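Since cubic_interpolate is a generator, a caller materializes it with list(). A quick usage sketch with made-up sample points:

xs = [0, 1, 2, 3]
ys = [0.0, 1.0, 0.0, 1.0]
points = list(cubic_interpolate(xs, ys, precision=10))
print(points[0])    # (0, 0.0) -- the first knot is yielded as-is
print(len(points))  # 31 = 4 knots + 3 intervals * 9 interior points each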
def voltage_based(feedin, generators, curtailment_timeseries, edisgo,
                  curtailment_key, **kwargs):
    """
    Implements curtailment methodology 'voltage-based'.

    The curtailment that has to be met in each time step is allocated
    depending on the exceedance of the allowed voltage deviation at the
    nodes of the generators. The higher the exceedance, the higher the
    curtailment.

    The optional parameter `voltage_threshold` specifies the threshold for
    the exceedance of the allowed voltage deviation above which a generator
    is curtailed. By default it is set to zero, meaning that all generators
    at nodes with voltage deviations that exceed the allowed voltage
    deviation are curtailed. Generators at nodes where the allowed voltage
    deviation is not exceeded are not curtailed. In the case that the
    required curtailment exceeds the weather-dependent availability of all
    generators with voltage deviations above the specified threshold, the
    voltage threshold is lowered in steps of 0.01 p.u. until the
    curtailment target can be met.

    Above the threshold, the curtailment is proportional to the exceedance
    of the allowed voltage deviation. In order to find the linear relation
    between the curtailment and the voltage difference a linear problem is
    formulated and solved using the python package pyomo. See documentation
    for further information.

    Parameters
    ----------
    feedin : :pandas:`pandas.DataFrame<dataframe>`
        Dataframe holding the feed-in of each generator in kW for the
        technology (and weather cell) specified in `curtailment_key`
        parameter. Index of the dataframe is a
        :pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the
        representatives of the fluctuating generators.
    generators : :pandas:`pandas.DataFrame<dataframe>`
        Dataframe with all generators of the type (and in weather cell)
        specified in `curtailment_key` parameter. See return value of
        :func:`edisgo.grid.tools.get_gen_info` for more information.
    curtailment_timeseries : :pandas:`pandas.Series<series>`
        The curtailment in kW to be distributed amongst the generators in
        `generators` parameter. Index of the series is a
        :pandas:`pandas.DatetimeIndex<datetimeindex>`.
    edisgo : :class:`edisgo.grid.network.EDisGo`
    curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
        The technology and weather cell ID if :obj:`tuple` or only the
        technology if :obj:`str` the curtailment is specified for.
    voltage_threshold : :obj:`float`
        The node voltage below which no curtailment is assigned to the
        respective generator if not necessary. Default: 0.0.
    solver : :obj:`str`
        The solver used to optimize the curtailment assigned to the
        generator. Possible options are:

        * 'cbc'
          coin-or branch and cut solver
        * 'glpk'
          gnu linear programming kit solver
        * any other available solver compatible with 'pyomo' like 'gurobi'
          or 'cplex'

        Default: 'cbc'

    """
    voltage_threshold = pd.Series(kwargs.get('voltage_threshold', 0.0),
                                  index=curtailment_timeseries.index)
    solver = kwargs.get('solver', 'cbc')
    combined_analysis = kwargs.get('combined_analysis', False)

    # get the voltages at the generators
    voltages_lv_gens = edisgo.network.results.v_res(
        nodes=generators.loc[(generators.voltage_level == 'lv')].index,
        level='lv')
    voltages_mv_gens = edisgo.network.results.v_res(
        nodes=generators.loc[(generators.voltage_level == 'mv')].index,
        level='mv')
    voltages_gens = voltages_lv_gens.join(voltages_mv_gens)

    # get voltages at stations
    grids = list(set(generators.grid))
    lv_stations = [_.station for _ in grids if 'LVStation' in repr(_.station)]
    voltage_lv_stations = edisgo.network.results.v_res(
        nodes=lv_stations, level='lv')
    voltages_mv_station = edisgo.network.results.v_res(
        nodes=[edisgo.network.mv_grid.station], level='mv')
    voltages_stations = voltage_lv_stations.join(voltages_mv_station)

    # get allowed voltage deviations
    if not combined_analysis:
        allowed_voltage_dev_mv = edisgo.network.config[
            'grid_expansion_allowed_voltage_deviations'][
            'mv_feedin_case_max_v_deviation']
        allowed_voltage_diff_lv = edisgo.network.config[
            'grid_expansion_allowed_voltage_deviations'][
            'lv_feedin_case_max_v_deviation']
    else:
        allowed_voltage_dev_mv = edisgo.network.config[
            'grid_expansion_allowed_voltage_deviations'][
            'mv_lv_feedin_case_max_v_deviation']
        allowed_voltage_diff_lv = edisgo.network.config[
            'grid_expansion_allowed_voltage_deviations'][
            'mv_lv_feedin_case_max_v_deviation']
    generators['allowed_voltage_dev'] = generators.voltage_level.apply(
        lambda _: allowed_voltage_diff_lv if _ == 'lv'
        else allowed_voltage_dev_mv)

    # calculate voltage difference from generator node to station
    voltage_gens_diff = pd.DataFrame()
    for gen in voltages_gens.columns:
        station = generators[generators.gen_repr == gen].grid.values[0].station
        voltage_gens_diff[gen] = voltages_gens.loc[:, gen] - \
            voltages_stations.loc[:, repr(station)] - \
            generators[generators.gen_repr == gen].allowed_voltage_dev.iloc[0]

    # for every time step check if curtailment can be fulfilled, otherwise
    # reduce voltage threshold; set feed-in of generators below voltage
    # threshold to zero, so that they cannot be curtailed
    for ts in curtailment_timeseries.index:
        # get generators with voltage higher than threshold
        gen_pool = voltage_gens_diff.loc[
            ts, voltage_gens_diff.loc[ts, :] > voltage_threshold.loc[ts]].index
        # if curtailment cannot be fulfilled lower voltage threshold
        while sum(feedin.loc[ts, gen_pool]) < curtailment_timeseries.loc[ts]:
            voltage_threshold.loc[ts] = voltage_threshold.loc[ts] - 0.01
            gen_pool = voltage_gens_diff.loc[
                ts, voltage_gens_diff.loc[ts, :] >
                voltage_threshold.loc[ts]].index
        # set feed-in of generators below voltage threshold to zero, so that
        # they cannot be curtailed
        gen_pool_out = voltage_gens_diff.loc[
            ts, voltage_gens_diff.loc[ts, :] <=
            voltage_threshold.loc[ts]].index
        feedin.loc[ts, gen_pool_out] = 0

    # only optimize for time steps where curtailment is greater than zero
    timeindex = curtailment_timeseries[curtailment_timeseries > 0].index
    if not timeindex.empty:
        curtailment = _optimize_voltage_based_curtailment(
            feedin, voltage_gens_diff, curtailment_timeseries,
            voltage_threshold, timeindex, solver)
    else:
        curtailment = pd.DataFrame()

    # set curtailment for other time steps to zero
    curtailment = curtailment.append(pd.DataFrame(
        0, columns=feedin.columns, index=curtailment_timeseries[
            curtailment_timeseries <= 0].index))

    # check if curtailment target was met
    _check_curtailment_target(curtailment, curtailment_timeseries,
                              curtailment_key)

    # assign curtailment to individual generators
    _assign_curtailment(curtailment, edisgo, generators, curtailment_key)
def _unlock(self):
    """
    Unlock this keyring by getting the password for the keyring from the
    user.
    """
    self.keyring_key = getpass.getpass(
        'Please enter password for encrypted keyring: ')
    try:
        ref_pw = self.get_password('keyring-setting', 'password reference')
        assert ref_pw == 'password reference value'
    except AssertionError:
        self._lock()
        raise ValueError("Incorrect Password")
def hook(self, pc):
    """
    A decorator used to register a hook function for a given instruction
    address. Equivalent to calling :func:`~add_hook`.

    :param pc: Address of instruction to hook
    :type pc: int or None
    """
    def decorator(f):
        self.add_hook(pc, f)
        return f
    return decorator
def _PrPz(r0, z0, r1, z1, r2, z2, r3, z3):
    """
    Intersection point for infinite lines.

    Parameters
    ----------
    r0 : float
    z0 : float
    r1 : float
    z1 : float
    r2 : float
    z2 : float
    r3 : float
    z3 : float

    Returns
    -------
    Pr : float
    Pz : float
    hit : bool
    """
    Pr = ((r0*z1 - z0*r1)*(r2 - r3) - (r0 - r1)*(r2*z3 - r3*z2)) / \
        ((r0 - r1)*(z2 - z3) - (z0 - z1)*(r2 - r3))
    Pz = ((r0*z1 - z0*r1)*(z2 - z3) - (z0 - z1)*(r2*z3 - r3*z2)) / \
        ((r0 - r1)*(z2 - z3) - (z0 - z1)*(r2 - r3))
    # hit is True when the intersection lies within the bounding box of
    # the first segment (r0, z0)-(r1, z1), regardless of its orientation
    if Pr >= r0 and Pr <= r1 and Pz >= z0 and Pz <= z1:
        hit = True
    elif Pr <= r0 and Pr >= r1 and Pz >= z0 and Pz <= z1:
        hit = True
    elif Pr >= r0 and Pr <= r1 and Pz <= z0 and Pz >= z1:
        hit = True
    elif Pr <= r0 and Pr >= r1 and Pz <= z0 and Pz >= z1:
        hit = True
    else:
        hit = False
    return [Pr, Pz, hit]
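A quick illustrative call with made-up coordinates: the line through (0,0)-(2,2) and the line through (0,2)-(2,0) cross at (1,1), which lies inside the first segment's bounding box:

Pr, Pz, hit = _PrPz(0.0, 0.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0)
print(Pr, Pz, hit)  # 1.0 1.0 True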
def validate_required_attributes(fully_qualified_name: str,
                                 spec: Dict[str, Any],
                                 *attributes: str) -> List[RequiredAttributeError]:
    """ Validates to ensure that a set of attributes are present in spec """
    return [
        RequiredAttributeError(fully_qualified_name, spec, attribute)
        for attribute in attributes
        if attribute not in spec
    ]
def get(self, id, domain='messages'):
    """
    Gets a message translation.

    @rtype: str
    @return: The message translation
    """
    assert isinstance(id, (str, unicode))
    assert isinstance(domain, (str, unicode))

    if self.defines(id, domain):
        return self.messages[domain][id]

    if self.fallback_catalogue is not None:
        return self.fallback_catalogue.get(id, domain)

    return id
async def get_pypi_version(self):
    """Get version published to PyPi."""
    self._version_data["beta"] = self.beta
    self._version_data["source"] = "PyPi"
    info_version = None
    last_release = None

    try:
        async with async_timeout.timeout(5, loop=self.loop):
            response = await self.session.get(URL["pypi"])
            data = await response.json()
            info_version = data["info"]["version"]
            releases = data["releases"]

            for version in sorted(releases, reverse=True):
                # note: the original had doubled backslashes inside a raw
                # string, which would never match a version number
                if re.search(r"^(\d+\.)?(\d\.)?(\*|\d+)$", version):
                    continue
                else:
                    last_release = version
                    break

            self._version = info_version

            if self.beta:
                if info_version in last_release:
                    self._version = info_version
                else:
                    self._version = last_release

        _LOGGER.debug("Version: %s", self.version)
        _LOGGER.debug("Version data: %s", self.version_data)

    except asyncio.TimeoutError:
        _LOGGER.error("Timeout error fetching version information from PyPi")
    except KeyError as error:
        _LOGGER.error("Error parsing version information from PyPi, %s", error)
    except TypeError as error:
        _LOGGER.error("Error parsing version information from PyPi, %s", error)
    except aiohttp.ClientError as error:
        _LOGGER.error("Error fetching version information from PyPi, %s", error)
    except socket.gaierror as error:
        _LOGGER.error("Error fetching version information from PyPi, %s", error)
    except Exception as error:  # pylint: disable=broad-except
        _LOGGER.critical("Something really wrong happened! - %s", error)
def matched_interpreters(interpreters, constraints):
    """Given some filters, yield any interpreter that matches at least one of them.

    :param interpreters: a list of PythonInterpreter objects for filtering
    :param constraints: A sequence of strings that constrain the interpreter
      compatibility for this pex. Each string uses the Requirement-style format,
      e.g. 'CPython>=3' or '>=2.7,<3' for requirements agnostic to interpreter
      class. Multiple requirement strings may be combined into a list to OR the
      constraints, such as ['CPython>=2.7,<3', 'CPython>=3.4'].
    :return interpreter: returns a generator that yields compatible interpreters
    """
    for interpreter in interpreters:
        if any(interpreter.identity.matches(filt) for filt in constraints):
            TRACER.log("Constraints on interpreters: %s, Matching Interpreter: %s"
                       % (constraints, interpreter.binary), V=3)
            yield interpreter
def from_wif_or_ewif_file(path: str, password: Optional[str] = None) -> SigningKeyType:
    """
    Return SigningKey instance from Duniter WIF or EWIF file

    :param path: Path to WIF or EWIF file
    :param password: Password needed for EWIF file
    """
    with open(path, 'r') as fh:
        wif_content = fh.read()

    # check data field
    regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)
    match = search(regex, wif_content)
    if not match:
        raise Exception('Error: Bad format WIF or EWIF v1 file')

    # capture hexa wif key
    wif_hex = match.groups()[0]
    return SigningKey.from_wif_or_ewif_hex(wif_hex, password)
def FlagsIntoString(self):
    """Returns a string with the flags assignments from this FlagValues object.

    This function ignores flags whose value is None. Each flag assignment is
    separated by a newline.

    NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
    from http://code.google.com/p/google-gflags

    Returns:
      string with the flags assignments from this FlagValues object.
    """
    s = ''
    for flag in self.FlagDict().values():
        if flag.value is not None:
            s += flag.serialize() + '\n'
    return s
def merge_entries_with_common_prefixes(list_, number_of_needed_commons=6):
    """
    Returns a list where sequences of post-fixed entries are shortened to
    their common prefix. This might be useful in cases of several similar
    values, where the prefix is identical for several entries. If less than
    'number_of_needed_commons' are identically prefixed, they are kept
    unchanged.

    Example: ['test', 'pc1', 'pc2', 'pc3', ..., 'pc10'] -> ['test', 'pc*']
    """
    # first find common entry-sequences
    prefix = None
    lists_to_merge = []
    for entry in list_:
        newPrefix, number = split_string_at_suffix(entry, numbers_into_suffix=True)
        if entry == newPrefix or prefix != newPrefix:
            lists_to_merge.append([])
            prefix = newPrefix
        lists_to_merge[-1].append((entry, newPrefix, number))

    # then merge them
    returnvalue = []
    for common_entries in lists_to_merge:
        common_prefix = common_entries[0][1]
        assert all(common_prefix == prefix for entry, prefix, number in common_entries)
        if len(common_entries) <= number_of_needed_commons:
            returnvalue.extend((entry for entry, prefix, number in common_entries))
        else:
            # we use '*' to indicate several entries,
            # it would also be possible to use '[min,max]' from
            # '(n for e,p,n in common_entries)'
            returnvalue.append(common_prefix + '*')

    return returnvalue
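The code above assumes a split_string_at_suffix helper. A minimal sketch of what it presumably does, namely splitting a trailing run of digits off the entry (the real helper may differ):

def split_string_at_suffix(s, numbers_into_suffix=True):
    """Split 'pc10' into ('pc', '10'); a string without a numeric
    suffix comes back unchanged with an empty suffix."""
    i = len(s)
    while numbers_into_suffix and i > 0 and s[i - 1].isdigit():
        i -= 1
    return s[:i], s[i:]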
def sample_qubo(self, qubo, **params):
    """Sample from the specified QUBO.

    Args:
        qubo (dict of (int, int):float): Coefficients of a quadratic
            unconstrained binary optimization (QUBO) model.
        **params: Parameters for the sampling method, specified per solver.

    Returns:
        :obj:`Future`

    Examples:
        This example creates a client using the local system's default D-Wave
        Cloud Client configuration file, which is configured to access a
        D-Wave 2000Q QPU, submits a :term:`QUBO` problem (a Boolean NOT gate
        represented by a penalty model), and samples 5 times.

        >>> from dwave.cloud import Client
        >>> with Client.from_config() as client:  # doctest: +SKIP
        ...     solver = client.get_solver()
        ...     u, v = next(iter(solver.edges))
        ...     Q = {(u, u): -1, (u, v): 0, (v, u): 2, (v, v): -1}
        ...     computation = solver.sample_qubo(Q, num_reads=5)
        ...     for i in range(5):
        ...         print(computation.samples[i][u], computation.samples[i][v])
        ...
        (0, 1)
        (1, 0)
        (1, 0)
        (0, 1)
        (1, 0)
    """
    # In a QUBO the linear and quadratic terms in the objective are mixed into
    # a matrix. For the sake of encoding, we will separate them before calling
    # `_sample`
    linear = {i1: v for (i1, i2), v in uniform_iterator(qubo) if i1 == i2}
    quadratic = {(i1, i2): v for (i1, i2), v in uniform_iterator(qubo) if i1 != i2}
    return self._sample('qubo', linear, quadratic, params)
def run_coral(clus_obj, out_dir, args):
    """
    Run some CoRaL modules to predict small RNA function
    """
    if not args.bed:
        raise ValueError("This module needs the bed file output from cluster subcmd.")
    workdir = op.abspath(op.join(args.out, 'coral'))
    safe_dirs(workdir)
    bam_in = op.abspath(args.bam)
    bed_in = op.abspath(args.bed)
    reference = op.abspath(args.ref)
    with chdir(workdir):
        bam_clean = coral.prepare_bam(bam_in, bed_in)
        out_dir = op.join(workdir, "regions")
        safe_dirs(out_dir)
        prefix = "seqcluster"
        loci_file = coral.detect_regions(bam_clean, bed_in, out_dir, prefix)
        coral.create_features(bam_clean, loci_file, reference, out_dir)
def institutes(context, institute_id, json):
    """Show all institutes in the database"""
    LOG.info("Running scout view institutes")
    adapter = context.obj['adapter']

    if institute_id:
        institute_objs = []
        institute_obj = adapter.institute(institute_id)
        if not institute_obj:
            LOG.info("Institute %s does not exist", institute_id)
            return
        institute_objs.append(institute_obj)
    else:
        institute_objs = [ins_obj for ins_obj in adapter.institutes()]

    if len(institute_objs) == 0:
        click.echo("No institutes found")
        context.abort()

    header = ''
    if not json:
        for key in institute_objs[0].keys():
            header = header + "{0}\t".format(key)
        click.echo(header)

    for institute_obj in institute_objs:
        if json:
            click.echo(institute_obj)
            continue
        row = ''
        for value in institute_obj.values():
            row = row + "{0}\t".format(value)
        click.echo(row)
def convert_flatten(net, node, module, builder):
    """Convert a flatten layer from mxnet to coreml.

    Parameters
    ----------
    net : network
        A mxnet network object.
    node : node
        Node to convert.
    module : module
        A module for MXNet
    builder : NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    mode = 0  # CHANNEL_FIRST
    builder.add_flatten(name, mode, input_name, output_name)
def create_option_group(name, engine_name, major_engine_version,
                        option_group_description, tags=None, region=None,
                        key=None, keyid=None, profile=None):
    '''
    Create an RDS option group

    CLI example to create an RDS option group::

        salt myminion boto_rds.create_option_group my-opt-group mysql 5.6 \
                "group description"
    '''
    res = __salt__['boto_rds.option_group_exists'](name, tags, region, key,
                                                   keyid, profile)
    if res.get('exists'):
        return {'exists': bool(res)}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn:
            return {'results': bool(conn)}

        taglist = _tag_doc(tags)
        rds = conn.create_option_group(OptionGroupName=name,
                                       EngineName=engine_name,
                                       MajorEngineVersion=major_engine_version,
                                       OptionGroupDescription=option_group_description,
                                       Tags=taglist)
        return {'exists': bool(rds)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
def normalise_tensor(tensor):
    '''
    Normalise the tensor by dividing it by its norm, defined such that
    np.sqrt(X:X)
    '''
    tensor_norm = np.linalg.norm(tensor)
    return tensor / tensor_norm, tensor_norm
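A quick check with an illustrative array (the norm here is the Frobenius norm that np.linalg.norm computes by default):

import numpy as np

t = np.array([[3.0, 0.0], [0.0, 4.0]])
unit, norm = normalise_tensor(t)
print(norm)                  # 5.0
print(np.linalg.norm(unit))  # 1.0 -- the returned tensor has unit norm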
def xmoe2_v1_l4k_local_only():
    """With sequence length 4096."""
    hparams = xmoe2_v1_l4k()
    hparams.decoder_layers = [
        "local_att" if l == "att" else l for l in hparams.decoder_layers]
    return hparams
def iterchunks(data, chunksize):
    """iterate chunks of data"""
    offt = 0
    while offt < len(data):
        yield data[offt:offt + chunksize]
        offt += chunksize
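Usage is straightforward; the last chunk may be shorter than chunksize:

print(list(iterchunks(b'abcdefg', 3)))  # [b'abc', b'def', b'g']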
def move(self, module_path, configuration=True, module=True):
    """Move the submodule to another module path. This involves physically
    moving the repository at our current path, changing the configuration,
    as well as adjusting our index entry accordingly.

    :param module_path: the path to which to move our module in the parent
        repository's working tree, given as repository-relative or absolute
        path. Intermediate directories will be created accordingly. If the
        path already exists, it must be empty. Trailing (back)slashes are
        removed automatically
    :param configuration: if True, the configuration will be adjusted to let
        the submodule point to the given path.
    :param module: if True, the repository managed by this submodule will be
        moved as well. If False, we don't move the submodule's checkout,
        which may leave the parent repository in an inconsistent state.
    :return: self
    :raise ValueError: if the module path existed and was not empty, or was
        a file
    :note: Currently the method is not atomic, and it could leave the
        repository in an inconsistent state if a sub-step fails for some
        reason
    """
    if module + configuration < 1:
        raise ValueError("You must specify to move at least the module or the configuration of the submodule")
    # END handle input

    module_checkout_path = self._to_relative_path(self.repo, module_path)

    # VERIFY DESTINATION
    if module_checkout_path == self.path:
        return self
    # END handle no change

    module_checkout_abspath = join_path_native(self.repo.working_tree_dir, module_checkout_path)
    if osp.isfile(module_checkout_abspath):
        raise ValueError("Cannot move repository onto a file: %s" % module_checkout_abspath)
    # END handle target files

    index = self.repo.index
    tekey = index.entry_key(module_checkout_path, 0)
    # if the target item already exists, fail
    if configuration and tekey in index.entries:
        raise ValueError("Index entry for target path did already exist")
    # END handle index key already there

    # remove existing destination
    if module:
        if osp.exists(module_checkout_abspath):
            if len(os.listdir(module_checkout_abspath)):
                raise ValueError("Destination module directory was not empty")
            # END handle non-emptiness

            if osp.islink(module_checkout_abspath):
                os.remove(module_checkout_abspath)
            else:
                os.rmdir(module_checkout_abspath)
            # END handle link
        else:
            # recreate parent directories
            # NOTE: renames() does that now
            pass
        # END handle existence
    # END handle module

    # move the module into place if possible
    cur_path = self.abspath
    renamed_module = False
    if module and osp.exists(cur_path):
        os.renames(cur_path, module_checkout_abspath)
        renamed_module = True

        if osp.isfile(osp.join(module_checkout_abspath, '.git')):
            module_abspath = self._module_abspath(self.repo, self.path, self.name)
            self._write_git_file_and_module_config(module_checkout_abspath, module_abspath)
        # end handle git file rewrite
    # END move physical module

    # rename the index entry - have to manipulate the index directly as
    # git-mv cannot be used on submodules ... yeah
    previous_sm_path = self.path
    try:
        if configuration:
            try:
                ekey = index.entry_key(self.path, 0)
                entry = index.entries[ekey]
                del(index.entries[ekey])
                nentry = git.IndexEntry(entry[:3] + (module_checkout_path,) + entry[4:])
                index.entries[tekey] = nentry
            except KeyError:
                raise InvalidGitRepositoryError("Submodule's entry at %r did not exist" % (self.path))
            # END handle submodule doesn't exist

            # update configuration
            with self.config_writer(index=index) as writer:  # auto-write
                writer.set_value('path', module_checkout_path)
                self.path = module_checkout_path
        # END handle configuration flag
    except Exception:
        if renamed_module:
            os.renames(module_checkout_abspath, cur_path)
        # END undo module renaming
        raise
    # END handle undo rename

    # Auto-rename submodule if its name was 'default', that is, the checkout directory
    if previous_sm_path == self.name:
        self.rename(module_checkout_path)
    # end

    return self
def select_join(self, table, field, join_table, join_field):
    """.. :py:method::

    Usage::

        >>> select_join('hospital', 'id', 'department', 'hospid')
        select hospital.id from hospital
        left join department on hospital.id=department.hospid
        where department.hospid is null;
    """
    sql = "select {table}.{field} from {table} left join {join_table} "\
          "on {table}.{field}={join_table}.{join_field} "\
          "where {join_table}.{join_field} is null;".format(
              table=table, field=field,
              join_table=join_table, join_field=join_field)
    return super(PGWrapper, self).execute(sql, result=True).results
def addFilter(self, layer_id, where=None, outFields="*"):
    """ adds a layer definition filter """
    import copy
    f = copy.deepcopy(self._filterTemplate)
    f['layerId'] = layer_id
    f['outFields'] = outFields
    if where is not None:
        f['where'] = where
    if f not in self._filter:
        self._filter.append(f)
def EnablePlugins(self, plugin_includes):
    """Enables parser plugins.

    Args:
      plugin_includes (list[str]): names of the plugins to enable, where
          None or an empty list represents all plugins. Note that the
          default plugin is handled separately.
    """
    super(SyslogParser, self).EnablePlugins(plugin_includes)

    self._plugin_by_reporter = {}
    for plugin in self._plugins:
        self._plugin_by_reporter[plugin.REPORTER] = plugin
def get_dist(self):
    """Return a pkg_resources.Distribution built from self.egg_info_path"""
    egg_info = self.egg_info_path('').rstrip('/')
    base_dir = os.path.dirname(egg_info)
    metadata = pkg_resources.PathMetadata(base_dir, egg_info)
    dist_name = os.path.splitext(os.path.basename(egg_info))[0]
    return pkg_resources.Distribution(
        os.path.dirname(egg_info),
        project_name=dist_name,
        metadata=metadata)
def _partition_species(composition, max_components=2):
    """
    Private method to split a list of species into various partitions.
    """
    def _partition(collection):
        # thanks https://stackoverflow.com/a/30134039
        if len(collection) == 1:
            yield [collection]
            return
        first = collection[0]
        for smaller in _partition(collection[1:]):
            # insert `first` in each of the subpartition's subsets
            for n, subset in enumerate(smaller):
                yield smaller[:n] + [[first] + subset] + smaller[n + 1:]
            # put `first` in its own subset
            yield [[first]] + smaller

    def _sort_partitions(partitions_to_sort):
        """
        Sort partitions by those we want to check first (typically,
        merging two sites into one is the one to try first).
        """
        partition_indices = [(idx, [len(p) for p in partition])
                             for idx, partition in enumerate(partitions_to_sort)]
        # sort by maximum length of partition first (try smallest maximums
        # first) and secondarily by number of partitions (most partitions
        # first, i.e. create the 'least disordered' structures first)
        partition_indices = sorted(partition_indices,
                                   key=lambda x: (max(x[1]), -len(x[1])))
        # merge at most max_components sites,
        # e.g. merge at most 2 species into 1 disordered site
        partition_indices = [x for x in partition_indices
                             if max(x[1]) <= max_components]
        partition_indices.pop(0)  # this is just the input structure
        sorted_partitions = [partitions_to_sort[x[0]] for x in partition_indices]
        return sorted_partitions

    collection = list(composition.keys())
    partitions = list(_partition(collection))
    partitions = _sort_partitions(partitions)
    return partitions
def chambers(self):
    """
    Return distinct chambers. You probably want to prefetch
    documents__chamber before calling that.
    """
    # Use sorted() because using order_by will hit the database no matter
    # what was prefetched
    return set(sorted([d.chamber for d in self.documents.all()]))
def rmdir(store, path=None):
    """Remove all items under the given path. If `store` provides a `rmdir`
    method, this will be called, otherwise will fall back to implementation
    via the `MutableMapping` interface."""
    path = normalize_storage_path(path)
    if hasattr(store, 'rmdir'):
        # pass through
        store.rmdir(path)
    else:
        # slow version, delete one key at a time
        _rmdir_from_keys(store, path)
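For illustration, the slow fallback presumably iterates the mapping's keys and deletes those under the path prefix; a hypothetical sketch of _rmdir_from_keys (the real helper may differ):

def _rmdir_from_keys(store, path=None):
    # Delete every key under `path`, one at a time, using only the
    # MutableMapping interface (hypothetical reconstruction).
    prefix = (path + '/') if path else ''
    for key in list(store.keys()):
        if key.startswith(prefix):
            del store[key]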
def get_system_time():
    '''
    Get the system time.

    Returns:
        str: Returns the system time in HH:MM:SS AM/PM format.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.get_system_time
    '''
    now = win32api.GetLocalTime()
    meridian = 'AM'
    hours = int(now[4])
    if hours == 12:
        meridian = 'PM'
    elif hours == 0:
        hours = 12
    elif hours > 12:
        hours = hours - 12
        meridian = 'PM'
    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
def init_process(self):
    """
    init a standard daq process for a single device
    """
    self.device = Device.objects.filter(protocol__daq_daemon=1, active=1,
                                        id=self.device_id).first()
    if not self.device:
        logger.error("Error init_process for %s" % self.device_id)
        return False
    self.dt_set = min(self.dt_set, self.device.polling_interval)
    self.dt_query_data = self.device.polling_interval
    try:
        self.device = self.device.get_device_instance()
    except Exception:
        var = traceback.format_exc()
        logger.error("exception while initialisation of DAQ Process for Device %d %s %s" % (
            self.device_id, linesep, var))
    return True
def create_html(api_key, attrs):
    '''Returns complete html tag string.'''
    gif = get_gif(api_key, attrs['gif_id'])
    if 'alt' not in attrs.keys():
        attrs['alt'] = 'source: {}'.format(gif['data']['source'])
    html_out = '<a href="{}">'.format(gif['data']['url'])
    html_out += '<img src="{}" alt="{}">'.format(
        gif['data']['images']['original']['url'], attrs['alt'])
    html_out += '</a>'
    return html_out
def doit(self, classes=None, recursive=True, **kwargs):
    """Rewrite (sub-)expressions in a more explicit form

    Return a modified expression that is more explicit than the original
    expression. The definition of "more explicit" is decided by the
    relevant subclass, e.g. a :meth:`Commutator <.Commutator.doit>` is
    written out according to its definition.

    Args:
        classes (None or list): an optional list of classes. If given,
            only (sub-)expressions that are an instance of one of the
            classes in the list will be rewritten.
        recursive (bool): If True, also rewrite any sub-expressions of any
            rewritten expression. Note that :meth:`doit` always recurses
            into sub-expressions of expressions not affected by it.
        kwargs: Any remaining keyword arguments may be used by the
            :meth:`doit` method of a particular expression.

    Example:

        Consider the following expression::

            >>> from sympy import IndexedBase
            >>> i = IdxSym('i'); N = symbols('N')
            >>> Asym, Csym = symbols('A, C', cls=IndexedBase)
            >>> A = lambda i: OperatorSymbol(StrLabel(Asym[i]), hs=0)
            >>> B = OperatorSymbol('B', hs=0)
            >>> C = lambda i: OperatorSymbol(StrLabel(Csym[i]), hs=0)
            >>> def show(expr):
            ...     print(unicode(expr, show_hs_label=False))
            >>> expr = Sum(i, 1, 3)(Commutator(A(i), B) + C(i)) / N
            >>> show(expr)
            1/N (∑_{i=1}^{3} (Ĉ_i + [Â_i, B̂]))

        Calling :meth:`doit` without parameters rewrites both the indexed
        sum and the commutator::

            >>> show(expr.doit())
            1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + Â₁ B̂ + Â₂ B̂ + Â₃ B̂ - B̂ Â₁ - B̂ Â₂ - B̂ Â₃)

        A non-recursive call only expands the sum, as it does not recurse
        into the expanded summands::

            >>> show(expr.doit(recursive=False))
            1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂])

        We can selectively expand only the sum or only the commutator::

            >>> show(expr.doit(classes=[IndexedSum]))
            1/N (Ĉ₁ + Ĉ₂ + Ĉ₃ + [Â₁, B̂] + [Â₂, B̂] + [Â₃, B̂])

            >>> show(expr.doit(classes=[Commutator]))
            1/N (∑_{i=1}^{3} (Ĉ_i - B̂ Â_i + Â_i B̂))

        Also we can pass a keyword argument that expands the sum only to
        the 2nd term, as documented in :meth:`.Commutator.doit`

            >>> show(expr.doit(classes=[IndexedSum], max_terms=2))
            1/N (Ĉ₁ + Ĉ₂ + [Â₁, B̂] + [Â₂, B̂])
    """
    in_classes = (
        (classes is None) or
        any([isinstance(self, cls) for cls in classes]))
    if in_classes:
        new = self._doit(**kwargs)
    else:
        new = self
    if (new == self) or recursive:
        new_args = []
        for arg in new.args:
            if isinstance(arg, Expression):
                new_args.append(arg.doit(
                    classes=classes, recursive=recursive, **kwargs))
            else:
                new_args.append(arg)
        new_kwargs = OrderedDict([])
        for (key, val) in new.kwargs.items():
            if isinstance(val, Expression):
                new_kwargs[key] = val.doit(
                    classes=classes, recursive=recursive, **kwargs)
            else:
                new_kwargs[key] = val
        new = new.__class__.create(*new_args, **new_kwargs)
    if new != self and recursive:
        new = new.doit(classes=classes, recursive=True, **kwargs)
    return new
def get_role(self, account_id, role_id):
    """
    Get information about a single role, for the passed Canvas account ID.

    https://canvas.instructure.com/doc/api/roles.html#method.role_overrides.show
    """
    url = ACCOUNTS_API.format(account_id) + "/roles/{}".format(role_id)
    return CanvasRole(data=self._get_resource(url))
def _action(self, action):
    """do something! What you do is determined by the argument:
       1   Output A. Copy B to A. Get the next B.
       2   Copy B to A. Get the next B. (Delete A).
       3   Get the next B. (Delete B).
       action treats a string as a single character. Wow!
       action recognizes a regular expression if it is preceded by ( or , or =.
    """
    if action <= 1:
        self._outA()
    if action <= 2:
        self.theA = self.theB
        if self.theA == "'" or self.theA == '"':
            while 1:
                self._outA()
                self.theA = self._get()
                if self.theA == self.theB:
                    break
                if self.theA <= '\n':
                    raise UnterminatedStringLiteral()
                if self.theA == '\\':
                    self._outA()
                    self.theA = self._get()
    if action <= 3:
        self.theB = self._next()
        if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
                                 self.theA == '=' or self.theA == ':' or
                                 self.theA == '[' or self.theA == '?' or
                                 self.theA == '!' or self.theA == '&' or
                                 self.theA == '|'):
            self._outA()
            self._outB()
            while 1:
                self.theA = self._get()
                if self.theA == '/':
                    break
                elif self.theA == '\\':
                    self._outA()
                    self.theA = self._get()
                elif self.theA <= '\n':
                    raise UnterminatedRegularExpression()
                self._outA()
            self.theB = self._next()
def stationary_distribution_from_backward_iteration(P, eps=1e-15):
    r"""Fast computation of the stationary vector using backward iteration.

    Parameters
    ----------
    P : (M, M) scipy.sparse matrix
        Transition matrix
    eps : float (optional)
        Perturbation parameter for the true eigenvalue.

    Returns
    -------
    pi : (M,) ndarray
        Stationary vector

    """
    A = P.transpose()
    mu = 1.0 - eps
    x0 = np.ones(P.shape[0])
    y = backward_iteration(A, mu, x0)
    pi = y / y.sum()
    return pi
def main():
    """
    Prototype to see how an RPG simulation might be used in the AIKIF
    framework. The idea is to build a simple character and run a
    simulation to see how it succeeds in a random world against another
    player's character:

        character stats
        world locations
    """
    character1 = Character('Albogh', str=4, int=7, sta=50)
    character2 = Character('Zoltor', str=6, int=6, sta=70)
    print('PLAYER1 [start]:', character1)
    print('PLAYER2 [start]:', character2)
    b = Battle(character1, character2)
    print(b)
    print('PLAYER1 [end]:', character1)
    print('PLAYER2 [end]:', character2)
def handle(self, *args, **options):
    """
    Queues the function given with the first argument with the
    parameters given with the rest of the argument list.
    """
    verbosity = int(options.get('verbosity', 1))
    timeout = options.get('timeout')
    queue = get_queue(options.get('queue'))
    job = queue.enqueue_call(args[0], args=args[1:], timeout=timeout)
    if verbosity:
        print('Job %s created' % job.id)
def load_tasks_from_file(self, file_path):
    """ Imports specified python module and returns subclasses of BaseTask from it

    :param file_path: a fully qualified file path for a python module to import CustomTasks from
    :type file_path: `str`
    :return: a dict of CustomTasks, where key is CustomTask.name, and value is a CustomClass task itself
    :rtype: `dict`
    """
    file_name, module_path, objects = Loader.import_custom_python_file(file_path)
    result = {}
    for entry in objects:
        try:
            if issubclass(entry, BaseTask):
                if entry.__name__ != BaseTask.__name__ and entry.name == BaseTask.name:
                    raise GOSTaskException("Class {class_name} from file {file_name} does not have a unique "
                                           "`name` class field. All custom tasks must have a unique `name` "
                                           "class field, that is used for future reference"
                                           "".format(class_name=entry.name,
                                                     file_name=os.path.join(module_path, file_name)))
                result[entry.name] = entry
        except TypeError:
            continue
    return result
def get_action(self, action):
    """Get a callable action."""
    func_name = action.replace('-', '_')
    if not hasattr(self, func_name):
        # Function doesn't exist
        raise DaemonError(
            'Invalid action "{action}"'.format(action=action))

    func = getattr(self, func_name)
    if (not hasattr(func, '__call__') or
            getattr(func, '__daemonocle_exposed__', False) is not True):
        # Not a function or not exposed
        raise DaemonError(
            'Invalid action "{action}"'.format(action=action))

    return func
def _create_cv_ensemble(self, base_ensemble, idx_models_included, model_names=None):
    """For each selected base estimator, average models trained on each fold"""
    fitted_models = numpy.empty(len(idx_models_included), dtype=numpy.object)
    for i, idx in enumerate(idx_models_included):
        model_name = (self.base_estimators[idx][0] if model_names is None
                      else model_names[idx])
        avg_model = EnsembleAverage(base_ensemble[idx, :], name=model_name)
        fitted_models[i] = avg_model
    return fitted_models
def f_to_dict(self, fast_access=False, short_names=False, nested=False,
              copy=True, with_links=True):
    """Returns a dictionary with pairings of (full) names as keys and instances/values.

    :param fast_access:
        If True, parameter values are returned instead of the instances.
        Works also for results if they contain a single item with the name
        of the result.
    :param short_names:
        If true, keys are not full names but only the names. Raises a
        ValueError if the names are not unique.
    :param nested:
        If true, a nested dictionary is returned.
    :param copy:
        If `fast_access=False` and `short_names=False` you can access the
        original data dictionary if you set `copy=False`. If you do that,
        please do not modify anything! Raises ValueError if `copy=False`
        and `fast_access=True` or `short_names=True`.
    :param with_links:
        If links should be ignored

    :return: dictionary

    :raises: ValueError
    """
    return self._nn_interface._to_dict(self, fast_access=fast_access,
                                       short_names=short_names,
                                       nested=nested, copy=copy,
                                       with_links=with_links)
def _get_next_occurrence(haystack, offset, needles):
    """ Find next occurrence of one of the needles in the haystack

    :return: tuple of (index, needle found)
         or: None if no needle was found
    """
    # make map of first char to full needle (only works if all needles
    # have different first characters)
    firstcharmap = dict([(n[0], n) for n in needles])
    firstchars = firstcharmap.keys()
    while offset < len(haystack):
        if haystack[offset] in firstchars:
            possible_needle = firstcharmap[haystack[offset]]
            if haystack[offset:offset + len(possible_needle)] == possible_needle:
                return offset, possible_needle
        offset += 1
    return None
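A quick usage sketch (note the restriction that all needles must start with different characters):

print(_get_next_occurrence('abc [tag] def', 0, ['[', ']']))  # (4, '[')
print(_get_next_occurrence('abc', 0, ['[', ']']))            # None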
def unpack(self, key, value):
    """Unpack and return value only if it is fresh."""
    value, freshness = value
    if not self.is_fresh(freshness):
        raise KeyError('{} (stale)'.format(key))
    return value
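Presumably a matching pack step stores values as (value, freshness) tuples that is_fresh can later check; a hypothetical self-contained sketch of the pairing (class name, TTL scheme, and is_fresh semantics are all assumptions):

import time

class FreshnessStore:
    """Hypothetical sketch of the pack/is_fresh/unpack pairing."""

    def __init__(self, ttl=60.0):
        self.ttl = ttl

    def pack(self, key, value):
        # store the value together with an expiry timestamp
        return (value, time.monotonic() + self.ttl)

    def is_fresh(self, freshness):
        return time.monotonic() < freshness

    def unpack(self, key, value):
        value, freshness = value
        if not self.is_fresh(freshness):
            raise KeyError('{} (stale)'.format(key))
        return value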
def _getBasicOrbit(self, orbit=None):
    """Load a particular orbit into .data for loaded day.

    Parameters
    ----------
    orbit : int
        orbit number, 1 indexed, negative indexes allowed, -1 last orbit

    Note
    ----
    A day of data must be loaded before this routine functions properly.
    If the last orbit of the day is requested, it will NOT automatically
    be padded with data from the next day.
    """
    # ensure data exists
    if not self.sat.empty:
        # ensure proper orbit metadata present
        self._calcOrbits()

        # ensure user is requesting a particular orbit
        if orbit is not None:
            # pull out requested orbit
            if orbit == -1:
                # load orbit data into data
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[self.num + orbit]:]
                self._current = self.num + orbit + 1
            elif ((orbit < 0) & (orbit >= -self.num)):
                # load orbit data into data
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[self.num + orbit]:
                    self._orbit_breaks[self.num + orbit + 1]]
                self._current = self.num + orbit + 1
            elif (orbit < self.num) & (orbit != 0):
                # load orbit data into data
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[orbit - 1]:self._orbit_breaks[orbit]]
                self._current = orbit
            elif orbit == self.num:
                self.sat.data = self._fullDayData[
                    self._orbit_breaks[orbit - 1]:]
                # recent addition, wondering why it wasn't there before,
                # could just be a bug that is now fixed.
                self._current = orbit
            elif orbit == 0:
                raise ValueError('Orbits internally indexed by 1, 0 not '
                                 'allowed')
            else:
                # gone too far
                self.sat.data = []
                raise ValueError('Requested an orbit past total orbits '
                                 'for day')
        else:
            raise ValueError('Must set an orbit')
def _add_workflow(mcs, field_name, state_field, attrs): """Attach a workflow to the attribute list (create a StateProperty).""" attrs[field_name] = StateProperty(state_field.workflow, field_name)
Attach a workflow to the attribute list (create a StateProperty).
def unmasked_for_shape_and_pixel_scale(cls, shape, pixel_scale, invert=False): """Setup a mask where all pixels are unmasked. Parameters ---------- shape : (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel. """ mask = np.full(tuple(map(lambda d: int(d), shape)), False) if invert: mask = np.invert(mask) return cls(array=mask, pixel_scale=pixel_scale)
Setup a mask where all pixels are unmasked. Parameters ---------- shape : (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel.
def appendbcolz(table, obj, check_names=True):
    """Append data into a bcolz ctable. The `obj` argument can be either an
    existing ctable or the name of a directory where an on-disk ctable is
    stored.

    .. versionadded:: 1.1.0

    """

    import bcolz
    import numpy as np

    if isinstance(obj, string_types):
        ctbl = bcolz.open(obj, mode='a')
    else:
        assert hasattr(obj, 'append') and hasattr(obj, 'names'), \
            'expected rootdir or ctable, found %r' % obj
        ctbl = obj

    # setup
    dtype = ctbl.dtype
    it = iter(table)
    hdr = next(it)
    flds = list(map(text_type, hdr))

    # check names match
    if check_names:
        assert tuple(flds) == tuple(ctbl.names), 'column names do not match'

    # fill chunk-wise
    chunklen = sum(ctbl.cols[name].chunklen
                   for name in ctbl.names) // len(ctbl.names)
    while True:
        data = list(itertools.islice(it, chunklen))
        data = np.array(data, dtype=dtype)
        ctbl.append(data)
        if len(data) < chunklen:
            break

    ctbl.flush()
    return ctbl
Append data into a bcolz ctable. The `obj` argument can be either an existing ctable or the name of a directory where an on-disk ctable is stored. .. versionadded:: 1.1.0
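A hedged usage sketch for `appendbcolz`: the table is a petl-style sequence of rows with the header first, and the rootdir is a placeholder that is assumed to already contain an on-disk ctable with matching columns:

# header row first, then data rows (petl convention)
table = [('foo', 'bar'),
         ('A', 1),
         ('B', 2)]
appendbcolz(table, 'example.bcolz')  # or pass an open bcolz.ctable instead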
def filesize(value): '''Display a human readable filesize''' suffix = 'o' for unit in '', 'K', 'M', 'G', 'T', 'P', 'E', 'Z': if abs(value) < 1024.0: return "%3.1f%s%s" % (value, unit, suffix) value /= 1024.0 return "%.1f%s%s" % (value, 'Y', suffix)
Display a human readable filesize
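A minimal usage sketch; note the suffix 'o' is the octet abbreviation, so outputs read 'Ko', 'Mo', etc.:

print(filesize(0))              # '0.0o'
print(filesize(2048))           # '2.0Ko'
print(filesize(5 * 1024 ** 3))  # '5.0Go'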
def _evaluatephiforces(Pot,R,z,phi=None,t=0.,v=None):
    """Raw, undecorated function for internal use"""
    isList= isinstance(Pot,list)
    nonAxi= _isNonAxi(Pot)
    if nonAxi and phi is None:
        raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
    dissipative= _isDissipative(Pot)
    if dissipative and v is None:
        raise PotentialError("The (list of) Potential instances includes dissipative components, but you did not provide the 3D velocity (required for dissipative forces)")
    if isList:
        sum= 0.
        for pot in Pot:
            if isinstance(pot,DissipativeForce):
                sum+= pot._phiforce_nodecorator(R,z,phi=phi,t=t,v=v)
            else:
                sum+= pot._phiforce_nodecorator(R,z,phi=phi,t=t)
        return sum
    elif isinstance(Pot,Potential):
        return Pot._phiforce_nodecorator(R,z,phi=phi,t=t)
    elif isinstance(Pot,DissipativeForce):
        return Pot._phiforce_nodecorator(R,z,phi=phi,t=t,v=v)
    else: #pragma: no cover
        raise PotentialError("Input to 'evaluatephiforces' is neither a Potential-instance, DissipativeForce-instance or a list of such instances")
Raw, undecorated function for internal use
def _enable_logpersist(self): """Attempts to enable logpersist daemon to persist logs.""" # Logpersist is only allowed on rootable devices because of excessive # reads/writes for persisting logs. if not self._ad.is_rootable: return logpersist_warning = ('%s encountered an error enabling persistent' ' logs, logs may not get saved.') # Android L and older versions do not have logpersist installed, # so check that the logpersist scripts exists before trying to use # them. if not self._ad.adb.has_shell_command('logpersist.start'): logging.warning(logpersist_warning, self) return try: # Disable adb log spam filter for rootable devices. Have to stop # and clear settings first because 'start' doesn't support --clear # option before Android N. self._ad.adb.shell('logpersist.stop --clear') self._ad.adb.shell('logpersist.start') except adb.AdbError: logging.warning(logpersist_warning, self)
Attempts to enable logpersist daemon to persist logs.
def join(self, userId, groupId, groupName):
    """
    Add a user to the specified group; the user will then receive the
    group's messages. A single user may join at most 500 groups, and a
    group may hold at most 3000 members. (method)
    @param userId: Id(s) of the user(s) to add to the group; up to 1000
                   may be submitted at once. (required)
    @param groupId: Id of the group to join. (required)
    @param groupName: Name corresponding to the group Id. (required)

    @return code: return code, 200 means success.
    @return errorMessage: error message.
    """

    desc = {
        "name": "CodeSuccessReslut",
        "desc": "HTTP success result",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "Return code, 200 means success."
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "Error message."
        }]
    }

    r = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/group/join.json',
        params={
            "userId": userId,
            "groupId": groupId,
            "groupName": groupName
        })
    return Response(r, desc)
Add a user to the specified group; the user will then receive the group's messages. A single user may join at most 500 groups, and a group may hold at most 3000 members. (method) @param userId: Id(s) of the user(s) to add to the group; up to 1000 may be submitted at once. (required) @param groupId: Id of the group to join. (required) @param groupName: Name corresponding to the group Id. (required) @return code: return code, 200 means success. @return errorMessage: error message.
def datediff(end, start): """ Returns the number of days from `start` to `end`. >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2']) >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect() [Row(diff=32)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
Returns the number of days from `start` to `end`. >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2']) >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect() [Row(diff=32)]
def _needSwapWH(self, oldDirection, newDirection):
    """!
        Return whether width and height must be swapped when the screen
        rotates from oldDirection to newDirection
        @return Boolean
        @note No swap is needed when the rotation is a multiple of 180
              degrees (including 0); a rotation that is an odd multiple
              of 90 degrees requires swapping width and height
    """
    if abs(newDirection - oldDirection) == 0: return False
    if abs(newDirection - oldDirection) % 180 == 0: return False
    if abs(newDirection - oldDirection) % 90 == 0: return True
    return False
! Return whether width and height must be swapped when the screen rotates from oldDirection to newDirection @return Boolean @note No swap is needed when the rotation is a multiple of 180 degrees (including 0); a rotation that is an odd multiple of 90 degrees requires swapping width and height
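A standalone transcription of the angle logic above (the method only uses its two arguments, so a plain function behaves identically):

def need_swap(old, new):
    diff = abs(new - old)
    # swap only for odd multiples of 90 degrees
    return diff % 180 != 0 and diff % 90 == 0

assert need_swap(0, 90) is True     # 90-degree turn: swap width/height
assert need_swap(0, 180) is False   # 180-degree turn: no swap
assert need_swap(90, 270) is False  # also 180 degrees apart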
def learnPlaceCode(self, runs, dir=1, periodic=False, recurrent=True,
                   randomSpeed=False, learnRecurrent=False):
    """
    Traverses a sinusoidal trajectory across the environment, learning during
    the process.
    A pair of runs across the environment (one in each direction) takes 10
    seconds if in a periodic larger environment, and 4 seconds in a smaller
    nonperiodic environment.
    :param runs: How many runs across the environment to do.  Each "run" is
            defined as a full sweep across the environment in each direction.
    :param dir: Which direction to move in first.  Valid values are 1 and -1.
    :param periodic: Whether or not the learning environment should be
            periodic (toroidal).
    :param recurrent: Whether or not recurrent connections should be active
            during learning.  Warning: True can lead to instability.
    :param randomSpeed: Whether or not to use a random maximum speed for each
            run, to better simulate real learning.  Can degrade performance.
            Only supported in periodic environments.
    :param learnRecurrent: Whether or not to learn recurrent connections.
    """
    # Simulate for a second to get nice starting activation bumps.
    # Turn plotting off so as not to confuse the viewer
    self.plotting = False
    self.simulate(10, 1, 1, 0, envelope=False, inputNoise=None)
    self.plotting = True

    # Set up plotting
    if self.plotting:
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(411)
        self.ax2 = self.fig.add_subplot(412)
        self.ax3 = self.fig.add_subplot(413)
        self.ax4 = self.fig.add_subplot(414)
        plt.ion()
        plt.tight_layout()
        self.ax3.set_xlabel("Inhibitory-Inhibitory connections")
        self.fig.show()
        self.fig.canvas.draw()

    if self.movie:
        history = []

    # time since the last plot refresh
    residTime = 0

    # Set up the trajectories and running times.
    if not periodic:
        time = 4. * runs
        times = np.arange(0, time, self.dt)
        timings = [times]
        trajectories = [(np.sin(dir * (times * np.pi / 2 - np.pi / 2.)) + 1) / 2]
    else:
        # Space the starting points of the runs out.  This tends to improve
        # the translation-invariance of the weight profiles, and thus gives
        # better overall path integration.
        startingPoint = 0
        trajectories = []
        timings = []
        time = 0
        for run in xrange(runs):
            if randomSpeed:
                speed = np.random.random() + 0.5
            else:
                speed = 1.
            length = 10. / speed

            runTimes = np.arange(0, length, self.dt)
            # allocate the (x, y) trajectory for this run
            # (assumed shape: one row per time step)
            trajectory = np.zeros((runTimes.shape[0], 2))
            trajectory[:, 0] = (np.sin(dir * (runTimes * np.pi / (5 / speed) - np.pi / 2.)) + 1) * \
                               2.5 + startingPoint
            trajectory[:, 1] = (np.sin(dir * (runTimes * np.pi / (5 / speed) - np.pi / 2.)) + 1) * \
                               2.5
            trajectories.append(trajectory)
            timings.append(runTimes + time)
            time += length
            startingPoint += 1. / runs

    for trajectory, timing in zip(trajectories, timings):
        self.activationsI = np.zeros(self.activationsI.shape)
        self.activationsER = np.zeros(self.activationsER.shape)
        self.activationsEL = np.zeros(self.activationsEL.shape)
        velocities = np.diff(trajectory) / self.dt

        for i, t in enumerate(timing[:-1]):
            x = trajectory[i] % 1
            v = velocities[i]
            self.activationsP = np.exp(-1. * (self.placeCode - x) ** 2 /
                                       (2 * self.sigmaLoc ** 2))

            self.update(0, 0, v, recurrent=recurrent,
                        envelope=(not periodic),
                        iSpeedTuning=periodic,
                        enforceDale=True,
                        )

            self.stdpUpdate(t, onlyPlace=not learnRecurrent)

            # Finally, enforce Dale's law.  Place neurons must be excitatory.
            # Also keep the place weights from being too large.
            np.maximum(self.weightsPI, 0, self.weightsPI)
            np.minimum(self.weightsPI, 3., self.weightsPI)
            for k, w in self.weightsPE.items():
                np.maximum(w, 0, w)
                np.minimum(w, 3., w)

            residTime += self.dt
            if residTime > PLOT_INTERVAL:
                residTime -= PLOT_INTERVAL
                if self.plotting:
                    self.ax4.matshow(self.weightsPI, cmap=plt.cm.coolwarm)
                    self.plotActivation(position=x, time=t)
                if self.movie:
                    history.append(np.copy(self.weightsPI))

    if self.movie:
        self.createMovie(np.stack(history, -1), "PIWeightEvolution",
                         self.numInhibitory, self.numPlaces)

    self.stdpUpdate(t, onlyPlace=not learnRecurrent, clearBuffer=True)

    # Enforce Dale's law
    np.minimum(self.weightsII, 0, self.weightsII)
    np.maximum(self.weightsPI, 0, self.weightsPI)
    for k, w in self.weightsIE.items():
        np.minimum(w, 0, w)
    for k, w in self.weightsEI.items():
        np.maximum(w, 0, w)
    for k, w in self.weightsPE.items():
        np.maximum(w, 0, w)
Traverses a sinusoidal trajectory across the environment, learning during the process. A pair of runs across the environment (one in each direction) takes 10 seconds if in a periodic larger environment, and 4 seconds in a smaller nonperiodic environment. :param runs: How many runs across the environment to do. Each "run" is defined as a full sweep across the environment in each direction. :param dir: Which direction to move in first. Valid values are 1 and -1. :param periodic: Whether or not the learning environment should be periodic (toroidal). :param recurrent: Whether or not recurrent connections should be active during learning. Warning: True can lead to instability. :param randomSpeed: Whether or not to use a random maximum speed for each run, to better simulate real learning. Can degrade performance. Only supported in periodic environments. :param learnRecurrent: Whether or not to learn recurrent connections.
def _setBorder(self, border, doc, xref): """_setBorder(self, border, doc, xref) -> PyObject *""" return _fitz.Link__setBorder(self, border, doc, xref)
_setBorder(self, border, doc, xref) -> PyObject *
def dump_to_string(self, cnf, **opts):
    """
    :param cnf: Configuration data to dump
    :param opts: optional keyword parameters

    :return: byte string representing the configuration
    """
    tree = container_to_etree(cnf, **opts)
    buf = BytesIO()
    etree_write(tree, buf)
    return buf.getvalue()
:param cnf: Configuration data to dump :param opts: optional keyword parameters :return: byte string representing the configuration
def system_status(self): """The system status codes.""" flag, timestamp, status = self._query(('GETDAT? 1', (Integer, Float, Integer))) return { # convert unix timestamp to datetime object 'timestamp': datetime.datetime.fromtimestamp(timestamp), # bit 0-3 represent the temperature controller status 'temperature': STATUS_TEMPERATURE[status & 0xf], # bit 4-7 represent the magnet status 'magnet': STATUS_MAGNET[(status >> 4) & 0xf], # bit 8-11 represent the chamber status 'chamber': STATUS_CHAMBER[(status >> 8) & 0xf], # bit 12-15 represent the sample position status 'sample_position': STATUS_SAMPLE_POSITION[(status >> 12) & 0xf], }
The system status codes.
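A standalone sketch of the bit unpacking used above: four 4-bit fields packed into one status word, lowest nibble first; the status value is a made-up example:

status = 0x4321  # hypothetical raw status word
fields = {
    'temperature': status & 0xf,              # bits 0-3  -> 1
    'magnet': (status >> 4) & 0xf,            # bits 4-7  -> 2
    'chamber': (status >> 8) & 0xf,           # bits 8-11 -> 3
    'sample_position': (status >> 12) & 0xf,  # bits 12-15 -> 4
}
print(fields)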
def _prepare_record(self, group):
    """ compute record dtype and parents dict for this group

    Parameters
    ----------
    group : dict
        MDF group dict

    Returns
    -------
    parents, dtypes : dict, numpy.dtype
        mapping of channels to record fields, record fields dtype

    """
    parents, dtypes = group.parents, group.types
    if parents is None:
        if group.data_location == v23c.LOCATION_ORIGINAL_FILE:
            stream = self._file
        else:
            stream = self._tempfile
        grp = group
        record_size = grp.channel_group.samples_byte_nr << 3
        next_byte_aligned_position = 0
        types = []
        current_parent = ""
        parent_start_offset = 0
        parents = {}
        group_channels = UniqueDB()

        # the channels are first sorted ascending (see __lt__ method of
        # Channel class): a channel with a lower start offset is smaller,
        # and when two channels have the same start offset the one with
        # the higher bit size is considered smaller.  The reason is that
        # when the numpy record is built and there are overlapping
        # channels, the parent fields must be bigger (bit size) than the
        # embedded channels.  For each channel the parents dict will have
        # a (parent name, bit offset) pair: the channel value is computed
        # using the values from the parent field, and the bit offset,
        # which is the channel's bit offset within the parent bytes.
        # This means all parents will have themselves as parent, and a
        # bit offset of 0.  Gaps in the records are also considered.
        # Non-standard integer sizes are adjusted to the next higher
        # standard integer size (e.g. a uint of 28 bits will be adjusted
        # to 32 bits)

        sortedchannels = sorted(enumerate(grp.channels), key=lambda i: i[1])
        for original_index, new_ch in sortedchannels:

            # skip channels with channel dependencies from the numpy record
            if new_ch.component_addr:
                continue

            start_offset = new_ch.start_offset
            try:
                additional_byte_offset = new_ch.additional_byte_offset
                start_offset += 8 * additional_byte_offset
            except AttributeError:
                pass

            bit_offset = start_offset % 8
            data_type = new_ch.data_type
            bit_count = new_ch.bit_count

            name = new_ch.name

            # handle multiple occurrences of the same channel name
            name = group_channels.get_unique_name(name)

            if start_offset >= next_byte_aligned_position:
                parent_start_offset = (start_offset // 8) * 8

                # check if there are byte gaps in the record
                gap = (parent_start_offset - next_byte_aligned_position) // 8
                if gap:
                    types.append(("", f"V{gap}"))

                # adjust size to 1, 2, 4 or 8 bytes for nonstandard integers
                size = bit_offset + bit_count
                if data_type == v23c.DATA_TYPE_STRING:
                    next_byte_aligned_position = parent_start_offset + size
                    if next_byte_aligned_position <= record_size:
                        dtype_pair = (name, get_fmt_v3(data_type, size))
                        types.append(dtype_pair)
                        parents[original_index] = name, bit_offset
                    else:
                        next_byte_aligned_position = parent_start_offset

                elif data_type == v23c.DATA_TYPE_BYTEARRAY:
                    next_byte_aligned_position = parent_start_offset + size
                    if next_byte_aligned_position <= record_size:
                        dtype_pair = (name, get_fmt_v3(data_type, size))
                        types.append(dtype_pair)
                        parents[original_index] = name, bit_offset
                    else:
                        next_byte_aligned_position = parent_start_offset

                else:
                    if size > 32:
                        next_byte_aligned_position = parent_start_offset + 64
                    elif size > 16:
                        next_byte_aligned_position = parent_start_offset + 32
                    elif size > 8:
                        next_byte_aligned_position = parent_start_offset + 16
                    else:
                        next_byte_aligned_position = parent_start_offset + 8

                    if next_byte_aligned_position <= record_size:
                        dtype_pair = (name, get_fmt_v3(data_type, size))
                        types.append(dtype_pair)
                        parents[original_index] = name, bit_offset
                    else:
                        next_byte_aligned_position = parent_start_offset

                current_parent = name
            else:
                max_overlapping = next_byte_aligned_position - start_offset
                if max_overlapping >= bit_count:
                    parents[original_index] = (
                        current_parent,
                        start_offset - parent_start_offset,
                    )
            if next_byte_aligned_position > record_size:
                break

        gap = (record_size - next_byte_aligned_position) // 8
        if gap:
            dtype_pair = ("", f"V{gap}")
            types.append(dtype_pair)

        dtypes = dtype(types)

        group.parents, group.types = parents, dtypes

    return parents, dtypes
compute record dtype and parents dict for this group Parameters ---------- group : dict MDF group dict Returns ------- parents, dtypes : dict, numpy.dtype mapping of channels to record fields, record fields dtype
def idna_encode (host): """Encode hostname as internationalized domain name (IDN) according to RFC 3490. @raise: UnicodeError if hostname is not properly IDN encoded. """ if host and isinstance(host, unicode): try: host.encode('ascii') return host, False except UnicodeError: uhost = host.encode('idna').decode('ascii') return uhost, uhost != host return host, False
Encode hostname as internationalized domain name (IDN) according to RFC 3490. @raise: UnicodeError if hostname is not properly IDN encoded.
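The standard library's 'idna' codec performs the RFC 3490 encoding this helper relies on; a quick check:

host = u'm\xfcnchen.de'  # 'münchen.de'
print(host.encode('idna'))  # b'xn--mnchen-3ya.de'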
def _handle_clear(self, load): ''' Process a cleartext command :param dict load: Cleartext payload :return: The result of passing the load to a function in ClearFuncs corresponding to the command specified in the load's 'cmd' key. ''' log.trace('Clear payload received with command %s', load['cmd']) cmd = load['cmd'] if cmd.startswith('__'): return False if self.opts['master_stats']: start = time.time() ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'} if self.opts['master_stats']: stats = salt.utils.event.update_stats(self.stats, start, load) self._post_stats(stats) return ret
Process a cleartext command :param dict load: Cleartext payload :return: The result of passing the load to a function in ClearFuncs corresponding to the command specified in the load's 'cmd' key.
def _set_vlan_add(self, v, load=False): """ Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container) If this variable is read-only (config: false) in the source YANG file, then _set_vlan_add is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan_add() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vlan_add must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vlan_add.vlan_add, is_container='container', presence=False, yang_name="vlan-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove VLANs from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", }) self.__vlan_add = t if hasattr(self, '_set'): self._set()
Setter method for vlan_add, mapped from YANG variable /routing_system/evpn_config/evpn/evpn_instance/vlan/vlan_add (container) If this variable is read-only (config: false) in the source YANG file, then _set_vlan_add is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan_add() directly.
def delete(self, skip_mapping=False): """Delete all entries from ElasticSearch.""" for index in self.indexes: index.destroy() if not skip_mapping: index.create_mapping()
Delete all entries from ElasticSearch.
def getAllKws(self): """ extract all keywords into two categories kws_ele: magnetic elements kws_bl: beamline elements return (kws_ele, kws_bl) """ kws_ele = [] kws_bl = [] for ele in self.all_elements: if ele == '_prefixstr' or ele == '_epics': continue elif self.getElementType(ele).lower() == u'beamline': kws_bl.append(ele) else: kws_ele.append(ele) return tuple((kws_ele, kws_bl))
extract all keywords into two categories kws_ele: magnetic elements kws_bl: beamline elements return (kws_ele, kws_bl)
def lists(self): """ :class:`Lists feed <pypump.models.feed.Lists>` with all lists owned by the person. Example: >>> for list in pump.me.lists: ... print(list) ... Acquaintances Family Coworkers Friends """ if self._lists is None: self._lists = Lists(self.links['lists'], pypump=self._pump) return self._lists
:class:`Lists feed <pypump.models.feed.Lists>` with all lists owned by the person. Example: >>> for list in pump.me.lists: ... print(list) ... Acquaintances Family Coworkers Friends
def get_labels(obj):
    """
    Retrieve the labels of a clustering object

    :param obj: the clustering object
    :return: the resulting labels
    """
    if Clustering.is_pyclustering_instance(obj.model):
        return obj._labels_from_pyclusters
    else:
        return obj.model.labels_
Retrieve the labels of a clustering object :param obj: the clustering object :return: the resulting labels
def ReadAPIAuditEntries(self, username=None, router_method_names=None, min_timestamp=None, max_timestamp=None, cursor=None): """Returns audit entries stored in the database.""" query = """SELECT details, timestamp FROM api_audit_entry FORCE INDEX (api_audit_entry_by_username_timestamp) {WHERE_PLACEHOLDER} ORDER BY timestamp ASC """ conditions = [] values = [] where = "" if username is not None: conditions.append("username = %s") values.append(username) if router_method_names: placeholders = ["%s"] * len(router_method_names) placeholders = ", ".join(placeholders) conditions.append("router_method_name IN (%s)" % placeholders) values.extend(router_method_names) if min_timestamp is not None: conditions.append("timestamp >= FROM_UNIXTIME(%s)") values.append(mysql_utils.RDFDatetimeToTimestamp(min_timestamp)) if max_timestamp is not None: conditions.append("timestamp <= FROM_UNIXTIME(%s)") values.append(mysql_utils.RDFDatetimeToTimestamp(max_timestamp)) if conditions: where = "WHERE " + " AND ".join(conditions) query = query.replace("{WHERE_PLACEHOLDER}", where) cursor.execute(query, values) return [ _AuditEntryFromRow(details, timestamp) for details, timestamp in cursor.fetchall() ]
Returns audit entries stored in the database.
def chunks(lst, size):
    """Yield successive chunks of length `size` from lst."""
    for i in xrange(0, len(lst), size):
        yield lst[i:i + size]
Yield successive chunks of length `size` from lst.
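A minimal usage sketch (Python 2, since the generator above uses xrange):

print(list(chunks([1, 2, 3, 4, 5], 2)))
# [[1, 2], [3, 4], [5]]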
def DisjoinCalendars(self, cutoff): """Forces the old and new calendars to be disjoint about a cutoff date. This truncates the service periods of the old schedule so that service stops one day before the given cutoff date and truncates the new schedule so that service only begins on the cutoff date. Args: cutoff: The cutoff date as a string in YYYYMMDD format. The timezone is the same as used in the calendar.txt file. """ def TruncatePeriod(service_period, start, end): """Truncate the service period to into the range [start, end]. Args: service_period: The service period to truncate. start: The start date as a string in YYYYMMDD format. end: The end date as a string in YYYYMMDD format. """ service_period.start_date = max(service_period.start_date, start) service_period.end_date = min(service_period.end_date, end) dates_to_delete = [] for k in service_period.date_exceptions: if (k < start) or (k > end): dates_to_delete.append(k) for k in dates_to_delete: del service_period.date_exceptions[k] # find the date one day before cutoff year = int(cutoff[:4]) month = int(cutoff[4:6]) day = int(cutoff[6:8]) cutoff_date = datetime.date(year, month, day) one_day_delta = datetime.timedelta(days=1) before = (cutoff_date - one_day_delta).strftime('%Y%m%d') for a in self.feed_merger.a_schedule.GetServicePeriodList(): TruncatePeriod(a, 0, before) for b in self.feed_merger.b_schedule.GetServicePeriodList(): TruncatePeriod(b, cutoff, '9'*8)
Forces the old and new calendars to be disjoint about a cutoff date. This truncates the service periods of the old schedule so that service stops one day before the given cutoff date and truncates the new schedule so that service only begins on the cutoff date. Args: cutoff: The cutoff date as a string in YYYYMMDD format. The timezone is the same as used in the calendar.txt file.
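A standalone sketch of the cutoff arithmetic used above: the old feed's last service day is the day before the cutoff:

import datetime

cutoff = '20240301'  # illustrative cutoff date
cutoff_date = datetime.date(int(cutoff[:4]), int(cutoff[4:6]), int(cutoff[6:8]))
before = (cutoff_date - datetime.timedelta(days=1)).strftime('%Y%m%d')
print(before)  # '20240229' (2024 is a leap year)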
def complete( text: str, kw_cache: atom.Atom["PMap[int, Keyword]"] = __INTERN ) -> Iterable[str]: """Return an iterable of possible completions for the given text.""" assert text.startswith(":") interns = kw_cache.deref() text = text[1:] if "/" in text: prefix, suffix = text.split("/", maxsplit=1) results = filter( lambda kw: (kw.ns is not None and kw.ns == prefix) and kw.name.startswith(suffix), interns.itervalues(), ) else: results = filter( lambda kw: kw.name.startswith(text) or (kw.ns is not None and kw.ns.startswith(text)), interns.itervalues(), ) return map(str, results)
Return an iterable of possible completions for the given text.
def xover_gen(self, range=None):
    """Generator for the XOVER command.

    The XOVER command returns information from the overview database for
    the article(s) specified.

    <http://tools.ietf.org/html/rfc2980#section-2.8>

    Args:
        range: An article number as an integer, or a tuple specifying a
            range of article numbers in the form (first, [last]). If last
            is omitted then all articles after first are included. A range
            of None (the default) uses the current article.

    Returns:
        A list of fields as given by the overview database for each
        available article in the specified range. The fields that are
        returned can be determined using the LIST OVERVIEW.FMT command if
        the server supports it.

    Raises:
        NNTPReplyError: If no such article exists or the currently selected
            newsgroup is invalid.
    """
    args = None
    if range is not None:
        args = utils.unparse_range(range)

    code, message = self.command("XOVER", args)
    if code != 224:
        raise NNTPReplyError(code, message)

    for line in self.info_gen(code, message):
        yield line.rstrip().split("\t")
Generator for the XOVER command. The XOVER command returns information from the overview database for the article(s) specified. <http://tools.ietf.org/html/rfc2980#section-2.8> Args: range: An article number as an integer, or a tuple specifying a range of article numbers in the form (first, [last]). If last is omitted then all articles after first are included. A range of None (the default) uses the current article. Returns: A list of fields as given by the overview database for each available article in the specified range. The fields that are returned can be determined using the LIST OVERVIEW.FMT command if the server supports it. Raises: NNTPReplyError: If no such article exists or the currently selected newsgroup is invalid.
def __merge_by_signle_link(self):
    """!
    @brief Merges the two most similar clusters according to the
           single-link criterion.

    """

    minimum_single_distance = float('Inf')
    indexes = None

    for index_cluster1 in range(0, len(self.__clusters)):
        for index_cluster2 in range(index_cluster1 + 1, len(self.__clusters)):
            candidate_minimum_distance = self.__calculate_nearest_distance(index_cluster1, index_cluster2)

            if candidate_minimum_distance < minimum_single_distance:
                minimum_single_distance = candidate_minimum_distance
                indexes = [index_cluster1, index_cluster2]

    self.__clusters[indexes[0]] += self.__clusters[indexes[1]]
    self.__clusters.pop(indexes[1])
! @brief Merges the two most similar clusters according to the single-link criterion.
async def start_serving(self, connection_config: ConnectionConfig, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: """ Start serving this :class:`~lahja.endpoint.Endpoint` so that it can receive events. Await until the :class:`~lahja.endpoint.Endpoint` is ready. """ self.start_serving_nowait(connection_config, loop) await self.wait_until_serving()
Start serving this :class:`~lahja.endpoint.Endpoint` so that it can receive events. Await until the :class:`~lahja.endpoint.Endpoint` is ready.
def _simple_chart(x=None, y=None, name=None, color=None, width=None, dash=None,
                  opacity=None, mode='lines+markers', yaxis=1, fill=None,
                  text='', style='line', markersize=6):
    """Draws connected dots.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    name : str, optional
        Series name shown in the legend.
    style : str, optional
        Series type, e.g. 'line'.

    Returns
    -------
    Chart

    """
    assert x is not None or y is not None, "x or y must be something"
    if y is None:
        y = x
        x = None
    if x is None:
        x = np.arange(len(y))
    else:
        x = _try_pydatetime(x)
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    assert x.shape[0] == y.shape[0]
    xtype = check_type(x[0])
    ytype = check_type(y[0])
    return Chart(dict(
        xAxis=[
            dict(
                type=xtype,
            )
        ],
        yAxis=[
            dict(
                type=ytype,
            )
        ],
        series=[dict(
            type=style,
            name=name,
            data=np.stack((x, y)).T
        )]
    ))
Draws connected dots. Parameters ---------- x : array-like, optional y : array-like, optional name : str, optional Series name shown in the legend. style : str, optional Series type, e.g. 'line'. Returns ------- Chart
def symlink_list(self, load): ''' Return a list of symlinked files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {} if 'saltenv' not in load: return {} if not isinstance(load['saltenv'], six.string_types): load['saltenv'] = six.text_type(load['saltenv']) for fsb in self.backends(load.pop('fsbackend', None)): symlstr = '{0}.symlink_list'.format(fsb) if symlstr in self.servers: ret = self.servers[symlstr](load) # some *fs do not handle prefix. Ensure it is filtered prefix = load.get('prefix', '').strip('/') if prefix != '': ret = dict([ (x, y) for x, y in six.iteritems(ret) if x.startswith(prefix) ]) return ret
Return a list of symlinked files and dirs
def _get_other_names(self, line): """Parse and extract any other names that might be recorded for the compound Args: line (str): line of the msp file """ m = re.search(self.compound_regex['other_names'][0], line, re.IGNORECASE) if m: self.other_names.append(m.group(1).strip())
Parse and extract any other names that might be recorded for the compound Args: line (str): line of the msp file
def is_deaf(self):
    """
    Whether the consonant is voiceless.
    """
    if not self.is_consonant():
        return False

    if self.letter in self.forever_deaf:
        return True

    if self.letter in self.forever_sonorus:
        return False

    if self.__forsed_sonorus:
        return False

    if self.__forsed_sonorus is False:
        return True

    for _, df in self.sonorus_deaf_pairs:
        if self.letter == df:
            return True

    return False
Whether the consonant is voiceless.
def type(self): """Whether this is a method or function. :returns: 'method' if this is a method, 'function' otherwise. :rtype: str """ # pylint: disable=no-member if self.args.args and self.args.args[0].name == "self": if isinstance(self.parent.scope(), ClassDef): return "method" return "function"
Whether this is a method or function. :returns: 'method' if this is a method, 'function' otherwise. :rtype: str
def compute_freq_cross(self, csd, asd, output='coherence'): """Compute cross-spectrum, gain, phase shift and/or coherence. Parameters ---------- csd : list of dict with 'data' key as instance of ChanFreq cross-spectral density, one channel asd : list of dict with 'data' key as instance of ChanFreq autospectral density, two channels output : str 'coherence' or 'gainphase' Returns ------- tuple of list of dict with 'data' key as instance of ChanFreq if coherence, tuple contains one dict if gainphase, tuple contains: xgain, ygain, phase where xgain is gain with x as input and y as output """ if output == 'coherence': coh_list = [] for i in range(len(csd)): dat = ChanFreq() dat.data = empty(1, dtype='O') dat.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f') dat.axis['freq'] = empty(1, dtype='O') dat.axis['freq'][0] = csd[i]['data'].axis['freq'][0] dat.axis['chan'] = csd[i]['data'].axis['chan'] newdict = dict(csd[i]) newdict['data'] = dat Pxy = csd[i]['data'].data[0][0] Pxx = asd[i]['data'].data[0][0] Pyy = asd[i]['data'].data[0][1] Cxy = abs(Pxy)**2 / Pxx / Pyy # ms coherence dat.data[0][0, :] = Cxy coh_list.append(newdict) out = (coh_list,) elif output == 'gainphase': xg_list = [] yg_list = [] ph_list = [] for i in range(len(csd)): xgain = ChanFreq() xgain.data = empty(1, dtype='O') xgain.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f') xgain.axis['freq'] = empty(1, dtype='O') xgain.axis['freq'][0] = csd[i]['data'].axis['freq'][0] xgain.axis['chan'] = empty(1, dtype='O') ygain = ChanFreq() ygain.data = empty(1, dtype='O') ygain.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f') ygain.axis['freq'] = empty(1, dtype='O') ygain.axis['freq'][0] = csd[i]['data'].axis['freq'][0] ygain.axis['chan'] = empty(1, dtype='O') phase = ChanFreq() phase.data = empty(1, dtype='O') phase.data[0] = empty((1, csd[i]['data'].number_of('freq')[0]), dtype='f') phase.axis['freq'] = empty(1, dtype='O') phase.axis['freq'][0] = csd[i]['data'].axis['freq'][0] phase.axis['chan'] = empty(1, dtype='O') xchan = asd[i]['data'].axis['chan'][0][0] ychan = asd[i]['data'].axis['chan'][0][1] xgain.axis['chan'][0] = asarray(['-->'.join((xchan, ychan))], dtype='U') ygain.axis['chan'][0] = asarray(['-->'.join((ychan, xchan))], dtype='U') phase.axis['chan'][0] = asarray(['-->'.join((xchan, ychan))], dtype='U') Pxy = csd[i]['data'].data[0][0] Pxx = asd[i]['data'].data[0][0] Pyy = asd[i]['data'].data[0][1] Hx = Pxy / Pxx Hy = Pxy / Pyy xgain.data[0][0, :] = abs(Hx) ygain.data[0][0, :] = abs(Hy) phase.data[0][0, :] = angle(Hx, deg=True) # phase is same in both directions, since Pxx and Pyy are real xg_dict = dict(csd[i]) xg_dict['data'] = xgain xg_list.append(xg_dict) yg_dict = dict(csd[i]) yg_dict['data'] = ygain yg_list.append(yg_dict) ph_dict = dict(csd[i]) ph_dict['data'] = phase ph_list.append(ph_dict) out = (xg_list, yg_list, ph_list) return out
Compute cross-spectrum, gain, phase shift and/or coherence. Parameters ---------- csd : list of dict with 'data' key as instance of ChanFreq cross-spectral density, one channel asd : list of dict with 'data' key as instance of ChanFreq autospectral density, two channels output : str 'coherence' or 'gainphase' Returns ------- tuple of list of dict with 'data' key as instance of ChanFreq if coherence, tuple contains one dict if gainphase, tuple contains: xgain, ygain, phase where xgain is gain with x as input and y as output
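A standalone numpy sketch of the magnitude-squared coherence formula used above, Cxy = |Pxy|**2 / (Pxx * Pyy), on synthetic data; note the spectra must be averaged over segments, since a single-segment estimate is identically 1:

import numpy as np

rng = np.random.default_rng(0)
n_seg, seg_len = 64, 256
Pxx = np.zeros(seg_len // 2 + 1)
Pyy = np.zeros(seg_len // 2 + 1)
Pxy = np.zeros(seg_len // 2 + 1, dtype=complex)
for _ in range(n_seg):
    x = rng.standard_normal(seg_len)
    y = 0.7 * x + 0.7 * rng.standard_normal(seg_len)  # half-coherent mixture
    X, Y = np.fft.rfft(x), np.fft.rfft(y)
    Pxx += np.abs(X) ** 2
    Pyy += np.abs(Y) ** 2
    Pxy += X * np.conj(Y)
Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)  # magnitude-squared coherence
print(Cxy.mean())  # ~0.5 for this mixing ratio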
def _add_dict_values(self, d1, d2):
    """
    Merges the values of two dictionaries, which are themselves expected
    to be dictionaries, e.g.
    d1 = {'a': {'x': pqr}}
    d2 = {'a': {'y': lmn}, 'b': {'y': rst}}
    will return:
    {'a': {'x': pqr, 'y': lmn}, 'b': {'y': rst}}.
    Collisions of the keys of the sub-dictionaries are not checked.
    """
    if d1 is None and d2 is None:
        return None
    d1 = d1 or {}
    d2 = d2 or {}
    added = {}
    for key in set(list(d1.keys()) + list(d2.keys())):
        added[key] = dict(d1.get(key, {}), **(d2.get(key, {})))
    return added
Merges the values of two dictionaries, which are themselves expected to be dictionaries, e.g. d1 = {'a': {'x': pqr}} d2 = {'a': {'y': lmn}, 'b': {'y': rst}} will return: {'a': {'x': pqr, 'y': lmn}, 'b': {'y': rst}}. Collisions of the keys of the sub-dictionaries are not checked.
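A standalone check of the merge behaviour described in the docstring (the method never touches self, so a plain-function transcription suffices, minus the both-None short-circuit):

def add_dict_values(d1, d2):
    d1 = d1 or {}
    d2 = d2 or {}
    # for each key, merge the two sub-dictionaries (d2 wins on collisions)
    return {k: dict(d1.get(k, {}), **d2.get(k, {}))
            for k in set(d1) | set(d2)}

d1 = {'a': {'x': 'pqr'}}
d2 = {'a': {'y': 'lmn'}, 'b': {'y': 'rst'}}
print(add_dict_values(d1, d2))
# {'a': {'x': 'pqr', 'y': 'lmn'}, 'b': {'y': 'rst'}}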
def geocode( self, query, exactly_one=True, timeout=DEFAULT_SENTINEL, limit=None, typeahead=False, language=None, ): """ Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param int limit: Maximum amount of results to return from the service. Unless exactly_one is set to False, limit will always be 1. :param bool typeahead: If the "typeahead" flag is set, the query will be interpreted as a partial input and the search will enter predictive mode. :param str language: Language in which search results should be returned. When data in specified language is not available for a specific field, default language is used. List of supported languages (case-insensitive): https://developer.tomtom.com/online-search/online-search-documentation/supported-languages :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``. """ query = self.format_string % query params = self._geocode_params(query) params['typeahead'] = self._boolean_value(typeahead) if limit: params['limit'] = str(int(limit)) if exactly_one: params['limit'] = '1' if language: params['language'] = language quoted_query = quote(query.encode('utf-8')) url = "?".join((self.api % dict(query=quoted_query), urlencode(params))) logger.debug("%s.geocode: %s", self.__class__.__name__, url) return self._parse_json( self._call_geocoder(url, timeout=timeout), exactly_one )
Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :param int limit: Maximum amount of results to return from the service. Unless exactly_one is set to False, limit will always be 1. :param bool typeahead: If the "typeahead" flag is set, the query will be interpreted as a partial input and the search will enter predictive mode. :param str language: Language in which search results should be returned. When data in specified language is not available for a specific field, default language is used. List of supported languages (case-insensitive): https://developer.tomtom.com/online-search/online-search-documentation/supported-languages :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.
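A hedged usage sketch with geopy's TomTom geocoder, which exposes the method above; the API key is a placeholder:

from geopy.geocoders import TomTom

geolocator = TomTom('YOUR_API_KEY')
location = geolocator.geocode('Amsterdam', language='en-GB')
if location is not None:
    print(location.address, location.latitude, location.longitude)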
def save(self, path, table_format='txt', sep='\t', table_ext=None,
         float_format='%.12g'):
    """ Saving the system to path

    Parameters
    ----------
    path : pathlib.Path or string
        path for the saved data (will be created if necessary, data
        within will be overwritten).

    table_format : string
        Format to save the DataFrames:

            - 'pkl' : Binary pickle files,
                      alias: 'pickle', 'bin', 'binary'
            - 'txt' : Text files (default), alias: 'text', 'csv'

    table_ext : string, optional
        File extension, default depends on table_format(.pkl for pickle,
        .txt for text)

    sep : string, optional
        Field delimiter for the output file, only for txt files.
        Default: tab ('\t')

    float_format : string, optional
        Format for saving the DataFrames, default = '%.12g',
        only for txt files
    """
    if type(path) is str:
        path = path.rstrip('\\')
        path = Path(path)

    path.mkdir(parents=True, exist_ok=True)

    para_file_path = path / DEFAULT_FILE_NAMES['filepara']
    file_para = dict()
    file_para['files'] = dict()

    if table_format in ['text', 'csv', 'txt']:
        table_format = 'txt'
    elif table_format in ['pickle', 'bin', 'binary', 'pkl']:
        table_format = 'pkl'
    else:
        raise ValueError('Unknown table format "{}" - '
                         'must be "txt" or "pkl"'.format(table_format))

    if not table_ext:
        if table_format == 'txt':
            table_ext = '.txt'
        if table_format == 'pkl':
            table_ext = '.pkl'

    if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
        file_para['systemtype'] = GENERIC_NAMES['iosys']
    elif str(type(self)) == "<class 'pymrio.core.mriosystem.Extension'>":
        file_para['systemtype'] = GENERIC_NAMES['ext']
        file_para['name'] = self.name
    else:
        logging.warning('Unknown system type {} - set to "undef"'.format(
            str(type(self))))
        file_para['systemtype'] = 'undef'

    for df, df_name in zip(self.get_DataFrame(data=True),
                           self.get_DataFrame()):
        if type(df.index) is pd.MultiIndex:
            nr_index_col = len(df.index.levels)
        else:
            nr_index_col = 1

        if type(df.columns) is pd.MultiIndex:
            nr_header = len(df.columns.levels)
        else:
            nr_header = 1

        save_file = df_name + table_ext
        save_file_with_path = path / save_file
        logging.info('Save file {}'.format(save_file_with_path))
        if table_format == 'txt':
            df.to_csv(save_file_with_path, sep=sep,
                      float_format=float_format)
        else:
            df.to_pickle(save_file_with_path)

        file_para['files'][df_name] = dict()
        file_para['files'][df_name]['name'] = save_file
        file_para['files'][df_name]['nr_index_col'] = str(nr_index_col)
        file_para['files'][df_name]['nr_header'] = str(nr_header)

    with para_file_path.open(mode='w') as pf:
        json.dump(file_para, pf, indent=4)

    if file_para['systemtype'] == GENERIC_NAMES['iosys']:
        if not self.meta:
            self.meta = MRIOMetaData(name=self.name, location=path)

        self.meta._add_fileio("Saved {} to {}".format(self.name, path))
        self.meta.save(location=path)

    return self
Saving the system to path Parameters ---------- path : pathlib.Path or string path for the saved data (will be created if necessary, data within will be overwritten). table_format : string Format to save the DataFrames: - 'pkl' : Binary pickle files, alias: 'pickle', 'bin', 'binary' - 'txt' : Text files (default), alias: 'text', 'csv' table_ext : string, optional File extension, default depends on table_format(.pkl for pickle, .txt for text) sep : string, optional Field delimiter for the output file, only for txt files. Default: tab ('\t') float_format : string, optional Format for saving the DataFrames, default = '%.12g', only for txt files
def setText(self, text):
    """
    Sets the text for this connection to the given text.

    :param      text | <str>
    """
    self._text = text

    if text:
        if self._textItem is None:
            self._textItem = QGraphicsTextItem()
            self._textItem.setParentItem(self)

        self._textItem.setPlainText(text)

    elif self._textItem:
        self.scene().removeItem(self._textItem)
        self._textItem = None
Sets the text for this connection to the given text. :param text | <str>
def _rt_parse_execs(self, statement, element, mode, lineparser): """As part of parse_line(), checks for new executable declarations in the statement.""" #This is the same deal as with _rt_parse_types() below. if mode == "insert": enew, start, end = self.xparser.parse_signature(statement, element, element) if enew is not None: enew.start, enew.end = lineparser.absolute_charindex(statement, start, end) enew.incomplete = True element.executables[enew.name.lower()] = enew lineparser.additions.append((enew, element))
As part of parse_line(), checks for new executable declarations in the statement.
async def write(self, data):
    """
    This method sends data to the IP device

    :param data: a single character to send
    :return: None
    """
    self.writer.write(bytes([ord(data)]))
    await self.writer.drain()
This method sends data to the IP device :param data: a single character to send :return: None
def by_median_home_value(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.median_home_value.name, ascending=False, returns=DEFAULT_LIMIT): """ Search zipcode information by median home value. """ return self.query( median_home_value_lower=lower, median_home_value_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns, )
Search zipcode information by median home value.
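A hedged usage sketch with the uszipcode package, where this method appears; the threshold is illustrative and the results depend on the bundled database:

from uszipcode import SearchEngine

search = SearchEngine()
for z in search.by_median_home_value(lower=500000, returns=5):
    print(z.zipcode, z.median_home_value)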
def reblog(self, blogname, **kwargs): """ Creates a reblog on the given blogname :param blogname: a string, the url of the blog you want to reblog to :param id: an int, the post id that you are reblogging :param reblog_key: a string, the reblog key of the post :param comment: a string, a comment added to the reblogged post :returns: a dict created from the JSON response """ url = "/v2/blog/{}/post/reblog".format(blogname) valid_options = ['id', 'reblog_key', 'comment'] + self._post_valid_options(kwargs.get('type', None)) if 'tags' in kwargs and kwargs['tags']: # Take a list of tags and make them acceptable for upload kwargs['tags'] = ",".join(kwargs['tags']) return self.send_api_request('post', url, kwargs, valid_options)
Creates a reblog on the given blogname :param blogname: a string, the url of the blog you want to reblog to :param id: an int, the post id that you are reblogging :param reblog_key: a string, the reblog key of the post :param comment: a string, a comment added to the reblogged post :returns: a dict created from the JSON response
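A hedged usage sketch assuming a configured pytumblr client (this method matches pytumblr's TumblrRestClient); all credentials, ids and keys are placeholders:

import pytumblr

client = pytumblr.TumblrRestClient(
    'consumer_key', 'consumer_secret', 'oauth_token', 'oauth_secret')
client.reblog('myblog.tumblr.com',
              id=125356,
              reblog_key='some_reblog_key',
              comment='Nice post!',
              tags=['reblogged', 'python'])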
def sorted(list, cmp=None, reversed=False): """ Returns a sorted copy of the list. """ list = [x for x in list] list.sort(cmp) if reversed: list.reverse() return list
Returns a sorted copy of the list.
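A minimal usage sketch (Python 2 style, since list.sort(cmp) was removed in Python 3); note this helper shadows the built-in sorted():

print(sorted([3, 1, 2]))                 # [1, 2, 3]
print(sorted([3, 1, 2], reversed=True))  # [3, 2, 1]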
def get_opts(opts): """ Validate options and apply defaults for options not supplied. :param opts: dictionary mapping str->str. :return: dictionary mapping str->Opt. All possible keys are present. """ defaults = { 'board': None, 'terrain': Opt.random, 'numbers': Opt.preset, 'ports': Opt.preset, 'pieces': Opt.preset, 'players': Opt.preset, } _opts = defaults.copy() if opts is None: opts = dict() try: for key, val in opts.copy().items(): if key == 'board': # board is a string, not a regular opt, and gets special handling # in _read_tiles_from_string continue opts[key] = Opt(val) _opts.update(opts) except Exception: raise ValueError('Invalid options={}'.format(opts)) logging.debug('used defaults=\n{}\n on opts=\n{}\nreturned total opts=\n{}'.format( pprint.pformat(defaults), pprint.pformat(opts), pprint.pformat(_opts))) return _opts
Validate options and apply defaults for options not supplied. :param opts: dictionary mapping str->str. :return: dictionary mapping str->Opt. All possible keys are present.
def is_dir_or_file(dirname): '''Checks if a path is an actual directory that exists or a file''' if not os.path.isdir(dirname) and not os.path.isfile(dirname): msg = "{0} is not a directory nor a file".format(dirname) raise argparse.ArgumentTypeError(msg) else: return dirname
Checks if a path is an actual directory that exists or a file
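The ArgumentTypeError raised above lets argparse report a clean usage error; a minimal wiring sketch, assuming the function above is in scope and '/tmp' exists:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('path', type=is_dir_or_file,
                    help='an existing file or directory')
args = parser.parse_args(['/tmp'])
print(args.path)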
def prompt(self, error=''):
    """
    Prompts the user to set the value for this item.

    :return     <bool> | success
    """
    if self.hidden:
        return True

    cmd = [self.label]
    if self.default is not None:
        cmd.append('(default: {0})'.format(self.default))
    elif not self.required:
        cmd.append('(default: )')

    if self.type == 'bool':
        cmd.append('(y/n)')

    if self.choices:
        print 'Choices:'
        for choice in self.choices:
            print choice

    if error:
        print error

    value = raw_input(' '.join(cmd) + ':')
    if value == '':
        value = self.default

    if self.type == 'bool':
        if value == 'y':
            value = True
        elif value == 'n':
            value = False
        else:
            value = self.default

    if value is None and self.required:
        return self.prompt('{0} is required.'.format(self.name))

    if self.regex and not re.match(self.regex, value):
        error = '{0} must match {1}'.format(self.name, self.regex)
        return self.prompt(error)

    self.value = value
    return True
Prompts the user to set the value for this item. :return <bool> | success
def generate(env): """Add Builders and construction variables for TeX to an Environment.""" global TeXLaTeXAction if TeXLaTeXAction is None: TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction, strfunction=TeXLaTeXStrFunction) env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes) generate_common(env) from . import dvi dvi.generate(env) bld = env['BUILDERS']['DVI'] bld.add_action('.tex', TeXLaTeXAction) bld.add_emitter('.tex', tex_eps_emitter)
Add Builders and construction variables for TeX to an Environment.